Merge branch '3.3.6' into merge/mainto3.3.6

@@ -6,12 +6,14 @@ on:
      - 'main'
      - '3.0'
      - '3.1'
      - '3.3.6'
    paths-ignore:
      - 'packaging/**'
      - 'docs/**'
      - 'tools/tdgpt/**'
      - 'source/libs/executor/src/forecastoperator.c'
      - 'source/libs/executor/src/anomalywindowoperator.c'
      - 'source/dnode/mnode/impl/src/mndAnode.c'
      - 'include/common/tanalytics.h'
      - 'source/common/src/tanalytics.c'
      - 'tests/parallel/tdgpt_cases.task'
@@ -0,0 +1,203 @@
# Run unit-test and system-test cases for TDgpt when TDgpt code is changed.

name: TDgpt Test

on:
  pull_request:
    branches:
      - 'main'
      - '3.0'
      - '3.3.6'
    paths:
      - 'tools/tdgpt/**'
      - 'source/libs/executor/src/forecastoperator.c'
      - 'source/libs/executor/src/anomalywindowoperator.c'
      - 'source/dnode/mnode/impl/src/mndAnode.c'
      - 'include/common/tanalytics.h'
      - 'source/common/src/tanalytics.c'
      - 'tests/parallel/tdgpt_cases.task'
      - 'tests/script/tsim/analytics'

jobs:
  unit-test:
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10"]

    defaults:
      run:
        working-directory: ${{ github.workspace }}/tools/tdgpt

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest pylint
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi

      - name: Checking the code with pylint
        run: |
          pylint $(git ls-files '*.py') --exit-zero

      - name: Checking the code with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

      - name: Run test cases with pytest
        run: |
          pytest

  function-test:
    runs-on:
      group: CI
      labels: [self-hosted, Linux, X64, testing]

    env:
      CONTAINER_NAME: 'taosd-test'
      WKDIR: '/var/lib/jenkins/workspace'
      WK: '/var/lib/jenkins/workspace/TDinternal'
      WKC: '/var/lib/jenkins/workspace/TDinternal/community'
      SOURCE_BRANCH: ${{ github.event.pull_request.head.ref }}
      TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
      PR_NUMBER: ${{ github.event.pull_request.number }}

    steps:
      - name: Output the environment information
        run: |
          echo "::group::Environment Info"
          date
          hostname
          env
          echo "Runner: ${{ runner.name }}"
          echo "Workspace: ${{ env.WKDIR }}"
          git --version
          echo "${{ env.WKDIR }}/restore.sh -p PR-${{ env.PR_NUMBER }} -n ${{ github.run_number }} -c ${{ env.CONTAINER_NAME }}"
          echo "::endgroup::"

      - name: Prepare repositories
        run: |
          set -euo pipefail
          prepare_environment() {
            cd "$1"
            git reset --hard
            git clean -f
            git remote prune origin
            git fetch
            git checkout "$2"
          }
          prepare_environment "${{ env.WK }}" "${{ env.TARGET_BRANCH }}"
          prepare_environment "${{ env.WKC }}" "${{ env.TARGET_BRANCH }}"

      - name: Get latest codes and logs
        run: |
          cd ${{ env.WKC }}
          git remote prune origin
          git pull >/dev/null
          git log -5
          echo "`date "+%Y%m%d-%H%M%S"` TDengineTest/${{ env.PR_NUMBER }}:${{ github.run_number }}:${{ env.TARGET_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "CHANGE_BRANCH:${{ env.SOURCE_BRANCH }}" >>${{ env.WKDIR }}/jenkins.log
          echo "community log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          git fetch origin +refs/pull/${{ env.PR_NUMBER }}/merge
          git checkout -qf FETCH_HEAD
          git log -5
          echo "community log merged: `git log -5`" >>${{ env.WKDIR }}/jenkins.log
          cd ${{ env.WK }}
          git pull >/dev/null
          git log -5
          echo "TDinternal log: `git log -5`" >>${{ env.WKDIR }}/jenkins.log

      - name: Update submodule
        run: |
          cd ${{ env.WKC }}
          git submodule update --init --recursive

      - name: Detect non-doc files changed
        run: |
          mkdir -p ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}
          cd ${{ env.WKC }}
          changed_files_non_doc=$(git --no-pager diff --name-only \
            FETCH_HEAD \
            $(git merge-base FETCH_HEAD ${{ env.TARGET_BRANCH }}) | \
            grep -v "^docs/en/" | \
            grep -v "^docs/zh/" | \
            grep -v ".md$" | \
            tr '\n' ' ' || : \
          )
          echo $changed_files_non_doc > \
            ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt

      - name: Check assert testing
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_check_assert_container.sh -d ${{ env.WKDIR }}

      - name: Check void function testing
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_check_void_container.sh -d ${{ env.WKDIR }}

      - name: Build docker container
        run: |
          date
          rm -rf ${{ env.WKC }}/debug
          cd ${{ env.WKC }}/tests/parallel_test
          time ./container_build.sh -w ${{ env.WKDIR }} -e

      - name: Get parameters for testing
        id: get_param
        run: |
          log_server_file="/home/log_server.json"
          timeout_cmd=""
          extra_param=""

          if [ -f "$log_server_file" ]; then
            log_server_enabled=$(jq '.enabled' "$log_server_file")
            timeout_param=$(jq '.timeout' "$log_server_file")
            if [ "$timeout_param" != "null" ] && [ "$timeout_param" != "0" ]; then
              timeout_cmd="timeout $timeout_param"
            fi

            if [ "$log_server_enabled" == "1" ]; then
              log_server=$(jq '.server' "$log_server_file" | sed 's/\\\"//g')
              if [ "$log_server" != "null" ] && [ "$log_server" != "" ]; then
                extra_param="-w $log_server"
              fi
            fi
          fi
          echo "timeout_cmd=$timeout_cmd" >> $GITHUB_OUTPUT
          echo "extra_param=$extra_param" >> $GITHUB_OUTPUT

      - name: Run function returns with a null pointer scan testing
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          ./run_scan_container.sh \
            -d ${{ env.WKDIR }} \
            -b ${{ env.PR_NUMBER }}_${{ github.run_number }} \
            -f ${{ env.WKDIR }}/tmp/${{ env.PR_NUMBER }}_${{ github.run_number }}/docs_changed.txt \
            ${{ steps.get_param.outputs.extra_param }}

      - name: Run tdgpt test cases
        run: |
          cd ${{ env.WKC }}/tests/parallel_test
          export DEFAULT_RETRY_TIME=2
          date
          timeout 600 time ./run.sh -e \
            -m /home/m.json \
            -t tdgpt_cases.task \
            -b "${{ env.PR_NUMBER }}_${{ github.run_number }}" \
            -l ${{ env.WKDIR }}/log \
            -o 300 ${{ steps.get_param.outputs.extra_param }}
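The "Get parameters for testing" step above derives `timeout_cmd` and `extra_param` entirely from `/home/log_server.json` on the self-hosted runner. That file's schema is not part of this diff; a minimal sketch that would satisfy the three `jq` lookups (`.enabled`, `.timeout`, `.server`), with illustrative values only:

```bash
# Hypothetical /home/log_server.json; the field names come from the jq calls
# in the step above, the values here are examples only.
cat > /home/log_server.json <<'EOF'
{
  "enabled": 1,
  "timeout": 3600,
  "server": "192.168.1.100"
}
EOF

# With that file in place the step would emit:
#   timeout_cmd=timeout 3600
#   extra_param=-w 192.168.1.100
jq '.enabled, .timeout, .server' /home/log_server.json
```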
@@ -0,0 +1,43 @@
# Scheduled updates for the TDgpt service.

name: TDgpt Update Service

on:
  schedule:
    - cron: '30 00 * * *'

env:
  WKC: "/root/TDengine"

jobs:
  update-service:
    runs-on:
      group: CI
      labels: [self-hosted, Linux, X64, tdgpt-anode-service]
    steps:
      - name: Update TDengine codes
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}
          git checkout 3.0

      - name: Package the TDGpt Anode Service
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}/tools/tdgpt/script && ./release.sh

      - name: Reinstall and restart the TDGpt Anode Service
        run: |
          set -euo pipefail
          cd ${{ env.WKC }}/tools/tdgpt/release
          if [[ -f "TDengine-enterprise-anode-1.0.1.tar.gz" ]]; then
            tar -xzf TDengine-enterprise-anode-1.0.1.tar.gz
            cd TDengine-enterprise-anode-1.0.1
            ./install.sh
          fi
          systemctl restart taosanoded

      - name: Clean up
        if: always()
        run: |
          if [[ -d ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1 ]]; then rm -rf ${{ env.WKC }}/tools/tdgpt/release/TDengine-enterprise-anode-1.0.1; fi
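The reinstall step restarts `taosanoded` unconditionally and the workflow ends there. A hedged follow-up check (not part of the workflow above) could fail the job early when the unit does not come back up:

```bash
# Sketch only: confirm the anode service survived the restart.
# 'taosanoded' is the systemd unit restarted by the step above.
systemctl restart taosanoded
if ! systemctl is-active --quiet taosanoded; then
  echo "taosanoded failed to start" >&2
  journalctl -u taosanoded -n 50 --no-pager >&2
  exit 1
fi
```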
@@ -51,7 +51,6 @@ pysim/
tests/script/api/batchprepare
taosadapter
taosadapter-debug
tools/taos-tools/*
tools/taosws-rs/*
tools/taosadapter/*
tools/upx*

@@ -133,14 +132,12 @@ tools/THANKS
tools/NEWS
tools/COPYING
tools/BUGS
tools/taos-tools
tools/taosws-rs
tags
.clangd
*CMakeCache*
*CMakeFiles*
.history/
*.txt
*.tcl
*.pc
contrib/geos

@@ -156,6 +153,9 @@ pcre2_grep_test.sh
pcre2_chartables.c
geos-config
config.h
!contrib/xml2-cmake
!contrib/xml2-cmake/linux_x86_64/include/config.h
!contrib/xml2-cmake/CMakeLists.txt
pcre2.h
zconf.h
version.h
@@ -11,36 +11,29 @@ if(NOT DEFINED TD_SOURCE_DIR)
endif()

SET(TD_COMMUNITY_DIR ${PROJECT_SOURCE_DIR})

set(TD_SUPPORT_DIR "${TD_SOURCE_DIR}/cmake")
set(TD_CONTRIB_DIR "${TD_SOURCE_DIR}/contrib")

include(${TD_SUPPORT_DIR}/cmake.platform)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.options)
include(${TD_SUPPORT_DIR}/cmake.define)
include(${TD_SUPPORT_DIR}/cmake.version)

# contrib
add_subdirectory(contrib)
include(${TD_SUPPORT_DIR}/cmake.install)
enable_testing()

set_property(GLOBAL PROPERTY GLOBAL_DEPENDS_NO_CYCLES OFF)
add_subdirectory(contrib)

# api
add_library(api INTERFACE)
target_include_directories(api INTERFACE "include/client")

# src
if(${BUILD_TEST})
  include(CTest)
  enable_testing()
  add_subdirectory(examples/c)
endif(${BUILD_TEST})

add_library(api INTERFACE)
target_include_directories(api INTERFACE "include/client")
add_subdirectory(source)
add_subdirectory(tools)
add_subdirectory(utils)
add_subdirectory(examples/c)
add_subdirectory(tests)
include(${TD_SUPPORT_DIR}/cmake.install)

# docs
add_subdirectory(docs/doxgen)
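For context, the reordered top-level CMakeLists.txt above is normally driven through an out-of-source build. A minimal sketch, assuming a Linux host and using the `BUILD_TEST` option referenced in this hunk (the `BUILD_TOOLS` flag comes from the README, not this file):

```bash
# Out-of-source configure/build against the restructured CMakeLists.txt.
mkdir -p debug && cd debug
cmake .. -DBUILD_TEST=true -DBUILD_TOOLS=true
make -j"$(nproc)"
# With BUILD_TEST enabled, CTest and enable_testing() are pulled in:
ctest --output-on-failure
```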
README-CN.md

@@ -8,30 +8,30 @@
</a>
</p>

简体中文 | [English](README.md) | [TDengine 云服务](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/careers/)
简体中文 | [English](README.md) | [TDengine 云服务](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | 很多职位正在热招中,请看 [这里](https://www.taosdata.com/careers/)

# 目录

1. [TDengine 简介](#1-tdengine-简介)
1. [文档](#2-文档)
1. [必备工具](#3-必备工具)
   - [3.1 Linux预备](#31-linux系统)
   - [3.2 macOS预备](#32-macos系统)
   - [3.3 Windows预备](#33-windows系统)
   - [3.1 Linux 预备](#31-Linux系统)
   - [3.2 macOS 预备](#32-macOS系统)
   - [3.3 Windows 预备](#3.3-Windows系统)
   - [3.4 克隆仓库](#34-克隆仓库)
1. [构建](#4-构建)
   - [4.1 Linux系统上构建](#41-linux系统上构建)
   - [4.2 macOS系统上构建](#42-macos系统上构建)
   - [4.3 Windows系统上构建](#43-windows系统上构建)
   - [4.1 Linux 系统上构建](#41-Linux系统上构建)
   - [4.2 macOS 系统上构建](#42-macOS系统上构建)
   - [4.3 Windows 系统上构建](#43-Windows系统上构建)
1. [打包](#5-打包)
1. [安装](#6-安装)
   - [6.1 Linux系统上安装](#61-linux系统上安装)
   - [6.2 macOS系统上安装](#62-macos系统上安装)
   - [6.3 Windows系统上安装](#63-windows系统上安装)
   - [6.1 Linux 系统上安装](#61-Linux系统上安装)
   - [6.2 macOS 系统上安装](#62-macOS系统上安装)
   - [6.3 Windows 系统上安装](#63-Windows系统上安装)
1. [快速运行](#7-快速运行)
   - [7.1 Linux系统上运行](#71-linux系统上运行)
   - [7.2 macOS系统上运行](#72-macos系统上运行)
   - [7.3 Windows系统上运行](#73-windows系统上运行)
   - [7.1 Linux 系统上运行](#71-Linux系统上运行)
   - [7.2 macOS 系统上运行](#72-macOS系统上运行)
   - [7.3 Windows 系统上运行](#73-Windows系统上运行)
1. [测试](#8-测试)
1. [版本发布](#9-版本发布)
1. [工作流](#10-工作流)

@@ -41,46 +41,48 @@

# 1. 简介

TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供缓存、数据订阅、流式计算等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。与其他时序数据库相比,TDengine 的主要优势如下:
TDengine 是一款开源、高性能、云原生、AI驱动的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供缓存、数据订阅、流式计算、AI智能体等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。与其他时序数据库相比,TDengine 的主要优势如下:

- **高性能**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。
- **高性能**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的 1/10。

- **云原生**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。
- **云原生**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持 k8s 部署,可运行在公有云、私有云和混合云上。

- **极简时序数据平台**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。
- **极简时序数据平台**:TDengine 内建消息队列、缓存、流式计算、AI智能体等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。

- **分析能力**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。
- **分析能力**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数以及AI Agent等技术,TDengine 具备强大的分析能力。

- **AI智能体**:内置时序数据智能体TDgpt, 无缝连接时序数据基础模型、大语言模型、机器学习、传统统计算法等,提供时序数据预测、异常检测、数据补全和数据分类的功能。

- **简单易用**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。

- **核心开源**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
- **核心开源**:TDengine 的核心代码包括集群功能全部开源,截止到 2022 年 8 月 1 日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。

了解TDengine高级功能的完整列表,请 [点击](https://tdengine.com/tdengine/)。体验TDengine最简单的方式是通过[TDengine云平台](https://cloud.tdengine.com)。
了解TDengine高级功能的完整列表,请 [点击](https://tdengine.com/tdengine/)。体验 TDengine 最简单的方式是通过 [TDengine云平台](https://cloud.tdengine.com)。对最新发布的TDengine 组件 TDgpt,请访问[TDgpt README](./tools/tdgpt/README.md) 了解细节。

# 2. 文档

关于完整的使用手册,系统架构和更多细节,请参考 [TDengine](https://www.taosdata.com/) 或者 [TDengine 官方文档](https://docs.taosdata.com)。

用户可根据需求选择通过[容器](https://docs.taosdata.com/get-started/docker/)、[安装包](https://docs.taosdata.com/get-started/package/)、[Kubernetes](https://docs.taosdata.com/deployment/k8s/)来安装或直接使用无需安装部署的[云服务](https://cloud.taosdata.com/)。本快速指南是面向想自己编译、打包、测试的开发者的。
用户可根据需求选择通过 [容器](https://docs.taosdata.com/get-started/docker/)、[安装包](https://docs.taosdata.com/get-started/package/)、[Kubernetes](https://docs.taosdata.com/deployment/k8s/) 来安装或直接使用无需安装部署的 [云服务](https://cloud.taosdata.com/)。本快速指南是面向想自己编译、打包、测试的开发者的。

如果想编译或测试TDengine连接器,请访问以下仓库: [JDBC连接器](https://github.com/taosdata/taos-connector-jdbc), [Go连接器](https://github.com/taosdata/driver-go), [Python连接器](https://github.com/taosdata/taos-connector-python), [Node.js连接器](https://github.com/taosdata/taos-connector-node), [C#连接器](https://github.com/taosdata/taos-connector-dotnet), [Rust连接器](https://github.com/taosdata/taos-connector-rust).
如果想编译或测试 TDengine 连接器,请访问以下仓库:[JDBC连接器](https://github.com/taosdata/taos-connector-jdbc)、[Go连接器](https://github.com/taosdata/driver-go)、[Python连接器](https://github.com/taosdata/taos-connector-python)、[Node.js连接器](https://github.com/taosdata/taos-connector-node)、[C#连接器](https://github.com/taosdata/taos-connector-dotnet)、[Rust连接器](https://github.com/taosdata/taos-connector-rust)。

# 3. 前置条件

TDengine 目前可以在 Linux、 Windows、macOS 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64/ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。目前不支持使用交叉编译器构建。
TDengine 目前可以在 Linux 和 macOS 平台上安装和运行 (企业版支持 Windows)。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64、ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。目前不支持使用交叉编译器构建。

如果你想要编译 taosAdapter 或者 taosKeeper,需要安装 Go 1.18 及以上版本。

## 3.1 Linux系统
## 3.1 Linux 系统

<details>

<summary>安装Linux必备工具</summary>
<summary>安装 Linux 必备工具</summary>

### Ubuntu 18.04、20.04、22.04

```bash
sudo apt-get udpate
sudo apt-get update
sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
libsnappy-dev liblzma-dev zlib1g-dev pkg-config
```

@@ -96,13 +98,13 @@ yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatom

</details>

## 3.2 macOS系统
## 3.2 macOS 系统

<details>

<summary>安装macOS必备工具</summary>
<summary>安装 macOS 必备工具</summary>

根据提示安装依赖工具 [brew](https://brew.sh/).
根据提示安装依赖工具 [brew](https://brew.sh/)

```bash
brew install argp-standalone gflags pkgconfig

@@ -110,11 +112,11 @@ brew install argp-standalone gflags pkgconfig

</details>

## 3.3 Windows系统
## 3.3 Windows 系统

<details>

<summary>安装Windows必备工具</summary>
<summary>安装 Windows 必备工具</summary>

进行中。

@@ -122,7 +124,7 @@ brew install argp-standalone gflags pkgconfig

## 3.4 克隆仓库

通过如下命令将TDengine仓库克隆到指定计算机:
通过如下命令将 TDengine 仓库克隆到指定计算机:

```bash
git clone https://github.com/taosdata/TDengine.git

@@ -131,23 +133,23 @@ cd TDengine

# 4. 构建

TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools, 您可以在编译 TDengine 时使用`cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。
TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools,您可以在编译 TDengine 时使用 `cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。

为了构建TDengine, 请使用 [CMake](https://cmake.org/) 3.13.0 或者更高版本。
为了构建 TDengine,请使用 [CMake](https://cmake.org/) 3.13.0 或者更高版本。

## 4.1 Linux系统上构建
## 4.1 Linux 系统上构建

<details>

<summary>Linux系统上构建步骤</summary>
<summary>Linux 系统上构建步骤</summary>

可以通过以下命令使用脚本 `build.sh` 编译TDengine和taosTools,包括taosBenchmark和taosdump:
可以通过以下命令使用脚本 `build.sh` 编译 TDengine 和 taosTools,包括 taosBenchmark 和 taosdump。

```bash
./build.sh
```

也可以通过以下命令进行构建:
也可以通过以下命令进行构建:

```bash
mkdir debug && cd debug

@@ -159,13 +161,13 @@ make

如果你想要编译 taosKeeper,需要添加 `-DBUILD_KEEPER=true` 选项。

可以使用Jemalloc作为内存分配器,而不是使用glibc:
可以使用 Jemalloc 作为内存分配器,而不是使用 glibc:

```bash
cmake .. -DJEMALLOC_ENABLED=true
cmake .. -DJEMALLOC_ENABLED=ON
```
TDengine构建脚本可以自动检测x86、x86-64、arm64平台上主机的体系结构。
您也可以通过CPUTYPE选项手动指定架构:
TDengine 构建脚本可以自动检测 x86、x86-64、arm64 平台上主机的体系结构。
您也可以通过 CPUTYPE 选项手动指定架构:

```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .

@@ -173,13 +175,13 @@ cmake .. -DCPUTYPE=aarch64 && cmake --build .

</details>

## 4.2 macOS系统上构建
## 4.2 macOS 系统上构建

<details>

<summary>macOS系统上构建步骤</summary>
<summary>macOS 系统上构建步骤</summary>

请安装XCode命令行工具和cmake。使用XCode 11.4+在Catalina和Big Sur上完成验证。
请安装 XCode 命令行工具和 cmake。使用 XCode 11.4+ 在 Catalina 和 Big Sur 上完成验证。

```shell
mkdir debug && cd debug

@@ -192,14 +194,14 @@ cmake .. && cmake --build .

</details>

## 4.3 Windows系统上构建
## 4.3 Windows 系统上构建

<details>

<summary>Windows系统上构建步骤</summary>
<summary>Windows 系统上构建步骤</summary>

如果您使用的是Visual Studio 2013,请执行“cmd.exe”打开命令窗口执行如下命令。
执行vcvarsall.bat时,64位的Windows请指定“amd64”,32位的Windows请指定“x86”。
如果您使用的是 Visual Studio 2013,请执行 “cmd.exe” 打开命令窗口执行如下命令。
执行 vcvarsall.bat 时,64 位的 Windows 请指定 “amd64”,32 位的 Windows 请指定 “x86”。

```cmd
mkdir debug && cd debug

@@ -208,19 +210,19 @@ cmake .. -G "NMake Makefiles"
nmake
```

如果您使用Visual Studio 2019或2017:
如果您使用 Visual Studio 2019 或 2017:

请执行“cmd.exe”打开命令窗口执行如下命令。
执行vcvarsall.bat时,64位的Windows请指定“x64”,32位的Windows请指定“x86”。
请执行 “cmd.exe” 打开命令窗口执行如下命令。
执行 vcvarsall.bat 时,64 位的 Windows 请指定 “x64”,32 位的 Windows 请指定 “x86”。

```cmd
mkdir debug && cd debug
"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```

或者,您可以通过点击Windows开始菜单打开命令窗口->“Visual Studio < 2019 | 2017 >”文件夹->“x64原生工具命令提示符VS < 2019 | 2017 >”或“x86原生工具命令提示符VS < 2019 | 2017 >”取决于你的Windows是什么架构,然后执行命令如下:
或者,您可以通过点击 Windows 开始菜单打开命令窗口 -> `Visual Studio < 2019 | 2017 >` 文件夹 -> `x64 原生工具命令提示符 VS < 2019 | 2017 >` 或 `x86 原生工具命令提示符 < 2019 | 2017 >` 取决于你的 Windows 是什么架构,然后执行命令如下:

```cmd
mkdir debug && cd debug

@@ -231,33 +233,33 @@ nmake

# 5. 打包

由于一些组件依赖关系,TDengine社区安装程序不能仅由该存储库创建。我们仍在努力改进。
由于一些组件依赖关系,TDengine 社区安装程序不能仅由该存储库创建。我们仍在努力改进。

# 6. 安装

## 6.1 Linux系统上安装
## 6.1 Linux 系统上安装

<details>

<summary>Linux系统上安装详细步骤</summary>
<summary>Linux 系统上安装详细步骤</summary>

构建成功后,TDengine可以通过以下命令进行安装:
构建成功后,TDengine 可以通过以下命令进行安装:

```bash
sudo make install
```
从源代码安装还将为TDengine配置服务管理。用户也可以使用[TDengine安装包](https://docs.taosdata.com/get-started/package/)进行安装。
从源代码安装还将为 TDengine 配置服务管理。用户也可以使用 [TDengine安装包](https://docs.taosdata.com/get-started/package/)进行安装。

</details>

## 6.2 macOS系统上安装
## 6.2 macOS 系统上安装

<details>

<summary>macOS系统上安装详细步骤</summary>
<summary>macOS 系统上安装详细步骤</summary>

构建成功后,TDengine可以通过以下命令进行安装:
构建成功后,TDengine可以通过以下命令进行安装:

```bash
sudo make install

@@ -265,13 +267,13 @@ sudo make install

</details>

## 6.3 Windows系统上安装
## 6.3 Windows 系统上安装

<details>

<summary>Windows系统上安装详细步骤</summary>
<summary>Windows 系统上安装详细步骤</summary>

构建成功后,TDengine可以通过以下命令进行安装:
构建成功后,TDengine 可以通过以下命令进行安装:

```cmd
nmake install

@@ -281,32 +283,32 @@ nmake install

# 7. 快速运行

## 7.1 Linux系统上运行
## 7.1 Linux 系统上运行

<details>

<summary>Linux系统上运行详细步骤</summary>
<summary>Linux 系统上运行详细步骤</summary>

在Linux系统上安装TDengine完成后,在终端运行如下命令启动服务:
在Linux 系统上安装 TDengine 完成后,在终端运行如下命令启动服务:

```bash
sudo systemctl start taosd
```
然后用户可以通过如下命令使用TDengine命令行连接TDengine服务:
然后用户可以通过如下命令使用 TDengine 命令行连接 TDengine 服务:

```bash
taos
```

如果TDengine 命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示连接错误信息。
如果 TDengine 命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示连接错误信息。

如果您不想将TDengine作为服务运行,您可以在当前终端中运行它。例如,要在构建完成后快速启动TDengine服务器,在终端中运行以下命令:(我们以Linux为例,Windows上的命令为 `taosd.exe`)
如果您不想将 TDengine 作为服务运行,您可以在当前终端中运行它。例如,要在构建完成后快速启动 TDengine 服务器,在终端中运行以下命令:(以 Linux 为例,Windows 上的命令为 `taosd.exe`)

```bash
./build/bin/taosd -c test/cfg
```

在另一个终端上,使用TDengine命令行连接服务器:
在另一个终端上,使用 TDengine 命令行连接服务器:

```bash
./build/bin/taos -c test/cfg

@@ -316,42 +318,42 @@ taos

</details>

## 7.2 macOS系统上运行
## 7.2 macOS 系统上运行

<details>

<summary>macOS系统上运行详细步骤</summary>
<summary>macOS 系统上运行详细步骤</summary>

在macOS上安装完成后启动服务,双击/applications/TDengine启动程序,或者在终端中执行如下命令:
在 macOS 上安装完成后启动服务,双击 `/applications/TDengine` 启动程序,或者在终端中执行如下命令:

```bash
sudo launchctl start com.tdengine.taosd
```

然后在终端中使用如下命令通过TDengine命令行连接TDengine服务器:
然后在终端中使用如下命令通过 TDengine 命令行连接 TDengine 服务器:

```bash
taos
```

如果TDengine命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示错误信息。
如果 TDengine 命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示错误信息。

</details>

## 7.3 Windows系统上运行
## 7.3 Windows 系统上运行

<details>

<summary>Windows系统上运行详细步骤</summary>
<summary>Windows 系统上运行详细步骤</summary>

您可以使用以下命令在Windows平台上启动TDengine服务器:
您可以使用以下命令在 Windows 平台上启动 TDengine 服务器:

```cmd
.\build\bin\taosd.exe -c test\cfg
```

在另一个终端上,使用TDengine命令行连接服务器:
在另一个终端上,使用 TDengine 命令行连接服务器:

```cmd
.\build\bin\taos.exe -c test\cfg

@@ -363,25 +365,25 @@ taos

# 8. 测试

有关如何在TDengine上运行不同类型的测试,请参考 [TDengine测试](./tests/README-CN.md)
有关如何在 TDengine 上运行不同类型的测试,请参考 [TDengine测试](./tests/README-CN.md)

# 9. 版本发布

TDengine发布版本的完整列表,请参考 [版本列表](https://github.com/taosdata/TDengine/releases)
TDengine 发布版本的完整列表,请参考 [版本列表](https://github.com/taosdata/TDengine/releases)

# 10. 工作流

TDengine构建检查工作流可以在参考 [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml), 更多的工作流正在创建中,将很快可用。
TDengine 构建检查工作流可以在参考 [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml),更多的工作流正在创建中,将很快可用。

# 11. 覆盖率

最新的TDengine测试覆盖率报告可参考 [coveralls.io](https://coveralls.io/github/taosdata/TDengine)
最新的 TDengine 测试覆盖率报告可参考 [coveralls.io](https://coveralls.io/github/taosdata/TDengine)

<details>

<summary>如何在本地运行测试覆盖率报告?</summary>

在本地创建测试覆盖率报告(HTML格式),请运行以下命令:
在本地创建测试覆盖率报告(HTML 格式),请运行以下命令:

```bash
cd tests

@@ -389,8 +391,8 @@ bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task
# on main branch and run cases in longtimeruning_cases.task
# for more infomation about options please refer to ./run_local_coverage.sh -h
```
> **注意:**
> 请注意,-b和-i选项将使用-DCOVER=true选项重新编译TDengine,这可能需要花费一些时间。
> **注意**:
> 请注意,-b 和 -i 选项将使用 -DCOVER=true 选项重新编译 TDengine,这可能需要花费一些时间。

</details>
README.md

@@ -54,21 +54,23 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine

# 1. Introduction

TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:
TDengine is an open source, high-performance, cloud native and AI powered [time-series database](https://tdengine.com/tsdb/) designed for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and analysis of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:

- **[High Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.

- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing, data subscription and AI agent features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.

- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.

- **[AI Powered](https://tdengine.com/tdengine/tdgpt/)**: Through the built-in AI agent TDgpt, TDengine can connect to a variety of time-series foundation models, large language models, machine learning and traditional algorithms to provide time-series data forecasting, anomaly detection, imputation and classification.

- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.

- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and AI agent, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.

- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 19.9k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature and AI agent, are all available under open source licenses. It has gathered 23.7k stars on GitHub. There is an active developer community, and over 730k running instances worldwide.

For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com). For the latest TDengine component TDgpt, please refer to [TDgpt README](./tools/tdgpt/README.md) for details.

# 2. Documentation

@@ -173,7 +175,7 @@ If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` opt
You can use Jemalloc as memory allocator instead of glibc:

```bash
cmake .. -DJEMALLOC_ENABLED=true
cmake .. -DJEMALLOC_ENABLED=ON
```

TDengine build script can auto-detect the host machine's architecture on x86, x86-64, arm64 platform.
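Both READMEs now pass `-DJEMALLOC_ENABLED=ON` instead of `=true`; CMake treats either spelling as truthy, so this is a consistency fix rather than a behavior change. A quick, illustrative way to confirm the resulting binary actually links jemalloc (the binary path is taken from the run instructions above):

```bash
mkdir -p debug && cd debug
cmake .. -DJEMALLOC_ENABLED=ON
make -j"$(nproc)"
# cmake.define adds -ljemalloc and an rpath when the option is on, so the
# library should appear among the dynamic dependencies:
ldd build/bin/taosd | grep -i jemalloc || echo "jemalloc not linked"
```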
@@ -2,6 +2,7 @@
ExternalProject_Add(azure
URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz
URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9
DEPENDS xml2
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1"
@@ -116,9 +116,6 @@ ELSE()
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz")
ENDIF()

# force set all platform to JEMALLOC_ENABLED = false
# SET(JEMALLOC_ENABLED OFF)

IF(TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")

@@ -259,10 +256,16 @@ ELSE()
ENDIF()
ENDIF()

IF(${JEMALLOC_ENABLED})
MESSAGE(STATUS "JEMALLOC_ENABLED Enabled")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=attributes")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=attributes")
ELSE()
MESSAGE(STATUS "JEMALLOC_ENABLED Disabled")

IF(TD_LINUX_64)
IF(${JEMALLOC_ENABLED})
MESSAGE(STATUS "JEMALLOC Enabled")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=attributes")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=attributes")
SET(LINK_JEMALLOC "-L${CMAKE_BINARY_DIR}/build/lib -ljemalloc")
ADD_DEFINITIONS(-DTD_JEMALLOC_ENABLED -I${CMAKE_BINARY_DIR}/build/include -L${CMAKE_BINARY_DIR}/build/lib -Wl,-rpath,${CMAKE_BINARY_DIR}/build/lib)
ELSE()
MESSAGE(STATUS "JEMALLOC Disabled")
SET(LINK_JEMALLOC "")
ENDIF()
ENDIF()
@@ -9,61 +9,61 @@ option(
)

IF(${TD_WINDOWS})
IF(NOT TD_ASTRA)
MESSAGE("build pthread Win32")
option(
BUILD_PTHREAD
"If build pthread on Windows"
ON
)

MESSAGE("build pthread Win32")
option(
BUILD_PTHREAD
"If build pthread on Windows"
ON
)
MESSAGE("build gnu regex for Windows")
option(
BUILD_GNUREGEX
"If build gnu regex on Windows"
ON
)

MESSAGE("build gnu regex for Windows")
option(
BUILD_GNUREGEX
"If build gnu regex on Windows"
ON
)
MESSAGE("build iconv Win32")
option(
BUILD_WITH_ICONV
"If build iconv on Windows"
ON
)

MESSAGE("build iconv Win32")
option(
BUILD_WITH_ICONV
"If build iconv on Windows"
ON
)
MESSAGE("build msvcregex Win32")
option(
BUILD_MSVCREGEX
"If build msvcregex on Windows"
ON
)

MESSAGE("build msvcregex Win32")
option(
BUILD_MSVCREGEX
"If build msvcregex on Windows"
ON
)
MESSAGE("build wcwidth Win32")
option(
BUILD_WCWIDTH
"If build wcwidth on Windows"
ON
)

MESSAGE("build wcwidth Win32")
option(
BUILD_WCWIDTH
"If build wcwidth on Windows"
ON
)
MESSAGE("build wingetopt Win32")
option(
BUILD_WINGETOPT
"If build wingetopt on Windows"
ON
)

MESSAGE("build wingetopt Win32")
option(
BUILD_WINGETOPT
"If build wingetopt on Windows"
ON
)

option(
TDENGINE_3
"TDengine 3.x for taos-tools"
ON
)

option(
BUILD_CRASHDUMP
"If build crashdump on Windows"
ON
)
option(
TDENGINE_3
"TDengine 3.x for taos-tools"
ON
)

option(
BUILD_CRASHDUMP
"If build crashdump on Windows"
ON
)
ENDIF ()
ELSEIF (TD_DARWIN_64)
IF(${BUILD_TEST})
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
@@ -71,58 +71,102 @@ ELSEIF (TD_DARWIN_64)
ENDIF ()

option(
BUILD_GEOS
"If build with geos"
BUILD_WITH_LEMON
"If build with lemon"
ON
)

option(
BUILD_WITH_UDF
"If build with UDF"
ON
)

IF(NOT TD_ASTRA)
option(
BUILD_GEOS
"If build with geos"
ON
)

option(
BUILD_SHARED_LIBS
""
OFF
)

option(
RUST_BINDINGS
"If build with rust-bindings"
ON
)

option(
BUILD_PCRE2
"If build with pcre2"
ON
)

option(
BUILD_SHARED_LIBS
""
OFF
option(
JEMALLOC_ENABLED
"If build with jemalloc"
OFF
)

option(
BUILD_SANITIZER
"If build sanitizer"
OFF
)

option(
BUILD_ADDR2LINE
"If build addr2line"
OFF
)

option(
BUILD_WITH_LEVELDB
"If build with leveldb"
OFF
)

option(
RUST_BINDINGS
"If build with rust-bindings"
ON
option(
BUILD_WITH_ROCKSDB
"If build with rocksdb"
ON
)

option(
BUILD_PCRE2
"If build with pcre2"
ON
)
option(
BUILD_WITH_LZ4
"If build with lz4"
ON
)
ELSE ()

option(
JEMALLOC_ENABLED
"If build with jemalloc"
OFF
option(
BUILD_WITH_LZMA2
"If build with lzma2"
ON
)

option(
BUILD_SANITIZER
"If build sanitizer"
OFF
)
ENDIF ()

option(
BUILD_ADDR2LINE
"If build addr2line"
OFF
)
ADD_DEFINITIONS(-DUSE_AUDIT)
ADD_DEFINITIONS(-DUSE_GEOS)
ADD_DEFINITIONS(-DUSE_UDF)
ADD_DEFINITIONS(-DUSE_STREAM)
ADD_DEFINITIONS(-DUSE_PRCE2)
ADD_DEFINITIONS(-DUSE_RSMA)
ADD_DEFINITIONS(-DUSE_TSMA)
ADD_DEFINITIONS(-DUSE_TQ)
ADD_DEFINITIONS(-DUSE_TOPIC)
ADD_DEFINITIONS(-DUSE_MONITOR)
ADD_DEFINITIONS(-DUSE_REPORT)

option(
BUILD_WITH_LEVELDB
"If build with leveldb"
OFF
)

option(
BUILD_WITH_ROCKSDB
"If build with rocksdb"
ON
)
IF(${TD_ASTRA_RPC})
ADD_DEFINITIONS(-DTD_ASTRA_RPC)
ENDIF()

IF(${TD_LINUX})

@@ -150,6 +194,12 @@ option(
ON
)

option(
BUILD_WITH_LZMA2
"If build with lzma2"
ON
)

ENDIF ()

IF(NOT TD_ENTERPRISE)

@@ -191,6 +241,14 @@ option(BUILD_WITH_COS "If build with cos" OFF)

ENDIF ()

IF(${TAOSD_INTEGRATED})
add_definitions(-DTAOSD_INTEGRATED)
ENDIF()

IF(${TD_AS_LIB})
add_definitions(-DTD_AS_LIB)
ENDIF()

option(
BUILD_WITH_SQLITE
"If build with sqlite"

@@ -209,6 +267,14 @@ option(
off
)

option(
BUILD_WITH_NURAFT
"If build with NuRaft"
OFF
)

IF(NOT TD_ASTRA)

option(
BUILD_WITH_UV
"If build with libuv"

@@ -242,6 +308,7 @@ option(
"If use invertedIndex"
ON
)
ENDIF ()

option(
BUILD_RELEASE
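All of the `option()` declarations above only establish defaults; any of them can be flipped at configure time without editing cmake.options. An illustrative invocation using names that appear in this diff (the chosen values are examples, not recommendations):

```bash
# Override cmake.options defaults from the command line.
cmake .. \
  -DBUILD_WITH_LEMON=ON \
  -DBUILD_WITH_ROCKSDB=ON \
  -DBUILD_WITH_LZ4=OFF \
  -DJEMALLOC_ENABLED=OFF
```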
@@ -12,7 +12,7 @@ ExternalProject_Add(curl2
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
UPDATE_COMMAND ""
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl --without-librtmp #--enable-debug
BUILD_COMMAND make -j
INSTALL_COMMAND make install
TEST_COMMAND ""

@@ -1,7 +1,6 @@

# xz

if (${TD_LINUX})
if (${BUILD_WITH_LZMA2})
ExternalProject_Add(lzma2
GIT_REPOSITORY https://github.com/conor42/fast-lzma2.git
SOURCE_DIR "${TD_CONTRIB_DIR}/lzma2"

@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG main
GIT_TAG 3.3.6
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

@@ -1,19 +1,16 @@

# xml2
ExternalProject_Add(xml2
URL https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.5.tar.xz
URL_HASH SHA256=3727b078c360ec69fa869de14bd6f75d7ee8d36987b071e6928d4720a28df3a6
#https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz
#GIT_REPOSITORY https://github.com/GNOME/libxml2
#GIT_TAG v2.11.5
URL https://github.com/GNOME/libxml2/archive/refs/tags/v2.10.4.tar.gz
URL_HASH SHA256=6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
SOURCE_DIR "${TD_CONTRIB_DIR}/xml2"
SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma
BUILD_COMMAND make -j
INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
GIT_SHALLOW true
)
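The xml2 external project now pins a GitHub tarball of libxml2 v2.10.4 by SHA256 and leaves the configure/build/install steps empty, deferring the actual build to the new xml2-cmake wrapper. The pin can be checked out of band with the URL and URL_HASH from the hunk above:

```bash
# Verify the pinned libxml2 tarball independently of CMake.
curl -fsSLO https://github.com/GNOME/libxml2/archive/refs/tags/v2.10.4.tar.gz
echo "6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780  v2.10.4.tar.gz" \
  | sha256sum -c -
```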
@@ -92,7 +92,9 @@ if(${BUILD_TEST})
endif(${BUILD_TEST})

# lz4
cat("${TD_SUPPORT_DIR}/lz4_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
if(${BUILD_WITH_LZ4})
  cat("${TD_SUPPORT_DIR}/lz4_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_LZ4})

# zlib
cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})

@@ -159,7 +161,6 @@ elseif(${BUILD_WITH_COS})
  # cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
  # cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
  # cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
  # cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
  cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
  add_definitions(-DUSE_COS)
endif()

@@ -187,16 +188,22 @@ if(${BUILD_PCRE2})
  cat("${TD_SUPPORT_DIR}/pcre2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

find_program(C_COMPILER_LEMON NAMES gcc)
if(C_COMPILER_LEMON)
  message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
else()
  set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
  message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
endif()

# lemon
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
if(${BUILD_WITH_LEMON})
  if(${TD_ACORE})
    set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
  else()
    find_program(C_COMPILER_LEMON NAMES gcc)
  endif()
  if(C_COMPILER_LEMON)
    message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
  else()
    set(C_COMPILER_LEMON ${CMAKE_C_COMPILER})
    message(STATUS "LEMON C compiler: ${C_COMPILER_LEMON}")
  endif()

  cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

# Force specify CC=cc on MacOS. Because the default CC setting in the generated Makefile has issues finding standard library headers
IF(${TD_DARWIN})

@@ -274,11 +281,13 @@ unset(CMAKE_PROJECT_INCLUDE_BEFORE)
# endif()

# lz4
add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
target_include_directories(
  lz4_static
  PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/lz4/lib
)
if(${BUILD_WITH_LZ4})
  add_subdirectory(lz4/build/cmake EXCLUDE_FROM_ALL)
  target_include_directories(
    lz4_static
    PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/lz4/lib
  )
endif(${BUILD_WITH_LZ4})

# zlib
set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in")

@@ -665,7 +674,12 @@ if(${BUILD_PCRE2})
endif(${BUILD_PCRE2})

if(${TD_LINUX} AND ${BUILD_WITH_S3})
  add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
  set(ORIG_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
  string(REPLACE " -Werror " " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
  add_subdirectory(xml2-cmake)
  set(CMAKE_C_FLAGS ${ORIG_CMAKE_C_FLAGS})

  add_subdirectory(azure-cmake)
endif()

IF(TD_LINUX)
|
@ -36,10 +36,6 @@ target_include_directories(
|
|||
)
|
||||
|
||||
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
|
||||
# find_library(CURL_LIBRARY curl)
|
||||
# find_library(XML2_LIBRARY xml2)
|
||||
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
|
||||
|
@ -50,9 +46,8 @@ target_link_libraries(
|
|||
PRIVATE ${CURL_LIBRARY}
|
||||
PRIVATE ${SSL_LIBRARY}
|
||||
PRIVATE ${CRYPTO_LIBRARY}
|
||||
PRIVATE ${XML2_LIBRARY}
|
||||
|
||||
# PRIVATE xml2
|
||||
PRIVATE _libxml2
|
||||
PRIVATE zlib
|
||||
|
||||
# PRIVATE ${CoreFoundation_Library}
|
||||
|
|
|
@ -20,9 +20,9 @@ if(${BUILD_WITH_SQLITE})
|
|||
add_subdirectory(sqlite)
|
||||
endif(${BUILD_WITH_SQLITE})
|
||||
|
||||
if(${BUILD_S3})
|
||||
add_subdirectory(azure)
|
||||
endif()
|
||||
# if(${BUILD_S3})
|
||||
# add_subdirectory(azure)
|
||||
# endif()
|
||||
|
||||
add_subdirectory(tdev)
|
||||
add_subdirectory(lz4)
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2")
|
||||
|
||||
set(SRCS
|
||||
"${LIBXML2_SOURCE_DIR}/SAX.c"
|
||||
"${LIBXML2_SOURCE_DIR}/entities.c"
|
||||
"${LIBXML2_SOURCE_DIR}/encoding.c"
|
||||
"${LIBXML2_SOURCE_DIR}/error.c"
|
||||
"${LIBXML2_SOURCE_DIR}/parserInternals.c"
|
||||
"${LIBXML2_SOURCE_DIR}/parser.c"
|
||||
"${LIBXML2_SOURCE_DIR}/tree.c"
|
||||
"${LIBXML2_SOURCE_DIR}/hash.c"
|
||||
"${LIBXML2_SOURCE_DIR}/list.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlIO.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlmemory.c"
|
||||
"${LIBXML2_SOURCE_DIR}/uri.c"
|
||||
"${LIBXML2_SOURCE_DIR}/valid.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xlink.c"
|
||||
"${LIBXML2_SOURCE_DIR}/HTMLparser.c"
|
||||
"${LIBXML2_SOURCE_DIR}/HTMLtree.c"
|
||||
"${LIBXML2_SOURCE_DIR}/debugXML.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xpath.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xpointer.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xinclude.c"
|
||||
"${LIBXML2_SOURCE_DIR}/nanohttp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/nanoftp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/catalog.c"
|
||||
"${LIBXML2_SOURCE_DIR}/globals.c"
|
||||
"${LIBXML2_SOURCE_DIR}/threads.c"
|
||||
"${LIBXML2_SOURCE_DIR}/c14n.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlstring.c"
|
||||
"${LIBXML2_SOURCE_DIR}/buf.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlregexp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlschemas.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlschemastypes.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlunicode.c"
|
||||
"${LIBXML2_SOURCE_DIR}/triostr.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlreader.c"
|
||||
"${LIBXML2_SOURCE_DIR}/relaxng.c"
|
||||
"${LIBXML2_SOURCE_DIR}/dict.c"
|
||||
"${LIBXML2_SOURCE_DIR}/SAX2.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlwriter.c"
|
||||
"${LIBXML2_SOURCE_DIR}/legacy.c"
|
||||
"${LIBXML2_SOURCE_DIR}/chvalid.c"
|
||||
"${LIBXML2_SOURCE_DIR}/pattern.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlsave.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlmodule.c"
|
||||
"${LIBXML2_SOURCE_DIR}/schematron.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xzlib.c"
|
||||
)
|
||||
add_library(_libxml2 ${SRCS})
|
||||
|
||||
#target_link_libraries(_libxml2 PRIVATE td_contrib::zlib)
|
||||
target_link_libraries(_libxml2 PRIVATE zlib)
|
||||
|
||||
target_include_directories(_libxml2 BEFORE PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include")
|
||||
target_include_directories(_libxml2 BEFORE PUBLIC "${LIBXML2_SOURCE_DIR}/include")
|
||||
|
||||
add_library(td_contrib::libxml2 ALIAS _libxml2)
|
|
@@ -0,0 +1,285 @@
/* config.h. Generated from config.h.in by configure. */
/* config.h.in. Generated from configure.ac by autoheader. */

/* Type cast for the gethostbyname() argument */
#define GETHOSTBYNAME_ARG_CAST /**/

/* Define to 1 if you have the <arpa/inet.h> header file. */
#define HAVE_ARPA_INET_H 1

/* Define to 1 if you have the <arpa/nameser.h> header file. */
#define HAVE_ARPA_NAMESER_H 1

/* Whether struct sockaddr::__ss_family exists */
/* #undef HAVE_BROKEN_SS_FAMILY */

/* Define to 1 if you have the <ctype.h> header file. */
#define HAVE_CTYPE_H 1

/* Define to 1 if you have the <dirent.h> header file. */
#define HAVE_DIRENT_H 1

/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1

/* Have dlopen based dso */
#define HAVE_DLOPEN /**/

/* Define to 1 if you have the <dl.h> header file. */
/* #undef HAVE_DL_H */

/* Define to 1 if you have the <errno.h> header file. */
#define HAVE_ERRNO_H 1

/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1

/* Define to 1 if you have the <float.h> header file. */
#define HAVE_FLOAT_H 1

/* Define to 1 if you have the `fprintf' function. */
#define HAVE_FPRINTF 1

/* Define to 1 if you have the `ftime' function. */
#define HAVE_FTIME 1

/* Define if getaddrinfo is there */
#define HAVE_GETADDRINFO /**/

/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1

/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1

/* Define to 1 if you have the `isascii' function. */
#define HAVE_ISASCII 1

/* Define if isinf is there */
#define HAVE_ISINF /**/

/* Define if isnan is there */
#define HAVE_ISNAN /**/

/* Define if history library is there (-lhistory) */
/* #undef HAVE_LIBHISTORY */

/* Define if pthread library is there (-lpthread) */
#define HAVE_LIBPTHREAD /**/

/* Define if readline library is there (-lreadline) */
/* #undef HAVE_LIBREADLINE */

/* Define to 1 if you have the <limits.h> header file. */
#define HAVE_LIMITS_H 1

/* Define to 1 if you have the `localtime' function. */
#define HAVE_LOCALTIME 1

/* Define to 1 if you have the <lzma.h> header file. */
/* #undef HAVE_LZMA_H */

/* Define to 1 if you have the <malloc.h> header file. */
#define HAVE_MALLOC_H 1

/* Define to 1 if you have the <math.h> header file. */
#define HAVE_MATH_H 1

/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1

/* Define to 1 if you have the `mmap' function. */
#define HAVE_MMAP 1

/* Define to 1 if you have the `munmap' function. */
#define HAVE_MUNMAP 1

/* mmap() is no good without munmap() */
#if defined(HAVE_MMAP) && !defined(HAVE_MUNMAP)
# undef /**/ HAVE_MMAP
#endif

/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
/* #undef HAVE_NDIR_H */

/* Define to 1 if you have the <netdb.h> header file. */
#define HAVE_NETDB_H 1

/* Define to 1 if you have the <netinet/in.h> header file. */
#define HAVE_NETINET_IN_H 1

/* Define to 1 if you have the <poll.h> header file. */
#define HAVE_POLL_H 1

/* Define to 1 if you have the `printf' function. */
#define HAVE_PRINTF 1

/* Define if <pthread.h> is there */
#define HAVE_PTHREAD_H /**/

/* Define to 1 if you have the `putenv' function. */
#define HAVE_PUTENV 1

/* Define to 1 if you have the `rand' function. */
#define HAVE_RAND 1

/* Define to 1 if you have the `rand_r' function. */
#define HAVE_RAND_R 1

/* Define to 1 if you have the <resolv.h> header file. */
#define HAVE_RESOLV_H 1

/* Have shl_load based dso */
/* #undef HAVE_SHLLOAD */

/* Define to 1 if you have the `signal' function. */
#define HAVE_SIGNAL 1

/* Define to 1 if you have the <signal.h> header file. */
#define HAVE_SIGNAL_H 1

/* Define to 1 if you have the `snprintf' function. */
#define HAVE_SNPRINTF 1

/* Define to 1 if you have the `sprintf' function. */
#define HAVE_SPRINTF 1

/* Define to 1 if you have the `srand' function. */
#define HAVE_SRAND 1

/* Define to 1 if you have the `sscanf' function. */
#define HAVE_SSCANF 1

/* Define to 1 if you have the `stat' function. */
#define HAVE_STAT 1

/* Define to 1 if you have the <stdarg.h> header file. */
#define HAVE_STDARG_H 1

/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1

/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1

/* Define to 1 if you have the `strftime' function. */
#define HAVE_STRFTIME 1

/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1

/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1

/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
   */
/* #undef HAVE_SYS_DIR_H */

/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1

/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
   */
/* #undef HAVE_SYS_NDIR_H */

/* Define to 1 if you have the <sys/select.h> header file. */
#define HAVE_SYS_SELECT_H 1

/* Define to 1 if you have the <sys/socket.h> header file. */
#define HAVE_SYS_SOCKET_H 1

/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1

/* Define to 1 if you have the <sys/timeb.h> header file. */
#define HAVE_SYS_TIMEB_H 1

/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1

/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1

/* Define to 1 if you have the `time' function. */
#define HAVE_TIME 1

/* Define to 1 if you have the <time.h> header file. */
#define HAVE_TIME_H 1

/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1

/* Whether va_copy() is available */
#define HAVE_VA_COPY 1

/* Define to 1 if you have the `vfprintf' function. */
#define HAVE_VFPRINTF 1

/* Define to 1 if you have the `vsnprintf' function. */
#define HAVE_VSNPRINTF 1

/* Define to 1 if you have the `vsprintf' function. */
#define HAVE_VSPRINTF 1

/* Define to 1 if you have the <zlib.h> header file. */
/* #undef HAVE_ZLIB_H */

/* Whether __va_copy() is available */
/* #undef HAVE___VA_COPY */

/* Define as const if the declaration of iconv() needs const. */
#define ICONV_CONST

/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"

/* Name of package */
#define PACKAGE "libxml2"

/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""

/* Define to the full name of this package. */
#define PACKAGE_NAME ""

/* Define to the full name and version of this package. */
#define PACKAGE_STRING ""

/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME ""

/* Define to the home page for this package. */
#define PACKAGE_URL ""

/* Define to the version of this package. */
#define PACKAGE_VERSION ""

/* Type cast for the send() function 2nd arg */
#define SEND_ARG2_CAST /**/

/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1

/* Support for IPv6 */
#define SUPPORT_IP6 /**/

/* Define if va_list is an array type */
#define VA_LIST_IS_ARRAY 1

/* Version number of package */
#define VERSION "2.9.8"

/* Determine what socket length (socklen_t) data type is */
#define XML_SOCKLEN_T socklen_t

/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
   <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
   #define below would cause a syntax error. */
/* #undef _UINT32_T */

/* ss_family is not defined here, use __ss_family instead */
/* #undef ss_family */

/* Define to the type of an unsigned integer type of width exactly 32 bits if
   such a type exists and the standard includes do not define it. */
/* #undef uint32_t */
@ -0,0 +1,501 @@
|
|||
/*
|
||||
* Summary: compile-time version information
|
||||
* Description: compile-time version information for the XML library
|
||||
*
|
||||
* Copy: See Copyright for the status of this software.
|
||||
*
|
||||
* Author: Daniel Veillard
|
||||
*/
|
||||
|
||||
#ifndef __XML_VERSION_H__
|
||||
#define __XML_VERSION_H__
|
||||
|
||||
#include <libxml/xmlexports.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* use those to be sure nothing nasty will happen if
|
||||
* your library and includes mismatch
|
||||
*/
|
||||
#ifndef LIBXML2_COMPILING_MSCCDEF
|
||||
XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
|
||||
#endif /* LIBXML2_COMPILING_MSCCDEF */
|
||||
|
||||
/**
|
||||
* LIBXML_DOTTED_VERSION:
|
||||
*
|
||||
* the version string like "1.2.3"
|
||||
*/
|
||||
#define LIBXML_DOTTED_VERSION "2.10.3"
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION:
|
||||
*
|
||||
* the version number: 1.2.3 value is 10203
|
||||
*/
|
||||
#define LIBXML_VERSION 21003
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION_STRING:
|
||||
*
|
||||
* the version number string, 1.2.3 value is "10203"
|
||||
*/
|
||||
#define LIBXML_VERSION_STRING "21003"
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION_EXTRA:
|
||||
*
|
||||
* extra version information, used to show a git commit description
|
||||
*/
|
||||
#define LIBXML_VERSION_EXTRA ""
|
||||
|
||||
/**
|
||||
* LIBXML_TEST_VERSION:
|
||||
*
|
||||
* Macro to check that the libxml version in use is compatible with
|
||||
* the version the software has been compiled against
|
||||
*/
|
||||
#define LIBXML_TEST_VERSION xmlCheckVersion(21003);
|
||||
|
||||
#ifndef VMS
|
||||
#if 0
|
||||
/**
|
||||
* WITH_TRIO:
|
||||
*
|
||||
* defined if the trio support need to be configured in
|
||||
*/
|
||||
#define WITH_TRIO
|
||||
#else
|
||||
/**
|
||||
* WITHOUT_TRIO:
|
||||
*
|
||||
* defined if the trio support should not be configured in
|
||||
*/
|
||||
#define WITHOUT_TRIO
|
||||
#endif
|
||||
#else /* VMS */
|
||||
/**
|
||||
* WITH_TRIO:
|
||||
*
|
||||
* defined if the trio support need to be configured in
|
||||
*/
|
||||
#define WITH_TRIO 1
|
||||
#endif /* VMS */
|
||||
|
||||
/**
|
||||
* LIBXML_THREAD_ENABLED:
|
||||
*
|
||||
* Whether the thread support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_THREAD_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_THREAD_ALLOC_ENABLED:
|
||||
*
|
||||
* Whether the allocation hooks are per-thread
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_THREAD_ALLOC_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_TREE_ENABLED:
|
||||
*
|
||||
* Whether the DOM like tree manipulation API support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_TREE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_OUTPUT_ENABLED:
|
||||
*
|
||||
* Whether the serialization/saving support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_OUTPUT_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_PUSH_ENABLED:
|
||||
*
|
||||
* Whether the push parsing interfaces are configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_PUSH_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_READER_ENABLED:
|
||||
*
|
||||
* Whether the xmlReader parsing interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_READER_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_PATTERN_ENABLED:
|
||||
*
|
||||
* Whether the xmlPattern node selection interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_PATTERN_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_WRITER_ENABLED:
|
||||
*
|
||||
* Whether the xmlWriter saving interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_WRITER_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SAX1_ENABLED:
|
||||
*
|
||||
* Whether the older SAX1 interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SAX1_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_FTP_ENABLED:
|
||||
*
|
||||
* Whether the FTP support is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_FTP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_HTTP_ENABLED:
|
||||
*
|
||||
* Whether the HTTP support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_HTTP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_VALID_ENABLED:
|
||||
*
|
||||
* Whether the DTD validation support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_VALID_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_HTML_ENABLED:
|
||||
*
|
||||
* Whether the HTML support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_HTML_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_LEGACY_ENABLED:
|
||||
*
|
||||
* Whether the deprecated APIs are compiled in for compatibility
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_LEGACY_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_C14N_ENABLED:
|
||||
*
|
||||
* Whether the Canonicalization support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_C14N_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_CATALOG_ENABLED:
|
||||
*
|
||||
* Whether the Catalog support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_CATALOG_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPATH_ENABLED:
|
||||
*
|
||||
* Whether XPath is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XPATH_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPTR_ENABLED:
|
||||
*
|
||||
* Whether XPointer is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XPTR_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPTR_LOCS_ENABLED:
|
||||
*
|
||||
* Whether support for XPointer locations is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_XPTR_LOCS_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XINCLUDE_ENABLED:
|
||||
*
|
||||
* Whether XInclude is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XINCLUDE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ICONV_ENABLED:
|
||||
*
|
||||
* Whether iconv support is available
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_ICONV_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ICU_ENABLED:
|
||||
*
|
||||
* Whether icu support is available
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_ICU_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ISO8859X_ENABLED:
|
||||
*
|
||||
* Whether ISO-8859-* support is made available in case iconv is not
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_ISO8859X_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_DEBUG_ENABLED:
|
||||
*
|
||||
* Whether Debugging module is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_DEBUG_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* DEBUG_MEMORY_LOCATION:
|
||||
*
|
||||
* Whether the memory debugging is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define DEBUG_MEMORY_LOCATION
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_DEBUG_RUNTIME:
|
||||
*
|
||||
* Whether the runtime debugging is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_DEBUG_RUNTIME
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_UNICODE_ENABLED:
|
||||
*
|
||||
* Whether the Unicode related interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_UNICODE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_REGEXP_ENABLED:
|
||||
*
|
||||
* Whether the regular expressions interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_REGEXP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_AUTOMATA_ENABLED:
|
||||
*
|
||||
* Whether the automata interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_AUTOMATA_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_EXPR_ENABLED:
|
||||
*
|
||||
* Whether the formal expressions interfaces are compiled in
|
||||
*
|
||||
* This code is unused and disabled unconditionally for now.
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_EXPR_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SCHEMAS_ENABLED:
|
||||
*
|
||||
* Whether the Schemas validation interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SCHEMAS_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SCHEMATRON_ENABLED:
|
||||
*
|
||||
* Whether the Schematron validation interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SCHEMATRON_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_MODULES_ENABLED:
|
||||
*
|
||||
* Whether the module interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_MODULES_ENABLED
|
||||
/**
|
||||
* LIBXML_MODULE_EXTENSION:
|
||||
*
|
||||
* the string suffix used by dynamic modules (usually shared libraries)
|
||||
*/
|
||||
#define LIBXML_MODULE_EXTENSION ".so"
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ZLIB_ENABLED:
|
||||
*
|
||||
* Whether the Zlib support is compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_ZLIB_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_LZMA_ENABLED:
|
||||
*
|
||||
* Whether the Lzma support is compiled in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_LZMA_ENABLED
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
|
||||
/**
|
||||
* ATTRIBUTE_UNUSED:
|
||||
*
|
||||
* Macro used to signal to GCC unused function parameters
|
||||
*/
|
||||
|
||||
#ifndef ATTRIBUTE_UNUSED
|
||||
# if ((__GNUC__ > 2) || ((__GNUC__ == 2) && (__GNUC_MINOR__ >= 7)))
|
||||
# define ATTRIBUTE_UNUSED __attribute__((unused))
|
||||
# else
|
||||
# define ATTRIBUTE_UNUSED
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ATTR_ALLOC_SIZE:
|
||||
*
|
||||
* Macro used to indicate to GCC this is an allocator function
|
||||
*/
|
||||
|
||||
#ifndef LIBXML_ATTR_ALLOC_SIZE
|
||||
# if (!defined(__clang__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))))
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x)))
|
||||
# else
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
# endif
|
||||
#else
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ATTR_FORMAT:
|
||||
*
|
||||
* Macro used to indicate to GCC the parameter are printf like
|
||||
*/
|
||||
|
||||
#ifndef LIBXML_ATTR_FORMAT
|
||||
# if ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)))
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args) __attribute__((__format__(__printf__,fmt,args)))
|
||||
# else
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
# endif
|
||||
#else
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
#endif
|
||||
|
||||
#ifndef XML_DEPRECATED
|
||||
# ifdef IN_LIBXML
|
||||
# define XML_DEPRECATED
|
||||
# else
|
||||
/* Available since at least GCC 3.1 */
|
||||
# define XML_DEPRECATED __attribute__((deprecated))
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#else /* ! __GNUC__ */
|
||||
/**
|
||||
* ATTRIBUTE_UNUSED:
|
||||
*
|
||||
* Macro used to signal to GCC unused function parameters
|
||||
*/
|
||||
#define ATTRIBUTE_UNUSED
|
||||
/**
|
||||
* LIBXML_ATTR_ALLOC_SIZE:
|
||||
*
|
||||
* Macro used to indicate to GCC this is an allocator function
|
||||
*/
|
||||
#define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
/**
|
||||
* LIBXML_ATTR_FORMAT:
|
||||
*
|
||||
* Macro used to indicate to GCC the parameter are printf like
|
||||
*/
|
||||
#define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
/**
|
||||
* XML_DEPRECATED:
|
||||
*
|
||||
* Macro used to indicate that a function, variable, type or struct member
|
||||
* is deprecated.
|
||||
*/
|
||||
#ifndef XML_DEPRECATED
|
||||
#define XML_DEPRECATED
|
||||
#endif
|
||||
#endif /* __GNUC__ */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif /* __cplusplus */
|
||||
#endif
|
|
@ -69,10 +69,10 @@ This statement creates a subscription that includes all table data in the databa
|
|||
|
||||
## Delete Topic
|
||||
|
||||
If you no longer need to subscribe to the data, you can delete the topic. Note that only topics that are not currently subscribed can be deleted.
|
||||
If you no longer need to subscribe to the data, you can delete the topic. If the topic is currently subscribed to by a consumer, it can be forcibly deleted with the FORCE syntax; after a forced deletion, the subscribed consumers will receive errors when they try to consume data (the FORCE syntax is supported from version 3.3.6.0).
|
||||
|
||||
```sql
|
||||
DROP TOPIC [IF EXISTS] topic_name;
|
||||
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
|
||||
```
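A minimal usage sketch, assuming a hypothetical topic named `power_topic` that still has an active consumer:

```sql
-- Forcibly drop a topic that is still subscribed (FORCE supported from 3.3.6.0)
DROP TOPIC IF EXISTS FORCE power_topic;
```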
|
||||
|
||||
## View Topics
|
||||
|
@ -99,10 +99,10 @@ Displays information about all consumers in the current database, including the
|
|||
|
||||
### Delete Consumer Group
|
||||
|
||||
When creating a consumer, a consumer group is assigned to the consumer. Consumers cannot be explicitly deleted, but the consumer group can be deleted with the following statement when there are no consumers in the group:
|
||||
When creating a consumer, a consumer group is assigned to the consumer. Consumers cannot be deleted explicitly, but the consumer group can be. If consumers in the group are still consuming, the FORCE syntax can be used to force the deletion; after a forced deletion, the subscribed consumers will receive errors when they try to consume data (the FORCE syntax is supported from version 3.3.6.0).
|
||||
|
||||
```sql
|
||||
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
|
||||
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
|
||||
```
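A minimal usage sketch, assuming a hypothetical consumer group `cg1` subscribed to a hypothetical topic `power_topic`:

```sql
-- Forcibly drop a consumer group that still has active consumers (FORCE supported from 3.3.6.0)
DROP CONSUMER GROUP IF EXISTS FORCE cg1 ON power_topic;
```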
|
||||
|
||||
## Data Subscription
|
||||
|
@ -137,6 +137,7 @@ If the following 3 data entries were written, then during replay, the first entr
|
|||
|
||||
When using the data subscription's replay feature, note the following:
|
||||
|
||||
- Enable the replay function by setting the consumption parameter `enable.replay` to true.
|
||||
- The replay function of data subscription only supports data playback for query subscriptions; supertable and database subscriptions do not support playback.
|
||||
- Replay does not support progress saving.
|
||||
- Because data playback itself requires processing time, replay has a precision error of several tens of milliseconds.
|
||||
|
|
|
@ -26,11 +26,11 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name
|
|||
SUBTABLE(expression) AS subquery
|
||||
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -108,6 +108,13 @@ Under normal circumstances, stream computation tasks will not process data that
|
|||
|
||||
By enabling the fill_history option, the created stream computation task will be capable of processing data written before, during, and after the creation of the stream. This means that data written either before or after the creation of the stream will be included in the scope of stream computation, thus ensuring data integrity and consistency. This setting provides users with greater flexibility, allowing them to flexibly handle historical and new data according to actual needs.
|
||||
|
||||
Tips:
|
||||
- When fill_history is enabled, creating a stream requires finding the boundary point of the historical data. If there is a lot of historical data, creating the stream may take a long time. In this case, you can use `fill_history 1 async` (supported since version 3.3.6.0) so that the stream creation task is processed in the background; the create-stream statement returns immediately without blocking subsequent operations. `async` only takes effect with `fill_history 1`; creating a stream with `fill_history 0` is very fast and does not require asynchronous processing.
|
||||
|
||||
- `SHOW STREAMS` can be used to view the progress of background stream creation (`ready` status indicates success, `init` indicates the stream is still being created, and `failed` indicates that creation failed; the `message` column shows the reason for a failure. If stream creation fails, the stream can be deleted and recreated).
|
||||
|
||||
- In addition, do not create multiple streams asynchronously at the same time, as transaction conflicts may cause subsequent streams to fail. A sketch of asynchronous creation follows.
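A minimal sketch of asynchronous historical processing, assuming the hypothetical supertable `power.meters`; the stream and target table names are illustrative:

```sql
-- Create the stream in the background and return immediately (ASYNC supported from 3.3.6.0)
CREATE STREAM IF NOT EXISTS power_count_s FILL_HISTORY 1 ASYNC
  INTO power_count AS
    SELECT _wstart, COUNT(*) FROM power.meters INTERVAL(10s);

-- Check whether the background creation has finished (ready/init/failed)
SHOW STREAMS;
```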
|
||||
|
||||
For example, create a stream to count the number of data entries generated by all smart meters every 10s, and also calculate historical data. SQL as follows:
|
||||
|
||||
```sql
|
||||
|
@ -135,8 +142,12 @@ When creating a stream, you can specify the trigger mode of stream computing thr
|
|||
1. AT_ONCE: Triggered immediately upon writing.
|
||||
2. WINDOW_CLOSE: Triggered when the window closes (the closing of the window is determined by the event time, can be used in conjunction with watermark).
|
||||
3. MAX_DELAY time: If the window closes, computation is triggered. If the window has not closed, and the duration since it has not closed exceeds the time specified by max delay, computation is triggered.
|
||||
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only the results of the currently closed window are calculated and pushed out. The window is only calculated once at the moment of closure, and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (does not support sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE.
|
||||
|
||||
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only the results of the currently closed window are calculated and pushed out. The window is calculated only once at the moment of closure and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (sliding is supported). In this mode, FILL_HISTORY is automatically set to 0, IGNORE EXPIRED is automatically set to 1, and IGNORE UPDATE is automatically set to 1; FILL only supports PREV, NULL, NONE, and VALUE.
|
||||
- This mode can be used to implement continuous queries, such as creating a stream that, every 1 second, queries the number of data entries in the window of the past 10 seconds. SQL as follows:
|
||||
```sql
|
||||
create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s)
|
||||
```
|
||||
5. CONTINUOUS_WINDOW_CLOSE: Results are output when the window closes. Modifying or deleting data does not immediately trigger a recalculation; instead, periodic recalculations are performed every rec_time_val. If rec_time_val is not specified, the recalculation period is 60 minutes. If a recalculation takes longer than rec_time_val, the next recalculation starts automatically after the current one completes. Currently, this mode only supports INTERVAL windows. If the FILL clause is used, the relevant taosAdapter information must be configured, including adapterFqdn, adapterPort, and adapterToken. The adapterToken is the string obtained by Base64-encoding `{username}:{password}`; for example, encoding `root:taosdata` yields `cm9vdDp0YW9zZGF0YQ==`.
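A minimal sketch of this trigger mode, assuming the hypothetical supertable `power.meters`; names and the recalculation period are illustrative:

```sql
-- Output results on window close; recalculate modified windows every 10 minutes
CREATE STREAM IF NOT EXISTS cwc_s TRIGGER CONTINUOUS_WINDOW_CLOSE RECALCULATE 10m
  INTO cwc_result AS
    SELECT _wstart, COUNT(*) FROM power.meters INTERVAL(1m);
```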
|
||||
The closing of the window is determined by the event time, such as when the event stream is interrupted or continuously delayed, at which point the event time cannot be updated, possibly leading to outdated computation results.
|
||||
|
||||
Therefore, stream computing provides the MAX_DELAY trigger mode that combines event time with processing time: MAX_DELAY mode triggers computation immediately when the window closes, and its unit can be specified, specific units: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). Additionally, when data is written, if the time that triggers computation exceeds the time specified by MAX_DELAY, computation is triggered immediately.
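A minimal sketch of the MAX_DELAY mode, assuming the hypothetical supertable `power.meters`; names are illustrative:

```sql
-- Trigger when the window closes, or at most 5 seconds after data is written
CREATE STREAM IF NOT EXISTS max_delay_s TRIGGER MAX_DELAY 5s
  INTO max_delay_result AS
    SELECT _wstart, COUNT(*) FROM power.meters INTERVAL(10s);
```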
|
||||
|
|
|
@ -44,6 +44,9 @@ There are many parameters for creating consumers, which flexibly support various
|
|||
| `enable.replay` | boolean | Whether to enable data replay function | Default is off |
|
||||
| `session.timeout.ms` | integer | Timeout after consumer heartbeat is lost, after which rebalance logic is triggered, and upon success, that consumer will be removed (supported from version 3.3.3.0) | Default is 12000, range [6000, 1800000] |
|
||||
| `max.poll.interval.ms` | integer | The maximum interval between consumer polls; if exceeded, the consumer is considered offline, triggering rebalance logic, and upon success that consumer will be removed (supported from version 3.3.3.0) | Default is 300000, range [1000, INT32_MAX] |
|
||||
| `fetch.max.wait.ms` | integer | The maximum time the server waits before returning data for a single fetch (supported from version 3.3.6.0) | Default is 1000, range [1, INT32_MAX] |
|
||||
| `min.poll.rows` | integer | The minimum number of rows the server returns in a single fetch (supported from version 3.3.6.0) | Default is 4096, range [1, INT32_MAX] |
|
||||
| `msg.consume.rawdata` | integer | When consuming data, pull the data in raw binary form without parsing it; an internal parameter used only for taosX data migration (supported from version 3.3.6.0) | The default value 0 means disabled; any non-zero value means enabled |
|
||||
|
||||
Below are the connection parameters for connectors in various languages:
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
|
|
|
@ -298,13 +298,53 @@ select max_vol(vol1, vol2, vol3, deviceid) from battery;
|
|||
|
||||
</details>
|
||||
|
||||
#### Aggregate Function Example 3 Split string and calculate average value [extract_avg](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/extract_avg.c)
|
||||
|
||||
The `extract_avg` function converts a comma-separated string sequence into a set of numeric values, accumulates the results across all rows, and calculates the final average. Notes for the implementation:
|
||||
- `interBuf->numOfResult` needs to return 1 or 0 and cannot be used for count.
|
||||
- Count can use additional caches, such as the `SumCount` structure.
|
||||
- Use `varDataVal` to obtain the string.
|
||||
|
||||
Create table:
|
||||
|
||||
```shell
|
||||
create table scores(ts timestamp, varStr varchar(128));
|
||||
```
|
||||
|
||||
Create custom function:
|
||||
|
||||
```shell
|
||||
create aggregate function extract_avg as '/root/udf/libextract_avg.so' outputtype double bufsize 16 language 'C';
|
||||
```
|
||||
|
||||
Use custom function:
|
||||
|
||||
```shell
|
||||
select extract_avg(varStr) from scores;
|
||||
```
|
||||
|
||||
Generate the `.so` file:
|
||||
```bash
|
||||
gcc -g -O0 -fPIC -shared extract_avg.c -o libextract_avg.so
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>extract_avg.c</summary>
|
||||
|
||||
```c
|
||||
{{#include tests/script/sh/extract_avg.c}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
## Developing UDFs in Python Language
|
||||
|
||||
### Environment Setup
|
||||
|
||||
The specific steps to prepare the environment are as follows:
|
||||
|
||||
- Step 1, prepare the Python runtime environment.
|
||||
- Step 1, prepare the Python runtime environment. If you compile and install Python locally, be sure to enable the `--enable-shared` option; otherwise, the subsequent installation of taospyudf will fail because no shared library can be generated.
|
||||
- Step 2, install the Python package taospyudf. The command is as follows.
|
||||
|
||||
```shell
|
||||
|
@ -495,10 +535,10 @@ taos> select myfun(v1, v2) from t;
|
|||
DB error: udf function execution failure (0.011088s)
|
||||
```
|
||||
|
||||
Unfortunately, the execution failed. What could be the reason? Check the udfd process logs.
|
||||
Unfortunately, the execution failed. What could be the reason? Check the taosudf process logs.
|
||||
|
||||
```shell
|
||||
tail -10 /var/log/taos/udfd.log
|
||||
tail -10 /var/log/taos/taosudf.log
|
||||
```
|
||||
|
||||
Found the following error messages.
|
||||
|
|
|
@ -16,8 +16,10 @@ TDengine is designed for various writing scenarios, and many of these scenarios
|
|||
### Syntax
|
||||
|
||||
```sql
|
||||
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
||||
SHOW COMPACTS [compact_id];
|
||||
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
|
||||
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
|
||||
SHOW COMPACTS;
|
||||
SHOW COMPACT compact_id;
|
||||
KILL COMPACT compact_id;
|
||||
```
|
||||
|
||||
|
@ -28,6 +30,7 @@ KILL COMPACT compact_id;
|
|||
- COMPACT will merge multiple STT files
|
||||
- You can specify the start time of the COMPACT data with the start with keyword
|
||||
- You can specify the end time of the COMPACT data with the end with keyword
|
||||
- You can specify the META_ONLY keyword to compact only the metadata, which is not compacted by default
|
||||
- The COMPACT command will return the ID of the COMPACT task
|
||||
- COMPACT tasks are executed asynchronously in the background, and you can view the progress of COMPACT tasks using the SHOW COMPACTS command
|
||||
- The SHOW command will return the ID of the COMPACT task, and you can terminate the COMPACT task using the KILL COMPACT command (see the example below)
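A usage sketch combining these statements, assuming a hypothetical database named `power`; the time range and task ID are illustrative:

```sql
-- Compact one database over a time range, then monitor and, if needed, cancel the task
COMPACT DATABASE power START WITH '2024-01-01 00:00:00' END WITH '2024-06-30 23:59:59';
SHOW COMPACTS;
KILL COMPACT 1;  -- use the task ID returned by COMPACT / SHOW COMPACTS
```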
|
||||
|
|
|
@ -18,7 +18,10 @@ create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
|||
The parameters are explained as follows.
|
||||
|
||||
- user_name: Up to 23 B long.
|
||||
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
||||
- password: The password must be between 8 and 255 characters long and include at least three of the following character types: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`. This requirement can be disabled by adding enableStrongPassword 0 in taos.cfg, or with the following SQL:
|
||||
```sql
|
||||
alter all dnode 'EnableStrongPassword' '0'
|
||||
```
|
||||
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information.
|
||||
- createdb: Whether the user can create databases. 1 means they can, 0 means they cannot. The default value is 0 (see the example below). // Supported starting from TDengine Enterprise version 3.3.2.0
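A minimal sketch, assuming a hypothetical user name and a password that satisfies the default strong-password rules:

```sql
-- A user who cannot view system information but can create databases
create user test_user pass 'Ab1!efgh' sysinfo 0 createdb 1;
```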
|
||||
|
||||
|
|
|
@ -0,0 +1,278 @@
|
|||
---
|
||||
sidebar_label: Security Configuration
|
||||
title: Security Configuration
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import Image from '@theme/IdealImage';
|
||||
import imgEcosys from '../assets/tdengine-components-01.png';
|
||||
|
||||
## Background
|
||||
|
||||
The distributed and multi-component nature of TDengine makes its security configuration a concern in production systems. This document aims to explain the security issues of various TDengine components and different deployment methods, and provide deployment and configuration suggestions to support the security of user data.
|
||||
|
||||
## Components Involved in Security Configuration
|
||||
|
||||
TDengine includes multiple components:
|
||||
|
||||
- `taosd`: Core component.
|
||||
- `taosc`: Client library.
|
||||
- `taosAdapter`: REST API and WebSocket service.
|
||||
- `taosKeeper`: Monitoring service component.
|
||||
- `taosX`: Data pipeline and backup recovery component.
|
||||
- `taosxAgent`: Auxiliary component for external data source access.
|
||||
- `taosExplorer`: Web visualization management interface.
|
||||
|
||||
In addition to TDengine deployment and applications, there are also the following components:
|
||||
|
||||
- Applications that access and use the TDengine database through various connectors.
|
||||
- External data sources: Other data sources that access TDengine, such as MQTT, OPC, Kafka, etc.
|
||||
|
||||
The relationship between the components is as follows:
|
||||
|
||||
<figure>
|
||||
<Image img={imgEcosys} alt="TDengine ecosystem"/>
|
||||
<figcaption>TDengine ecosystem</figcaption>
|
||||
</figure>
|
||||
|
||||
## TDengine Security Settings
|
||||
|
||||
### `taosd`
|
||||
|
||||
The `taosd` cluster uses TCP connections based on its own protocol for data exchange, which has low risk, but the transmission process is not encrypted, so there is still some security risk.
|
||||
|
||||
Enabling compression may help with TCP data obfuscation.
|
||||
|
||||
- **compressMsgSize**: Whether to compress RPC messages. Integer, optional: -1: Do not compress any messages; 0: Compress all messages; N (N>0): Only compress messages larger than N bytes.
|
||||
|
||||
To ensure the traceability of database operations, it is recommended to enable the audit function.
|
||||
|
||||
- **audit**: Audit function switch, 0 is off, 1 is on. Default is on.
|
||||
- **auditInterval**: Reporting interval, in milliseconds. Default is 5000.
|
||||
- **auditCreateTable**: Whether to enable the audit function for creating sub-tables. 0 is off, 1 is on. Default is on.
|
||||
|
||||
To ensure the security of data files, database encryption can be enabled.
|
||||
|
||||
- **encryptAlgorithm**: Data encryption algorithm.
|
||||
- **encryptScope**: Data encryption scope.
|
||||
|
||||
Enabling the whitelist can restrict access addresses and further enhance privacy.
|
||||
|
||||
- **enableWhiteList**: Whitelist function switch, 0 is off, 1 is on; default is off.
|
||||
|
||||
### `taosc`
|
||||
|
||||
Users and other components use the native client library (`taosc`) and its own protocol to connect to `taosd`, which has low data security risk, but the transmission process is still not encrypted, so there is some security risk.
|
||||
|
||||
### `taosAdapter`
|
||||
|
||||
`taosAdapter` uses the native client library (`taosc`) and its own protocol to connect to `taosd`, and also supports RPC message compression, so there is no data security issue.
|
||||
|
||||
Applications and other components connect to `taosAdapter` through various language connectors. By default, the connection is based on HTTP 1.1 and is not encrypted. To ensure the security of data transmission between `taosAdapter` and other components, SSL encrypted connections need to be configured. Modify the following configuration in the `/etc/taos/taosadapter.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
enable = true
|
||||
certFile = "/path/to/certificate-file"
|
||||
keyFile = "/path/to/private-key"
|
||||
```
|
||||
|
||||
Configure HTTPS/SSL access in the connector to complete encrypted access.
|
||||
|
||||
To further enhance security, the whitelist function can be enabled and configured in `taosd`; the configuration also applies to the `taosAdapter` component.
|
||||
|
||||
### `taosX`
|
||||
|
||||
`taosX` includes REST API and gRPC interfaces, where the gRPC interface is used for `taos-agent` connections.
|
||||
|
||||
- The REST API interface is based on HTTP 1.1 and is not encrypted, posing a security risk.
|
||||
- The gRPC interface is based on HTTP 2 and is not encrypted, posing a security risk.
|
||||
|
||||
To ensure data security, it is recommended that the `taosX` API interface is limited to internal access only. Modify the following configuration in the `/etc/taos/taosx.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
listen = "127.0.0.1:6050"
|
||||
grpc = "127.0.0.1:6055"
|
||||
```
|
||||
|
||||
Starting from TDengine 3.3.6.0, `taosX` supports HTTPS connections. Add the following configuration in the `/etc/taos/taosx.toml` file:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
ssl_cert = "/path/to/server.pem"
|
||||
ssl_key = "/path/to/server.key"
|
||||
ssl_ca = "/path/to/ca.pem"
|
||||
```
|
||||
|
||||
Then modify the API address in Explorer to use an HTTPS connection:
|
||||
|
||||
```toml
|
||||
# Local connection to taosX API
|
||||
x_api = "https://127.0.01:6050"
|
||||
# Public IP or domain address
|
||||
grpc = "https://public.domain.name:6055"
|
||||
```
|
||||
|
||||
### `taosExplorer`
|
||||
|
||||
Similar to the `taosAdapter` component, the `taosExplorer` component provides HTTP services for external access. Modify the following configuration in the `/etc/taos/explorer.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
# SSL certificate file
|
||||
certificate = "/path/to/ca.file"
|
||||
|
||||
# SSL certificate private key
|
||||
certificate_key = "/path/to/key.file"
|
||||
```
|
||||
|
||||
Then, use HTTPS to access Explorer, such as [https://192.168.12.34:6060](https://192.168.12.34:6060).
|
||||
|
||||
### `taosxAgent`
|
||||
|
||||
After `taosX` enables HTTPS, the `Agent` component and `taosX` use HTTP 2 encrypted connections, using Arrow-Flight RPC for data exchange. The transmission content is in binary format, and only registered `Agent` connections are valid, ensuring data security.
|
||||
|
||||
It is recommended to always enable HTTPS connections for `Agent` services in insecure or public network environments.
|
||||
|
||||
### `taosKeeper`
|
||||
|
||||
`taosKeeper` uses WebSocket connections to communicate with `taosAdapter`, writing monitoring information reported by other components into TDengine.
|
||||
|
||||
The current version of `taosKeeper` has security risks:
|
||||
|
||||
- The listening address cannot be restricted to the local machine. By default, it listens on all addresses on port 6043, posing a risk of network attacks. This risk can be ignored when deploying with Docker or Kubernetes without exposing the `taosKeeper` port.
|
||||
- The configuration file contains plaintext passwords, so the visibility of the configuration file needs to be reduced. In `/etc/taos/taoskeeper.toml`:
|
||||
|
||||
```toml
|
||||
[tdengine]
|
||||
host = "localhost"
|
||||
port = 6041
|
||||
username = "root"
|
||||
password = "taosdata"
|
||||
usessl = false
|
||||
```
|
||||
|
||||
## Security Enhancements
|
||||
|
||||
We recommend using TDengine within a local area network.
|
||||
|
||||
If you must provide access outside the local area network, consider adding the following configurations:
|
||||
|
||||
### Load Balancing
|
||||
|
||||
Use load balancing to provide `taosAdapter` services externally.
|
||||
|
||||
Take Nginx as an example to configure multi-node load balancing:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 6041;
|
||||
|
||||
location / {
|
||||
proxy_pass http://websocket;
|
||||
# Headers for websocket compatible
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
# Forwarded headers
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header X-Forwarded-Server $hostname;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
}
|
||||
}
|
||||
|
||||
upstream websocket {
|
||||
server 192.168.11.61:6041;
|
||||
server 192.168.11.62:6041;
|
||||
server 192.168.11.63:6041;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If the `taosAdapter` component is not configured with SSL secure connections, SSL needs to be configured to ensure secure access. SSL can be configured at a higher-level API Gateway or in Nginx; if you have stronger security requirements for the connections between components, you can configure SSL in all components. The Nginx configuration is as follows:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 443 ssl;
|
||||
|
||||
ssl_certificate /path/to/your/certificate.crt;
|
||||
ssl_certificate_key /path/to/your/private.key;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Security Gateway
|
||||
|
||||
In modern internet production systems, the use of security gateways is also very common. [traefik](https://traefik.io/) is a good open-source choice. We take traefik as an example to explain the security configuration in the API gateway.
|
||||
|
||||
Traefik provides various security configurations through middleware, including:
|
||||
|
||||
1. Authentication: Traefik provides multiple authentication methods such as BasicAuth, DigestAuth, custom authentication middleware, and OAuth 2.0.
|
||||
2. IP Whitelist: Restrict the allowed client IPs.
|
||||
3. Rate Limit: Control the number of requests sent to the service.
|
||||
4. Custom Headers: Add configurations such as `allowedHosts` through custom headers to improve security.
|
||||
|
||||
A common middleware example is as follows:
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)"
|
||||
- "traefik.http.routers.tdengine.entrypoints=https"
|
||||
- "traefik.http.routers.tdengine.tls.certresolver=default"
|
||||
- "traefik.http.routers.tdengine.service=tdengine"
|
||||
- "traefik.http.services.tdengine.loadbalancer.server.port=6041"
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
- "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue"
|
||||
- "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true"
|
||||
- "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
|
||||
- "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist"
|
||||
```
|
||||
|
||||
The above example completes the following configurations:
|
||||
|
||||
- TLS authentication uses the `default` configuration, which can be configured in the configuration file or traefik startup parameters, as follows:
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: "traefik:v2.3.2"
|
||||
hostname: "traefik"
|
||||
networks:
|
||||
- traefik
|
||||
command:
|
||||
- "--log.level=INFO"
|
||||
- "--api.insecure=true"
|
||||
- "--providers.docker=true"
|
||||
- "--providers.docker.exposedbydefault=false"
|
||||
- "--providers.docker.swarmmode=true"
|
||||
- "--providers.docker.network=traefik"
|
||||
- "--providers.docker.watch=true"
|
||||
- "--entrypoints.http.address=:80"
|
||||
- "--entrypoints.https.address=:443"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge=true"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.provider=alidns"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com"
|
||||
- "--certificatesresolvers.default.acme.email=linhehuo@gmail.com"
|
||||
- "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json"
|
||||
```
|
||||
|
||||
The above startup parameters configure the `default` TLS certificate resolver and automatic ACME authentication (automatic certificate application and renewal).
|
||||
|
||||
- Middleware `redirect-to-https`: Configure redirection from HTTP to HTTPS, forcing the use of secure connections.
|
||||
|
||||
```yaml
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
```
|
||||
|
||||
- Middleware `check-header`: Configure custom header checks. External access must add custom headers and match header values to prevent unauthorized access. This is a very simple and effective security mechanism when providing API access.
|
||||
- Middleware `tdengine-ipwhitelist`: Configure IP whitelist. Only allow specified IPs to access, using CIDR routing rules for matching, and can set internal and external IP addresses.
|
||||
|
||||
## Summary
|
||||
|
||||
Data security is a key indicator of the TDengine product. These measures are designed to protect TDengine deployments from unauthorized access and data breaches while maintaining performance and functionality. However, the security configuration of TDengine itself is not the only guarantee in production. It is more important to develop solutions that better match customer needs in combination with the user's business system.
|
|
@ -53,6 +53,8 @@ It is not necessary to configure your cluster specifically for active-active mod
|
|||
- The sink endpoint is the FQDN of TDengine on the secondary node.
|
||||
- You can use the native connection (port 6030) or WebSocket connection (port 6041).
|
||||
- You can specify one or more databases to replicate only the data contained in those databases. If you do not specify a database, all databases on the node are replicated except for `information_schema`, `performance_schema`, `log`, and `audit`.
|
||||
- New databases on both sides are detected periodically and replication for them is started automatically; the checking interval can be set with the optional `--new-database-checking-interval <SECONDS>` argument.
|
||||
- New-database checking can be disabled with `--no-new-databases`.
|
||||
|
||||
When the command is successful, the replica ID is displayed. You can use this ID to add other databases to the replication task if necessary.
|
||||
|
||||
|
@ -97,7 +99,6 @@ You can manage your active-active deployment with the following commands:
|
|||
:::note
|
||||
- This command cannot create duplicate tasks. It only adds the specified databases to the specified task.
|
||||
- The replica ID is globally unique within a taosX instance and is independent of the source/sink combination.
|
||||
|
||||
:::
|
||||
|
||||
2. Check the status of a task:
|
||||
|
@ -124,6 +125,8 @@ You can manage your active-active deployment with the following commands:
|
|||
|
||||
If you specify a database, replication for that database is stopped. If you do not specify a database, all replication tasks on the ID are stopped. If you do not specify an ID, all replication tasks on the instance are stopped.
|
||||
|
||||
Use `--no-new-databases` to leave new-database checking running.
|
||||
|
||||
4. Restart a replication task:
|
||||
|
||||
```shell
|
||||
|
@ -132,6 +135,14 @@ You can manage your active-active deployment with the following commands:
|
|||
|
||||
If you specify a database, replication for that database is restarted. If you do not specify a database, all replication tasks in the instance are restarted. If you do not specify an ID, all replication tasks on the instance are restarted.
|
||||
|
||||
5. Update new databases checking interval:
|
||||
|
||||
```shell
|
||||
taosx replica update id --new-database-checking-interval <SECONDS>
|
||||
```
|
||||
|
||||
This command will only update the checking interval for new databases.
|
||||
|
||||
6. Check the progress of a replication task:
|
||||
|
||||
```shell
|
||||
|
|
|
@ -231,6 +231,7 @@ The effective value of charset is UTF-8.
|
|||
|udf | |Supported, effective after restart|Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
||||
|udfdResFuncs | |Supported, effective after restart|Internal parameter, for setting UDF result sets|
|
||||
|udfdLdLibPath | |Supported, effective after restart|Internal parameter, indicates the library path for loading UDF|
|
||||
|enableStrongPassword | After 3.3.5.0 |Supported, effective after restart|The password must include at least three of the following character types: uppercase letters, lowercase letters, numbers, and special characters; special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? \| ~ , .`; 0: disable, 1: enable; default value 1 |
|
||||
|
||||
### Stream Computing Parameters
|
||||
|
||||
|
@ -243,6 +244,11 @@ The effective value of charset is UTF-8.
|
|||
| concurrentCheckpoint | |Supported, effective immediately | Internal parameter, whether to check checkpoints concurrently |
|
||||
| maxStreamBackendCache | |Supported, effective immediately | Internal parameter, maximum cache used by stream computing |
|
||||
| streamSinkDataRate | |Supported, effective after restart| Internal parameter, used to control the write speed of stream computing results |
|
||||
| streamNotifyMessageSize | After 3.3.6.0 | Not supported | Internal parameter, controls the message size for event notifications, default value is 8192 |
|
||||
| streamNotifyFrameSize | After 3.3.6.0 | Not supported | Internal parameter, controls the underlying frame size when sending event notification messages, default value is 256 |
|
||||
| adapterFqdn | After 3.3.6.0 | Not supported | Internal parameter, The address of the taosadapter services, default value is localhost |
|
||||
| adapterPort | After 3.3.6.0 | Not supported | Internal parameter, The port of the taosadapter services, default value is 6041 |
|
||||
| adapterToken | After 3.3.6.0 | Not supported | Internal parameter, The string obtained by Base64-encoding `{username}:{password}`, default value is `cm9vdDp0YW9zZGF0YQ==` |
|
||||
|
||||
### Log Related
|
||||
|
||||
|
|
|
@ -44,6 +44,7 @@ The TDengine client driver provides all the APIs needed for application programm
|
|||
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|
||||
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|
||||
|compareAsStrInGreatest | v3.3.6.0 |Supported, effective immediately |Integer. Controls type conversion when the greatest and least functions receive both numeric and string parameters; 1: all parameters are uniformly converted to strings for comparison, 0: all parameters are uniformly converted to numeric types for comparison.|
|
||||
|
||||
### Writing Related
|
||||
|
||||
|
|
|
@ -188,9 +188,12 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
|||
|
||||
The parameters listed in this section apply to all functional modes.
|
||||
|
||||
- **filetype**: The function to test, possible values are `insert`, `query`, and `subscribe`. Corresponding to insert, query, and subscribe functions. Only one can be specified in each configuration file.
|
||||
- **filetype**: The function to test, possible values are `insert`, `query`, `subscribe`, and `csvfile`, corresponding to the insert, query, subscribe, and CSV file generation functions. Only one can be specified in each configuration file.
|
||||
|
||||
- **cfgdir**: Directory where the TDengine client configuration file is located, default path is /etc/taos.
|
||||
|
||||
- **output_dir**: The directory for output files. When filetype is csvfile, this is the directory where the generated CSV files are saved. The default value is ./output/.
|
||||
|
||||
- **host**: Specifies the FQDN of the TDengine server to connect to, default value is localhost.
|
||||
|
||||
- **port**: The port number of the TDengine server to connect to, default value is 6030.
|
||||
|
@ -283,6 +286,27 @@ Parameters related to supertable creation are configured in the `super_tables` s
|
|||
- **repeat_ts_max** : Numeric type, when composite primary key is enabled, specifies the maximum number of records with the same timestamp to be generated
|
||||
- **sqls** : Array of strings type, specifies the array of sql to be executed after the supertable is successfully created, the table name specified in sql must be prefixed with the database name, otherwise an unspecified database error will occur
|
||||
|
||||
- **csv_file_prefix**: String type, sets the prefix for the names of the generated csv files. Default value is "data".
|
||||
|
||||
- **csv_ts_format**: String type, sets the format of the time string in the names of the generated csv files, following the `strftime` format standard. If not set, files will not be split by time intervals. Supported patterns include:
|
||||
- %Y: Year as a four-digit number (e.g., 2025)
|
||||
- %m: Month as a two-digit number (01 to 12)
|
||||
- %d: Day of the month as a two-digit number (01 to 31)
|
||||
- %H: Hour in 24-hour format as a two-digit number (00 to 23)
|
||||
- %M: Minute as a two-digit number (00 to 59)
|
||||
- %S: Second as a two-digit number (00 to 59)
|
||||
|
||||
- **csv_ts_interval**: String type, sets the time interval for splitting generated csv file names. Supports daily, hourly, minute, and second intervals such as 1d/2h/30m/40s. The default value is "1d".
|
||||
|
||||
- **csv_output_header**: String type, sets whether the generated csv files should contain column header descriptions. The default value is "yes".
|
||||
|
||||
- **csv_tbname_alias**: String type, sets the alias for the tbname field in the column header descriptions of csv files. The default value is "device_id".
|
||||
|
||||
- **csv_compress_level**: String type, sets the compression level for generating CSV-encoded data and automatically compressing it into a gzip file. This process encodes and compresses the data directly, rather than first generating a CSV file and then compressing it. Possible values are:
|
||||
- none: No compression
|
||||
- fast: gzip level 1 compression
|
||||
- balance: gzip level 6 compression
|
||||
- best: gzip level 9 compression
|
||||
|
||||
#### Tag and Data Columns
|
||||
|
||||
|
@ -505,6 +529,17 @@ Note: Data types in the taosBenchmark configuration file must be in lowercase to
|
|||
|
||||
</details>
|
||||
|
||||
### Export CSV File Example
|
||||
|
||||
<details>
|
||||
<summary>csv-export.json</summary>
|
||||
|
||||
```json
|
||||
{{#include /TDengine/tools/taos-tools/example/csv-export.json}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
For other JSON examples, see [here](https://github.com/taosdata/TDengine/tree/main/tools/taos-tools/example)
|
||||
|
||||
## Output Performance Indicators
|
||||
|
|
|
@ -43,6 +43,7 @@ In TDengine, the following data types can be used in the data model of basic tab
|
|||
| 16 | VARCHAR | Custom | Alias for BINARY type |
|
||||
| 17 | GEOMETRY | Custom | Geometry type, supported starting from version 3.1.0.0 |
|
||||
| 18 | VARBINARY | Custom | Variable-length binary data, supported starting from version 3.1.1.0 |
|
||||
| 19 | DECIMAL | 8 or 16 | High-precision numeric type. The range of values depends on the precision and scale specified in the type. Supported starting from version 3.3.6. See the description below. |
|
||||
|
||||
:::note
|
||||
|
||||
|
@ -61,6 +62,18 @@ In TDengine, the following data types can be used in the data model of basic tab
|
|||
- VARBINARY is a data type for storing binary data, with a maximum length of 65,517 bytes for data columns and 16,382 bytes for label columns. Binary data can be written via SQL or schemaless methods (needs to be converted to a string starting with \x), or through stmt methods (can use binary directly). Displayed as hexadecimal starting with \x.
|
||||
|
||||
:::
|
||||
### DECIMAL Data Type
|
||||
|
||||
The `DECIMAL` data type is used for high-precision numeric storage and is supported starting from version 3.3.6. The definition syntax is: `DECIMAL(18, 2)`, `DECIMAL(38, 10)`, where two parameters must be specified: `precision` and `scale`. `Precision` refers to the maximum number of significant digits supported, and `scale` refers to the maximum number of decimal places. For example, `DECIMAL(8, 4)` represents a range of `[-9999.9999, 9999.9999]`. When defining the `DECIMAL` data type, the range of `precision` is `[1, 38]`, and the range of `scale` is `[0, precision]`. If `scale` is 0, it represents integers only. You can also omit `scale`, in which case it defaults to 0. For example, `DECIMAL(18)` is equivalent to `DECIMAL(18, 0)`.
|
||||
|
||||
When the `precision` value is less than or equal to 18, 8 bytes of storage (DECIMAL64) are used internally. When the `precision` is in the range `(18, 38]`, 16 bytes of storage (DECIMAL) are used. When writing `DECIMAL` type data in SQL, numeric values can be written directly. If the value exceeds the maximum representable value for the type, a `DECIMAL_OVERFLOW` error will be reported. If the value does not exceed the maximum representable value but the number of decimal places exceeds the `scale`, it will be automatically rounded. For example, if the type is defined as `DECIMAL(10, 2)` and the value `10.987` is written, the actual stored value will be `10.99`.
|
||||
|
||||
The `DECIMAL` type only supports regular columns and does not currently support tag columns. The `DECIMAL` type supports SQL-based writes only and does not currently support `stmt` or schemaless writes.
|
||||
|
||||
When performing operations between integer types and the `DECIMAL` type, the integer type is converted to the `DECIMAL` type before the calculation. When the `DECIMAL` type is involved in calculations with `DOUBLE`, `FLOAT`, `VARCHAR`, or `NCHAR` types, it is converted to `DOUBLE` type for computation.
|
||||
|
||||
When querying `DECIMAL` type expressions, if the intermediate result of the calculation exceeds the maximum value that the current type can represent, a `DECIMAL_OVERFLOW` error is reported.
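A minimal sketch of DECIMAL definition and the rounding behavior described above; the table and values are illustrative:

```sql
CREATE TABLE prices (ts TIMESTAMP, price DECIMAL(10, 2));
-- 10.987 has more decimal places than the scale allows, so it is stored as 10.99
INSERT INTO prices VALUES (NOW, 10.987);
```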
|
||||
|
||||
|
||||
## Constants
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ table_options:
|
|||
table_option: {
|
||||
COMMENT 'string_value'
|
||||
| SMA(col_name [, col_name] ...)
|
||||
| KEEP value
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -34,6 +35,7 @@ table_option: {
|
|||
- TAGS can have up to 128 columns, at least 1, with a total length not exceeding 16 KB.
|
||||
4. For the use of `ENCODE` and `COMPRESS`, please refer to [Column Compression](../manage-data-compression/)
|
||||
5. For explanations of parameters in table_option, please refer to [Table SQL Description](../manage-tables/)
|
||||
6. Regarding the keep parameter in table_option, it only takes effect for super tables. For a detailed explanation of the keep parameter, please refer to [Database Description](02-database.md). The only difference is that a super table's keep parameter does not immediately affect query results; it only takes effect after compaction.
|
||||
|
||||
## View Supertables
|
||||
|
||||
|
@ -144,6 +146,7 @@ alter_table_options:
|
|||
|
||||
alter_table_option: {
|
||||
COMMENT 'string_value'
|
||||
| KEEP value
|
||||
}
|
||||
|
||||
```
|
||||
|
|
|
@ -55,14 +55,13 @@ join_clause:
|
|||
|
||||
window_clause: {
|
||||
SESSION(ts_col, tol_val)
|
||||
| STATE_WINDOW(col)
|
||||
| STATE_WINDOW(col) [TRUE_FOR(true_for_duration)]
|
||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition [TRUE_FOR(true_for_duration)]
|
||||
| COUNT_WINDOW(count_val[, sliding_val])
|
||||
|
||||
interp_clause:
|
||||
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
| RANGE(ts_val, surrounding_time_val) FILL(fill_mod_and_val)
|
||||
RANGE(ts_val [, ts_val] [, surrounding_time_val]) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
|
||||
partition_by_clause:
|
||||
PARTITION BY partition_by_expr [, partition_by_expr] ...
|
||||
|
|
|
@ -124,7 +124,39 @@ FLOOR(expr)
|
|||
```
|
||||
|
||||
**Function Description**: Gets the floor of the specified field.
|
||||
Other usage notes see CEIL function description.
|
||||
For other usage notes, see the [CEIL](#ceil) function description.
|
||||
|
||||
#### GREATEST
|
||||
```sql
|
||||
GREATEST(expr1, expr2[, expr]...)
|
||||
```
|
||||
|
||||
**Function Description**: Returns the maximum value of all input parameters. This function requires at least 2 parameters.
|
||||
|
||||
**Version**: 3.3.6.0 and later
|
||||
|
||||
**Return Type**: Determined by the comparison rules; the comparison type is the final return type.
|
||||
|
||||
**Applicable Data Types**:
|
||||
- Numeric types: TIMESTAMP, BOOL, integer, and floating-point types
|
||||
- String types: NCHAR and VARCHAR types.
|
||||
|
||||
**Comparison rules**: The following rules describe the conversion method of the comparison operation:
|
||||
- If any parameter is NULL, the comparison result is NULL.
|
||||
- If all parameters in the comparison operation are string types, they are compared as strings.
|
||||
- If all parameters are numeric types, compare them as numeric types.
|
||||
- If there are both string types and numeric types in the parameters, according to the `compareAsStrInGreatest` configuration item, they are uniformly compared as strings or numeric values. By default, they are compared as strings.
|
||||
- In all cases, when different types are compared, the type with the larger range is chosen as the comparison type. For example, when comparing integer types, if a BIGINT value is present, BIGINT is chosen as the comparison type.
|
||||
|
||||
**Related configuration items**: The client configuration item `compareAsStrInGreatest`. A value of 1 means that mixed string and numeric parameters are converted to strings for comparison; 0 means they are converted to numeric values. The default is 1.
|
||||
|
||||
|
||||
#### LEAST
|
||||
```sql
|
||||
LEAST(expr1, expr2[, expr]...)
|
||||
```
|
||||
|
||||
**Function Description**: Returns the minimum value of all input parameters. The rest of the description is the same as for the [GREATEST](#greatest) function.
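As an illustration of both functions, assuming a hypothetical table `meters` with numeric columns `c1` and `c2`:

```sql
-- per-row maximum and minimum across the two columns and a constant
SELECT GREATEST(c1, c2, 100), LEAST(c1, c2, 100) FROM meters;
```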
|
||||
|
||||
#### LOG
|
||||
|
||||
|
@ -1154,6 +1186,7 @@ CAST(expr AS type_name)
|
|||
1) Invalid characters may be encountered when converting string types to numeric types; e.g., "a" may convert to 0, but no error is thrown.
|
||||
2) When converting to numeric types, if the value exceeds the range that `type_name` can represent, it will overflow, but will not throw an error.
|
||||
3) When converting to string types, if the converted length exceeds the length specified in `type_name`, it will be truncated, but will not throw an error.
|
||||
- The DECIMAL type does not support conversion to or from JSON, VARBINARY, or GEOMETRY types.
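A short sketch of these conversion behaviors (the literal values are illustrative only):

```sql
-- truncated to fit VARCHAR(3): returns '123', no error is thrown
SELECT CAST('123.456' AS VARCHAR(3));

-- invalid characters do not throw an error; 'a' may simply convert to 0
SELECT CAST('a' AS INT);
```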
|
||||
|
||||
#### TO_ISO8601
|
||||
|
||||
|
@ -1659,12 +1692,14 @@ AVG(expr)
|
|||
|
||||
**Function Description**: Calculates the average value of the specified field.
|
||||
|
||||
**Return Data Type**: DOUBLE.
|
||||
**Return Data Type**: DOUBLE, DECIMAL.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Description**: When the input type is DECIMAL, the output type is also DECIMAL. The precision and scale of the output conform to the rules described in the data type section. The result type is obtained by dividing the SUM type by UINT64. If the SUM result causes a DECIMAL type overflow, a DECIMAL OVERFLOW error is reported.
|
||||
|
||||
### COUNT
|
||||
|
||||
```sql
|
||||
|
@ -1815,12 +1850,14 @@ SUM(expr)
|
|||
|
||||
**Function Description**: Calculates the sum of a column in a table/supertable.
|
||||
|
||||
**Return Data Type**: DOUBLE, BIGINT.
|
||||
**Return Data Type**: DOUBLE, BIGINT, DECIMAL.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Description**: When the input type is DECIMAL, the output type is DECIMAL(38, scale), where precision is the maximum value currently supported, and scale is the scale of the input type. If the SUM result overflows, a DECIMAL OVERFLOW error is reported.
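For example, assuming a hypothetical table `sensor_data` whose column `val` is defined as `DECIMAL(10, 2)`:

```sql
-- SUM(val) returns DECIMAL(38, 2); AVG(val) also returns a DECIMAL
-- whose precision and scale follow the rules described above
SELECT SUM(val), AVG(val) FROM sensor_data;
```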
|
||||
|
||||
### HYPERLOGLOG
|
||||
|
||||
```sql
|
||||
|
@ -1932,6 +1969,7 @@ FIRST(expr)
|
|||
- If all values in a column in the result set are NULL, the return for that column is also NULL;
|
||||
- If all columns in the result set are NULL, no results are returned.
|
||||
- For tables with composite primary keys, if there are multiple entries with the smallest timestamp, only the data with the smallest composite primary key is returned.
|
||||
|
||||
### LAST
|
||||
|
||||
```sql
|
||||
|
@ -2088,6 +2126,28 @@ UNIQUE(expr)
|
|||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
### COLS
|
||||
|
||||
```sql
|
||||
COLS(func(expr), output_expr1 [, output_expr2] ...)
|
||||
```
|
||||
|
||||
**Function Description**: Evaluates the expressions output_expr1 [, output_expr2] ... on the data row selected by func(expr) and returns their results; the result of func(expr) itself is not output.
|
||||
|
||||
**Return Data Type**: Returns multiple columns of data, and the data type of each column is the type of the result returned by the corresponding expression.
|
||||
|
||||
**Applicable Data Types**: All type fields.
|
||||
|
||||
**Applicable to**: Tables and Super Tables.
|
||||
|
||||
**Usage Instructions**:
|
||||
- func must be a single-row selection function (one whose output is a single row); for example, last is a single-row selection function, while top is a multi-row selection function.
|
||||
- Mainly used to obtain the columns associated with multiple selection-function results in a single SQL query. For example, select cols(max(c0), ts), cols(max(c1), ts) from ... can be used to get the different ts values at which columns c0 and c1 reach their maxima.
|
||||
- The result of the parameter func is not returned. If you need to output the result of func, add it as an additional output column, such as: select first(ts), cols(first(ts), c1) from ...
|
||||
- When there is only one column in the output, you can set an alias for the function, for example: "select cols(first(ts), c1) as c11 from ...".
|
||||
- When outputting one or more columns, you can set an alias for each output column of the function, for example: "select cols(first(ts), c1 as c11, c2 as c22) from ...".
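Putting these usage notes together, a minimal sketch assuming a hypothetical table `meters` with columns `ts`, `c0`, and `c1`:

```sql
-- return the timestamps at which c0 and c1 reach their respective maxima;
-- the MAX values themselves are not output
SELECT COLS(MAX(c0), ts) AS ts_c0, COLS(MAX(c1), ts) AS ts_c1 FROM meters;
```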
|
||||
|
||||
|
||||
## Time-Series Specific Functions
|
||||
|
||||
Time-Series specific functions are tailor-made by TDengine to meet the query scenarios of time-series data. In general databases, implementing similar functionalities usually requires complex query syntax and is inefficient. TDengine has built these functionalities into functions, greatly reducing the user's cost of use.
|
||||
|
@ -2199,6 +2259,7 @@ ignore_null_values: {
|
|||
- INTERP is used to obtain the record value of a specified column at the specified time slice. It has a dedicated syntax (interp_clause) when used. For syntax introduction, see [reference link](../query-data/#interp).
|
||||
- When there is no row data that meets the conditions at the specified time slice, the INTERP function will interpolate according to the settings of the [FILL](../time-series-extensions/#fill-clause) parameter.
|
||||
- When INTERP is applied to a supertable, it will sort all the subtable data under that supertable by primary key column and perform interpolation calculations, and can also be used with PARTITION BY tbname to force the results to a single timeline.
|
||||
- When using INTERP with FILL PREV/NEXT/NEAR modes, its behavior differs from window queries. If data exists at the slice, no FILL operation will be performed, even if the current value is NULL.
|
||||
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported from version 3.0.2.0).
|
||||
- INTERP can be used with the pseudocolumn _isfilled to display whether the return result is from the original record or generated by the interpolation algorithm (supported from version 3.0.3.0).
|
||||
- INTERP can use the pseudocolumn `_irowts_origin` only with FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported from version 3.3.4.9.
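A minimal sketch combining these notes, assuming a hypothetical supertable `meters` with a `current` column:

```sql
-- interpolate `current` at 1-minute slices over a one-hour range,
-- filling missing slices with the previous value, one timeline per subtable
SELECT _irowts, _isfilled, INTERP(current) FROM meters
PARTITION BY tbname
RANGE('2025-03-19 10:00:00', '2025-03-19 11:00:00') EVERY(1m) FILL(PREV);
```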
|
||||
|
|
|
@ -53,9 +53,9 @@ The syntax for the window clause is as follows:
|
|||
```sql
|
||||
window_clause: {
|
||||
SESSION(ts_col, tol_val)
|
||||
| STATE_WINDOW(col)
|
||||
| STATE_WINDOW(col) [TRUE_FOR(true_for_duration)]
|
||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [FILL(fill_mod_and_val)]
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition [TRUE_FOR(true_for_duration)]
|
||||
| COUNT_WINDOW(count_val[, sliding_val])
|
||||
}
|
||||
```
|
||||
|
@ -84,10 +84,10 @@ The FILL statement specifies the filling mode when data is missing in a window i
|
|||
|
||||
1. No filling: NONE (default filling mode).
|
||||
2. VALUE filling: Fixed-value filling, where the fill value must be specified, for example: FILL(VALUE, 1.23). Note that the final fill value is determined by the type of the corresponding column; with FILL(VALUE, 1.23), if the corresponding column is of INT type, the fill value is 1. If multiple columns in the query list need FILL, each FILL column must specify a VALUE, such as `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that only ordinary columns in the SELECT expression need a FILL VALUE: expressions such as `_wstart`, `_wstart+1a`, `now`, `1+1`, and the partition key (like tbname) used with PARTITION BY do not need one, whereas expressions like `timediff(last(ts), _wstart)` do.
|
||||
3. PREV filling: Fill data using the previous non-NULL value. For example: FILL(PREV).
|
||||
3. PREV filling: Fill data using the previous value. For example: FILL(PREV).
|
||||
4. NULL filling: Fill data with NULL. For example: FILL(NULL).
|
||||
5. LINEAR filling: Perform linear interpolation filling based on the nearest non-NULL values before and after. For example: FILL(LINEAR).
|
||||
6. NEXT filling: Fill data using the next non-NULL value. For example: FILL(NEXT).
|
||||
6. NEXT filling: Fill data using the next value. For example: FILL(NEXT).
|
||||
|
||||
Among these filling modes, except for the NONE mode which does not fill by default, other modes will be ignored if there is no data in the entire query time range, resulting in no fill data and an empty query result. This behavior is reasonable under some modes (PREV, NEXT, LINEAR) because no data means no fill value can be generated. For other modes (NULL, VALUE), theoretically, fill values can be generated, and whether to output fill values depends on the application's needs. To meet the needs of applications that require forced filling of data or NULL, without breaking the compatibility of existing filling modes, two new filling modes have been added starting from version 3.0.3.0:
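For example, a window query sketch using VALUE filling as described in item 2 above (table and column names hypothetical):

```sql
-- each ordinary column in the select list gets its own fill value
SELECT _wstart, MIN(c1), MAX(c1) FROM meters
WHERE ts >= '2025-03-19 00:00:00' AND ts < '2025-03-20 00:00:00'
PARTITION BY tbname INTERVAL(1h) FILL(VALUE, 0, 0);
```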
|
||||
|
||||
|
@ -177,6 +177,12 @@ TDengine also supports using CASE expressions in state quantities, which can exp
|
|||
SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE 0 END status FROM meters PARTITION BY tbname STATE_WINDOW(CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE 0 END);
|
||||
```
|
||||
|
||||
The state window supports using the TRUE_FOR parameter to set its minimum duration. If the window's duration is less than the specified value, it will be discarded automatically and no result will be returned. For example, setting the minimum duration to 3 seconds:
|
||||
|
||||
```sql
|
||||
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status) TRUE_FOR (3s);
|
||||
```
|
||||
|
||||
### Session Window
|
||||
|
||||
The session window is determined based on the timestamp primary key values of the records. As shown in the diagram below, if the continuous interval of the timestamps is set to be less than or equal to 12 seconds, the following 6 records form 2 session windows, which are: [2019-04-28 14:22:10, 2019-04-28 14:22:30] and [2019-04-28 14:23:10, 2019-04-28 14:23:30]. This is because the interval between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, exceeding the continuous interval (12 seconds).
|
||||
|
@ -212,6 +218,12 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
|
|||
<Image img={imgStep04} alt=""/>
|
||||
</figure>
|
||||
|
||||
The event window supports using the TRUE_FOR parameter to set its minimum duration. If the window's duration is less than the specified value, it will be discarded automatically and no result will be returned. For example, setting the minimum duration to 3 seconds:
|
||||
|
||||
```sql
|
||||
select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c2 < 10 true_for (3s);
|
||||
```
|
||||
|
||||
### Count Window
|
||||
|
||||
Count windows divide data into windows based on a fixed number of data rows. By default, data is sorted by timestamp, then divided into multiple windows based on the value of count_val, and aggregate calculations are performed. count_val represents the maximum number of data rows in each count window; if the total number of data rows is not divisible by count_val, the last window will have fewer rows than count_val. sliding_val is a constant that represents the number of rows the window slides, similar to the SLIDING in interval.
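For example, a sketch assuming a hypothetical table `meters` with a `voltage` column:

```sql
-- windows of at most 10 rows, sliding forward 5 rows at a time
SELECT _wstart, _wend, COUNT(*), AVG(voltage) FROM meters COUNT_WINDOW(10, 5);
```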
|
||||
|
|
|
@ -58,11 +58,11 @@ Note: Subscriptions to supertables and databases are advanced subscription modes
|
|||
|
||||
## Delete topic
|
||||
|
||||
If you no longer need to subscribe to data, you can delete the topic, but note: only TOPICS that are not currently being subscribed to can be deleted.
|
||||
If you no longer need to subscribe to the data, you can delete the topic. If the topic is currently subscribed to by a consumer, it can be forcibly deleted using the FORCE syntax; after the forced deletion, the subscribed consumers will receive errors when consuming data (the FORCE syntax is supported from version 3.3.6.0).
|
||||
|
||||
```sql
|
||||
/* Delete topic */
|
||||
DROP TOPIC [IF EXISTS] topic_name;
|
||||
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
|
||||
```
|
||||
|
||||
At this point, if there are consumers on this subscription topic, they will receive an error.
|
||||
|
@ -81,8 +81,10 @@ Consumer groups can only be created through the TDengine client driver or APIs p
|
|||
|
||||
## Delete consumer group
|
||||
|
||||
When a consumer is created, it is assigned to a consumer group. Consumers cannot be deleted explicitly, but the consumer group can be deleted. If consumers in the group are currently consuming, the FORCE syntax can be used to force deletion; after the forced deletion, the subscribed consumers will receive errors when consuming data (the FORCE syntax is supported from version 3.3.6.0).
|
||||
|
||||
```sql
|
||||
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
|
||||
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
|
||||
```
|
||||
|
||||
Deletes the consumer group `cgroup_name` on the topic `topic_name`.
|
||||
|
|
|
@ -9,13 +9,13 @@ import imgStream from './assets/stream-processing-01.png';
|
|||
## Creating Stream Computing
|
||||
|
||||
```sql
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery [notification_definition]
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -85,6 +85,8 @@ CREATE STREAM streams1 IGNORE EXPIRED 1 WATERMARK 100s INTO streamt1 AS
|
|||
SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname COUNT_WINDOW(10);
|
||||
```
|
||||
|
||||
The notification_definition clause specifies the addresses to which notifications should be sent when designated events occur during window computations, such as window opening or closing. For more details, see [Stream Computing Event Notifications](#stream-computing-event-notifications).
|
||||
|
||||
## Stream Computation Partitioning
|
||||
|
||||
You can use `PARTITION BY TBNAME`, tags, regular columns, or expressions to partition a stream for multi-partition computation. Each partition's timeline and window are independent, aggregating separately, and writing into different subtables of the target table.
|
||||
|
@ -125,6 +127,13 @@ create stream if not exists s1 fill_history 1 into st1 as select count(*) from
|
|||
|
||||
If the stream task is completely outdated and you no longer want it to monitor or process data, you can manually delete it. The computed data will still be retained.
|
||||
|
||||
Tips:
|
||||
- When fill_history is enabled, creating a stream requires finding the boundary point of the historical data. If there is a lot of historical data, creating the stream may take a long time. In this case, you can use fill_history 1 async (supported since version 3.3.6.0) so that stream creation is processed in the background; the create-stream statement returns immediately without blocking subsequent operations (see the sketch after these tips). async only takes effect with fill_history 1; creating a stream with fill_history 0 is very fast and does not require asynchronous processing.
|
||||
|
||||
- SHOW STREAMS can be used to view the progress of background stream creation (a ready status indicates success; init indicates the stream is still being created; failed indicates creation failed, and the message column shows the reason. A failed stream can be deleted and recreated).
|
||||
|
||||
- In addition, do not create multiple streams asynchronously at the same time, as transaction conflicts may cause subsequent stream creations to fail.
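A sketch of asynchronous stream creation following these tips (the stream, source, and target names are hypothetical):

```sql
-- returns immediately; historical-data boundaries are located in the background
CREATE STREAM IF NOT EXISTS s1 FILL_HISTORY 1 ASYNC INTO st1 AS
SELECT COUNT(*) FROM meters INTERVAL(10s);

-- check the background creation progress (ready / init / failed)
SHOW STREAMS;
```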
|
||||
|
||||
## Deleting Stream Computing
|
||||
|
||||
```sql
|
||||
|
@ -155,6 +164,7 @@ For non-window computations, the trigger of stream computing is real-time; for w
|
|||
2. WINDOW_CLOSE: Triggered when the window closes (window closure is determined by event time, can be used in conjunction with watermark)
|
||||
3. MAX_DELAY time: Triggers computation when the window closes. If the window has not closed but the time since it last triggered exceeds the time specified by max delay, computation is triggered as well.
|
||||
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only compute and push the results of the currently closed window. The window is only computed once at the moment of closure and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (does not support sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE.
|
||||
5. CONTINUOUS_WINDOW_CLOSE: Results are output when the window is closed. Modifying or deleting data does not immediately trigger a recalculation. Instead, periodic recalculations are performed every rec_time_val duration. If rec_time_val is not specified, the recalculation period is 60 minutes. If the recalculation time exceeds rec_time_val, the next recalculation will be automatically initiated after the current one is completed. Currently, this mode only supports INTERVAL windows. If the FILL clause is used, relevant information of the adapter needs to be configured, including adapterFqdn, adapterPort, and adapterToken. The adapterToken is a string obtained by Base64-encoding `{username}:{password}`. For example, after encoding `root:taosdata`, the result is `cm9vdDp0YW9zZGF0YQ==`.
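For example, a sketch of the CONTINUOUS_WINDOW_CLOSE trigger with a custom recalculation period (the stream and table names are hypothetical):

```sql
-- output on window close; recalculate modified data every 10 minutes
CREATE STREAM s2 TRIGGER CONTINUOUS_WINDOW_CLOSE RECALCULATE 10m INTO st2 AS
SELECT _wstart, AVG(current) FROM meters INTERVAL(1m);
```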
|
||||
|
||||
Since the closure of the window is determined by event time, if the event stream is interrupted or continuously delayed, the event time cannot be updated, which may result in not obtaining the latest computation results.
|
||||
|
||||
|
@ -305,3 +315,241 @@ CREATE SNODE ON DNODE [id]
|
|||
|
||||
The id is the serial number of the dnode in the cluster. Please be mindful of the selected dnode, as the intermediate state of stream computing will automatically be backed up on it.
|
||||
Starting from version 3.3.4.0, in a multi-replica environment, creating a stream will perform an **existence check** of snode, requiring the snode to be created first. If the snode does not exist, the stream cannot be created.
|
||||
|
||||
## Stream Computing Event Notifications
|
||||
|
||||
### User Guide
|
||||
|
||||
Stream computing supports sending event notifications to external systems when windows open or close. Users can specify the events to be notified and the target addresses for receiving notification messages using the notification_definition clause.
|
||||
|
||||
```sql
|
||||
notification_definition:
|
||||
NOTIFY (url [, url] ...) ON (event_type [, event_type] ...) [notification_options]
|
||||
|
||||
event_type:
|
||||
'WINDOW_OPEN'
|
||||
| 'WINDOW_CLOSE'
|
||||
|
||||
notification_options: {
|
||||
NOTIFY_HISTORY [0|1]
|
||||
ON_FAILURE [DROP|PAUSE]
|
||||
}
|
||||
```
|
||||
|
||||
The rules for the syntax above are as follows:
|
||||
1. `url`: Specifies the target address for the notification. It must include the protocol, IP or domain name, port, and may include a path and parameters. Currently, only the websocket protocol is supported. For example: 'ws://localhost:8080', 'ws://localhost:8080/notify', 'wss://localhost:8080/notify?key=foo'.
|
||||
2. `event_type`: Defines the events that trigger notifications. Supported event types include:
|
||||
1. 'WINDOW_OPEN': Window open event; triggered when any type of window opens.
|
||||
2. 'WINDOW_CLOSE': Window close event; triggered when any type of window closes.
|
||||
3. `NOTIFY_HISTORY`: Controls whether to trigger notifications during the computation of historical data. The default value is 0, which means no notifications are sent.
|
||||
4. `ON_FAILURE`: Determines whether to allow dropping some events if sending notifications fails (e.g., in poor network conditions). The default value is `PAUSE`:
|
||||
1. PAUSE means that the stream computing task is paused if sending a notification fails. taosd will retry until the notification is successfully delivered and the task resumes.
|
||||
2. DROP means that if sending a notification fails, the event information is discarded, and the stream computing task continues running unaffected.
|
||||
|
||||
For example, the following creates a stream that computes the per-minute average current from electric meters and sends notifications to two target addresses when the window opens and closes. It does not send notifications for historical data and does not allow dropping notifications on failure:
|
||||
|
||||
```sql
|
||||
CREATE STREAM avg_current_stream FILL_HISTORY 1
|
||||
AS SELECT _wstart, _wend, AVG(current) FROM meters
|
||||
INTERVAL (1m)
|
||||
NOTIFY ('ws://localhost:8080/notify', 'wss://192.168.1.1:8080/notify?key=foo')
|
||||
ON ('WINDOW_OPEN', 'WINDOW_CLOSE')
|
||||
NOTIFY_HISTORY 0
|
||||
ON_FAILURE PAUSE;
|
||||
```
|
||||
|
||||
When the specified events are triggered, taosd will send a POST request to the given URL(s) with a JSON message body. A single request may contain events from several streams, and the event types may differ.
|
||||
|
||||
The details of the event information depend on the type of window:
|
||||
|
||||
1. Time Window: At the opening, the start time is sent; at the closing, the start time, end time, and computation result are sent.
|
||||
2. State Window: At the opening, the start time, previous window's state, and current window's state are sent; at closing, the start time, end time, computation result, current window state, and next window state are sent.
|
||||
3. Session Window: At the opening, the start time is sent; at the closing, the start time, end time, and computation result are sent.
|
||||
4. Event Window: At the opening, the start time along with the data values and corresponding condition index that triggered the window opening are sent; at the closing, the start time, end time, computation result, and the triggering data value and condition index for window closure are sent.
|
||||
5. Count Window: At the opening, the start time is sent; at the closing, the start time, end time, and computation result are sent.
|
||||
|
||||
An example structure for the notification message is shown below:
|
||||
|
||||
```json
|
||||
{
|
||||
"messageId": "unique-message-id-12345",
|
||||
"timestamp": 1733284887203,
|
||||
"streams": [
|
||||
{
|
||||
"streamName": "avg_current_stream",
|
||||
"events": [
|
||||
{
|
||||
"tableName": "t_a667a16127d3b5a18988e32f3e76cd30",
|
||||
"eventType": "WINDOW_OPEN",
|
||||
"eventTime": 1733284887097,
|
||||
"windowId": "window-id-67890",
|
||||
"windowType": "Time",
|
||||
"windowStart": 1733284800000
|
||||
},
|
||||
{
|
||||
"tableName": "t_a667a16127d3b5a18988e32f3e76cd30",
|
||||
"eventType": "WINDOW_CLOSE",
|
||||
"eventTime": 1733284887197,
|
||||
"windowId": "window-id-67890",
|
||||
"windowType": "Time",
|
||||
"windowStart": 1733284800000,
|
||||
"windowEnd": 1733284860000,
|
||||
"result": {
|
||||
"_wstart": 1733284800000,
|
||||
"avg(current)": 1.3
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"streamName": "max_voltage_stream",
|
||||
"events": [
|
||||
{
|
||||
"tableName": "t_96f62b752f36e9b16dc969fe45363748",
|
||||
"eventType": "WINDOW_OPEN",
|
||||
"eventTime": 1733284887231,
|
||||
"windowId": "window-id-13579",
|
||||
"windowType": "Event",
|
||||
"windowStart": 1733284800000,
|
||||
"triggerCondition": {
|
||||
"conditionIndex": 0,
|
||||
"fieldValue": {
|
||||
"c1": 10,
|
||||
"c2": 15
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"tableName": "t_96f62b752f36e9b16dc969fe45363748",
|
||||
"eventType": "WINDOW_CLOSE",
|
||||
"eventTime": 1733284887231,
|
||||
"windowId": "window-id-13579",
|
||||
"windowType": "Event",
|
||||
"windowStart": 1733284800000,
|
||||
"windowEnd": 1733284810000,
|
||||
"triggerCondition": {
|
||||
"conditionIndex": 1,
|
||||
"fieldValue": {
|
||||
"c1": 20
|
||||
"c2": 3
|
||||
}
|
||||
},
|
||||
"result": {
|
||||
"_wstart": 1733284800000,
|
||||
"max(voltage)": 220
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
The following sections explain the fields in the notification message.
|
||||
|
||||
### Root-Level Field Descriptions
|
||||
|
||||
1. "messageId": A string that uniquely identifies the notification message. It ensures that the entire message can be tracked and de-duplicated.
|
||||
2. "timestamp": A long integer timestamp representing the time when the notification message was generated, accurate to the millisecond (i.e., the number of milliseconds since '00:00, Jan 1 1970 UTC').
|
||||
3. "streams": An array containing the event information for multiple stream tasks. (See the following sections for details.)
|
||||
|
||||
### "stream" Object Field Descriptions
|
||||
|
||||
1. "streamName": A string representing the name of the stream task, used to identify which stream the events belong to.
|
||||
2. "events": An array containing the list of event objects for the stream task. Each event object includes detailed information. (See the next sections for details.)
|
||||
|
||||
### "event" Object Field Descriptions
|
||||
|
||||
#### Common Fields
|
||||
|
||||
These fields are common to all event objects.
|
||||
1. "tableName": A string indicating the name of the target subtable.
|
||||
2. "eventType": A string representing the event type ("WINDOW_OPEN", "WINDOW_CLOSE", or "WINDOW_INVALIDATION").
|
||||
3. "eventTime": A long integer timestamp that indicates when the event was generated, accurate to the millisecond (i.e., the number of milliseconds since '00:00, Jan 1 1970 UTC').
|
||||
4. "windowId": A string representing the unique identifier for the window. This ID ensures that the open and close events for the same window can be correlated. In the case that taosd restarts due to a fault, some events may be sent repeatedly, but the windowId remains constant for the same window.
|
||||
5. "windowType": A string that indicates the window type ("Time", "State", "Session", "Event", or "Count").
|
||||
|
||||
#### Fields for Time Windows
|
||||
|
||||
These fields are present only when "windowType" is "Time".
|
||||
1. When "eventType" is "WINDOW_OPEN", the following field is included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window, matching the time precision of the result table.
|
||||
2. When "eventType" is "WINDOW_CLOSE", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "windowEnd": A long integer timestamp representing the end time of the window.
|
||||
1. "result": An object containing key-value pairs of the computed result columns and their corresponding values.
|
||||
|
||||
#### Fields for State Windows
|
||||
|
||||
These fields are present only when "windowType" is "State".
|
||||
1. When "eventType" is "WINDOW_OPEN", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "prevState": A value of the same type as the state column, representing the state of the previous window. If there is no previous window (i.e., this is the first window), it will be NULL.
|
||||
1. "curState": A value of the same type as the state column, representing the current window's state.
|
||||
2. When "eventType" is "WINDOW_CLOSE", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "windowEnd": A long integer timestamp representing the end time of the window.
|
||||
1. "curState": The current window's state.
|
||||
1. "nextState": The state for the next window.
|
||||
1. "result": An object containing key-value pairs of the computed result columns and their corresponding values.
|
||||
|
||||
#### Fields for Session Windows
|
||||
|
||||
These fields are present only when "windowType" is "Session".
|
||||
1. When "eventType" is "WINDOW_OPEN", the following field is included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
2. When "eventType" is "WINDOW_CLOSE", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "windowEnd": A long integer timestamp representing the end time of the window.
|
||||
1. "result": An object containing key-value pairs of the computed result columns and their corresponding values.
|
||||
|
||||
#### Fields for Event Windows
|
||||
|
||||
These fields are present only when "windowType" is "Event".
|
||||
1. When "eventType" is "WINDOW_OPEN", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "triggerCondition": An object that provides information about the condition that triggered the window to open. It includes:
|
||||
1. "conditionIndex": An integer representing the index of the condition that triggered the window, starting from 0.
|
||||
1. "fieldValue": An object containing key-value pairs of the column names related to the condition and their respective values.
|
||||
2. When "eventType" is "WINDOW_CLOSE", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "windowEnd": A long integer timestamp representing the end time of the window.
|
||||
1. "triggerCondition": An object that provides information about the condition that triggered the window to close. It includes:
|
||||
1. "conditionIndex": An integer representing the index of the condition that triggered the closure, starting from 0.
|
||||
1. "fieldValue": An object containing key-value pairs of the related column names and their respective values.
|
||||
1. "result": An object containing key-value pairs of the computed result columns and their corresponding values.
|
||||
|
||||
#### Fields for Count Windows
|
||||
|
||||
These fields are present only when "windowType" is "Count".
|
||||
1. When "eventType" is "WINDOW_OPEN", the following field is included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
2. When "eventType" is "WINDOW_CLOSE", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "windowEnd": A long integer timestamp representing the end time of the window.
|
||||
1. "result": An object containing key-value pairs of the computed result columns and their corresponding values.
|
||||
|
||||
#### Fields for Window Invalidation
|
||||
|
||||
Due to scenarios such as data disorder, updates, or deletions during stream computing, windows that have already been generated might be removed or their results need to be recalculated. In such cases, a notification with the eventType "WINDOW_INVALIDATION" is sent to inform which windows have been invalidated.
|
||||
|
||||
For events with "eventType" as "WINDOW_INVALIDATION", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "windowEnd": A long integer timestamp representing the end time of the window.
|
||||
|
||||
## Support for Virtual Tables in Stream Computing
|
||||
|
||||
Starting with v3.3.6.0, stream computing can use virtual tables—including virtual regular tables, virtual sub-tables, and virtual super tables—as data sources for computation. The syntax is identical to that for non‑virtual tables.
|
||||
|
||||
However, because the behavior of virtual tables differs from that of non‑virtual tables, the following restrictions apply when using stream computing:
|
||||
|
||||
1. The schema of virtual regular tables/virtual sub-tables involved in stream computing cannot be modified.
|
||||
1. During stream computing, if the data source corresponding to a column in a virtual table is changed, the stream computation will not pick up the change; it will still read from the old data source.
|
||||
1. During stream computing, if the original table corresponding to a column in a virtual table is deleted and later a new table with the same name and a column with the same name is created, the stream computation will not read data from the new table.
|
||||
1. The watermark for stream computing must be 0; otherwise, an error will occur during creation.
|
||||
1. If the data source for stream computing is a virtual super table, sub-tables that are added after the stream computing task starts will not participate in the computation.
|
||||
1. The timestamps of different underlying tables in a virtual table may not be completely consistent; merging the data might produce null values, and interpolation is currently not supported.
|
||||
1. Out-of-order data, updates, or deletions are not handled. In other words, when creating a stream, you cannot specify `ignore update 0` or `ignore expired 0`; otherwise, an error will be reported.
|
||||
1. Historical data computation is not supported. That is, when creating a stream, you cannot specify `fill_history 1`; otherwise, an error will be reported.
|
||||
1. The trigger modes MAX_DELAY, CONTINUOUS_WINDOW_CLOSE and FORCE_WINDOW_CLOSE are not supported.
|
||||
1. The COUNT_WINDOW type is not supported.
|
||||
|
|
|
@ -43,7 +43,8 @@ TDengine supports `UNION ALL` and `UNION` operators. UNION ALL combines the resu
|
|||
| 9 | LIKE | BINARY, NCHAR, and VARCHAR | Matches the specified pattern string with wildcard |
|
||||
| 10 | NOT LIKE | BINARY, NCHAR, and VARCHAR | Does not match the specified pattern string with wildcard |
|
||||
| 11 | MATCH, NMATCH | BINARY, NCHAR, and VARCHAR | Regular expression match |
|
||||
| 12 | CONTAINS | JSON | Whether a key exists in JSON |
|
||||
| 12 | REGEXP, NOT REGEXP | BINARY, NCHAR, and VARCHAR | Regular expression match |
|
||||
| 13 | CONTAINS | JSON | Whether a key exists in JSON |
|
||||
|
||||
LIKE conditions use wildcard strings for matching checks, with the following rules:
|
||||
|
||||
|
@ -51,7 +52,7 @@ LIKE conditions use wildcard strings for matching checks, with the following rul
|
|||
- If you want to match an underscore character that is originally in the string, you can write it as \_ in the wildcard string, i.e., add a backslash to escape it.
|
||||
- The wildcard string cannot exceed 100 bytes in length. Avoid overly long wildcard strings, as they may severely affect the performance of the LIKE operation.
|
||||
|
||||
MATCH and NMATCH conditions use regular expressions for matching, with the following rules:
|
||||
MATCH/REGEXP and NMATCH/NOT REGEXP conditions use regular expressions for matching, with the following rules:
|
||||
|
||||
- Supports regular expressions that comply with the POSIX standard, see Regular Expressions for specific standards.
|
||||
- MATCH/REGEXP returns TRUE when the value matches the regular expression; NMATCH/NOT REGEXP returns TRUE when the value does not match.
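For example, assuming a hypothetical supertable `meters` with a `location` tag:

```sql
-- rows whose location begins with 'California'
SELECT * FROM meters WHERE location MATCH '^California';

-- equivalent form using the REGEXP keyword (3.3.6.0+)
SELECT * FROM meters WHERE location REGEXP '^California';
```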
|
||||
|
|
|
@ -37,6 +37,6 @@ Removed `` ‘“`\ `` (single and double quotes, apostrophe, backslash, space)
|
|||
- Number of databases, supertables, and tables are not limited by the system, only by system resources
|
||||
- Number of replicas for a database can only be set to 1 or 3
|
||||
- Maximum length of username is 23 bytes
|
||||
- Maximum length of user password is 31 bytes
|
||||
- Maximum length of user password is 255 bytes
|
||||
- Total number of data rows depends on available resources
|
||||
- Maximum number of virtual nodes for a single database is 1024
|
||||
|
|
|
@ -23,11 +23,11 @@ The list of keywords is as follows:
|
|||
| ALIVE | |
|
||||
| ALL | |
|
||||
| ALTER | |
|
||||
| ANALYZE | Version 3.3.4.3 and later |
|
||||
| ANALYZE | 3.3.4.3+ |
|
||||
| AND | |
|
||||
| ANODE | Version 3.3.4.3 and later |
|
||||
| ANODES | Version 3.3.4.3 and later |
|
||||
| ANOMALY_WINDOW | Version 3.3.4.3 and later |
|
||||
| ANODE | 3.3.4.3+ |
|
||||
| ANODES | 3.3.4.3+ |
|
||||
| ANOMALY_WINDOW | 3.3.4.3+ |
|
||||
| ANTI | |
|
||||
| APPS | |
|
||||
| ARBGROUPS | |
|
||||
|
@ -35,8 +35,11 @@ The list of keywords is as follows:
|
|||
| AS | |
|
||||
| ASC | |
|
||||
| ASOF | |
|
||||
| ASYNC | 3.3.6.0+ |
|
||||
| AT_ONCE | |
|
||||
| ATTACH | |
|
||||
| AUTO | 3.3.5.0+ |
|
||||
| ASSIGN | 3.3.6.0+ |
|
||||
|
||||
### B
|
||||
|
||||
|
@ -78,12 +81,16 @@ The list of keywords is as follows:
|
|||
| CLIENT_VERSION | |
|
||||
| CLUSTER | |
|
||||
| COLON | |
|
||||
| COLS | 3.3.6.0+ |
|
||||
| COLUMN | |
|
||||
| COMMA | |
|
||||
| COMMENT | |
|
||||
| COMP | |
|
||||
| COMPACT | |
|
||||
| COMPACTS | |
|
||||
| COMPACT_INTERVAL | 3.3.5.0+ |
|
||||
| COMPACT_TIME_OFFSET | 3.3.5.0+ |
|
||||
| COMPACT_TIME_RANGE | 3.3.5.0+ |
|
||||
| CONCAT | |
|
||||
| CONFLICT | |
|
||||
| CONNECTION | |
|
||||
|
@ -92,6 +99,7 @@ The list of keywords is as follows:
|
|||
| CONSUMER | |
|
||||
| CONSUMERS | |
|
||||
| CONTAINS | |
|
||||
| CONTINUOUS_WINDOW_CLOSE | 3.3.6.0+ |
|
||||
| COPY | |
|
||||
| COUNT | |
|
||||
| COUNT_WINDOW | |
|
||||
|
@ -106,7 +114,7 @@ The list of keywords is as follows:
|
|||
| DATABASE | |
|
||||
| DATABASES | |
|
||||
| DBS | |
|
||||
| DECIMAL | |
|
||||
| DECIMAL | 3.3.6.0+ |
|
||||
| DEFERRED | |
|
||||
| DELETE | |
|
||||
| DELETE_MARK | |
|
||||
|
@ -114,6 +122,7 @@ The list of keywords is as follows:
|
|||
| DESC | |
|
||||
| DESCRIBE | |
|
||||
| DETACH | |
|
||||
| DISK_INFO | 3.3.5.0+ |
|
||||
| DISTINCT | |
|
||||
| DISTRIBUTED | |
|
||||
| DIVIDE | |
|
||||
|
@ -148,19 +157,19 @@ The list of keywords is as follows:
|
|||
|Keyword|Description|
|
||||
|----------------------|-|
|
||||
| FAIL | |
|
||||
| FHIGH | Version 3.3.4.3 and later |
|
||||
| FHIGH | 3.3.4.3+ |
|
||||
| FILE | |
|
||||
| FILL | |
|
||||
| FILL_HISTORY | |
|
||||
| FIRST | |
|
||||
| FLOAT | |
|
||||
| FLOW | Version 3.3.4.3 and later |
|
||||
| FLOW | 3.3.4.3+ |
|
||||
| FLUSH | |
|
||||
| FOR | |
|
||||
| FORCE | |
|
||||
| FORCE_WINDOW_CLOSE | Version 3.3.4.3 and later |
|
||||
| FORCE_WINDOW_CLOSE | 3.3.4.3+ |
|
||||
| FROM | |
|
||||
| FROWTS | Version 3.3.4.3 and later |
|
||||
| FROWTS | 3.3.4.3+ |
|
||||
| FULL | |
|
||||
| FUNCTION | |
|
||||
| FUNCTIONS | |
|
||||
|
@ -209,6 +218,7 @@ The list of keywords is as follows:
|
|||
| INTO | |
|
||||
| IPTOKEN | |
|
||||
| IROWTS | |
|
||||
| IROWTS_ORIGIN | 3.3.5.0+ |
|
||||
| IS | |
|
||||
| IS_IMPORT | |
|
||||
| ISFILLED | |
|
||||
|
@ -242,6 +252,7 @@ The list of keywords is as follows:
|
|||
| LEADER | |
|
||||
| LEADING | |
|
||||
| LEFT | |
|
||||
| LEVEL | 3.3.0.0 - 3.3.2.11 |
|
||||
| LICENCES | |
|
||||
| LIKE | |
|
||||
| LIMIT | |
|
||||
|
@ -263,6 +274,7 @@ The list of keywords is as follows:
|
|||
| MEDIUMBLOB | |
|
||||
| MERGE | |
|
||||
| META | |
|
||||
| META_ONLY | 3.3.6.0+ |
|
||||
| MINROWS | |
|
||||
| MINUS | |
|
||||
| MNODE | |
|
||||
|
@ -281,6 +293,8 @@ The list of keywords is as follows:
|
|||
| NONE | |
|
||||
| NORMAL | |
|
||||
| NOT | |
|
||||
| NOTIFY | 3.3.6.0+ |
|
||||
| NOTIFY_HISTORY | 3.3.6.0+ |
|
||||
| NOTNULL | |
|
||||
| NOW | |
|
||||
| NULL | |
|
||||
|
@ -295,6 +309,7 @@ The list of keywords is as follows:
|
|||
| OFFSET | |
|
||||
| ON | |
|
||||
| ONLY | |
|
||||
| ON_FAILURE | 3.3.6.0+ |
|
||||
| OR | |
|
||||
| ORDER | |
|
||||
| OUTER | |
|
||||
|
@ -345,6 +360,7 @@ The list of keywords is as follows:
|
|||
| RATIO | |
|
||||
| READ | |
|
||||
| RECURSIVE | |
|
||||
| REGEXP | 3.3.6.0+ |
|
||||
| REDISTRIBUTE | |
|
||||
| REM | |
|
||||
| REPLACE | |
|
||||
|
@ -418,7 +434,7 @@ The list of keywords is as follows:
|
|||
| TABLE_PREFIX | |
|
||||
| TABLE_SUFFIX | |
|
||||
| TABLES | |
|
||||
| tag | |
|
||||
| TAG | |
|
||||
| TAGS | |
|
||||
| TBNAME | |
|
||||
| THEN | |
|
||||
|
@ -435,6 +451,7 @@ The list of keywords is as follows:
|
|||
| TRANSACTIONS | |
|
||||
| TRIGGER | |
|
||||
| TRIM | |
|
||||
| TRUE_FOR | 3.3.6.0+ |
|
||||
| TSDB_PAGESIZE | |
|
||||
| TSERIES | |
|
||||
| TSMA | |
|
||||
|
|
|
@ -127,10 +127,11 @@ Displays created indexes.
|
|||
## SHOW LOCAL VARIABLES
|
||||
|
||||
```sql
|
||||
SHOW LOCAL VARIABLES;
|
||||
SHOW LOCAL VARIABLES [like pattern];
|
||||
```
|
||||
|
||||
Displays the runtime values of configuration parameters for the current client.
|
||||
You can use a LIKE pattern to filter the results by name.
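For example (the pattern here is illustrative):

```sql
-- show only parameters whose names start with 'debug'
SHOW LOCAL VARIABLES LIKE 'debug%';
```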
|
||||
|
||||
## SHOW MNODES
|
||||
|
||||
|
@ -304,9 +305,10 @@ Displays information about all topics in the current database.
|
|||
|
||||
```sql
|
||||
SHOW TRANSACTIONS;
|
||||
SHOW TRANSACTION [transaction_id];
|
||||
```
|
||||
|
||||
Displays information about transactions currently being executed in the system (these transactions are only for metadata level, not for regular tables).
|
||||
Displays information about one or all transactions currently being executed in the system (these transactions apply only to metadata, not to regular tables).
|
||||
|
||||
## SHOW USERS
|
||||
|
||||
|
@ -319,11 +321,11 @@ Displays information about all users in the current system, including user-defin
|
|||
## SHOW CLUSTER VARIABLES (before version 3.0.1.6 it was SHOW VARIABLES)
|
||||
|
||||
```sql
|
||||
SHOW CLUSTER VARIABLES;
|
||||
SHOW DNODE dnode_id VARIABLES;
|
||||
SHOW CLUSTER VARIABLES [like pattern];
|
||||
SHOW DNODE dnode_id VARIABLES [like pattern];
|
||||
```
|
||||
|
||||
Displays the runtime values of configuration parameters that need to be the same across nodes in the current system, or you can specify a DNODE to view its configuration parameters.
|
||||
Displays the runtime values of configuration parameters that must be the same across nodes in the current system; you can also specify a DNODE to view its configuration parameters, and use a LIKE pattern to filter the results by name.
|
||||
|
||||
## SHOW VGROUPS
|
||||
|
||||
|
|
|
@ -29,6 +29,17 @@ SELECT a.* FROM meters a LEFT ASOF JOIN meters b ON timetruncate(a.ts, 1s) < tim
|
|||
### Main Join Condition
|
||||
|
||||
As a time-series database, all join queries in TDengine revolve around the primary key timestamp column. Therefore, all join queries (except ASOF/Window Join) must include an equality condition on the primary key column, and the first primary key column equality condition that appears in the join conditions will be considered the main join condition. ASOF Join's main join condition can include non-equality conditions, while Window Join's main join condition is specified through `WINDOW_OFFSET`.
|
||||
Starting from version 3.3.6.0, TDengine supports constant timestamps in subqueries (including constant functions that return timestamps, such as today() and now(), as well as constant timestamps and their addition and subtraction operations) as equivalent primary key columns that can appear in the main join condition. For example:
|
||||
|
||||
```sql
|
||||
SELECT * from d1001 a JOIN (SELECT today() as ts1, * from d1002 WHERE ts = '2025-03-19 10:00:00.000') b ON timetruncate(a.ts, 1d) = b.ts1;
|
||||
```
|
||||
|
||||
The example SQL above joins all of today's records in table d1001 with the record at a specific time in table d1002. Note that a constant time string appearing in SQL is not treated as a timestamp by default; for example, "2025-03-19 10:00:00.000" is treated as a string rather than a timestamp. When it must be treated as a constant timestamp, you can mark the constant string as a timestamp by using the type prefix timestamp. For example:
|
||||
|
||||
```sql
|
||||
SELECT * from d1001 a JOIN (SELECT timestamp '2025-03-19 10:00:00.000' as ts1, * from d1002 WHERE ts = '2025-03-19 10:00:00.000') b ON timetruncate(a.ts, 1d) = b.ts1;
|
||||
```
|
||||
|
||||
Apart from Window Join, TDengine supports the `timetruncate` function operation in the main join condition, such as `ON timetruncate(a.ts, 1s) = timetruncate(b.ts, 1s)`, but does not support other functions and scalar operations.
|
||||
|
||||
|
@ -38,7 +49,7 @@ The characteristic ASOF/Window Join of time-series databases supports grouping t
|
|||
|
||||
### Primary Key Timeline
|
||||
|
||||
As a time-series database, TDengine requires each table (subtable) to have a primary key timestamp column, which will serve as the primary key timeline for many time-related operations. The result of a subquery or the result of a Join operation also needs to clearly identify which column will be considered the primary key timeline for subsequent time-related operations. In subqueries, the first appearing ordered primary key column (or its operation) or a pseudocolumn equivalent to the primary key column (`_wstart`/`_wend`) will be considered the primary key timeline of the output table. The selection of the primary key timeline in Join output results follows these rules:
|
||||
As a time-series database, TDengine requires each table (subtable) to have a primary key timestamp column, which will serve as the primary key timeline for many time-related operations. The result of a subquery or the result of a Join operation also needs to clearly identify which column will be considered the primary key timeline for subsequent time-related operations. In subqueries, the first appearing ordered primary key column (or its operation) or a pseudocolumn equivalent to the primary key column (`_wstart`/`_wend`) will be considered the primary key timeline of the output table. In addition, starting with version 3.3.6.0, TDengine also supports constant timestamp columns in subquery results as the primary key timeline for the output table. The selection of the primary key timeline in Join output results follows these rules:
|
||||
|
||||
- In the Left/Right Join series, the primary key column of the driving table (subquery) will be used as the primary key timeline for subsequent queries; additionally, within the Window Join window, since both tables are ordered, any table's primary key column can be used as the primary key timeline, with a preference for the primary key column of the same table.
|
||||
- Inner Join can use the primary key column of any table as the primary key timeline, but when there are grouping conditions similar to tag column equality conditions related by `AND` with the main join condition, it will not produce a primary key timeline.
|
||||
|
|
|
@ -36,6 +36,7 @@ In this document, it specifically refers to the internal levels of the second-le
|
|||
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| decimal | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
|
||||
## SQL Syntax
|
||||
|
||||
|
|
|
@ -510,7 +510,6 @@ For the OpenTSDB text protocol, the parsing of timestamps follows its official p
|
|||
- tmq: [Input] Points to a valid ws_tmq_t structure pointer, which represents a TMQ consumer object.
|
||||
- timeout: [Input] Polling timeout in milliseconds, a negative number indicates a default timeout of 1 second.
|
||||
- **Return Value**: Non-`NULL`: Success, returns a pointer to a WS_RES structure, which contains the received message. `NULL`: indicates no data, the error code can be obtained through ws_errno (NULL), please refer to the reference manual for specific error message. WS_RES results are consistent with taos_query results, and information in WS_RES can be obtained through various query interfaces, such as schema, etc.
|
||||
|
||||
- `int32_t ws_tmq_consumer_close(ws_tmq_t *tmq)`
|
||||
- **Interface Description**: Used to close the ws_tmq_t structure. Must be used in conjunction with ws_tmq_consumer_new.
|
||||
- tmq: [Input] Points to a valid ws_tmq_t structure pointer, which represents a TMQ consumer object.
|
||||
|
@ -831,6 +830,12 @@ This section introduces APIs that are all synchronous interfaces. After being ca
|
|||
- res: [Input] Result set.
|
||||
- **Return Value**: Non-`NULL`: successful, returns a pointer to a TAOS_FIELD structure, each element representing the metadata of a column. `NULL`: failure.
|
||||
|
||||
- `TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res)`
|
||||
- **Interface Description**: Retrieves the attributes of each column in the query result set (column name, data type, column length). Used in conjunction with `taos_num_fields()`, it can be used to parse the data of a tuple (a row) returned by `taos_fetch_row()`. In addition to the basic information provided by TAOS_FIELD, TAOS_FIELD_E also includes `precision` and `scale` information for the data type.
|
||||
- **Parameter Description**:
|
||||
- res: [Input] Result set.
|
||||
- **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_FIELD_E structure, where each element represents the metadata of a column. `NULL`: Failure.
|
||||
|
||||
- `void taos_stop_query(TAOS_RES *res)`
|
||||
- **Interface Description**: Stops the execution of the current query.
|
||||
- **Parameter Description**:
|
||||
|
@ -1122,10 +1127,14 @@ In addition to using SQL or parameter binding APIs to insert data, you can also
|
|||
- conf: [Input] Pointer to a valid tmq_conf_t structure, representing a TMQ configuration object.
|
||||
- key: [Input] Configuration item key name.
|
||||
- value: [Input] Configuration item value.
|
||||
- **Return Value**: Returns a tmq_conf_res_t enum value, indicating the result of the configuration setting.
|
||||
- TMQ_CONF_OK: Successfully set the configuration item.
|
||||
- TMQ_CONF_INVALID_KEY: Invalid key value.
|
||||
- TMQ_CONF_UNKNOWN: Invalid key name.
|
||||
- **Return Value**: Returns a tmq_conf_res_t enum value, indicating the result of the configuration setting. tmq_conf_res_t is defined as follows:
|
||||
```c
|
||||
typedef enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2, // invalid key
|
||||
TMQ_CONF_INVALID = -1, // invalid value
|
||||
TMQ_CONF_OK = 0, // success
|
||||
} tmq_conf_res_t;
|
||||
```
|
||||
|
||||
- `void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param)`
|
||||
- **Interface Description**: Sets the auto-commit callback function in the TMQ configuration object.
|
||||
|
@ -1196,7 +1205,7 @@ In addition to using SQL or parameter binding APIs to insert data, you can also
|
|||
- tmq: [Input] Points to a valid tmq_t structure pointer, representing a TMQ consumer object.
|
||||
- timeout: [Input] Polling timeout in milliseconds, a negative number indicates a default timeout of 1 second.
|
||||
- **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_RES structure containing the received messages. `NULL`: indicates no data, the error code can be obtained through taos_errno (NULL), please refer to the reference manual for specific error message. TAOS_RES results are consistent with taos_query results, and information in TAOS_RES can be obtained through various query interfaces, such as schema, etc.
|
||||
|
||||
|
||||
- `int32_t tmq_consumer_close(tmq_t *tmq)`
|
||||
- **Interface Description**: Used to close a tmq_t structure. Must be used in conjunction with tmq_consumer_new.
|
||||
- tmq: [Input] Points to a valid tmq_t structure pointer, which represents a TMQ consumer object.
|
||||
|
|
|
@ -121,6 +121,7 @@ Please refer to the specific error codes:
|
|||
| 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. |
|
||||
| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. |
|
||||
| 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer, due to the Rebalance mechanism causing the Consumer and VGroup to be unbound. |
|
||||
| 0x2390 | background thread write error in Efficient Writing | If a background thread write error occurs in Efficient Writing mode, stop writing and rebuild the connection. |
|
||||
|
||||
- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
@ -148,6 +149,7 @@ TDengine currently supports timestamp, numeric, character, boolean types, and th
|
|||
| JSON | java.lang.String | only supported in tags |
|
||||
| VARBINARY | byte[] | |
|
||||
| GEOMETRY | byte[] | |
|
||||
| DECIMAL | java.math.BigDecimal | |
|
||||
|
||||
**Note**: Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended. Please use VARBINARY type instead.
|
||||
GEOMETRY type is binary data in little endian byte order, complying with the WKB standard. For more details, please refer to [Data Types](../../sql-manual/data-types/)
|
||||
|
@ -319,7 +321,15 @@ The configuration parameters in properties are as follows:
|
|||
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Disable SSL certificate validation. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.
|
||||
- TSDBDriver.PROPERTY_KEY_APP_NAME: App name, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is java.
|
||||
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is empty.
|
||||
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_ASYNC_WRITE: Efficient Writing mode. Currently, only the `stmt` method is supported. Effective only when using WebSocket connections. Default value is empty, meaning Efficient Writing mode is not enabled.
|
||||
- TSDBDriver.PROPERTY_KEY_BACKEND_WRITE_THREAD_NUM: In Efficient Writing mode, this refers to the number of background write threads. Effective only when using WebSocket connections. Default value is 10.
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_SIZE_BY_ROW: In Efficient Writing mode, this is the batch size for writing data, measured in rows. Effective only when using WebSocket connections. Default value is 1000.
|
||||
- TSDBDriver.PROPERTY_KEY_CACHE_SIZE_BY_ROW: In Efficient Writing mode, this is the cache size, measured in rows. Effective only when using WebSocket connections. Default value is 10000.
|
||||
- TSDBDriver.PROPERTY_KEY_COPY_DATA: In Efficient Writing mode, this determines whether to copy the binary data passed by the application through the `addBatch` method. Effective only when using WebSocket connections. Default value is false.
|
||||
- TSDBDriver.PROPERTY_KEY_STRICT_CHECK: In Efficient Writing mode, this determines whether to validate the length of table names and variable-length data types. Effective only when using WebSocket connections. Default value is false.
|
||||
- TSDBDriver.PROPERTY_KEY_RETRY_TIMES: In Efficient Writing mode, this is the number of retry attempts for failed write operations. Effective only when using WebSocket connections. Default value is 3.
|
||||
|
||||
Additionally, for native JDBC connections, other parameters such as the log level and SQL length can be specified via the URL and Properties.
|
||||
|
||||
**Priority of Configuration Parameters**
|
||||
|
|
|
@ -73,6 +73,8 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
||||
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
||||
| 0x8000013C | Invalid disk id | Invalid disk id | Check whether the mounted disk is invalid, or use the parameter diskIDCheckEnabled to skip the disk check. |
|
||||
| 0x8000013D | Decimal value overflow | Decimal value overflow | Check query expression and decimal values |
|
||||
| 0x8000013E | Division by zero error | Division by zero | Check division expression |
|
||||
|
||||
|
||||
## tsc
|
||||
|
@ -109,6 +111,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x8000030C | Invalid query id | Internal error | Report issue |
|
||||
| 0x8000030E | Invalid connection id | Internal error | Report issue |
|
||||
| 0x80000315 | User is disabled | User is unavailable | Grant permissions |
|
||||
| 0x80000318 | Mnode internal error | Internal error | Report issue |
|
||||
| 0x80000320 | Object already there | Internal error | Report issue |
|
||||
| 0x80000322 | Invalid table type | Internal error | Report issue |
|
||||
| 0x80000323 | Object not there | Internal error | Report issue |
|
||||
|
@ -165,6 +168,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x8000038B | Index not exist | Does not exist | Confirm if the operation is correct |
|
||||
| 0x80000396 | Database in creating status | Database is being created | Retry |
|
||||
| 0x8000039A | Invalid system table name | Internal error | Report issue |
|
||||
| 0x8000039F | No VGroup's leader need to be balanced | A balance leader operation was performed on VGroups | There is no VGroup leader that needs to be balanced |
|
||||
| 0x800003A0 | Mnode already exists | Already exists | Confirm if the operation is correct |
|
||||
| 0x800003A1 | Mnode not there | Does not exist | Confirm if the operation is correct |
|
||||
| 0x800003A2 | Qnode already exists | Already exists | Confirm if the operation is correct |
|
||||
|
@ -370,98 +374,111 @@ This document details the server error codes that may be encountered when using
|
|||
|
||||
## parser
|
||||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Suggested Actions for Users |
|
||||
|------------| ------------------------------------------------------------ |----------------------------------------------------------------------------| ------------------------------------------------------------ |
|
||||
| 0x80002600 | syntax error near | SQL syntax error | Check and correct the SQL statement |
|
||||
| 0x80002601 | Incomplete SQL statement | Incomplete SQL statement | Check and correct the SQL statement |
|
||||
| 0x80002602 | Invalid column name | Illegal or non-existent column name | Check and correct the SQL statement |
|
||||
| 0x80002603 | Table does not exist | Table does not exist | Check and confirm the existence of the table in the SQL statement |
|
||||
| 0x80002604 | Column ambiguously defined | Column (alias) redefined | Check and correct the SQL statement |
|
||||
| 0x80002605 | Invalid value type | Illegal constant value | Check and correct the SQL statement |
|
||||
| 0x80002608 | There mustn't be aggregation | Aggregation function used in illegal clause | Check and correct the SQL statement |
|
||||
| 0x80002609 | ORDER BY item must be the number of a SELECT-list expression | Illegal position specified in Order by | Check and correct the SQL statement |
|
||||
| 0x8000260A | Not a GROUP BY expression | Illegal group by statement | Check and correct the SQL statement |
|
||||
| 0x8000260B | Not SELECTed expression | Illegal expression | Check and correct the SQL statement |
|
||||
| 0x8000260C | Not a single-group group function | Illegal use of column and function | Check and correct the SQL statement |
|
||||
| 0x8000260D | Tags number not matched | Mismatched number of tag columns | Check and correct the SQL statement |
|
||||
| 0x8000260E | Invalid tag name | Invalid or non-existent tag name | Check and correct the SQL statement |
|
||||
| 0x80002610 | Value is too long | Value length exceeds limit | Check and correct the SQL statement or API parameters |
|
||||
| 0x80002611 | Password too short or empty | Password is empty or less than 8 chars | Use a valid password |
|
||||
| 0x80002612 | Port should be an integer that is less than 65535 and greater than 0 | Illegal port number | Check and correct the port number |
|
||||
| 0x80002613 | Endpoint should be in the format of 'fqdn:port' | Incorrect address format | Check and correct the address information |
|
||||
| 0x80002614 | This statement is no longer supported | Feature has been deprecated | Refer to the feature documentation |
|
||||
| 0x80002615 | Interval too small | Interval value exceeds the allowed minimum | Change the INTERVAL value |
|
||||
| 0x80002616 | Database not specified | Database not specified | Specify the database for the current operation |
|
||||
| 0x80002617 | Invalid identifier name | Illegal or invalid length ID | Check the names of related libraries, tables, columns, TAGs, etc. in the statement |
|
||||
| 0x80002618 | Corresponding supertable not in this db | Supertable does not exist | Check if the corresponding supertable exists in the database |
|
||||
| 0x80002619 | Invalid database option | Illegal database option value | Check and correct the database option values |
|
||||
| 0x8000261A | Invalid table option | Illegal table option value | Check and correct the table option values |
|
||||
| 0x80002624 | GROUP BY and WINDOW-clause can't be used together | Group by and window cannot be used together | Check and correct the SQL statement |
|
||||
| 0x80002627 | Aggregate functions do not support nesting | Functions do not support nested use | Check and correct the SQL statement |
|
||||
| 0x80002628 | Only support STATE_WINDOW on integer/bool/varchar column | Unsupported STATE_WINDOW data type | Check and correct the SQL statement |
|
||||
| 0x80002629 | Not support STATE_WINDOW on tag column | STATE_WINDOW not supported on tag column | Check and correct the SQL statement |
|
||||
| 0x8000262A | STATE_WINDOW not support for supertable query | STATE_WINDOW not supported for supertable | Check and correct the SQL statement |
|
||||
| 0x8000262B | SESSION gap should be fixed time window, and greater than 0 | Illegal SESSION window value | Check and correct the SQL statement |
|
||||
| 0x8000262C | Only support SESSION on primary timestamp column | Illegal SESSION window column | Check and correct the SQL statement |
|
||||
| 0x8000262D | Interval offset cannot be negative | Illegal INTERVAL offset value | Check and correct the SQL statement |
|
||||
| 0x8000262E | Cannot use 'year' as offset when interval is 'month' | Illegal INTERVAL offset unit | Check and correct the SQL statement |
|
||||
| 0x8000262F | Interval offset should be shorter than interval | Illegal INTERVAL offset value | Check and correct the SQL statement |
|
||||
| 0x80002630 | Does not support sliding when interval is natural month/year | Illegal sliding unit | Check and correct the SQL statement |
|
||||
| 0x80002631 | sliding value no larger than the interval value | Illegal sliding value | Check and correct the SQL statement |
|
||||
| 0x80002632 | sliding value can not less than 1%% of interval value | Illegal sliding value | Check and correct the SQL statement |
|
||||
| 0x80002633 | Only one tag if there is a json tag | Only single JSON tag column supported | Check and correct the SQL statement |
|
||||
| 0x80002634 | Query block has incorrect number of result columns | Mismatched number of columns | Check and correct the SQL statement |
|
||||
| 0x80002635 | Incorrect TIMESTAMP value | Illegal primary timestamp column value | Check and correct the SQL statement |
|
||||
| 0x80002637 | soffset/offset can not be less than 0 | Illegal soffset/offset value | Check and correct the SQL statement |
|
||||
| 0x80002638 | slimit/soffset only available for PARTITION/GROUP BY query | slimit/soffset only supported for PARTITION BY/GROUP BY statements | Check and correct the SQL statement |
|
||||
| 0x80002639 | Invalid topic query | Unsupported TOPIC query | |
|
||||
| 0x8000263A | Cannot drop supertable in batch | Batch deletion of supertables not supported | Check and correct the SQL statement |
|
||||
| 0x8000263B | Start(end) time of query range required or time range too large | Window count exceeds limit | Check and correct the SQL statement |
|
||||
| 0x8000263C | Duplicated column names | Duplicate column names | Check and correct the SQL statement |
|
||||
| 0x8000263D | Tags length exceeds max length | tag value length exceeds maximum supported range | Check and correct the SQL statement |
|
||||
| 0x8000263E | Row length exceeds max length | Row length exceeds the maximum supported range | Check and correct the SQL statement |
|
||||
| 0x8000263F | Illegal number of columns | Incorrect number of columns | Check and correct the SQL statement |
|
||||
| 0x80002640 | Too many columns | Number of columns exceeds limit | Check and correct the SQL statement |
|
||||
| 0x80002641 | First column must be timestamp | The first column must be the primary timestamp column | Check and correct the SQL statement |
|
||||
| 0x80002642 | Invalid binary/nchar column/tag length | Incorrect length for binary/nchar | Check and correct the SQL statement |
|
||||
| 0x80002643 | Invalid number of tag columns | Incorrect number of tag columns | Check and correct the SQL statement |
|
||||
| 0x80002644 | Permission denied | Permission error | Check and confirm user permissions |
|
||||
| 0x80002645 | Invalid stream query | Illegal stream statement | Check and correct the SQL statement |
|
||||
| 0x80002646 | Invalid _c0 or _rowts expression | Illegal use of _c0 or _rowts | Check and correct the SQL statement |
|
||||
| 0x80002647 | Invalid timeline function | Function depends on non-existent primary timestamp | Check and correct the SQL statement |
|
||||
| 0x80002648 | Invalid password | Password does not meet standards | Check and change the password |
|
||||
| 0x80002649 | Invalid alter table statement | Illegal modify table statement | Check and correct the SQL statement |
|
||||
| 0x8000264A | Primary timestamp column cannot be dropped | Primary timestamp column cannot be deleted | Check and correct the SQL statement |
|
||||
| 0x8000264B | Only binary/nchar column length could be modified, and the length can only be increased, not decreased | Illegal column modification | Check and correct the SQL statement |
|
||||
| 0x8000264C | Invalid tbname pseudocolumn | Illegal use of tbname column | Check and correct the SQL statement |
|
||||
| 0x8000264D | Invalid function name | Illegal function name | Check and correct the function name |
|
||||
| 0x8000264E | Comment too long | Comment length exceeds limit | Check and correct the SQL statement |
|
||||
| 0x8000264F | Function(s) only allowed in SELECT list, cannot mixed with non scalar functions or columns | Illegal mixing of functions | Check and correct the SQL statement |
|
||||
| 0x80002650 | Window query not supported, since no valid timestamp column included in the result of subquery | Window query depends on non-existent primary timestamp column | Check and correct the SQL statement |
|
||||
| 0x80002651 | No columns can be dropped | Essential columns cannot be deleted | Check and correct the SQL statement |
|
||||
| 0x80002652 | Only tag can be json type | Normal columns do not support JSON type | Check and correct the SQL statement |
|
||||
| 0x80002655 | The DELETE statement must have a definite time window range | Illegal WHERE condition in DELETE statement | Check and correct the SQL statement |
|
||||
| 0x80002656 | The REDISTRIBUTE VGROUP statement only support 1 to 3 dnodes | Illegal number of DNODEs specified in REDISTRIBUTE VGROUP | Check and correct the SQL statement |
|
||||
| 0x80002657 | Fill not allowed | Function does not allow FILL feature | Check and correct the SQL statement |
|
||||
| 0x80002658 | Invalid windows pc | Illegal use of window pseudocolumn | Check and correct the SQL statement |
|
||||
| 0x80002659 | Window not allowed | Function cannot be used in window | Check and correct the SQL statement |
|
||||
| 0x8000265A | Stream not allowed | Function cannot be used in stream computation | Check and correct the SQL statement |
|
||||
| 0x8000265B | Group by not allowed | Function cannot be used in grouping | Check and correct the SQL statement |
|
||||
| 0x8000265D | Invalid interp clause | Illegal INTERP or related statement | Check and correct the SQL statement |
|
||||
| 0x8000265E | Not valid function in window | Illegal window statement | Check and correct the SQL statement |
|
||||
| 0x8000265F | Only support single table | Function only supported in single table queries | Check and correct the SQL statement |
|
||||
| 0x80002660 | Invalid sma index | Illegal creation of SMA statement | Check and correct the SQL statement |
|
||||
| 0x80002661 | Invalid SELECTed expression | Invalid query statement | Check and correct the SQL statement |
|
||||
| 0x80002662 | Fail to get table info | Failed to retrieve table metadata information | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002663 | Not unique table/alias | Table name (alias) conflict | Check and correct the SQL statement |
|
||||
| 0x80002664 | Join requires valid time-series input | Unsupported JOIN query without primary timestamp column output in subquery | Check and correct the SQL statement |
|
||||
| 0x80002665 | The _TAGS pseudocolumn can only be used for subtable and supertable queries | Illegal tag column query | Check and correct the SQL statement |
|
||||
| 0x80002666 | Subquery does not output primary timestamp column | The subquery does not output the primary timestamp column | Check and correct the SQL statement |
|
||||
| 0x80002667 | Invalid usage of expr: %s | Illegal expression | Check and correct the SQL statement |
|
||||
| 0x80002687 | True_for duration cannot be negative | A negative value was used as the true_for duration | Check and correct the SQL statement |
|
||||
| 0x80002688 | Cannot use 'year' or 'month' as true_for duration | 'year' or 'month' was used as the true_for duration | Check and correct the SQL statement |
|
||||
| 0x80002689 | Invalid using cols function | Illegal using cols function | Check and correct the SQL statement |
|
||||
| 0x8000268A | Cols function's first param must be a select function that output a single row | The first parameter of the cols function should be a selection function | Check and correct the SQL statement |
|
||||
| 0x8000268B | Invalid using alias for cols function | Illegal cols function alias | Check and correct the SQL statement |
|
||||
| 0x8000268C | Join primary key col must be timestamp type | Join primary key data type error | Check and correct the SQL statement |
|
||||
| 0x8000268D | Invalid virtual table's ref column | Create/Update Virtual table using incorrect data source column | Check and correct the SQL statement |
|
||||
| 0x8000268E | Invalid table type | Incorrect Table type | Check and correct the SQL statement |
|
||||
| 0x8000268F | Invalid ref column type | Virtual table's column type and data source column's type are different | Check and correct the SQL statement |
|
||||
| 0x80002690 | Create child table using virtual super table | Create non-virtual child table using virtual super table | Check and correct the SQL statement |
|
||||
| 0x800026FF | Parser internal error | Internal error in parser | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002700 | Planner internal error | Internal error in planner | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002701 | Expect ts equal | JOIN condition validation failed | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002702 | Cross join not support | CROSS JOIN not supported | Check and correct the SQL statement |
|
||||
| 0x80002704 | Planner slot key not found | The planner cannot find the slotId while building the physical plan | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002705 | Planner invalid table type | The planner got an invalid table type | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80002706 | Planner invalid query control plan type | The planner got an invalid query control plan type while building the physical plan | Preserve the scene and logs, report issue on GitHub |
|
||||
|
||||
## function
|
||||
|
||||
|
@ -479,10 +496,10 @@ This document details the server error codes that may be encountered when using
|
|||
| Error Code | Description | Possible Scenarios or Reasons | Recommended Actions |
|
||||
| ---------- | ---------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
|
||||
| 0x80002901 | udf is stopping | udf call received when dnode exits | Stop executing udf queries |
|
||||
| 0x80002902 | udf pipe read error | Error occurred when taosd reads from taosudf pipe | taosudf unexpectedly exits, 1) C udf crash 2) taosudf crash |
|
||||
| 0x80002903 | udf pipe connect error | Error establishing pipe connection to taosudf in taosd | 1) Corresponding taosudf not started in taosd. Restart taosd |
|
||||
| 0x80002904 | udf pipe not exist | Connection error occurs between two phases of udf setup, call, and teardown, causing the connection to disappear, subsequent phases continue | taosudf unexpectedly exits, 1) C udf crash 2) taosudf crash |
|
||||
| 0x80002905 | udf load failure | Error loading udf in taosudf | 1) udf does not exist in mnode 2) Error in udf loading. Check logs |
|
||||
| 0x80002906 | udf invalid function input | The udf input check failed | The udf function does not accept the input, such as a wrong column type |
|
||||
| 0x80002907 | udf invalid bufsize | Intermediate result in udf aggregation function exceeds specified bufsize | Increase bufsize, or reduce intermediate result size |
|
||||
| 0x80002908 | udf invalid output type | udf output type differs from the type specified when creating udf | Modify udf, or the type when creating udf, to match the result |
|
||||
|
@ -531,8 +548,24 @@ This document details the server error codes that may be encountered when using
|
|||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|
||||
| ---------- | --------------------- | ------------------------------------------------------------ | -------------------------------------------- |
|
||||
| 0x800003E6 | Consumer not exist | The consumer timed out and went offline | Rebuild the consumer to subscribe to the data again |
|
||||
| 0x800003EA | Consumer not ready | The consumer is rebalancing | Retry after 2 seconds |
|
||||
| 0x80004000 | Invalid message | The subscribed data is illegal, generally does not occur | Check the client-side error logs for details |
|
||||
| 0x80004001 | Consumer mismatch | The vnode requested for subscription and the reassigned vnode are inconsistent, usually occurs when new consumers join the same consumer group | Internal error |
|
||||
| 0x80004002 | Consumer closed | The consumer no longer exists | Check if it has already been closed |
|
||||
| 0x80004017 | Invalid status, please subscribe topic first | Data was polled directly without calling subscribe | Call subscribe before polling data |
|
||||
| 0x80004100 | Stream task not exist | The stream computing task does not exist | Check the server-side error logs |
|
||||
|
||||
|
||||
## virtual table
|
||||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|
||||
|------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------|
|
||||
| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling |
|
||||
| 0x80006201 | Virtual table scan invalid downstream operator type | An incorrectly generated execution plan gives the virtual table scan operator a downstream operator of the wrong type. | Check error logs, contact development for handling |
|
||||
| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. | Check error logs, contact development for handling |
|
||||
| 0x80006203 | Create virtual child table must use virtual super table | A virtual child table was created using a non-virtual supertable | Create the virtual child table using a virtual supertable |
|
||||
| 0x80006204 | Virtual table not support decimal type | A virtual table was created with a decimal type column | Create the virtual table without decimal type columns |
|
||||
| 0x80006205 | Virtual table not support in STMT query and STMT insert | A virtual table was used in an stmt query or stmt insert | Do not use virtual tables in stmt queries and inserts |
|
||||
| 0x80006206 | Virtual table not support in Topic | A virtual table was used in a topic | Do not use virtual tables in topics |
|
||||
| 0x80006206 | Virtual super table query not support origin table from different databases | The origin tables of the virtual supertable's child tables are from different databases | Make sure the origin tables of a virtual supertable's child tables come from the same database |
|
||||
|
|
|
@ -0,0 +1,297 @@
|
|||
---
|
||||
title: Usage of Special Characters in Passwords
|
||||
description: Usage of special characters in user passwords in TDengine
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
|
||||
TDengine usernames and passwords must meet the following rules:
|
||||
|
||||
1. The username must not exceed 23 bytes.
|
||||
2. The password length must be between 8 and 255 characters.
|
||||
3. The range of password characters:
|
||||
1. Uppercase letters: `A-Z`
|
||||
2. Lowercase letters: `a-z`
|
||||
3. Numbers: `0-9`
|
||||
4. Special characters: `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`
|
||||
4. When the strong password option is enabled (EnableStrongPassword 1, enabled by default), the password must contain at least three of the following categories: uppercase letters, lowercase letters, numbers, and special characters; a sketch of this check follows the list. When not enabled, there are no restrictions on character types.
|
||||
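For illustration, here is a minimal C sketch of the rule 4 category check against the rule 3 character set. This is assumed logic written for this document, not TDengine's actual server-side implementation:

```c
#include <ctype.h>
#include <stdbool.h>
#include <string.h>

// Illustrative only: mirrors rules 2-4 above, not the server's real check.
static bool password_is_acceptable(const char *pwd) {
  const char *specials = "!@#$%^&*()-_+=[]{}:;><?|~,.";
  bool upper = false, lower = false, digit = false, special = false;
  size_t len = strlen(pwd);
  if (len < 8 || len > 255) return false;  // rule 2: length 8 to 255
  for (size_t i = 0; i < len; i++) {
    unsigned char c = (unsigned char)pwd[i];
    if (isupper(c))               upper = true;
    else if (islower(c))          lower = true;
    else if (isdigit(c))          digit = true;
    else if (strchr(specials, c)) special = true;
    else return false;            // character outside the rule 3 set
  }
  // rule 4: at least three of the four categories when strong password is on
  return (upper + lower + digit + special) >= 3;
}
```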
|
||||
## Usage Guide for Special Characters in Different Components
|
||||
|
||||
Take the username `user1` and password `Ab1!@#$%^&*()-_+=[]{}` as an example.
|
||||
|
||||
```sql
|
||||
CREATE USER user1 PASS 'Ab1!@#$%^&*()-_+=[]{}';
|
||||
```
|
||||
|
||||
<Tabs defaultValue="shell" groupId="component">
|
||||
<TabItem label="CLI" value="shell">
|
||||
|
||||
In the [TDengine Command Line Interface (CLI)](../../tdengine-reference/tools/tdengine-cli/), note the following:
|
||||
|
||||
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
|
||||
- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used.
|
||||
|
||||
Login with user `user1`:
|
||||
|
||||
```shell
|
||||
taos -u user1 -p'Ab1!@#$%^&*()-_+=[]{}'
|
||||
taos -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="taosdump" value="taosdump">
|
||||
|
||||
In [taosdump](../../tdengine-reference/tools/taosdump/), note the following:
|
||||
|
||||
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
|
||||
- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used.
|
||||
|
||||
Backup database `test` with user `user1`:
|
||||
|
||||
```shell
|
||||
taosdump -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -D test
|
||||
taosdump -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} -D test
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Benchmark" value="benchmark">
|
||||
|
||||
In [taosBenchmark](../../tdengine-reference/tools/taosbenchmark/), note the following:
|
||||
|
||||
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
|
||||
- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used.
|
||||
|
||||
Example of data write test with user `user1`:
|
||||
|
||||
```shell
|
||||
taosBenchmark -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -d test -y
|
||||
```
|
||||
|
||||
When using `taosBenchmark -f <JSON>`, there are no restrictions on the password in the JSON file.
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="taosX" value="taosx">
|
||||
|
||||
[taosX](../../tdengine-reference/components/taosx/) uses DSN to represent TDengine connections, in the format: `(taos|tmq)[+ws]://<user>:<pass>@<ip>:<port>`, where `<pass>` can contain special characters, such as: `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`.
|
||||
|
||||
Example of exporting data with user `user1`:
|
||||
|
||||
```shell
|
||||
taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' \
|
||||
-t 'csv:./test.csv'
|
||||
```
|
||||
|
||||
Note that if the password can be URL decoded, the URL decoded result will be used as the password. For example: `taos+ws://user1:Ab1%21%40%23%24%25%5E%26%2A%28%29-_%2B%3D%5B%5D%7B%7D@localhost:6041` is equivalent to `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041`.
|
||||
|
||||
No special handling is required in [Explorer](../../tdengine-reference/components/taosexplorer/), just use it directly.
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Java" value="java">
|
||||
|
||||
When using special character passwords in JDBC, the password needs to be URL encoded, as shown below:
|
||||
|
||||
```java
|
||||
package com.taosdata.example;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.SQLException;
|
||||
import java.util.Properties;
|
||||
import java.net.URLEncoder;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
import com.taosdata.jdbc.TSDBDriver;
|
||||
|
||||
public class JdbcPassDemo {
|
||||
public static void main(String[] args) throws Exception {
|
||||
String password = "Ab1!@#$%^&*()-_+=[]{}";
|
||||
String encodedPassword = URLEncoder.encode(password, StandardCharsets.UTF_8.toString());
|
||||
String jdbcUrl = "jdbc:TAOS-WS://localhost:6041";
|
||||
Properties connProps = new Properties();
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "user1");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, encodedPassword);
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
|
||||
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
|
||||
|
||||
try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) {
|
||||
System.out.println("Connected to " + jdbcUrl + " successfully.");
|
||||
|
||||
// you can use the connection to execute SQL here
|
||||
|
||||
} catch (Exception ex) {
|
||||
// please refer to the JDBC specifications for detailed exceptions info
|
||||
System.out.printf("Failed to connect to %s, %sErrMessage: %s%n",
|
||||
jdbcUrl,
|
||||
ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
|
||||
ex.getMessage());
|
||||
// Print stack trace for context in examples. Use logging in production.
|
||||
ex.printStackTrace();
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Python" value="python">
|
||||
|
||||
No special handling is required for special character passwords in Python, as shown below:
|
||||
|
||||
```python
|
||||
import taos
|
||||
import taosws
|
||||
|
||||
|
||||
def create_connection():
|
||||
host = "localhost"
|
||||
port = 6030
|
||||
return taos.connect(
|
||||
user="user1",
|
||||
password="Ab1!@#$%^&*()-_+=[]{}",
|
||||
host=host,
|
||||
port=port,
|
||||
)
|
||||
|
||||
def create_ws_connection():
|
||||
host = "localhost"
|
||||
port = 6041
|
||||
return taosws.connect(
|
||||
user="user1",
|
||||
password="Ab1!@#$%^&*()-_+=[]{}",
|
||||
host=host,
|
||||
port=port,
|
||||
)
|
||||
|
||||
|
||||
def show_databases(conn):
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("show databases")
|
||||
print(cursor.fetchall())
|
||||
cursor.close()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("Connect with native protocol")
|
||||
conn = create_connection()
|
||||
show_databases(conn)
|
||||
print("Connect with websocket protocol")
|
||||
conn = create_ws_connection()
|
||||
show_databases(conn)
|
||||
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Go" value="go">
|
||||
|
||||
Starting from version 3.6.0, Go supports passwords containing special characters, which need to be URL-encoded (for example with url.QueryEscape, as shown below).
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
|
||||
_ "github.com/taosdata/driver-go/v3/taosWS"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var user = "user1"
|
||||
var password = "Ab1!@#$%^&*()-_+=[]{}"
|
||||
var encodedPassword = url.QueryEscape(password)
|
||||
var taosDSN = user + ":" + encodedPassword + "@ws(localhost:6041)/"
|
||||
taos, err := sql.Open("taosWS", taosDSN)
|
||||
if err != nil {
|
||||
log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error())
|
||||
}
|
||||
fmt.Println("Connected to " + taosDSN + " successfully.")
|
||||
defer taos.Close()
|
||||
}
|
||||
```
|
||||
</TabItem>
|
||||
|
||||
<TabItem label="Rust" value="rust">
|
||||
|
||||
In Rust, DSN is used to represent TDengine connections, in the format: `(taos|tmq)[+ws]://<user>:<pass>@<ip>:<port>`, where `<pass>` can contain special characters, such as: `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`.
|
||||
|
||||
```rust
|
||||
let dsn = "taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041";
|
||||
let connection = TaosBuilder::from_dsn(&dsn)?.build().await?;
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="Node.js" value="node">
|
||||
|
||||
Starting from version 3.1.5, the Node.js connector supports passwords containing all valid characters.
|
||||
|
||||
```js
|
||||
const taos = require("@tdengine/websocket");
|
||||
|
||||
let dsn = 'ws://localhost:6041';
|
||||
async function createConnect() {
|
||||
try {
|
||||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('user1');
|
||||
conf.setPwd('Ab1!@#$%^&*()-_+=[]{}');
|
||||
conf.setDb('test');
|
||||
conn = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
return conn;
|
||||
} catch (err) {
|
||||
console.log("Connection failed with code: " + err.code + ", message: " + err.message);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
createConnect()
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="C#" value="csharp">
|
||||
|
||||
When using passwords in C#, note that connection strings do not support semicolons (as semicolons are delimiters). In this case, you can construct the `ConnectionStringBuilder` without a password, and then set the username and password.
|
||||
|
||||
As shown below:
|
||||
|
||||
```csharp
|
||||
var builder = new ConnectionStringBuilder("host=localhost;port=6030");
|
||||
builder.Username = "user1";
|
||||
builder.Password = "Ab1!@#$%^&*()-_+=[]{}";
|
||||
using (var client = DbDriver.Open(builder)){}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
There are no restrictions on passwords in C.
|
||||
|
||||
```c
|
||||
TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6030);
|
||||
```
|
||||
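If the connection fails (for example, because the password was mistyped), the error can be inspected in the usual way. A brief sketch using the same credentials:

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6030);
  if (taos == NULL) {
    // taos_errstr(NULL) / taos_errno(NULL) report the last connection error.
    fprintf(stderr, "connect failed: %s (0x%x)\n", taos_errstr(NULL), taos_errno(NULL));
    return 1;
  }
  printf("connected successfully\n");
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```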
|
||||
</TabItem>
|
||||
<TabItem label="REST" value="rest">
|
||||
|
||||
When using passwords in REST API, note the following:
|
||||
|
||||
- Passwords use Basic Auth, in the format `Authorization: Basic base64(<user>:<pass>)`.
|
||||
- Passwords containing colons `:` are not supported.
|
||||
|
||||
The following two methods are equivalent:
|
||||
|
||||
```shell
|
||||
curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' \
|
||||
-d 'show databases' http://localhost:6041/rest/sql
|
||||
curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' \
|
||||
-d 'show databases' http://localhost:6041/rest/sql
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
|
@ -58,12 +58,13 @@ static int DemoInsertData() {
|
|||
taos_cleanup();
|
||||
return -1;
|
||||
}
|
||||
|
||||
// you can check affectedRows here
|
||||
int rows = taos_affected_rows(result);
|
||||
fprintf(stdout, "Successfully inserted %d rows into power.meters.\n", rows);
|
||||
|
||||
taos_free_result(result);
|
||||
|
||||
// close & clean
|
||||
taos_close(taos);
|
||||
taos_cleanup();
|
||||
|
|
|
@ -64,10 +64,10 @@ CREATE TOPIC [IF NOT EXISTS] topic_name [with meta] AS DATABASE db_name;
|
|||
|
||||
## Delete Topic
|
||||
|
||||
If you no longer need to subscribe to the data, you can drop the topic. If the topic is currently subscribed to by consumers, it can be force-dropped with the FORCE syntax; after a forced drop, the subscribed consumers will get errors when consuming data (the FORCE syntax is supported since v3.3.6.0).
|
||||
|
||||
```sql
|
||||
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
|
||||
```
|
||||
|
||||
## View Topics
|
||||
|
@ -94,9 +94,9 @@ SHOW CONSUMERS;
|
|||
|
||||
### Delete Consumer Group
|
||||
|
||||
When a consumer is created, it is assigned to a consumer group. Consumers cannot be deleted explicitly, but consumer groups can be. If consumers in the group are still consuming, the group can be force-dropped with the FORCE syntax; after a forced drop, the subscribed consumers will get errors when consuming data (the FORCE syntax is supported since v3.3.6.0).
|
||||
```sql
|
||||
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
|
||||
```
|
||||
|
||||
## Data Subscription
|
||||
|
@ -129,6 +129,7 @@ TDengine 的数据订阅功能支持回放(replay)功能,允许用户按
|
|||
```
|
||||
|
||||
Note the following when using the replay feature of data subscription:
|
||||
- Enable replay by setting the consumption parameter enable.replay to true (see the sketch after this list).
|
||||
- Replay is supported only for query subscriptions; supertable and database subscriptions do not support replay.
|
||||
- Replay does not support progress saving.
|
||||
- Because data replay itself takes processing time, the replay precision has an error of several tens of milliseconds.
|
||||
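For the first item above, a minimal C sketch of turning on replay in the consumer configuration; the group id is a placeholder:

```c
#include "taos.h"

// Enable replay before creating the consumer; "enable.replay" is the
// consumption parameter named in the notes above.
static tmq_conf_t *build_replay_conf(void) {
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "group.id", "replay_grp");  // placeholder group id
  tmq_conf_set(conf, "enable.replay", "true");   // turn on replay
  return conf;
}
```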
|
|
|
@ -23,11 +23,11 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name
|
|||
SUBTABLE(expression) AS subquery
|
||||
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -101,6 +101,13 @@ PARTITION 子句中,为 tbname 定义了一个别名 tname, 在 PARTITION
|
|||
|
||||
By enabling the fill_history option, the created stream computing task can process data written before, during, and after the stream's creation. This means that data written either before or after the stream is created is included in the stream computation, ensuring data integrity and consistency. This setting gives users greater flexibility to handle historical and new data as needed.
|
||||
|
||||
Note:
|
||||
- When fill_history is enabled, creating the stream requires finding the boundary point of the historical data; if there is a lot of historical data, creating the stream task may take a long time. In that case you can use the fill_history 1 async syntax (supported since v3.3.6.0) to run stream creation in the background, so the create-stream statement returns immediately without blocking subsequent operations. async only takes effect with fill_history 1; with fill_history 0, stream creation is fast and needs no asynchronous handling.
|
||||
|
||||
- You can check the progress of background stream creation with show streams (ready means success, init means the stream is being created, failed means creation failed; on failure, the message column shows the reason. A failed stream can be dropped and recreated).
|
||||
|
||||
- Also, do not create multiple streams asynchronously at the same time; transaction conflicts may cause later streams to fail.
|
||||
|
||||
For example, create a stream that counts the number of rows produced by all smart meters every 10s, and also computes historical data. The SQL is as follows:
|
||||
```sql
|
||||
create stream if not exists count_history_s fill_history 1 into count_history as select count(*) from power.meters interval(10s)
|
||||
|
@ -124,7 +131,12 @@ create stream if not exists count_history_s fill_history 1 into count_history as
|
|||
1. AT_ONCE: triggered immediately on write.
|
||||
2. WINDOW_CLOSE: triggered when the window closes (window closing is determined by the event time and can be used together with watermark).
|
||||
3. MAX_DELAY time: triggers computation when the window closes. If the window has not closed, and the time it has remained open exceeds the time specified by max delay, computation is triggered.
|
||||
4. FORCE_WINDOW_CLOSE: based on the operating system's current time, only the result of the currently closing window is computed and pushed out. The window is computed only once, at the moment it closes, and is not recomputed later. This mode currently supports only INTERVAL windows (sliding is supported); in this mode, FILL_HISTORY is automatically set to 0, IGNORE EXPIRED is automatically set to 1, and IGNORE UPDATE is automatically set to 1; FILL supports only PREV, NULL, NONE, and VALUE.
|
||||
- This mode can be used to implement continuous queries, for example, creating a stream that every 1s queries the number of rows in the window of the past 10s. The SQL is as follows:
|
||||
```sql
|
||||
create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s)
|
||||
```
|
||||
5. CONTINUOUS_WINDOW_CLOSE: results are output when the window closes. Modifying or deleting data does not immediately trigger recalculation; instead, periodic recalculation is performed every rec_time_val. If rec_time_val is not specified, the recalculation period is 60 minutes. If a recalculation takes longer than rec_time_val, the next recalculation starts automatically after the current one finishes. This mode currently supports only INTERVAL windows. If FILL is used, the adapter information must be configured: adapterFqdn, adapterPort, adapterToken. adapterToken is the Base64-encoded string of `{username}:{password}`; for example, `root:taosdata` encodes to `cm9vdDp0YW9zZGF0YQ==`.
|
||||
|
||||
Window closing is determined by the event time. If the event stream is interrupted or keeps lagging, the event time cannot advance, which may prevent the latest computation results from being produced.
|
||||
|
||||
|
|
|
@ -37,6 +37,14 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
|||
|
||||
In **MQTT Port**, enter the port of the MQTT broker, for example: `1883`
|
||||
|
||||
In **TLS Verification**, select how the TLS certificate is verified
|
||||
|
||||
1. Disabled: no TLS certificate authentication is performed. When connecting to MQTT, a plain TCP connection is attempted first; if it fails, a TLS connection without certificate verification is attempted.
|
||||
|
||||
2. One-way authentication: enable TLS and verify the server certificate; a CA certificate must be uploaded.
|
||||
|
||||
3. Two-way authentication: enable TLS and perform mutual authentication with the server; a CA certificate, a client certificate, and a client key must be uploaded.
|
||||
|
||||
In **User**, enter the username for the MQTT broker.
|
||||
|
||||
In **Password**, enter the password for the MQTT broker.
|
||||
|
@ -44,13 +52,7 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
|||
|
||||

|
||||
|
||||
### 4. Configure Collection Information
|
||||
|
||||
In the **Collection Configuration** area, fill in the configuration parameters for the collection task.
|
||||
|
||||
|
@ -65,6 +67,8 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
|||
|
||||
In **Subscription Topics and QoS Configuration**, enter the topic names to consume and the QoS, using the format `{topic_name}::{qos}` (for example, `my_topic::0`). MQTT protocol 5.0 supports shared subscriptions, which allow multiple clients to subscribe to the same topic for load balancing, using the format `$share/{group_name}/{topic_name}::{qos}`, where `$share` is a fixed prefix indicating a shared subscription and `group_name` is the group name, similar to Kafka's consumer groups.
|
||||
|
||||
In **Topic Parsing**, enter the MQTT topic parsing rules. The format is the same as an MQTT topic: the contents of each topic level are parsed into the corresponding variable names, and `_` means the current level is ignored during parsing. For example, for the MQTT topic `a/+/c`, if the parsing rule is set to `v1/v2/_`, the first level `a` is assigned to variable `v1`, the second level's value (the wildcard `+` matches any value) is assigned to variable `v2`, and the third level's value `c` is ignored and not assigned to any variable. In the `payload parsing` section below, the variables obtained from topic parsing can also take part in various transformations and computations.
|
||||
|
||||
In **Data Compression**, configure the message body compression algorithm. After receiving a message, taosX decompresses the message body with the corresponding algorithm to obtain the original data. Options are none (no compression), gzip, snappy, lz4, and zstd; the default is none.
|
||||
|
||||
In **Character Encoding**, configure the message body encoding format. After receiving a message, taosX decodes the message body with the corresponding encoding to obtain the original data. Options are UTF_8, GBK, GB18030, and BIG5; the default is UTF_8.
|
||||
|
@ -73,13 +77,13 @@ TDengine 可以通过 MQTT 连接器从 MQTT 代理订阅数据并将其写入 T
|
|||
|
||||

|
||||
|
||||
### 5. Configure MQTT Payload Parsing
|
||||
|
||||
In the **MQTT Payload Parsing** area, fill in the configuration parameters for payload parsing.
|
||||
|
||||
taosX can parse data with a JSON extractor and lets you specify the data model in the database, including specifying the table and supertable names and configuring ordinary columns and tag columns.
|
||||
|
||||
#### 5.1 Parsing
|
||||
|
||||
There are three ways to obtain sample data:
|
||||
|
||||
|
@ -110,7 +114,7 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
|||
|
||||

|
||||
|
||||
#### 5.2 Field Splitting
|
||||
|
||||
In **Extract or Split from Column**, enter the fields to extract or split from the message body. For example, to split the message field into two fields, `message_0` and `message_1`, select the split extractor, set the separator to -, and set the number to 2.
|
||||
|
||||
|
@ -124,7 +128,7 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
|||
|
||||

|
||||
|
||||
#### 5.3 Data Filtering
|
||||
|
||||
In **Filter**, enter the filtering conditions. For example, if you enter `id != 1`, only data whose id is not 1 will be written to TDengine.
|
||||
|
||||
|
@ -136,9 +140,13 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
|||
|
||||

|
||||
|
||||
#### 5.4 Table Mapping
|
||||
|
||||
In the **Target Supertable** drop-down list, select a target supertable, or first click the **Create Supertable** button on the right to create a new one.
|
||||
|
||||
When the supertable needs to be generated dynamically from the messages, you can choose **Create Template**. The supertable name, column names, column types, and so on can all use template variables. When data is received, the program automatically evaluates the template variables and generates the corresponding supertable template; if the supertable does not exist in the database, it is created from this template, and for an existing supertable, any columns derived from the template variables that are missing are created automatically.
|
||||
|
||||

|
||||
|
||||
In **Mapping**, enter the subtable name in the target supertable, for example: `t_{id}`. Fill in the mapping rules as needed; mapping supports default values.
|
||||
|
||||
|
@ -148,7 +156,17 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
|||
|
||||

|
||||
|
||||
If a supertable column is a template variable, a pivot operation is performed during subtable mapping: the values of the template variable are expanded into column names, and the column values come from the corresponding mapped columns.
|
||||
|
||||
For example:
|
||||
|
||||

|
||||
|
||||
The preview result is:
|
||||
|
||||

|
||||
|
||||
### 6. Advanced Options
|
||||
|
||||
In **Message Waiting Queue Size**, enter the size of the buffer queue for receiving MQTT messages. When the queue is full, newly arriving data is discarded. It can be set to 0, meaning no buffering.
|
||||
|
||||
|
@ -166,12 +184,12 @@ json 数据支持 JSONObject 或者 JSONArray,使用 json 解析器可以解
|
|||
|
||||

|
||||
|
||||
### 7. Exception Handling Strategy
|
||||
|
||||
import Contributing from './_03-exception-handling-strategy.mdx'
|
||||
|
||||
<Contributing />
|
||||
|
||||
### 8. Creation Complete
|
||||
|
||||
Click the **Submit** button to finish creating the MQTT-to-TDengine data synchronization task. Go back to the **Data Source List** page to check the task's execution status.
|
||||
|
|
|
@ -9,9 +9,15 @@
|
|||
> Discard: ignore the abnormal data and do not write it to the target database
|
||||
> Error: the task reports an error
|
||||
|
||||
- **Target database connection timeout** The connection to the target database failed. Available strategies: archive, discard, error, cache
|
||||
> Cache: when the target database is in an abnormal state (connection error, insufficient resources, etc.), write the data to a cache file (default path `${data_dir}/tasks/_id/.datetime`) and re-import it once the target database recovers
|
||||
- **Target database does not exist** Writing fails because the target database does not exist. Available strategies: archive, discard, error
|
||||
- **Table does not exist** Writing fails because the table does not exist. Available strategies: archive, discard, error, auto-create table
|
||||
> Auto-create table: create the table automatically and retry once it is created
|
||||
- **Primary key timestamp overflow** Checks whether the timestamp in the first column of the data is within the valid range (now - keep1, now + 100y). Available strategies: archive, discard, error
|
||||
- **Primary key timestamp empty** Checks whether the timestamp in the first column of the data is null. Available strategies: archive, discard, error, use current time
|
||||
> Use current time: fill the empty timestamp field with the current time
|
||||
- **Composite primary key empty** Writing fails because the composite primary key is null. Available strategies: archive, discard, error
|
||||
- **Table name length overflow** Checks whether the subtable name exceeds the length limit (maximum 192 characters). Available strategies: archive, discard, error, truncate, truncate and archive
|
||||
> Truncate: use the first 192 characters of the original table name as the new table name
|
||||
> Truncate and archive: use the first 192 characters of the original table name as the new table name, and write this row to the archive file
|
||||
|
@ -20,4 +26,20 @@
|
|||
- **Table name template variable empty** Checks whether a variable in the subtable name template is null. Available strategies: discard, leave blank, replace variable with specified string
|
||||
> Leave blank: apply no special handling at the variable position, e.g. `a_{x}` becomes `a_`
|
||||
> Replace variable with specified string: use the string given in the input box that follows at the variable position, e.g. `a_{x}` becomes `a_b`
|
||||
- **Column name length overflow** Checks whether a column name exceeds the length limit (maximum 64 characters). Available strategies: archive, discard, error
|
||||
- **Column does not exist** Writing fails because the column does not exist. Available strategies: archive, discard, error, automatically add missing column
|
||||
> Automatically add missing column: automatically alter the table schema to add the column based on the data, and retry once the change succeeds
|
||||
- **Automatic column expansion** A toggle option; when enabled, the table schema is automatically altered and the write retried when column data exceeds the defined length
|
||||
- **Column length overflow** Writing fails because the column length overflows. Available strategies: archive, discard, error, truncate, truncate and archive
|
||||
> Truncate: keep the first n characters of the data that fit within the length limit
|
||||
> Truncate and archive: keep the first n characters of the data that fit within the length limit, and write this row to the archive file
|
||||
- **Data exception** Handling strategy for other data exceptions not listed above. Available strategies: archive, discard, error
|
||||
- **Connection timeout** Configures the target database connection timeout in seconds; valid range 1-600
|
||||
- **Temporary storage file location** Configures the location of cache files; the effective location is `$DATA_DIR/tasks/:id/{location}`
|
||||
- **Archived data retention days** A non-negative integer; 0 means no limit
|
||||
- **归档数据可用空间** 0~65535,其中 0 表示无限制
|
||||
- **归档数据文件位置** 配置归档文件的位置,实际生效位置 `$DATA_DIR/tasks/:id/{location}`
|
||||
- **归档数据失败处理策略** 当写入归档文件报错时的处理策略,可选处理策略:删除旧文件、丢弃、报错并停止任务
|
||||
> 删除旧文件:删除旧文件,如果删除旧文件后仍然无法写入,则报错并停止任务
|
||||
> 丢弃:丢弃即将归档的数据
|
||||
> 报错并停止任务:报错并停止当前任务
|
|
@ -152,7 +152,11 @@ let v3 = data["voltage"].split(",");
|
|||
|
||||
使用 json 规则解析出的电压是字符串表达的带单位形式,最终入库希望能使用 int 类型记录电压值和电流值,便于统计分析,此时就需要对电压进一步拆分;另外日期期望拆分为日期和时间入库。
|
||||
|
||||
如下图所示可以对源字段`ts`使用 split 规则拆分成日期和时间,对字段`voltage`使用 regex 提取出电压值和电压单位。split 规则需要设置**分隔符**和**拆分数量**,拆分后的字段命名规则为`{原字段名}_{顺序号}`,Regex 规则同解析过程中的一样,使用**命名捕获组**命名提取字段。
|
||||
如下图所示:
|
||||
|
||||
* 对字段 `ts` 使用 split 规则拆分成日期和时间。split 规则需要设置 **分隔符** 和 **拆分数量**,拆分后的字段命名规则为 `{原字段名}_{顺序号}`。
|
||||
* 对字段 `voltage` 使用正则表达式 `^(?<voltage>[0-9]+)(?<voltage_unit>[a-zA-Z]+)$` 提取出电压值和电压单位,Regex 规则同解析过程中的一样,使用 **命名捕获组** 命名提取字段。
|
||||
* 对字段 `location` 使用 convert 转换,填写一个 JSON map 对象,其中 key 为字段 `location` 的值,`value` 为转换后的值。如图,`location` 字段的值 `"beijing.chaoyang.datun"` 被转换为 `"beijing.chaoyang.datunludong"`。
|
||||
|
||||

|
||||
|
||||
|
|
Before Width: | Height: | Size: 30 KiB After Width: | Height: | Size: 37 KiB |
Before Width: | Height: | Size: 24 KiB |
Before Width: | Height: | Size: 53 KiB After Width: | Height: | Size: 60 KiB |
After Width: | Height: | Size: 50 KiB |
After Width: | Height: | Size: 46 KiB |
After Width: | Height: | Size: 32 KiB |
Before Width: | Height: | Size: 43 KiB After Width: | Height: | Size: 98 KiB |
Before Width: | Height: | Size: 56 KiB After Width: | Height: | Size: 170 KiB |
|
@ -0,0 +1,31 @@
|
|||
---
|
||||
title: "LSTM"
|
||||
sidebar_label: "LSTM"
|
||||
---
|
||||
|
||||
本节说明 LSTM 模型的使用方法。
|
||||
|
||||
## 功能概述
|
||||
|
||||
LSTM 模型即长短期记忆网络(Long Short-Term Memory),是一种特殊的循环神经网络,适用于处理时间序列数据、自然语言处理等任务,通过其独特的门控机制,能够有效捕捉长期依赖关系,
|
||||
解决传统 RNN 的梯度消失问题,从而对序列数据进行准确预测,不过它不直接提供计算的置信区间范围结果。
|
||||
|
||||
|
||||
完整的调用 SQL 语句如下:
|
||||
```SQL
|
||||
SELECT _frowts, FORECAST(i32, "algo=lstm") from foo
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"rows": fc_rows, // 返回结果的行数
|
||||
"period": period, // 返回结果的周期性,同输入
|
||||
"alpha": alpha, // 返回结果的置信区间,同输入
|
||||
"algo": "lstm", // 返回结果使用的算法
|
||||
"mse": mse, // 拟合输入时间序列时候生成模型的最小均方误差(MSE)
|
||||
"res": res // 列模式的结果
|
||||
}
|
||||
```
|
||||
|
||||
### 参考文献
|
||||
- [1] Hochreiter S. Long Short-term Memory[J]. Neural Computation MIT-Press, 1997.
|
|
@ -0,0 +1,35 @@
|
|||
---
|
||||
title: "MLP"
|
||||
sidebar_label: "MLP"
|
||||
---
|
||||
|
||||
本节说明 MLP 模型的使用方法。
|
||||
|
||||
## 功能概述
|
||||
|
||||
MLP(Multilayer Perceptron,多层感知机)是一种经典的神经网络模型,能够通过学习历史数据的非线性关系,
|
||||
捕捉时间序列中的模式并进行未来值预测。它通过多层全连接网络进行特征提取和映射,
|
||||
对输入的历史数据生成预测结果。由于不直接考虑趋势或季节性变化,通常需要结合数据预处理来提升效果,
|
||||
适合解决非线性和复杂的时间序列问题。
|
||||
|
||||
完整的调用 SQL 语句如下:
|
||||
|
||||
```SQL
|
||||
SELECT _frowts, FORECAST(i32, "algo=mlp") from foo
|
||||
```
|
||||
|
||||
```json5
|
||||
{
|
||||
"rows": fc_rows, // 返回结果的行数
|
||||
"period": period, // 返回结果的周期性,同输入
|
||||
"alpha": alpha, // 返回结果的置信区间,同输入
|
||||
"algo": "mlp", // 返回结果使用的算法
|
||||
"mse": mse, // 拟合输入时间序列时候生成模型的最小均方误差(MSE)
|
||||
"res": res // 列模式的结果
|
||||
}
|
||||
```
|
||||
|
||||
### 参考文献
|
||||
- [1] Rumelhart D E, Hinton G E, Williams R J. Learning representations by back-propagating errors[J]. Nature, 1986, 323(6088): 533-536.
|
||||
- [2] Rosenblatt F. The perceptron: a probabilistic model for information storage and organization in the brain[J]. Psychological Review, 1958, 65(6): 386.
|
||||
- [3] LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.
|
|
@ -141,5 +141,5 @@ gen_figure = true
|
|||
|
||||
如果设置了 `gen_figure` 为 true,分析结果中还会有绘制的分析预测结果图(如下图所示)。
|
||||
|
||||
<img src={fc_result_figure} width="760" alt="预测对比结果" />
|
||||
<img src={fc_result_figure} width="540" alt="预测对比结果" />
|
||||
|
||||
|
|
|
@ -3,7 +3,9 @@ title: "机器学习算法"
|
|||
sidebar_label: "机器学习算法"
|
||||
---
|
||||
|
||||
Autoencoder<sup>[1]</sup>: TDgpt 内置使用自编码器(Autoencoder)的异常检测算法,对周期性的时间序列数据具有较好的检测结果。使用该模型需要针对输入时序数据进行预训练,同时将训练完成的模型保存在到服务目录 `ad_autoencoder` 中,然后在 SQL 语句中指定调用该算法模型即可使用。
|
||||
Autoencoder<sup>[1]</sup>: TDgpt 内置使用自编码器(Autoencoder)的异常检测算法,
|
||||
对周期性的时间序列数据具有较好的检测结果。使用该模型需要针对输入时序数据进行预训练,
|
||||
同时将训练完成的模型保存到服务目录 `ad_autoencoder` 中,然后在 SQL 语句中指定调用该算法模型即可使用。
|
||||
|
||||
```SQL
|
||||
--- 在 options 中增加 model 的名称 ad_autoencoder_foo,针对 foo 数据集(表)训练的采用自编码器的异常检测模型进行异常检测
|
||||
|
|
|
@ -102,5 +102,5 @@ lof={"algo":"auto", "n_neighbor": 3}
|
|||
|
||||
如果设置了 `gen_figure` 为 `true`,比较程序会自动将每个参与比较的算法分析结果采用图片方式呈现出来(如下图所示为 ksigma 的异常检测结果标注)。
|
||||
|
||||
<img src={ad_result_figure} width="760" alt="异常检测标注图" />
|
||||
<img src={ad_result_figure} width="540" alt="异常检测标注图" />
|
||||
|
||||
|
|
|
@ -73,7 +73,7 @@ TDengine 消费者的概念跟 Kafka 类似,消费者通过订阅主题来接
|
|||
#### msg.with.table.name
|
||||
- 说明:是否允许从消息中解析表名
|
||||
- 类型:boolean
|
||||
- 备注:不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句),默认关闭。从 3.2.0.0 版本该参数废弃。
|
||||
- 备注:不适用于列订阅(列订阅时可将 tbname 作为列写入 subquery 语句),默认关闭。v3.2.0.0 起该参数废弃。
|
||||
|
||||
#### enable.replay
|
||||
- 说明:是否开启数据回放功能
|
||||
|
@ -83,12 +83,27 @@ TDengine 消费者的概念跟 Kafka 类似,消费者通过订阅主题来接
|
|||
#### session.timeout.ms
|
||||
- 说明:consumer 心跳丢失后超时时间
|
||||
- 类型:integer
|
||||
- 备注:超时后会触发 rebalance 逻辑,成功后该 consumer 会被删除(从 3.3.3.0 版本开始支持)。默认值为 12000,取值范围 [6000,1800000]。
|
||||
- 备注:超时后会触发 rebalance 逻辑,成功后该 consumer 会被删除。默认值为 12000,取值范围 [6000,1800000]。v3.3.3.0 开始支持。
|
||||
|
||||
#### max.poll.interval.ms
|
||||
- 说明:consumer poll 拉取数据间隔的最长时间
|
||||
- 类型:integer
|
||||
- 备注:超过该时间,会认为该 consumer 离线,触发 rebalance 逻辑,成功后该 consumer 会被删除(从 3.3.3.0 版本开始支持)。默认值为 300000,[1000,INT32_MAX] 。
|
||||
- 备注:超过该时间,会认为该 consumer 离线,触发 rebalance 逻辑,成功后该 consumer 会被删除。默认值为 300000,[1000,INT32_MAX]。v3.3.3.0 开始支持。
|
||||
|
||||
#### fetch.max.wait.ms
|
||||
- 说明:服务端单次返回数据的最大耗时
|
||||
- 类型:integer
|
||||
- 备注:默认值为 1000,[1,INT32_MAX]。v3.3.6.0 开始支持。
|
||||
|
||||
#### min.poll.rows
|
||||
- 说明:服务端单次返回数据的最小条数
|
||||
- 类型:integer
|
||||
- 备注:默认值为 4096,[1,INT32_MAX]。v3.3.6.0 开始支持。
|
||||
|
||||
#### msg.consume.rawdata
|
||||
- 说明:消费数据时拉取数据类型为二进制类型,不可做解析操作 `内部参数,只用于 taosX 数据迁移`
|
||||
- 类型:integer
|
||||
- 备注:默认值为 0 表示不起效,非 0 为起效。v3.3.6.0 开始支持。
|
||||
|
||||
下面是各语言连接器创建参数:
|
||||
<Tabs defaultValue="java" groupId="lang">
|
||||
|
|
|
@ -237,7 +237,7 @@ typedef struct SUdfInterBuf {
|
|||
|
||||
#### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
|
||||
|
||||
bit_add 实现多列的按位与功能。如果只有一列,返回这一列。bit_add 忽略空值。
|
||||
bit_and 实现多列的按位与功能。如果只有一列,返回这一列。bit_and 忽略空值。
|
||||
|
||||
<details>
|
||||
<summary>bit_and.c</summary>
|
||||
|
@ -287,12 +287,46 @@ select max_vol(vol1, vol2, vol3, deviceid) from battery;
|
|||
|
||||
</details>
|
||||
|
||||
#### 聚合函数示例 3 切分字符串求平均值 [extract_avg](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/extract_avg.c)
|
||||
|
||||
`extract_avg` 函数将一个逗号分隔的数字字符串转为一组数值,统计所有行的结果,计算最终平均值。实现时需注意:
|
||||
- `interBuf->numOfResult` 需要返回 1 或者 0,不能用于 count 计数。
|
||||
- count 计数可使用额外的缓存,例如 `SumCount` 结构体。
|
||||
- 字符串的获取需使用 `varDataVal`。
|
||||
|
||||
创建表:
|
||||
```sql
|
||||
create table scores(ts timestamp, varStr varchar(128));
|
||||
```
|
||||
创建自定义函数:
|
||||
```sql
|
||||
create aggregate function extract_avg as '/root/udf/libextract_avg.so' outputtype double bufsize 16 language 'C';
|
||||
```
|
||||
使用自定义函数:
|
||||
```sql
|
||||
select extract_avg(varStr) from scores;
|
||||
```
|
||||
|
||||
生成 `.so` 文件:
|
||||
```bash
|
||||
gcc -g -O0 -fPIC -shared extract_avg.c -o libextract_avg.so
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>extract_avg.c</summary>
|
||||
|
||||
```c
|
||||
{{#include tests/script/sh/extract_avg.c}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## 用 Python 语言开发 UDF
|
||||
|
||||
### 准备环境
|
||||
|
||||
准备环境的具体步骤如下:
|
||||
- 第 1 步,准备好 Python 运行环境。
|
||||
- 第 1 步,准备好 Python 运行环境。若本地编译安装 Python,注意打开 `--enable-shared` 选项,否则后续安装 taospyudf 会因无法生成共享库而失败。
|
||||
- 第 2 步,安装 Python 包 taospyudf。命令如下。
|
||||
```shell
|
||||
pip3 install taospyudf
|
||||
|
@ -472,10 +506,10 @@ taos> select myfun(v1, v2) from t;
|
|||
DB error: udf function execution failure (0.011088s)
|
||||
```
|
||||
|
||||
不幸的是执行失败了,什么原因呢?查看 udfd 进程的日志。
|
||||
不幸的是执行失败了,什么原因呢?查看 taosudf 进程的日志。
|
||||
|
||||
```shell
|
||||
tail -10 /var/log/taos/udfd.log
|
||||
tail -10 /var/log/taos/taosudf.log
|
||||
```
|
||||
|
||||
发现以下错误信息。
|
||||
|
|
|
@ -17,24 +17,25 @@ TDengine 面向多种写入场景,而很多写入场景下,TDengine 的存
|
|||
### 语法
|
||||
|
||||
```SQL
|
||||
compact DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
||||
compact [db_name.]vgroups IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
|
||||
show compacts;
|
||||
compact DATABASE db_name [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
|
||||
compact [db_name.]vgroups IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'] [META_ONLY];
|
||||
show compacts;
|
||||
show compact compact_id;
|
||||
kill compact compact_id;
|
||||
kill compact compact_id;
|
||||
```
|
||||
|
||||
### 效果
|
||||
|
||||
- 扫描并压缩指定的 DB 中所有 vgroup 中 vnode 的所有数据文件
|
||||
- 扫描并压缩 DB 中指定的 vgroup 列表中 vnode 的所有数据文件, 若 db_name 为空,则默认为当前数据库
|
||||
- compact 会删除被删除数据以及被删除的表的数据
|
||||
- compact 会合并多个 STT 文件
|
||||
- 可通过 start with 关键字指定 compact 数据的起始时间
|
||||
- 可通过 end with 关键字指定 compact 数据的终止时间
|
||||
- compact 命令会返回 compact 任务的 ID
|
||||
- compact 任务会在后台异步执行,可以通过 show compacts 命令查看 compact 任务的进度
|
||||
- show 命令会返回 compact 任务的 ID,可以通过 kill compact 命令终止 compact 任务
|
||||
- 扫描并压缩指定的 DB 中所有 vgroup 中 vnode 的所有数据文件
|
||||
- 扫描并压缩 DB 中指定的 vgroup 列表中 vnode 的所有数据文件,若 db_name 为空,则默认为当前数据库
|
||||
- compact 会删除被删除数据以及被删除的表的数据
|
||||
- compact 会合并多个 STT 文件
|
||||
- 可通过 start with 关键字指定 compact 数据的起始时间
|
||||
- 可通过 end with 关键字指定 compact 数据的终止时间
|
||||
- 可通过 `META_ONLY` 关键字指定只 compact 元数据,元数据默认情况下不会 compact(用法示例见下)。
|
||||
- compact 命令会返回 compact 任务的 ID
|
||||
- compact 任务会在后台异步执行,可以通过 show compacts 命令查看 compact 任务的进度
|
||||
- show 命令会返回 compact 任务的 ID,可以通过 kill compact 命令终止 compact 任务
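下面是一个结合上述语法的示意调用(其中库名 `db1`、时间范围与任务 ID 均为假设值,仅用于说明用法):

```sql
-- 仅 compact 库 db1 在指定时间范围内的数据
compact DATABASE db1 start with '2024-01-01 00:00:00' end with '2024-06-30 23:59:59';
-- 仅 compact 元数据
compact DATABASE db1 META_ONLY;
-- 查看任务进度;show 会返回任务 ID,可用于终止任务
show compacts;
kill compact 1;
```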
|
||||
|
||||
|
||||
### 补充说明
|
||||
|
|
|
@ -17,11 +17,14 @@ create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
|||
|
||||
相关参数说明如下。
|
||||
- user_name:用户名最长不超过 23 个字节。
|
||||
- password:密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。(始自 3.3.5.0 版本)
|
||||
- sysinfo :用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息,如 dnode、查询节点(qnode)等,以及与存储相关的信息等。默认为可以查看系统信息。
|
||||
- createdb:用户是否可以创建数据库。1 表示可以创建,0 表示不可以创建。缺省值为 0。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||
- password:密码长度必须为 8 到 255 个字节。密码至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`(始自 v3.3.5.0),可以通过在 taos.cfg 中添加参数 `enableStrongPassword 0` 关闭此强制要求,或者通过如下 SQL 关闭(始自 v3.3.6.0)。
|
||||
```sql
|
||||
alter all dnode 'EnableStrongPassword' '0'
|
||||
```
|
||||
- sysinfo:用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息,如 dnode、查询节点(qnode)等,以及与存储相关的信息等。默认为可以查看系统信息。
|
||||
- createdb:用户是否可以创建数据库。1 表示可以创建,0 表示不可以创建。缺省值为 0。从企业版 v3.3.2.0 开始支持。
|
||||
|
||||
如下 SQL 可以创建密码为 abc123!@# 且可以查看系统信息的用户 test。
|
||||
如下 SQL 可以创建密码为 `abc123!@#` 且可以查看系统信息的用户 test。
|
||||
|
||||
```sql
|
||||
create user test pass 'abc123!@#' sysinfo 1
|
||||
|
@ -55,8 +58,8 @@ alter_user_clause: {
|
|||
相关参数说明如下。
|
||||
- pass:修改用户密码。
|
||||
- enable:是否启用用户。1 表示启用此用户,0 表示禁用此用户。
|
||||
- sysinfo :用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息
|
||||
- createdb:用户是否可创建数据库。1 表示可以创建数据库,0 表示不可以创建数据库。从 TDengine 企业版 3.3.2.0 开始支持。
|
||||
- sysinfo:用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
|
||||
- createdb:用户是否可创建数据库。1 表示可以创建数据库,0 表示不可以创建数据库。从企业版 v3.3.2.0 开始支持。
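例如,如下 SQL(示意写法,假设 alter user 的子句格式与上文 create user 一致)为 test 用户开启创建数据库的权限。

```sql
alter user test createdb 1
```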
|
||||
|
||||
如下 SQL 禁用 test 用户。
|
||||
```sql
|
||||
|
|
|
@ -0,0 +1,274 @@
|
|||
---
|
||||
sidebar_label: 安全配置
|
||||
title: 安全配置
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
## 背景
|
||||
|
||||
TDengine 的分布式、多组件特性导致 TDengine 的安全配置是生产系统中比较关注的问题。本文档旨在对 TDengine 各组件及在不同部署方式下的安全问题进行说明,并提供部署和配置建议,为用户的数据安全提供支持。
|
||||
|
||||
## 安全配置涉及组件
|
||||
|
||||
TDengine 包含多个组件,有:
|
||||
|
||||
- `taosd`: 内核组件。
|
||||
- `taosc`: 客户端库。
|
||||
- `taosAdapter`: REST API 和 WebSocket 服务。
|
||||
- `taosKeeper`:监控服务组件。
|
||||
- `taosX`:数据管道和备份恢复组件。
|
||||
- `taosxAgent`:外部数据源数据接入辅助组件。
|
||||
- `taosExplorer`:Web 可视化管理界面。
|
||||
|
||||
与 TDengine 部署和应用相关,还会存在以下组件:
|
||||
|
||||
- 通过各种连接器接入并使用 TDengine 数据库的应用。
|
||||
- 外部数据源:指接入 TDengine 的其他数据源,如 MQTT、OPC、Kafka 等。
|
||||
|
||||
各组件关系如下:
|
||||
|
||||

|
||||
|
||||
关于各组件的详细介绍,请参考 [组件介绍](./intro)。
|
||||
|
||||
## TDengine 安全设置
|
||||
|
||||
### `taosd`
|
||||
|
||||
taosd 集群间使用 TCP 连接基于自有协议进行数据交换,风险较低,但传输过程不是加密的,仍有一定安全风险。
|
||||
|
||||
启用 RPC 消息压缩,可对 TCP 传输内容起到一定的混淆作用。
|
||||
|
||||
- **compressMsgSize**:是否对 RPC 消息进行压缩,整数,可选:-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩。
|
||||
|
||||
为了保证数据库操作可追溯,建议启用审计功能。
|
||||
|
||||
- **audit**:审计功能开关,0 为关,1 为开。默认打开。
|
||||
- **auditInterval**:上报间隔,单位为毫秒。默认 5000。
|
||||
- **auditCreateTable**:是否针对创建子表开启审计功能。0 为关,1 为开。默认打开。
|
||||
|
||||
为保证数据文件安全,可启用数据库加密。
|
||||
|
||||
- **encryptAlgorithm**:数据加密算法。
|
||||
- **encryptScope**:数据加密范围。
|
||||
|
||||
启用白名单可限制访问地址,进一步增强私密性。
|
||||
|
||||
- **enableWhiteList**:白名单功能开关,0 为关,1 为开;默认关闭。
|
||||
|
||||
### `taosc`
|
||||
|
||||
用户和其他组件与 `taosd` 之间使用原生客户端库(taosc)和自有协议进行连接,数据安全风险较低,但传输过程仍然不是加密的,有一定安全风险。
|
||||
|
||||
### `taosAdapter`
|
||||
|
||||
taosAdapter 与 taosd 之间使用原生客户端库(taosc)和自有协议进行连接,同样支持 RPC 消息压缩,不会造成数据安全问题。
|
||||
|
||||
应用和其他组件通过各语言连接器与 taosAdapter 进行连接。默认情况下,连接是基于 HTTP 1.1 且不加密的。要保证 taosAdapter 与其他组件之间的数据传输安全,需要配置 SSL 加密连接。在 `/etc/taos/taosadapter.toml` 配置文件中修改如下配置:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
enable = true
|
||||
certFile = "/path/to/certificate-file"
|
||||
keyFile = "/path/to/private-key"
|
||||
```
|
||||
|
||||
在连接器中配置 HTTPS/SSL 访问方式,完成加密访问。
|
||||
|
||||
为进一步增强安全性,可启用白名单功能,在 `taosd` 中配置,对 taosAdapter 组件同样生效。
|
||||
|
||||
### `taosX`
|
||||
|
||||
`taosX` 对外包括 REST API 接口和 gRPC 接口,其中 gRPC 接口用于 taos-agent 连接。
|
||||
|
||||
- REST API 接口是基于 HTTP 1.1 且不加密的,有安全风险。
|
||||
- gRPC 接口基于 HTTP 2 且不加密,有安全风险。
|
||||
|
||||
为了保证数据安全,建议 taosX API 接口仅限内部访问。在 `/etc/taos/taosx.toml` 配置文件中修改如下配置:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
listen = "127.0.0.1:6050"
|
||||
grpc = "127.0.0.1:6055"
|
||||
```
|
||||
|
||||
从 TDengine 3.3.6.0 开始,taosX 支持 HTTPS 连接,在 `/etc/taos/taosx.toml` 文件中添加如下配置:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
ssl_cert = "/path/to/server.pem"
|
||||
ssl_key = "/path/to/server.key"
|
||||
ssl_ca = "/path/to/ca.pem"
|
||||
```
|
||||
|
||||
并在 Explorer 中修改 API 地址为 HTTPS 连接:
|
||||
|
||||
```toml
|
||||
# taosX API 本地连接
|
||||
x_api = "https://127.0.0.1:6050"
|
||||
# Public IP 或者域名地址
|
||||
grpc = "https://public.domain.name:6055"
|
||||
```
|
||||
|
||||
### `taosExplorer`
|
||||
|
||||
与 `taosAdapter` 组件相似,`taosExplorer` 组件提供 HTTP 服务对外访问。在 `/etc/taos/explorer.toml` 配置文件中修改如下配置:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
# SSL certificate file
|
||||
certificate = "/path/to/ca.file"
|
||||
|
||||
# SSL certificate private key
|
||||
certificate_key = "/path/to/key.file"
|
||||
```
|
||||
|
||||
之后,使用 HTTPS 进行 Explorer 访问,如 [https://192.168.12.34:6060](https://192.168.12.34:6060)。
|
||||
|
||||
### `taosxAgent`
|
||||
|
||||
taosX 启用 HTTPS 后,Agent 组件与 taosX 之间使用 HTTP 2 加密连接,使用 Arrow-Flight RPC 进行数据交换,传输内容是二进制格式,且仅注册过的 Agent 连接有效,保障数据安全。
|
||||
|
||||
建议在不安全网络或公共网络环境下运行的 Agent 服务,始终开启 HTTPS 连接。
|
||||
|
||||
### `taosKeeper`
|
||||
|
||||
taosKeeper 使用 WebSocket 连接与 taosAdapter 通信,将其他组件上报的监控信息写入 TDengine。
|
||||
|
||||
`taosKeeper` 当前版本存在安全风险:
|
||||
|
||||
- 监听地址不可限制为仅本机,默认监听所有地址的 6043 端口,存在网络攻击风险。使用 Docker 或 Kubernetes 部署且不暴露 taosKeeper 端口时,此风险可忽略。
|
||||
- 配置文件中配置明文密码,需要降低配置文件可见性。在 `/etc/taos/taoskeeper.toml` 中存在:
|
||||
|
||||
```toml
|
||||
[tdengine]
|
||||
host = "localhost"
|
||||
port = 6041
|
||||
username = "root"
|
||||
password = "taosdata"
|
||||
usessl = false
|
||||
```
|
||||
|
||||
## 安全增强
|
||||
|
||||
我们建议在局域网内部使用 TDengine。
|
||||
|
||||
如果必须在局域网外部提供访问,请考虑添加以下配置:
|
||||
|
||||
### 负载均衡
|
||||
|
||||
使用负载均衡对外提供 taosAdapter 服务。
|
||||
|
||||
以 Nginx 为例,配置多节点负载均衡:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 6041;
|
||||
|
||||
location / {
|
||||
proxy_pass http://websocket;
|
||||
# Headers for websocket compatible
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
# Forwarded headers
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header X-Forwarded-Server $hostname;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
}
|
||||
}
|
||||
|
||||
upstream websocket {
|
||||
server 192.168.11.61:6041;
|
||||
server 192.168.11.62:6041;
|
||||
server 192.168.11.63:6041;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
如果 taosAdapter 组件未配置 SSL 安全连接,还需要配置 SSL 才能保证安全访问。SSL 可以配置在更上层的 API Gateway,也可以配置在 Nginx 中;如果您对各组件之间的安全性有更强的要求,可以在所有组件中都配置 SSL。Nginx 配置如下:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 443 ssl;
|
||||
|
||||
ssl_certificate /path/to/your/certificate.crt;
|
||||
ssl_certificate_key /path/to/your/private.key;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 安全网关
|
||||
|
||||
在现代互联网生产系统中,安全网关的使用也很普遍。[traefik](https://traefik.io/) 是一个很好的开源选择,我们以 traefik 为例,解释在 API 网关中的安全配置。
|
||||
|
||||
Traefik 通过 middleware(中间件)提供多种安全配置,包括:
|
||||
|
||||
1. 认证(Authentication):Traefik 提供 BasicAuth、DigestAuth、自定义认证中间件、OAuth 2.0 等多种认证方式。
|
||||
2. IP 白名单(IPWhitelist):限制允许访问的客户端 IP。
|
||||
3. 频率限制(RateLimit):控制发送到服务的请求数。
|
||||
4. 自定义 Headers:通过自定义 Headers 添加 `allowedHosts` 等配置,提高安全性。
|
||||
|
||||
一个常见的中间件示例如下:
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)"
|
||||
- "traefik.http.routers.tdengine.entrypoints=https"
|
||||
- "traefik.http.routers.tdengine.tls.certresolver=default"
|
||||
- "traefik.http.routers.tdengine.service=tdengine"
|
||||
- "traefik.http.services.tdengine.loadbalancer.server.port=6041"
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
- "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue"
|
||||
- "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true"
|
||||
- "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
|
||||
- "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist"
|
||||
```
|
||||
|
||||
上面的示例完成以下配置:
|
||||
|
||||
- TLS 认证使用 `default` 配置,该配置可在配置文件或 traefik 启动参数中完成,如下:
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: "traefik:v2.3.2"
|
||||
hostname: "traefik"
|
||||
networks:
|
||||
- traefik
|
||||
command:
|
||||
- "--log.level=INFO"
|
||||
- "--api.insecure=true"
|
||||
- "--providers.docker=true"
|
||||
- "--providers.docker.exposedbydefault=false"
|
||||
- "--providers.docker.swarmmode=true"
|
||||
- "--providers.docker.network=traefik"
|
||||
- "--providers.docker.watch=true"
|
||||
- "--entrypoints.http.address=:80"
|
||||
- "--entrypoints.https.address=:443"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge=true"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.provider=alidns"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com"
|
||||
- "--certificatesresolvers.default.acme.email=linhehuo@gmail.com"
|
||||
- "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json"
|
||||
```
|
||||
|
||||
上面的启动参数配置了 `default` TLS 证书解析器和自动 ACME 认证(自动证书申请和续期)。
|
||||
|
||||
- 中间件 `redirect-to-https`:配置从 HTTP 到 HTTPS 的转发,强制使用安全连接。
|
||||
|
||||
```yaml
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
```
|
||||
|
||||
- 中间件 `check-header`:配置自定义 Headers 检查。外部访问必须添加自定义 Header 并匹配 Header 值,避免非法访问。这在提供 API 访问时是一个非常简单有效的安全机制。
|
||||
- 中间件 `tdengine-ipwhitelist`:配置 IP 白名单。仅允许指定 IP 访问,使用 CIDR 路由规则进行匹配,可以设置内网及外网 IP 地址。
|
||||
|
||||
## 总结
|
||||
|
||||
数据安全是 TDengine 产品的一项关键指标,这些措施旨在保护 TDengine 部署免受未经授权的访问和数据泄露,同时保持性能和功能。但 TDengine 自身的安全配置不是生产中的唯一保障,结合用户业务系统制定更加匹配客户需求的解决方案更加重要。
|
|
@ -67,11 +67,15 @@ alter database <dbname> replica 2|1
|
|||
|
||||
| 异常场景 | 集群状态 |
|
||||
| ------- | ------ |
|
||||
| 没有 Vnode 发生故障: Arbitrator 故障(Mnode 宕机节点超过一个,导致 Mnode 无法选主)| **持续提供服务** |
|
||||
| 没有 Vnode 发生故障:Arbitrator 故障(Mnode 宕机节点超过一个,导致 Mnode 无法选主)| **持续提供服务** |
|
||||
| 仅一个 Vnode 故障:VGroup 已经达成同步后,某一个 Vnode 才发生故障的 | **持续提供服务** |
|
||||
| 仅一个 Vnode 故障:2 个 Vnode 同时故障,故障前 VGroup 达成同步,但是只有一个 Vnode 从故障中恢复服务,另一个 Vnode 服务故障 | **通过下面的命令,强制指定 leader,继续提供服务** |
|
||||
| 仅一个 Vnode 故障:离线 Vnode 启动后,VGroup 未达成同步前,另一个 Vnode 服务故障的 | **无法提供服务** |
|
||||
| 两个 Vnode 都发生故障 | **无法提供服务** |
|
||||
|
||||
```sql
|
||||
ASSIGN LEADER FORCE;
|
||||
```
|
||||
|
||||
## 常见问题
|
||||
|
||||
|
|
|
@ -81,7 +81,12 @@ taosx replica start -f source_endpoint -t sink_endpoint [database...]
|
|||
taosx replica start -f td1:6030 -t td2:6030
|
||||
```
|
||||
|
||||
该示例命令会自动创建除 information_schema、performance_schema、log、audit 库之外的同步任务。可以使用 `http://td2:6041` 指定该 endpoint 使用 websocket 接口(默认是原生接口)。也可以指定数据库同步:taosx replica start -f td1:6030 -t td2:6030 db1 仅创建指定的数据库同步任务。
|
||||
该示例命令会自动创建除 information_schema、performance_schema、log、audit 库之外的同步任务,并持续监听新增的数据库,当 td1 和 td2 中新增同名数据库时可自动启动新增数据库的数据复制任务。需要说明的是:
|
||||
|
||||
- 可以使用 `http://td2:6041` 指定该 endpoint 使用 websocket 接口(默认是原生接口)。
|
||||
- 可以使用 `--new-database-checking-interval <SECONDS>` 指定新增数据库的检查间隔,默认为 30 分钟。
|
||||
- 可以使用 `--no-new-databases` 禁用监听行为。
|
||||
- 也可以指定数据库同步:`taosx replica start -f td1:6030 -t td2:6030 db1` 仅创建指定的数据库同步任务。此时相当于配置了 `--no-new-databases`,不会开启新增数据库自动同步。
|
||||
|
||||
2. 方法二
|
||||
|
||||
|
@ -121,6 +126,7 @@ taosx replica stop id [db...]
|
|||
该命令作用如下:
|
||||
1. 停止指定 Replica ID 下所有或指定数据库的双副本同步任务。
|
||||
2. 使用 `taosx replica stop id1 db1` 表示停止 id1 replica 下 db1 的同步任务。
|
||||
3. `--no-new-databases` 选项启用时,不停止新增数据库监听任务,仅停止当前同步中的数据库。
|
||||
|
||||
### 重启双活任务
|
||||
|
||||
|
@ -145,8 +151,8 @@ taosx replica diff id [db....]
|
|||
| replica | database | source | sink | vgroup_id | current | latest | diff |
|
||||
+---------+----------+----------+----------+-----------+---------+---------+------+
|
||||
| a | opc | td1:6030 | td2:6030 | 2 | 17600 | 17600 | 0 |
|
||||
| ad | opc | td2:6030 | td2:6030 | 3 | 17600 | 17600 | 0 |
|
||||
```
|
||||
| a | opc | td2:6030 | td2:6030 | 3 | 17600 | 17600 | 0 |
|
||||
```
|
||||
|
||||
### 删除双活任务
|
||||
|
||||
|
@ -156,6 +162,16 @@ taosx replica remove id [--force]
|
|||
|
||||
删除当前所有双活同步任务。正常情况下要想删除同步任务,需要先 stop 该任务;但当 --force 启用时,会强制停止并清除任务。
|
||||
|
||||
`--no-new-databases` 选项启用时,不会删除新增数据库同步任务,仅删除当前数据库的同步任务。当 taosx 重启后,如果删除的数据库任务对应的数据库仍然存在,则会继续创建同步任务;不重启 taosx 或者不更新双活监听任务时,也不会再新建这些数据库的同步任务。
|
||||
|
||||
### 更新双活新增数据库检查间隔
|
||||
|
||||
```shell
|
||||
taosx replica update id --new-database-checking-interval <SECONDS>
|
||||
```
|
||||
|
||||
更新双活新增数据库的检查间隔,单位为秒。
|
||||
|
||||
### 推荐使用步骤
|
||||
|
||||
1. 假定在机器 A 上运行,需要首先使用 `taosx replica start` 来配置 taosX,其输入参数是待同步的源端和目标端服务器地址,在完成配置后会自动启动同步服务和任务。此处假定 taosx 服务使用标准端口,同步任务使用原生连接。
|
||||
|
|
|
@ -12,7 +12,7 @@ Power BI 是由 Microsoft 提供的一种商业分析工具。通过配置使用
|
|||
- TDengine 3.3.4.0 以上版本集群已部署并正常运行(企业及社区版均可)。
|
||||
- taosAdapter 能够正常运行,详细参考 [taosAdapter 参考手册](../../../reference/components/taosadapter)。
|
||||
- 从 TDengine 官网下载最新的 Windows 操作系统 X64 客户端驱动程序并进行安装,详细参考 [安装 ODBC 驱动](../../../reference/connector/odbc/#安装)。
|
||||
- 安装完成 Power BI Desktop 软件并运行(如未安装,请从其官方地址下载最新的Windows操作系统 32/64 位版本)。
|
||||
- 安装完成 Power BI Desktop 软件并运行(如未安装,请从其官方地址下载最新的 Windows 操作系统 32/64 位版本)。
|
||||
|
||||
## 配置数据源
|
||||
|
||||
|
@ -76,4 +76,4 @@ select _wstart date, count(*) from test.meters interval(1d) having count(*)>0
|
|||
|
||||
**第 7 步**,制作报告。在柱状图、饼图等控件中使用这些数据。
|
||||
|
||||
由于 TDengine 处理时序数据的超强性能,使得用户在数据导入及每日定期刷新数据时,都可以得到非常好的体验。更多有关 Power BI 视觉效果的构建方法,请参照 Power BI 的官方文档。
|
||||
由于 TDengine 处理时序数据的超强性能,使得用户在数据导入及每日定期刷新数据时,都可以得到非常好的体验。更多有关 Power BI 视觉效果的构建方法,请参照 Power BI 的官方文档。
|
||||
|
|
|
@ -33,21 +33,21 @@ taosd 命令行参数如下:
|
|||
- 类型:endpoint
|
||||
- 默认值:localhost:6030
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.0.0.0 版本开始引入
|
||||
- 支持版本:v3.0.0.0 引入
|
||||
|
||||
#### secondEp
|
||||
- 说明:taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint。
|
||||
- 类型:endpoint
|
||||
- 默认值:无
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.0.0.0 版本开始引入
|
||||
- 支持版本:v3.0.0.0 引入
|
||||
|
||||
#### fqdn
|
||||
- 说明:taosd 监听的服务地址
|
||||
- 类型:fqdn
|
||||
- 默认值:所在服务器上配置的第一个 hostname
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.0.0.0 版本开始引入
|
||||
- 支持版本:v3.0.0.0 引入
|
||||
|
||||
#### serverPort
|
||||
- 说明:taosd 监听的端口
|
||||
|
@ -56,7 +56,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:1
|
||||
- 最大值:65056
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.0.0.0 版本开始引入
|
||||
- 支持版本:v3.0.0.0 引入
|
||||
|
||||
#### compressMsgSize
|
||||
- 说明:是否对 RPC 消息进行压缩
|
||||
|
@ -121,7 +121,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:1
|
||||
- 最大值:10240
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.4.0 版本之后取消
|
||||
- 支持版本:v3.3.4.0 之后取消
|
||||
|
||||
#### timeToGetAvailableConn
|
||||
- 说明:获得可用连接的最长等待时间
|
||||
|
@ -131,7 +131,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:20
|
||||
- 最大值:1000000
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.4.0 版本之后取消
|
||||
- 支持版本:v3.3.4.0 之后取消
|
||||
|
||||
#### maxShellConns
|
||||
- 说明:允许创建的最大连接数
|
||||
|
@ -140,7 +140,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:10
|
||||
- 最大值:50000000
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.4.0 版本之后取消
|
||||
- 支持版本:v3.3.4.0 之后取消
|
||||
|
||||
#### maxRetryWaitTime
|
||||
- 说明:重连最大超时时间,从重试时候开始计算。
|
||||
|
@ -258,7 +258,7 @@ taosd 命令行参数如下:
|
|||
- 类型:fqdn
|
||||
- 默认值:telemetry.taosdata.com
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.0.0.0 版本开始引入
|
||||
- 支持版本:v3.0.0.0 引入
|
||||
|
||||
#### telemetryPort
|
||||
- 说明:telemetry 服务器端口号
|
||||
|
@ -267,7 +267,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:1
|
||||
- 最大值:65056
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.0.0.0 版本开始引入
|
||||
- 支持版本:v3.0.0.0 引入
|
||||
|
||||
#### telemetryInterval
|
||||
- 说明:telemetry 上传时间间隔
|
||||
|
@ -306,7 +306,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:0
|
||||
- 最大值:1
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### queryBufferSize
|
||||
- 说明:查询可用的缓存大小
|
||||
|
@ -334,7 +334,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:0
|
||||
- 最大值:1
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.3.5.0 版本开始引入
|
||||
- 支持版本:v3.3.5.0 引入
|
||||
|
||||
#### minReservedMemorySize
|
||||
- 说明:最小预留的系统可用内存数量,除预留外的内存都可以被用于查询。
|
||||
|
@ -344,7 +344,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:1024
|
||||
- 最大值:1000000000
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.3.5.0 版本开始引入
|
||||
- 支持版本:v3.3.5.0 引入
|
||||
|
||||
#### singleQueryMaxMemorySize
|
||||
- 说明:单个查询在单个节点(dnode)上可以使用的内存上限,超过该上限将返回错误信息。
|
||||
|
@ -354,7 +354,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:0
|
||||
- 最大值:1000000000
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.3.5.0 版本开始引入
|
||||
- 支持版本:v3.3.5.0 引入
|
||||
|
||||
#### filterScalarMode
|
||||
- 说明:强制使用标量过滤模式
|
||||
|
@ -363,7 +363,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:0
|
||||
- 最大值:1
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### queryNoFetchTimeoutSec
|
||||
- 说明:查询中当应用长时间不 FETCH 数据时的超时时间,从最后一次响应起计时,超时自动清除任务。 **`内部参数`**
|
||||
|
@ -409,7 +409,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:INT64_MIN
|
||||
- 最大值:INT64_MAX
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### queryRsmaTolerance
|
||||
- 说明:查询计划的分配方法 **`内部参数`**
|
||||
|
@ -418,7 +418,7 @@ taosd 命令行参数如下:
|
|||
- 最小值:0
|
||||
- 最大值:900000
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### enableQueryHb
|
||||
- 说明:是否发送查询心跳消息 **`内部参数`**
|
||||
|
@ -437,26 +437,26 @@ taosd 命令行参数如下:
|
|||
- 最小值:1
|
||||
- 最大值:10240
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
### 区域相关
|
||||
#### timezone
|
||||
- 说明:时区
|
||||
- 默认值:从系统中动态获取当前的时区设置
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### locale
|
||||
- 说明:系统区位信息及编码格式
|
||||
- 默认值:从系统中获取
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### charset
|
||||
- 说明:字符集编码
|
||||
- 默认值:从系统中获取
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
:::info
|
||||
#### 区域相关参数说明
|
||||
|
@ -541,7 +541,7 @@ charset 的有效值是 UTF-8。
|
|||
- 类型:字符串
|
||||
- 默认值:/var/lib/taos
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### diskIDCheckEnabled
|
||||
- 说明:在重启 dnode 时检查 dataDir 所在磁盘 id 是否发生改变
|
||||
|
@ -549,14 +549,14 @@ charset 的有效值是 UTF-8。
|
|||
- 默认值:1
|
||||
- 最小值:0
|
||||
- 最大值:1
|
||||
- 支持版本:从 v3.3.5.0 版本开始引入
|
||||
- 支持版本:v3.3.5.0 引入
|
||||
|
||||
#### tempDir
|
||||
- 说明:指定所有系统运行过程中的临时文件生成的目录
|
||||
- 类型:字符串
|
||||
- 默认值:/tmp
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### minimalDataDirGB
|
||||
- 说明:dataDir 指定的时序数据存储目录所需要保留的最小空间
|
||||
|
@ -566,7 +566,7 @@ charset 的有效值是 UTF-8。
|
|||
- 最小值:0.001f
|
||||
- 最大值:10000000
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### minimalTmpDirGB
|
||||
- 说明:tempDir 所指定的临时文件目录所需要保留的最小空间
|
||||
|
@ -576,7 +576,7 @@ charset 的有效值是 UTF-8。
|
|||
- 最小值:0.001f
|
||||
- 最大值:10000000
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### minDiskFreeSize
|
||||
- 说明:当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。 **`企业版参数`**
|
||||
|
@ -763,7 +763,7 @@ charset 的有效值是 UTF-8。
|
|||
- 最小值:1
|
||||
- 最大值:31572500
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### ttlPushInterval
|
||||
- 说明:ttl 检测超时频率
|
||||
|
@ -861,13 +861,13 @@ charset 的有效值是 UTF-8。
|
|||
- 说明:数据加密算法 **`企业版参数`**
|
||||
- 类型:字符串
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### encryptScope
|
||||
- 说明:加密范围 **`企业版参数`**
|
||||
- 类型:字符串
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### enableWhiteList
|
||||
- 说明:白名单功能开关 **`企业版参数`**
|
||||
|
@ -892,19 +892,19 @@ charset 的有效值是 UTF-8。
|
|||
- 说明:用于同步模块调试 **`内部参数`**
|
||||
- 类型:整数
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### syncHeartbeatInterval
|
||||
- 说明:用于同步模块调试 **`内部参数`**
|
||||
- 类型:整数
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### syncHeartbeatTimeout
|
||||
- 说明:用于同步模块调试 **`内部参数`**
|
||||
- 类型:整数
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### syncSnapReplMaxWaitN
|
||||
- 说明:用于同步模块调试 **`内部参数`**
|
||||
|
@ -946,7 +946,7 @@ charset 的有效值是 UTF-8。
|
|||
- 说明:用于授权检查 **`内部参数`**
|
||||
- 类型:整数
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### trimVDbIntervalSec
|
||||
- 说明:用于删除过期数据 **`内部参数`**
|
||||
|
@ -1017,6 +1017,13 @@ charset 的有效值是 UTF-8。
|
|||
- 动态修改:支持通过 SQL 修改,重启生效。
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
|
||||
#### enableStrongPassword
|
||||
- 说明:密码强度要求:至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`
|
||||
- 类型:整数;0:不启用,1:启用
|
||||
- 默认值:1
|
||||
- 动态修改:支持通过 SQL 修改,重启生效
|
||||
- 支持版本:v3.3.5.0 引入
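若需关闭该强制要求,除在 taos.cfg 中设置外,也可参考用户管理文档中给出的 SQL 写法:

```sql
alter all dnode 'EnableStrongPassword' '0'
```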
|
||||
|
||||
### 流计算参数
|
||||
|
||||
#### disableStream
|
||||
|
@ -1042,7 +1049,7 @@ charset 的有效值是 UTF-8。
|
|||
- 说明:并发进行聚合计算的数目 **`内部参数`**
|
||||
- 类型:整数
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### checkpointInterval
|
||||
- 说明:checkponit 同步间隔 **`内部参数`**
|
||||
|
@ -1065,9 +1072,52 @@ charset 的有效值是 UTF-8。
|
|||
#### streamSinkDataRate
|
||||
- 说明:用于控制流计算结果的写入速度 **`内部参数`**
|
||||
- 类型:整数
|
||||
- 动态修改:支持通过 SQL 修改,立即生效。
|
||||
- 动态修改:支持通过 SQL 修改,立即生效。
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
|
||||
#### streamNotifyMessageSize
|
||||
- 说明:用于控制事件通知的消息大小 `内部参数`
|
||||
- 类型:整数
|
||||
- 单位:KB
|
||||
- 默认值:8192
|
||||
- 最小值:8
|
||||
- 最大值:1048576
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
#### streamNotifyFrameSize
|
||||
- 说明:用于控制事件通知消息发送时底层的帧大小 `内部参数`
|
||||
- 类型:整数
|
||||
- 单位:KB
|
||||
- 默认值:256
|
||||
- 最小值:8
|
||||
- 最大值:1048576
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
#### adapterFqdn
|
||||
- 说明:taosAdapter 服务的地址 `内部参数`
|
||||
- 类型:fqdn
|
||||
- 默认值:localhost
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
#### adapterPort
|
||||
- 说明:taosAdapter 服务的端口号 `内部参数`
|
||||
- 类型:整数
|
||||
- 默认值:6041
|
||||
- 最小值:1
|
||||
- 最大值:65056
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
#### adapterToken
|
||||
- 说明:为 `{username}:{password}` 经过 Base64 编码之后的字符串 `内部参数`
|
||||
- 类型:字符串
|
||||
- 默认值:`cm9vdDp0YW9zZGF0YQ==`
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
### 日志相关
|
||||
|
||||
#### logDir
|
||||
|
@ -1075,7 +1125,7 @@ charset 的有效值是 UTF-8。
|
|||
- 类型:字符串
|
||||
- 默认值:/var/log/taos
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### minimalLogDirGB
|
||||
- 说明:日志文件夹所在磁盘可用空间大小小于该值时,停止写日志。
|
||||
|
@ -1085,7 +1135,7 @@ charset 的有效值是 UTF-8。
|
|||
- 最小值:0.001f
|
||||
- 最大值:10000000
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### numOfLogLines
|
||||
- 说明:单个日志文件允许的最大行数
|
||||
|
@ -1325,7 +1375,7 @@ charset 的有效值是 UTF-8。
|
|||
- 说明:配置文件所在目录
|
||||
- 类型:字符串
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### forceReadConfig
|
||||
- 说明:是否强制使用配置文件中的参数
|
||||
|
@ -1334,13 +1384,13 @@ charset 的有效值是 UTF-8。
|
|||
- 最小值:0
|
||||
- 最大值:1
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.3.5.0 版本开始引入
|
||||
- 支持版本:v3.3.5.0 引入
|
||||
|
||||
#### scriptDir
|
||||
- 说明:测试工具的脚本目录 **`内部参数`**
|
||||
- 类型:字符串
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### assert
|
||||
- 说明:断言控制开关
|
||||
|
@ -1349,7 +1399,7 @@ charset 的有效值是 UTF-8。
|
|||
- 最小值:0
|
||||
- 最大值:1
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### randErrorChance
|
||||
- 说明:用于随机失败测试 **`内部参数`**
|
||||
|
@ -1379,17 +1429,17 @@ charset 的有效值是 UTF-8。
|
|||
#### simdEnable
|
||||
- 说明:用于测试 SIMD 加速 **`内部参数`**
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.3.4.3 版本开始引入
|
||||
- 支持版本:v3.3.4.3 引入
|
||||
|
||||
#### AVX512Enable
|
||||
- 说明:用于测试 AVX512 加速 **`内部参数`**
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.3.4.3 版本开始引入
|
||||
- 支持版本:v3.3.4.3 引入
|
||||
|
||||
#### rsyncPort
|
||||
- 说明:用于调试流计算 **`内部参数`**
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### snodeAddress
|
||||
- 说明:用于调试流计算 **`内部参数`**
|
||||
|
@ -1404,12 +1454,12 @@ charset 的有效值是 UTF-8。
|
|||
#### enableAuditDelete
|
||||
- 说明:用于测试审计功能 **`内部参数`**
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### slowLogThresholdTest
|
||||
- 说明:用于测试慢日志 **`内部参数`**
|
||||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
- 支持版本:v3.1.0.0 引入
|
||||
|
||||
#### bypassFlag
|
||||
- 说明:用于旁路(bypass)测试 **`内部参数`**
|
||||
|
|
|
@ -221,6 +221,12 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
|
|||
- 动态修改:不支持
|
||||
- 支持版本:从 v3.0.0.0 版本开始引入
|
||||
|
||||
#### compareAsStrInGreatest
|
||||
- 说明:用于决定 greatest、least 函数的参数既有数值类型又有字符串类型时,比较类型的转换规则。
|
||||
- 类型:整数;1:统一转为字符串比较,0:统一转为数值类型比较。
|
||||
- 动态修改:支持通过 SQL 修改,立即生效
|
||||
- 支持版本:从 v3.3.6.0 版本开始引入
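该参数为客户端配置。下面给出一个动态修改的示意写法(假设客户端参数可通过 ALTER LOCAL 语句修改,具体以实际版本支持为准):

```sql
ALTER LOCAL 'compareAsStrInGreatest' '0';
```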
|
||||
|
||||
### 写入相关
|
||||
|
||||
#### smlChildTableName
|
||||
|
|
|
@ -3,7 +3,7 @@ title: taosX-Agent 参考手册
|
|||
sidebar_label: taosX-Agent
|
||||
---
|
||||
|
||||
本节讲述如何部署 `Agent` (for `taosX`)。使用之前需要安装 TDengine Enterprise 安装包之后,taosX-Agent 用于在部分数据接入场景,如 Pi, OPC UA, OPC DA 等对访问数据源有一定限制或者网络环境特殊的场景下,可以将 taosX-Agent 部署在靠近数据源的环境中甚至与数据源在相同的服务器上,由 taosX-Agent 负责从数据源读取数据并发送给 taosX。
|
||||
本节讲述如何部署 `Agent`(for `taosX`)。使用之前需要先安装 TDengine Enterprise 安装包。taosX-Agent 用于部分数据接入场景,如 Pi、OPC UA、OPC DA 等对访问数据源有一定限制或者网络环境特殊的场景,可以将 taosX-Agent 部署在靠近数据源的环境中甚至与数据源在相同的服务器上,由 taosX-Agent 负责从数据源读取数据并发送给 taosX。
|
||||
|
||||
## 配置
|
||||
|
||||
|
@ -12,12 +12,14 @@ sidebar_label: taosX-Agent
|
|||
- `endpoint`:必填,`taosX` 的 GRPC 服务地址。
|
||||
- `token`:必填,在 `Explorer` 上创建 `Agent` 时,产生的 Token。
|
||||
- `instanceId`:当前 taosx-agent 服务的实例 ID,如果同一台机器上启动了多个 taosx-agent 实例,必须保证各个实例的实例 ID 互不相同。
|
||||
- `compression`:非必填,可配置为 `true` 或 `false`, 默认为 `false`。配置为`true`, 则开启 `Agent` 和 `taosX` 通信数据压缩。
|
||||
- `compression`:非必填,可配置为 `true` 或 `false`,默认为 `false`。配置为`true`,则开启 `Agent` 和 `taosX` 通信数据压缩。
|
||||
- `in_memory_cache_capacity`:非必填,表示可在内存中缓存的最大消息批次数,可配置为大于 0 的整数。默认为 `64`。
|
||||
- `log_level`:非必填,日志级别,默认为 `info`, 同 `taosX` 一样,支持 `error`,`warn`,`info`,`debug`,`trace` 五级。已弃用,请使用 `log.level` 代替。
|
||||
- `client_port_range.min`:非必填,取值范围 `[49152-65535]`,默认为 `49152`,当 agent 向 taosx 创建 socket 连接时,socket 客户端会随机监听一个端口,此配置限制了端口范围的最小值。
|
||||
- `client_port_range.max`:非必填,取值范围 `[49152-65535]`,默认为 `65535`,此配置限制了端口范围的最大值。
|
||||
- `log_level`:非必填,日志级别,默认为 `info`,同 `taosX` 一样,支持 `error`、`warn`、`info`、`debug`、`trace` 五级。已弃用,请使用 `log.level` 代替。
|
||||
- `log_keep_days`:非必填,日志保存天数,默认为 `30` 天。已弃用,请使用 `log.keepDays` 代替。
|
||||
- `log.path`:日志文件存放的目录。
|
||||
- `log.level`:日志级别,可选值为 "error", "warn", "info", "debug", "trace"。
|
||||
- `log.level`:日志级别,可选值为 "error"、"warn"、"info"、"debug"、"trace"。
|
||||
- `log.compress`:日志文件滚动后的文件是否进行压缩。
|
||||
- `log.rotationCount`:日志文件目录下最多保留的文件数,超出数量的旧文件被删除。
|
||||
- `log.rotationSize`:触发日志文件滚动的文件大小(单位为字节),当日志文件超出此大小后会生成一个新文件,新的日志会写入新文件。
|
||||
|
@ -49,6 +51,15 @@ sidebar_label: taosX-Agent
|
|||
#
|
||||
#in_memory_cache_capacity = 64
|
||||
|
||||
[client_port_range]
|
||||
# Minimum boundary of listening port of agent, can not less than 49152
|
||||
#
|
||||
# min = 49152
|
||||
|
||||
# Maximum boundary of listening port of agent, can not greater than 65535
|
||||
#
|
||||
# max = 65535
|
||||
|
||||
# log configuration
|
||||
[log]
|
||||
# All log files are stored in this directory
|
||||
|
|
|
@ -25,7 +25,7 @@ taosBenchmark
|
|||
```
|
||||
|
||||
在无参数运行时,taosBenchmark 默认连接 `/etc/taos/taos.cfg` 中指定的 TDengine 集群。
|
||||
连接成功后,会默认创建智能电表示例数据库 test,创建超级表 meters, 创建子表 1 万,每子写入数据 1 万条,若 test 库已存在,默认会先删再建。
|
||||
连接成功后,会默认创建智能电表示例数据库 test,创建超级表 meters,创建子表 1 万张,每张子表写入数据 1 万条;若 test 库已存在,默认会先删再建。
|
||||
|
||||
### 命令行模式
|
||||
|
||||
|
@ -58,22 +58,22 @@ taosBenchmark -f <json file>
|
|||
| -p/--password \<passwd> | 用于连接 TDengine 服务端的密码,默认值为 taosdata |
|
||||
| -o/--output \<file> | 结果输出文件的路径,默认值为 ./output.txt |
|
||||
| -T/--thread \<threadNum> | 插入数据的线程数量,默认为 8 |
|
||||
| -B/--interlace-rows \<rowNum> |启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入 |
|
||||
| -B/--interlace-rows \<rowNum> |启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0,即向一张子表完成数据插入后才会向下一张子表进行数据插入 |
|
||||
| -i/--insert-interval \<timeInterval> | 指定交错插入模式的插入间隔,单位为 ms,默认值为 0。只有当 `-B/--interlace-rows` 大于 0 时才起作用,意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入 |
|
||||
| -r/--rec-per-req \<rowNum> | 每次向 TDengine 请求写入的数据行数,默认值为 30000 |
|
||||
| -t/--tables \<tableNum> | 指定子表的数量,默认为 10000 |
|
||||
| -S/--timestampstep \<stepLength> | 每个子表中插入数据的时间戳步长,单位是 ms,默认值是 1 |
|
||||
| -n/--records \<recordNum> | 每个子表插入的记录数,默认值为 10000 |
|
||||
| -d/--database \<dbName> | 所使用的数据库的名称,默认值为 test |
|
||||
| -b/--data-type \<colType> | 指定超级表普通列数据类型, 多个使用逗号分隔,默认值:"FLOAT,INT,FLOAT" 如:`taosBenchmark -b "FLOAT,BINARY(8),NCHAR(16)"`|
|
||||
| -b/--data-type \<colType> | 指定超级表普通列数据类型,多个使用逗号分隔,默认值:"FLOAT,INT,FLOAT" 如:`taosBenchmark -b "FLOAT,BINARY(8),NCHAR(16)"`|
|
||||
| -A/--tag-type \<tagType> | 指定超级表标签列数据类型,多个使用逗号分隔,默认值:"INT,BINARY(24)" 如:`taosBenchmark -A "INT,BINARY(8),NCHAR(8)"`|
|
||||
| -l/--columns \<colNum> | 超级表的数据列的总数量。如果同时设置了该参数和 `-b/--data-type`,则最后的结果列数为两者取大。如果本参数指定的数量大于 `-b/--data-type` 指定的列数,则未指定的列类型默认为 INT, 例如: `-l 5 -b float,double`, 那么最后的列为 `FLOAT,DOUBLE,INT,INT,INT`。如果 columns 指定的数量小于或等于 `-b/--data-type` 指定的列数,则结果为 `-b/--data-type` 指定的列和类型,例如:`-l 3 -b float,double,float,bigint`,那么最后的列为 `FLOAT,DOUBLE,FLOAT,BIGINT` |
|
||||
| -l/--columns \<colNum> | 超级表的数据列的总数量。如果同时设置了该参数和 `-b/--data-type`,则最后的结果列数为两者取大。如果本参数指定的数量大于 `-b/--data-type` 指定的列数,则未指定的列类型默认为 INT,例如 `-l 5 -b float,double`,那么最后的列为 `FLOAT,DOUBLE,INT,INT,INT`。如果 columns 指定的数量小于或等于 `-b/--data-type` 指定的列数,则结果为 `-b/--data-type` 指定的列和类型,例如:`-l 3 -b float,double,float,bigint`,那么最后的列为 `FLOAT,DOUBLE,FLOAT,BIGINT` |
|
||||
| -L/--partial-col-num \<colNum> | 指定某些列写入数据,其他列数据为 NULL。默认所有列都写入数据 |
|
||||
| -w/--binwidth \<length> | nchar 和 binary 类型的默认长度,默认值为 64 |
|
||||
| -m/--table-prefix \<tablePrefix> | 子表名称的前缀,默认值为 "d" |
|
||||
| -E/--escape-character | 开关参数,指定在超级表和子表名称中是否使用转义字符。默认值为不使用 |
|
||||
| -C/--chinese | 开关参数,指定 nchar 和 binary 是否使用 Unicode 中文字符。默认值为不使用 |
|
||||
| -N/--normal-table | 开关参数,指定只创建普通表,不创建超级表。默认值为 false。仅当插入模式为 taosc, stmt, rest 模式下可以使用 |
|
||||
| -N/--normal-table | 开关参数,指定只创建普通表,不创建超级表。默认值为 false。仅当插入模式为 taosc、stmt、rest 模式下可以使用 |
|
||||
| -M/--random | 开关参数,插入数据为生成的随机值。默认值为 false。若配置此参数,则随机生成要插入的数据。对于数值类型的 标签列/数据列,其值为该类型取值范围内的随机值。对于 NCHAR 和 BINARY 类型的 标签列/数据列,其值为指定长度范围内的随机字符串 |
|
||||
| -x/--aggr-func | 开关参数,指示插入后查询聚合函数。默认值为 false |
|
||||
| -y/--answer-yes | 开关参数,要求用户在提示后确认才能继续,默认值为 false |
|
||||
|
@ -93,14 +93,17 @@ taosBenchmark -f <json file>
|
|||
|
||||
本节所列参数适用于所有功能模式。
|
||||
|
||||
- **filetype**:功能分类,可选值为 `insert`, `query` 和 `subscribe`。分别对应插入、查询和订阅功能。每个配置文件中只能指定其中之一。
|
||||
- **filetype**:功能分类,可选值为 `insert`、`query`、`subscribe` 和 `csvfile`。分别对应插入、查询、订阅和生成 csv 文件功能。每个配置文件中只能指定其中之一。
|
||||
|
||||
- **cfgdir**:TDengine 客户端配置文件所在的目录,默认路径是 /etc/taos 。
|
||||
|
||||
- **output_dir**:指定输出文件的目录,当功能分类是 `csvfile` 时,指生成的 csv 文件的保存目录,默认值为 ./output/ 。
|
||||
|
||||
- **host**:指定要连接的 TDengine 服务端的 FQDN,默认值为 localhost 。
|
||||
|
||||
- **port**:要连接的 TDengine 服务器的端口号,默认值为 6030 。
|
||||
|
||||
- **user**:用于连接 TDengine 服务端的用户名,默认为 root 。
|
||||
- **user**:用于连接 TDengine 服务端的用户名,默认值为 root 。
|
||||
|
||||
- **password**:用于连接 TDengine 服务端的密码,默认值为 taosdata。
|
||||
|
||||
|
@ -115,9 +118,9 @@ taosBenchmark -f <json file>
|
|||
|
||||
- **continue_if_fail**:允许用户定义失败后行为。
|
||||
|
||||
“continue_if_fail”:“no”, 失败 taosBenchmark 自动退出,默认行为。
|
||||
“continue_if_fail”:“yes”, 失败 taosBenchmark 警告用户,并继续写入。
|
||||
“continue_if_fail”:“smart”, 如果子表不存在失败,taosBenchmark 会建立子表并继续写入。
|
||||
“continue_if_fail”:“no”,失败 taosBenchmark 自动退出,默认行为。
|
||||
“continue_if_fail”:“yes”,失败 taosBenchmark 警告用户,并继续写入。
|
||||
“continue_if_fail”:“smart”,如果子表不存在失败,taosBenchmark 会建立子表并继续写入。
|
||||
|
||||
#### 数据库相关
|
||||
|
||||
|
@ -125,7 +128,7 @@ taosBenchmark -f <json file>
|
|||
|
||||
- **name**:数据库名。
|
||||
|
||||
- **drop**:数据库已存在时是否删除,可选项为 "yes" 或 "no", 默认为 “yes” 。
|
||||
- **drop**:数据库已存在时是否删除,可选项为 "yes" 或 "no",默认为 “yes” 。
|
||||
|
||||
#### 超级表相关
|
||||
|
||||
|
@ -141,17 +144,17 @@ taosBenchmark -f <json file>
|
|||
|
||||
- **escape_character**:超级表和子表名称中是否包含转义字符,默认值为 "no",可选值为 "yes" 或 "no" 。
|
||||
|
||||
- **auto_create_table**:仅当 insert_mode 为 taosc, rest, stmt 并且 child_table_exists 为 "no" 时生效,该参数为 "yes" 表示 taosBenchmark 在插入数据时会自动创建不存在的表;为 "no" 则表示先提前建好所有表再进行插入。
|
||||
- **auto_create_table**:仅当 insert_mode 为 taosc、rest、stmt 并且 child_table_exists 为 "no" 时生效,该参数为 "yes" 表示 taosBenchmark 在插入数据时会自动创建不存在的表;为 "no" 则表示先提前建好所有表再进行插入。
|
||||
|
||||
- **batch_create_tbl_num**:创建子表时每批次的建表数量,默认为 10。注:实际的批数不一定与该值相同,当执行的 SQL 语句大于支持的最大长度时,会自动截断再执行,继续创建。
|
||||
|
||||
- **data_source**:数据的来源,默认为 taosBenchmark 随机产生,可以配置为 "rand" 和 "sample"。为 "sample" 时使用 sample_file 参数指定的文件内的数据。
|
||||
|
||||
- **insert_mode**:插入模式,可选项有 taosc, rest, stmt, sml, sml-rest, 分别对应普通写入、restful 接口写入、参数绑定接口写入、schemaless 接口写入、restful schemaless 接口写入 (由 taosAdapter 提供)。默认值为 taosc 。
|
||||
- **insert_mode**:插入模式,可选项有 taosc、rest、stmt、sml、sml-rest,分别对应普通写入、restful 接口写入、参数绑定接口写入、schemaless 接口写入、restful schemaless 接口写入 (由 taosAdapter 提供)。默认值为 taosc 。
|
||||
|
||||
- **non_stop_mode**:指定是否持续写入,若为 "yes" 则 insert_rows 失效,直到 Ctrl + C 停止程序,写入才会停止。默认值为 "no",即写入指定数量的记录后停止。注:即使在持续写入模式下 insert_rows 失效,但其也必须被配置为一个非零正整数。
|
||||
|
||||
- **line_protocol**:使用行协议插入数据,仅当 insert_mode 为 sml 或 sml-rest 时生效,可选项为 line, telnet, json 。
|
||||
- **line_protocol**:使用行协议插入数据,仅当 insert_mode 为 sml 或 sml-rest 时生效,可选项为 line、telnet、json 。
|
||||
|
||||
- **tcp_transfer**:telnet 模式下的通信协议,仅当 insert_mode 为 sml-rest 并且 line_protocol 为 telnet 时生效。如果不配置,则默认为 http 协议。
|
||||
|
||||
|
@ -161,11 +164,11 @@ taosBenchmark -f <json file>
|
|||
|
||||
- **childtable_limit**:仅当 child_table_exists 为 yes 时生效,指定从超级表获取子表列表的上限。
|
||||
|
||||
- **interlace_rows**:启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入。
|
||||
- **interlace_rows**:启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0,即向一张子表完成数据插入后才会向下一张子表进行数据插入。
|
||||
|
||||
- **insert_interval**:指定交错插入模式的插入间隔,单位为 ms,默认值为 0。只有当 `-B/--interlace-rows` 大于 0 时才起作用。意味着数据插入线程在为每个子表插入隔行扫描记录后,会等待该值指定的时间间隔后再进行下一轮写入。
|
||||
|
||||
- **partial_col_num**:若该值为正数 n 时, 则仅向前 n 列写入,仅当 insert_mode 为 taosc 和 rest 时生效,如果 n 为 0 则是向全部列写入。
|
||||
- **partial_col_num**:若该值为正数 n 时,则仅向前 n 列写入,仅当 insert_mode 为 taosc 和 rest 时生效,如果 n 为 0 则是向全部列写入。
|
||||
|
||||
- **disorder_ratio**:指定乱序数据的百分比概率,其值域为 [0,50]。默认为 0,即没有乱序数据。
|
||||
|
||||
|
@ -179,15 +182,39 @@ taosBenchmark -f <json file>
|
|||
|
||||
- **sample_file**:指定 csv 格式的文件作为数据源,仅当 data_source 为 sample 时生效。若 csv 文件内的数据行数小于等于 prepared_rand,那么会循环读取 csv 文件数据直到与 prepared_rand 相同;否则则会只读取 prepared_rand 个数的行的数据。也即最终生成的数据行数为二者取小。
|
||||
|
||||
- **use_sample_ts**:仅当 data_source 为 sample 时生效,表示 sample_file 指定的 csv 文件内是否包含第一列时间戳,默认为 no。若设置为 yes, 则使用 csv 文件第一列作为时间戳,由于同一子表时间戳不能重复,生成的数据量取决于 csv 文件内的数据行数相同,此时 insert_rows 失效。
|
||||
- **use_sample_ts**:仅当 data_source 为 sample 时生效,表示 sample_file 指定的 csv 文件内是否包含第一列时间戳,默认为 no。若设置为 yes,则使用 csv 文件第一列作为时间戳,由于同一子表时间戳不能重复,生成的数据量取决于 csv 文件内的数据行数相同,此时 insert_rows 失效。
|
||||
|
||||
- **tags_file**:仅当 insert_mode 为 taosc, rest 的模式下生效。最终的 tag 的数值与 childtable_count 有关,如果 csv 文件内的 tag 数据行小于给定的子表数量,那么会循环读取 csv 文件数据直到生成 childtable_count 指定的子表数量;否则则只会读取 childtable_count 行 tag 数据。也即最终生成的子表数量为二者取小。
|
||||
- **tags_file**:仅当 insert_mode 为 taosc,rest 的模式下生效。最终的 tag 的数值与 childtable_count 有关,如果 csv 文件内的 tag 数据行小于给定的子表数量,那么会循环读取 csv 文件数据直到生成 childtable_count 指定的子表数量;否则则只会读取 childtable_count 行 tag 数据。也即最终生成的子表数量为二者取小。
|
||||
|
||||
- **primary_key**:指定超级表是否有复合主键,取值 1 和 0,复合主键列只能是超级表的第二列,指定生成复合主键后要确保第二列符合复合主键的数据类型,否则会报错。
|
||||
|
||||
- **repeat_ts_min**:数值类型,复合主键开启情况下指定生成相同时间戳记录的最小个数,生成相同时间戳记录的个数是在范围[repeat_ts_min, repeat_ts_max] 内的随机值,最小值等于最大值时为固定个数。
|
||||
|
||||
- **primary_key**:指定超级表是否有复合主键,取值 1 和 0, 复合主键列只能是超级表的第二列,指定生成复合主键后要确保第二列符合复合主键的数据类型,否则会报错。
|
||||
- **repeat_ts_min**:数值类型,复合主键开启情况下指定生成相同时间戳记录的最小个数,生成相同时间戳记录的个数是在范围[repeat_ts_min, repeat_ts_max] 内的随机值, 最小值等于最大值时为固定个数。
|
||||
- **repeat_ts_max**:数值类型,复合主键开启情况下指定生成相同时间戳记录的最大个数。
|
||||
|
||||
- **sqls**:字符串数组类型,指定超级表创建成功后要执行的 sql 数组,sql 中指定表名前面要带数据库名,否则会报未指定数据库错误。
|
||||
|
||||
- **csv_file_prefix**:字符串类型,设置生成的 csv 文件名称的前缀,默认值为 data 。
|
||||
|
||||
- **csv_ts_format**:字符串类型,设置生成的 csv 文件名称中时间字符串的格式,格式遵循 `strftime` 格式标准,如果没有设置表示不按照时间段切分文件。支持的模式有:
|
||||
- %Y: 年份,四位数表示(例如:2025)
|
||||
- %m: 月份,两位数表示(01到12)
|
||||
- %d: 一个月中的日子,两位数表示(01到31)
|
||||
- %H: 小时,24小时制,两位数表示(00到23)
|
||||
- %M: 分钟,两位数表示(00到59)
|
||||
- %S: 秒,两位数表示(00到59)
|
||||
|
||||
- **csv_ts_interval**:字符串类型,设置生成的 csv 文件名称中时间段间隔,支持天、小时、分钟、秒级间隔,如 1d/2h/30m/40s,默认值为 1d 。
|
||||
|
||||
- **csv_output_header**:字符串类型,设置生成的 csv 文件是否包含列头描述,默认值为 yes 。
|
||||
|
||||
- **csv_tbname_alias**:字符串类型,设置 csv 文件列头描述中 tbname 字段的别名,默认值为 device_id 。
|
||||
|
||||
- **csv_compress_level**:字符串类型,设置生成 csv 编码数据并自动压缩成 gzip 格式文件的压缩等级。此过程直接编码并压缩,而非先生成 csv 文件再压缩。可选值为:
|
||||
- none:不压缩
|
||||
- fast:gzip 1级压缩
|
||||
- balance:gzip 6级压缩
|
||||
- best:gzip 9级压缩
|
||||
|
||||
#### 标签列与数据列
|
||||
|
||||
|
@ -196,11 +223,11 @@ taosBenchmark -f <json file>
|
|||
- **type**:指定列类型,可选值请参考 TDengine 支持的数据类型。
|
||||
注:JSON 数据类型比较特殊,只能用于标签,当使用 JSON 类型作为 tag 时有且只能有这一个标签,此时 count 和 len 代表的意义分别是 JSON tag 内的 key-value pair 的个数和每个 KV pair 的 value 的值的长度,value 默认为 string。
|
||||
|
||||
- **len**:指定该数据类型的长度,对 NCHAR,BINARY 和 JSON 数据类型有效。如果对其他数据类型配置了该参数,若为 0, 则代表该列始终都是以 null 值写入;如果不为 0 则被忽略。
|
||||
- **len**:指定该数据类型的长度,对 NCHAR,BINARY 和 JSON 数据类型有效。如果对其他数据类型配置了该参数,若为 0,则代表该列始终都是以 null 值写入;如果不为 0 则被忽略。
|
||||
|
||||
- **count**:指定该类型列连续出现的数量,例如 "count":4096 即可生成 4096 个指定类型的列。
|
||||
|
||||
- **name**:列的名字,若与 count 同时使用,比如 "name":"current", "count":3, 则 3 个列的名字分别为 current、current_2、current_3。
|
||||
- **name**:列的名字,若与 count 同时使用,比如 "name":"current","count":3,则 3 个列的名字分别为 current、current_2、current_3。
|
||||
|
||||
- **min**:数据类型的 列/标签 的最小值。生成的值将大于或等于最小值。
|
||||
|
||||
|
@ -208,7 +235,7 @@ taosBenchmark -f <json file>
|
|||
|
||||
- **scalingFactor**:浮点数精度增强因子,仅当数据类型是 float/double 时生效,有效值范围为 1 至 1000000 的正整数。用于增强生成浮点数的精度,特别是在 min 或 max 值较小的情况下。此属性按 10 的幂次增强小数点后的精度:scalingFactor 为 10 表示增强 1 位小数精度,100 表示增强 2 位,依此类推。
|
||||
|
||||
- **fun**:此列数据以函数填充,目前只支持 sin 和 cos 两函数,输入参数为时间戳换算成角度值,换算公式:角度 x = 输入的时间列ts值 % 360。同时支持系数调节,随机波动因子调节,以固定格式的表达式展现,如 fun=“10\*sin(x)+100\*random(5)” , x 表示角度,取值 0 ~ 360度,增长步长与时间列步长一致。10 表示乘的系数,100 表示加或减的系数,5 表示波动幅度在 5% 的随机范围内。目前支持的数据类型为 int, bigint, float, double 四种数据类型。注意:表达式为固定模式,不可前后颠倒。
|
||||
- **fun**:此列数据以函数填充,目前只支持 sin 和 cos 两函数,输入参数为时间戳换算成角度值,换算公式:角度 x = 输入的时间列 ts 值 % 360。同时支持系数调节,随机波动因子调节,以固定格式的表达式展现,如 fun=“10\*sin(x)+100\*random(5)”,x 表示角度,取值 0 ~ 360 度,增长步长与时间列步长一致。10 表示乘的系数,100 表示加或减的系数,5 表示波动幅度在 5% 的随机范围内。目前支持的数据类型为 int、bigint、float、double 四种数据类型。注意:表达式为固定模式,不可前后颠倒。
|
||||
|
||||
- **values**:nchar/binary 列/标签的值域,将从值中随机选择。
|
||||
|
||||
|
@ -220,23 +247,23 @@ taosBenchmark -f <json file>
|
|||
|
||||
- **level**:字符串类型,指定此列两级压缩中的第二级加密算法的压缩率高低,详细参见创建超级表。
|
||||
|
||||
- **gen**:字符串类型,指定此列生成数据的方式,不指定为随机,若指定为 “order”, 会按自然数顺序增长。
|
||||
- **gen**:字符串类型,指定此列生成数据的方式,不指定为随机,若指定为 “order”,会按自然数顺序增长。
|
||||
|
||||
- **fillNull**:字符串类型,指定此列是否随机插入 NULL 值,可指定为 “true” 或 "false", 只有当 generate_row_rule 为 2 时有效。
|
||||
- **fillNull**:字符串类型,指定此列是否随机插入 NULL 值,可指定为 “true” 或 "false",只有当 generate_row_rule 为 2 时有效。
|
||||
|
||||
#### 写入行为相关
|
||||
|
||||
- **thread_count**:插入数据的线程数量,默认为 8。
|
||||
|
||||
- **thread_bind_vgroup**:写入时 vgroup 是否和写入线程绑定,绑定后可提升写入速度, 取值为 "yes" 或 "no",默认值为 “no”, 设置为 “no” 后与原来行为一致。当设为 “yes” 时,如果 thread_count 大于写入数据库 vgroups 数量, thread_count 自动调整为 vgroups 数量;如果 thread_count 小于 vgroups 数量,写入线程数量不做调整,一个线程写完一个 vgroup 数据后再写下一个,同时保持一个 vgroup 同时只能由一个线程写入的规则。
|
||||
- **thread_bind_vgroup**:写入时 vgroup 是否和写入线程绑定,绑定后可提升写入速度,取值为 "yes" 或 "no",默认值为 “no”,设置为 “no” 后与原来行为一致。当设为 “yes” 时,如果 thread_count 大于写入数据库 vgroups 数量,thread_count 自动调整为 vgroups 数量;如果 thread_count 小于 vgroups 数量,写入线程数量不做调整,一个线程写完一个 vgroup 数据后再写下一个,同时保持一个 vgroup 同时只能由一个线程写入的规则。
|
||||
|
||||
- **create_table_thread_count**:建表的线程数量,默认为 8。
|
||||
|
||||
- **result_file**:结果输出文件的路径,默认值为 ./output.txt 。
|
||||
|
||||
- **confirm_parameter_prompt**:开关参数,要求用户在提示后确认才能继续, 可取值 "yes" or "no"。默认值为 "no" 。
|
||||
- **confirm_parameter_prompt**:开关参数,要求用户在提示后确认才能继续,可取值 "yes" 或 "no"。默认值为 "no"。
|
||||
|
||||
- **interlace_rows**:启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0, 即向一张子表完成数据插入后才会向下一张子表进行数据插入。
|
||||
- **interlace_rows**:启用交错插入模式并同时指定向每个子表每次插入的数据行数。交错插入模式是指依次向每张子表插入由本参数所指定的行数并重复这个过程,直到所有子表的数据都插入完成。默认值为 0,即向一张子表完成数据插入后才会向下一张子表进行数据插入。
|
||||
在 `super_tables` 中也可以配置该参数,若配置则以 `super_tables` 中的配置为高优先级,覆盖全局设置。
|
||||
|
||||
- **insert_interval**:
|
||||
|
@ -323,11 +350,11 @@ taosBenchmark -f <json file>
|
|||
订阅配置参数在 `tmq_info` 项下设置,参数如下:
|
||||
|
||||
- **concurrent**:消费订阅的消费者数量,或称并发消费数量,默认值:1。
|
||||
- **create_mode**:创建消费者模式,可取值 sequential:顺序创建, parallel:并发同时创建,必填项,无默认值。
|
||||
- **group_mode**:生成消费者 groupId 模式,可取值 share:所有消费者只生成一个 groupId, independent:每个消费者生成一个独立的 groupId,如果 `group.id` 未设置,此项为必填项,无默认值。
|
||||
- **create_mode**:创建消费者模式,可取值 sequential:顺序创建,parallel:并发同时创建,必填项,无默认值。
|
||||
- **group_mode**:生成消费者 groupId 模式,可取值 share:所有消费者只生成一个 groupId,independent:每个消费者生成一个独立的 groupId,如果 `group.id` 未设置,此项为必填项,无默认值。
|
||||
- **poll_delay**:调用 tmq_consumer_poll 传入的轮询超时时间,单位为毫秒,负数表示默认超时 1 秒。
|
||||
- **enable.manual.commit**:是否允许手动提交,可取值 true:允许手动提交,每次消费完消息后手动调用 tmq_commit_sync 完成提交, false:不进行提交,默认值:false。
|
||||
- **rows_file**:存储消费数据的文件,可以为全路径或相对路径,带文件名。实际保存的文件会在后面加上消费者序号,如 rows_file 为 result, 实际文件名为 result_1(消费者 1) result_2(消费者 2) ...
|
||||
- **enable.manual.commit**:是否允许手动提交,可取值 true:允许手动提交,每次消费完消息后手动调用 tmq_commit_sync 完成提交,false:不进行提交,默认值:false。
|
||||
- **rows_file**:存储消费数据的文件,可以为全路径或相对路径,带文件名。实际保存的文件会在后面加上消费者序号,如 rows_file 为 result,实际文件名为 result_1(消费者 1) result_2(消费者 2) ...
|
||||
- **expect_rows**:期望每个消费者消费的行数,数据类型,当消费达到这个数,消费会退出,不设置会一直消费。
|
||||
- **topic_list**:指定消费的 topic 列表,数组类型。topic 列表格式示例:`{"name": "topic1", "sql": "select * from test.meters;"}`,name:指定 topic 名,sql:指定创建 topic 的 sql 语句,需保证 sql 正确,框架会自动创建出 topic。
|
||||
|
||||
|
@ -411,6 +438,17 @@ taosBenchmark -f <json file>
|
|||
|
||||
</details>
|
||||
|
||||
### 生成 CSV 文件 JSON 示例
|
||||
|
||||
<details>
|
||||
<summary>csv-export.json</summary>
|
||||
|
||||
```json
|
||||
{{#include /TDengine/tools/taos-tools/example/csv-export.json}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
查看更多 json 配置文件示例可 [点击这里](https://github.com/taosdata/TDengine/tree/main/tools/taos-tools/example)
|
||||
|
||||
## 输出性能指标
|
||||
|
@ -427,7 +465,7 @@ SUCC: insert delay, min: 19.6780ms, avg: 64.9390ms, p90: 94.6900ms, p95: 105.187
|
|||
- real:写入总耗时(调用引擎),此耗时已去除测试框架准备数据的时间,纯统计在引擎调用上花费的时间,示例为 8.117379 秒,8.527298 - 8.117379 = 0.409919 秒则为测试框架准备数据消耗时间
|
||||
- rows:写入总行数,为 1000 万条数据。
|
||||
- threads:写入线程数,这里是 8 个线程同时写入。
|
||||
- records/second 写入速度 = `写入总耗时`/ `写入总行数`, 括号中 `real` 同前,表示纯引擎写入速度。
|
||||
- records/second 写入速度 = `写入总行数` / `写入总耗时`,括号中 `real` 同前,表示纯引擎写入速度。
|
||||
第二行单个写入延时统计:
|
||||
- min:写入最小延时。
|
||||
- avg:写入平时延时。
|
||||
|
@ -439,7 +477,7 @@ SUCC: insert delay, min: 19.6780ms, avg: 64.9390ms, p90: 94.6900ms, p95: 105.187
|
|||
|
||||
#### 查询指标
|
||||
|
||||
查询性能测试主要输出查询请求速度 QPS 指标, 输出格式如下:
|
||||
查询性能测试主要输出查询请求速度 QPS 指标,输出格式如下:
|
||||
``` bash
|
||||
complete query with 3 threads and 10000 query delay avg: 0.002686s min: 0.001182s max: 0.012189s p90: 0.002977s p95: 0.003493s p99: 0.004645s SQL command: select ...
|
||||
INFO: Spend 26.9530 second completed total queries: 30000, the QPS of all threads: 1113.049
|
||||
|
@ -464,4 +502,4 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
|
|||
```
|
||||
- 1 ~ 3 行实时输出每个消费者当前的消费速度,`msgs/s` 表示消费消息个数,每个消息中包含多行数据,`rows/s` 表示按行数统计的消费速度。
|
||||
- 4 ~ 6 行是测试完成后每个消费者总体统计,统计共消费了多少条消息,共计多少行。
|
||||
- 第 7 行所有消费者总体统计,`msgs` 表示共消费了多少条消息, `rows` 表示共消费了多少行数据。
|
||||
- 第 7 行所有消费者总体统计,`msgs` 表示共消费了多少条消息,`rows` 表示共消费了多少行数据。
|
||||
|
|
|
@ -44,6 +44,7 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
|
||||
| 17 | GEOMETRY | 自定义 | 几何类型,3.1.0.0 版本开始支持 |
|
||||
| 18 | VARBINARY | 自定义 | 可变长的二进制数据,3.1.1.0 版本开始支持 |
|
||||
| 19 | DECIMAL | 8 或 16 | 高精度数值类型,取值范围取决于类型中指定的 precision 和 scale,自 v3.3.6.0 开始支持,见下文描述 |
|
||||
|
||||
:::note
|
||||
|
||||
|
@ -63,6 +64,18 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
|
||||
:::
|
||||
|
||||
### DECIMAL 数据类型
|
||||
`DECIMAL` 数据类型用于高精度数值存储,自 v3.3.6.0 开始支持,定义语法:`DECIMAL(18, 2)`、`DECIMAL(38, 10)`,其中需要指定两个参数,分别为 `precision` 和 `scale`。`precision` 是指最大支持的有效数字个数,`scale` 是指最大支持的小数位数。如 `DECIMAL(8, 4)`,可表示范围即 `[-9999.9999, 9999.9999]`。定义 DECIMAL 数据类型时,`precision` 范围为 `[1, 38]`,scale 的范围为 `[0, precision]`,scale 为 0 时,仅表示整数。也可以不指定 scale,默认为 0,例如 `DECIMAL(18)`,与 `DECIMAL(18, 0)` 相同。
|
||||
|
||||
当 `precision` 值不大于 18 时,内部使用 8 字节存储(DECIMAL64);当 `precision` 范围为 `(18, 38]` 时,使用 16 字节存储(DECIMAL)。SQL 中写入 DECIMAL 类型数据时,可直接使用数值写入,当写入值大于类型可表示的最大值时会报 DECIMAL_OVERFLOW 错误;当未大于类型表示的最大值,但小数位数超过 scale 时,会自动四舍五入处理。如定义类型 DECIMAL(10, 2),写入 10.987,则实际存储值为 10.99。
|
||||
|
||||
DECIMAL 类型仅支持普通列,暂不支持 tag 列。DECIMAL 类型只支持 SQL 写入,暂不支持 stmt 写入和 schemaless 写入。
|
||||
|
||||
整数类型和 DECIMAL 类型操作时,会将整数类型转换为 DECIMAL 类型再进行计算。DECIMAL 类型与 DOUBLE/FLOAT/VARCHAR/NCHAR 等类型计算时,转换为 DOUBLE 类型进行计算。
|
||||
|
||||
查询 DECIMAL 类型表达式时,若计算的中间结果超出当前类型可表示的最大值,报 DECIMAL_OVERFLOW 错误。
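下面是一个演示 DECIMAL 定义与四舍五入行为的示意 SQL(表名与取值均为示例):

```sql
-- 创建包含 DECIMAL(10, 2) 列的表
CREATE TABLE d_test (ts TIMESTAMP, amount DECIMAL(10, 2));
-- 小数位数超过 scale 时自动四舍五入:10.987 实际存储为 10.99
INSERT INTO d_test VALUES (NOW, 10.987);
SELECT amount FROM d_test;
```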
|
||||
|
||||
|
||||
## 常量
|
||||
|
||||
TDengine 支持多个类型的常量,细节如下表:
|
||||
|
|
|
@ -21,6 +21,7 @@ table_options:
|
|||
table_option: {
|
||||
COMMENT 'string_value'
|
||||
| SMA(col_name [, col_name] ...)
|
||||
| KEEP value
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -34,6 +35,7 @@ table_option: {
|
|||
- TAGS 最多允许 128 个,至少 1 个,总长度不超过 16 KB。
|
||||
4. 关于 `ENCODE` 和 `COMPRESS` 的使用,请参考 [按列压缩](../compress)
|
||||
5. 关于 table_option 中的参数说明,请参考 [建表 SQL 说明](../table)
|
||||
6. 关于 table_option 中的 keep 参数,仅对超级表生效,keep 参数的详细说明可以参考 [数据库说明](02-database.md),唯一不同的是超级表 keep 不会立即影响查询结果,仅在 compact 后生效。
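下面给出一个使用 keep 选项的示意写法(表结构为假设,keep 取值含义与数据库 keep 参数一致,此处以天数为例):

```sql
-- 创建带 KEEP 选项的超级表(keep 仅对超级表生效)
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT)
  TAGS (location VARCHAR(64)) KEEP 365;
```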
|
||||
|
||||
## 查看超级表
|
||||
|
||||
|
@ -145,6 +147,7 @@ alter_table_options:
|
|||
|
||||
alter_table_option: {
|
||||
COMMENT 'string_value'
|
||||
| KEEP value
|
||||
}
|
||||
|
||||
```
|
||||
|
|
|
@ -56,14 +56,13 @@ join_clause:
|
|||
|
||||
window_clause: {
|
||||
SESSION(ts_col, tol_val)
|
||||
| STATE_WINDOW(col)
|
||||
| STATE_WINDOW(col) [TRUE_FOR(true_for_duration)]
|
||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition [TRUE_FOR(true_for_duration)]
|
||||
| COUNT_WINDOW(count_val[, sliding_val])
|
||||
|
||||
interp_clause:
|
||||
RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
| RANGE(ts_val, surrounding_time_val) FILL(fill_mod_and_val)
|
||||
RANGE(ts_val [, ts_val] [, surrounding_time_val]) EVERY(every_val) FILL(fill_mod_and_val)
|
||||
|
||||
partition_by_clause:
|
||||
PARTITION BY partition_by_expr [, partition_by_expr] ...
|
||||
|
|
|
@ -186,7 +186,38 @@ FLOOR(expr)
|
|||
```
|
||||
|
||||
**功能说明**:获得指定字段的向下取整的结果。
|
||||
其他使用说明参见 CEIL 函数描述。
|
||||
其他使用说明参见 [CEIL](#ceil) 函数描述。
|
||||
|
||||
#### GREATEST
|
||||
```sql
|
||||
GREATEST(expr1, expr2[, expr]...)
|
||||
```
|
||||
|
||||
**功能说明**:获得输入的所有参数中的最大值。该函数最小参数个数为 2 个。
|
||||
|
||||
**使用说明**:v3.3.6.0
|
||||
|
||||
**返回结果类型**:参考比较规则,比较类型即为最终返回类型。
|
||||
|
||||
**适用数据类型**:
|
||||
- 数值类型:包括 bool 型、整型和浮点型。
|
||||
- 字符串类型:包括 nchar 和 varchar 类型。
|
||||
|
||||
**比较规则**:以下规则描述了比较操作的转换方式:
|
||||
- 如果有任何一个参数为 NULL,则比较结果为 NULL。
|
||||
- 如果比较操作中的所有参数都是字符串类型,按照字符串类型比较。
|
||||
- 如果所有参数都是数值类型,则将它们作为数值类型进行比较。
|
||||
- 如果参数中既有字符串类型,也有数值类型,根据 compareAsStrInGreatest 配置项,统一作为字符串或者数值进行比较。默认按照字符串比较。
|
||||
- 在所有情况下,不同类型比较时,会选择范围更大的类型作为比较类型,例如作为整数类型比较时,如果存在 BIGINT 类型,必定会选择 BIGINT 作为比较类型。
|
||||
|
||||
**相关配置项**:客户端配置,compareAsStrInGreatest 为 1 表示同时存在字符串类型和数值类型统一转为字符串比较,为 0 表示统一转为数值类型比较。默认为 1。
|
||||
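以下为 GREATEST 的示意用法(假设表 meters 中存在数值列 current 与 voltage):

```sql
-- 返回每行中 current、voltage 与常量 10 三者的最大值
SELECT GREATEST(current, voltage, 10) FROM meters;
-- 任一参数为 NULL 时,结果为 NULL
SELECT GREATEST(current, NULL) FROM meters;
```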
|
||||
#### LEAST
|
||||
```sql
|
||||
LEAST(expr1, expr2[, expr]...)
|
||||
```
|
||||
|
||||
**功能说明**:获得输入的所有参数中的最小值。其余部分说明同 [GREATEST](#greatest) 函数。
|
||||
|
||||
#### LN
|
||||
|
||||
|
@ -613,6 +644,7 @@ taos> select ascii('testascii');
|
|||
```
|
||||
|
||||
#### CHAR
|
||||
|
||||
```sql
|
||||
CHAR(expr1 [, expr2] [, epxr3] ...)
|
||||
```
|
||||
|
@ -704,7 +736,7 @@ CONCAT(expr1, expr2 [, expr] ... )
|
|||
|
||||
**功能说明**:字符串连接函数。
|
||||
|
||||
**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。
|
||||
**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含 NCHAR 类型,则结果类型为 NCHAR。如果参数包含NULL值,则输出值为 NULL。
|
||||
|
||||
**适用数据类型**:VARCHAR、NCHAR。该函数最小参数个数为 2 个,最大参数个数为 8 个。
|
||||
|
||||
|
@ -720,7 +752,7 @@ CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
|
|||
|
||||
**功能说明**:带分隔符的字符串连接函数。
|
||||
|
||||
**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。
|
||||
**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含 NCHAR 类型,则结果类型为 NCHAR。如果参数包含 NULL 值,则输出值为 NULL。
|
||||
|
||||
**适用数据类型**:VARCHAR、NCHAR。该函数最小参数个数为 3 个,最大参数个数为 9 个。
|
||||
|
||||
|
@ -744,7 +776,6 @@ LENGTH(expr)
|
|||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
||||
#### LOWER
|
||||
|
||||
```sql
|
||||
|
@ -1106,6 +1137,7 @@ CAST(expr AS type_name)
|
|||
- 字符串类型转换数值类型时可能出现无效字符的情况,例如 "a" 可能转为 0,但不会报错。
|
||||
- 转换到数值类型时,数值大于 type_name 可表示的范围时,则会溢出,但不会报错。
|
||||
- 转换到字符串类型时,如果转换后长度超过 type_name 中指定的长度,则会截断,但不会报错。
|
||||
- DECIMAL 类型不支持与 JSON、VARBINARY、GEOMETRY 类型的互转。
|
||||
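以下示意查询演示上述几条转换规则(示例可不指定表直接执行):

```sql
-- 无效字符转换为数值:结果为 0,不报错
SELECT CAST('a' AS INT);
-- 转换为更短的字符串类型时发生截断,不报错
SELECT CAST('hello' AS VARCHAR(3));
```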
|
||||
#### TO_CHAR
|
||||
|
||||
|
@ -1113,17 +1145,17 @@ CAST(expr AS type_name)
|
|||
TO_CHAR(ts, format_str_literal)
|
||||
```
|
||||
|
||||
**功能说明**:将timestamp类型按照指定格式转换为字符串
|
||||
**功能说明**:将 timestamp 类型按照指定格式转换为字符串。
|
||||
|
||||
**版本**:v3.2.2.0
|
||||
**使用说明**:v3.2.2.0
|
||||
|
||||
**返回结果数据类型**:VARCHAR
|
||||
**返回结果数据类型**:VARCHAR。
|
||||
|
||||
**应用字段**:TIMESTAMP
|
||||
**适用数据类型**:TIMESTAMP。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**支持的格式**
|
||||
|
||||
|
@ -1152,7 +1184,7 @@ TO_CHAR(ts, format_str_literal)
|
|||
| DDD | 年日,001-366 | |
|
||||
| D,d | 周日,数字,1-7,Sunday(1) to Saturday(7) | |
|
||||
| HH24,hh24 | 小时,00-23 | 2023-01-30 23:59:59 |
|
||||
| hh12,HH12,hh,HH | 小时,01-12 | 2023-01-30 12:59:59PM |
|
||||
| hh12,HH12,hh,HH | 小时,01-12 | 2023-01-30 12:59:59PM |
|
||||
| MI,mi | 分钟,00-59 | |
|
||||
| SS,ss | 秒,00-59 | |
|
||||
| MS,ms | 毫秒,000-999 | |
|
||||
|
@ -1168,7 +1200,6 @@ TO_CHAR(ts, format_str_literal)
|
|||
- 推荐在时间格式中带时区信息,如果不带则默认输出的时区为服务端或客户端所配置的时区。
|
||||
- 输入时间戳的精度由所查询表的精度确定,若未指定表,则精度为毫秒。
|
||||
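以下为 TO_CHAR 的示意用法(假设表 meters 的时间戳主键列为 ts):

```sql
-- 将时间戳按指定格式输出为字符串
SELECT TO_CHAR(ts, 'yyyy-mm-dd hh24:mi:ss') FROM meters LIMIT 1;
```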
|
||||
|
||||
#### TO_ISO8601
|
||||
|
||||
```sql
|
||||
|
@ -1190,7 +1221,6 @@ TO_ISO8601(expr [, timezone])
|
|||
- timezone 参数允许输入的时区格式为:[z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。
|
||||
- 输入时间戳的精度由所查询表的精度确定,若未指定表,则精度为毫秒.
|
||||
|
||||
|
||||
#### TO_JSON
|
||||
|
||||
```sql
|
||||
|
@ -1213,29 +1243,29 @@ TO_JSON(str_literal)
|
|||
TO_TIMESTAMP(ts_str_literal, format_str_literal)
|
||||
```
|
||||
|
||||
**功能说明**:将字符串按照指定格式转化为时间戳.
|
||||
**功能说明**:将字符串按照指定格式转化为时间戳。
|
||||
|
||||
**版本**:v3.2.2.0
|
||||
**使用说明**:v3.2.2.0
|
||||
|
||||
**返回结果数据类型**:TIMESTAMP
|
||||
**返回结果数据类型**:TIMESTAMP。
|
||||
|
||||
**应用字段**:VARCHAR
|
||||
**适用数据类型**:VARCHAR。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**支持的格式**:与`to_char`相同
|
||||
**支持的格式**:与 `to_char` 相同。
|
||||
|
||||
**使用说明**:
|
||||
- 若 `ms`、`us`、`ns` 同时指定,那么结果时间戳包含上述三个字段的和。如 `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` 输出为 `2023-10-10 10:10:10.123456789`对应的时间戳。
|
||||
- 若 `ms`、`us`、`ns` 同时指定,那么结果时间戳包含上述三个字段的和,如 `to_timestamp('2023-10-10 10:10:10.123.000456.000000789', 'yyyy-mm-dd hh:mi:ss.ms.us.ns')` 输出为 `2023-10-10 10:10:10.123456789`对应的时间戳。
|
||||
- `MONTH`、`MON`、`DAY`、`DY` 以及其他输出为数字的格式的大小写意义相同,如 `to_timestamp('2023-JANUARY-01', 'YYYY-month-dd')`,`month`可以被替换为 `MONTH` 或者 `Month`。
|
||||
- 如果同一字段被指定了多次,那么前面的指定将会被覆盖。如 `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')`,输出年份是`2022`。
|
||||
- 为避免转换时使用了非预期的时区,推荐在时间中携带时区信息,例如'2023-10-10 10:10:10+08',如果未指定时区则默认时区为服务端或客户端指定的时区。
|
||||
- 如果没有指定完整的时间,那么默认时间值为指定或默认时区的 `1970-01-01 00:00:00`,未指定部分使用该默认值中的对应部分。暂不支持只指定年日而不指定月日的格式,如 'yyyy-mm-DDD',支持 'yyyy-mm-DD'。
|
||||
- 如果格式串中有`AM`,`PM`等,那么小时必须是12小时制,范围必须是01-12。
|
||||
- `to_timestamp`转换具有一定的容错机制,在格式串和时间戳串不完全对应时,有时也可转换,如 `to_timestamp('200101/2', 'yyyyMM1/dd')`,格式串中多出来的 1 会被丢弃. 格式串与时间戳串中多余的空格字符(空格、tab 等)也会被 自动忽略。如 `to_timestamp(' 23 年 - 1 月 - 01 日 ', 'yy 年-MM月-dd日')` 可以被成功转换。虽然 `MM` 等字段需要两个数字对应(只有一位时前面补 0),在 `to_timestamp` 时,一个数字也可以成功转换。
|
||||
- 输出时间戳的精度与查询表的精度相同,若查询未指定表,则输出精度为毫秒。如 `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')` 的输出将会把微秒和纳秒进行截断. 如果指定一张纳秒表,那么就不会发生截断,如 `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`。
|
||||
- 如果同一字段被指定了多次,那么前面的指定将会被覆盖,如 `to_timestamp('2023-22-10-10', 'yyyy-yy-MM-dd')`,输出年份是 `2022`。
|
||||
- 为避免转换时使用了非预期的时区,推荐在时间中携带时区信息,例如 '2023-10-10 10:10:10+08',如果未指定时区则默认时区为服务端或客户端指定的时区。
|
||||
- 如果没有指定完整的时间,那么默认时间值为指定或默认时区的 `1970-01-01 00:00:00`,未指定部分使用该默认值中的对应部分。暂不支持只指定年日而不指定月日的格式,如 'yyyy-mm-DDD',支持'yyyy-mm-DD'。
|
||||
- 如果格式串中有`AM`、`PM`等,那么小时必须是 12 小时制,范围必须是 01-12。
|
||||
- `to_timestamp` 转换具有一定的容错机制,在格式串和时间戳串不完全对应时,有时也可转换,如 `to_timestamp('200101/2', 'yyyyMM1/dd')`,格式串中多出来的 1 会被丢弃。格式串与时间戳串中多余的空格字符(空格、tab 等)也会被自动忽略,如 `to_timestamp(' 23 年 - 1 月 - 01 日 ', 'yy 年-MM月-dd日')` 可以被成功转换。虽然 `MM` 等字段需要两个数字对应(只有一位时前面补 0),在 `to_timestamp` 时,一个数字也可以成功转换。
|
||||
- 输出时间戳的精度与查询表的精度相同,若查询未指定表,则输出精度为毫秒,如 `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns')` 的输出将会把微秒和纳秒进行截断。如果指定一张纳秒表,那么就不会发生截断,如 `select to_timestamp('2023-08-1 10:10:10.123456789', 'yyyy-mm-dd hh:mi:ss.ns') from db_ns.table_ns limit 1`。
|
||||
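以下为 TO_TIMESTAMP 的示意用法(示例字符串与格式串为假设):

```sql
-- 将字符串按给定格式解析为时间戳
SELECT TO_TIMESTAMP('2023-10-10 10:10:10.123', 'yyyy-mm-dd hh24:mi:ss.ms');
```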
|
||||
#### TO_UNIXTIMESTAMP
|
||||
|
||||
|
@ -1342,12 +1372,12 @@ TIMEDIFF(expr1, expr2 [, time_unit])
|
|||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
- 支持的时间单位 `time_unit` 如下:1b(纳秒)、1u(微秒)、1a(毫秒)、1s(秒)、1m(分)、1h(小时)、1d(天)、1w(周)。
|
||||
- 支持的时间单位 `time_unit` 包括 1b(纳秒)、1u(微秒)、1a(毫秒)、1s(秒)、1m(分)、1h(小时)、1d(天)、1w(周)。
|
||||
- 如果时间单位 `time_unit` 未指定,返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
|
||||
- 输入包含不符合时间日期格式的字符串则返回 NULL。
|
||||
- `expr1` 或 `expr2` 为 NULL,返回 NULL。
|
||||
- `time_unit` 为 NULL,等同于未指定时间单位。
|
||||
- 输入时间戳的精度由所查询表的精度确定,若未指定表,则精度为毫秒.
|
||||
- 输入时间戳的精度由所查询表的精度确定,若未指定表,则精度为毫秒。
|
||||
|
||||
**举例**:
|
||||
```sql
|
||||
|
@ -1382,14 +1412,13 @@ use_current_timezone: {
|
|||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
- 支持的时间单位 time_unit 如下:
|
||||
1b(纳秒)、1u(微秒)、1a(毫秒)、1s(秒)、1m(分)、1h(小时)、1d(天)、1w(周)。
|
||||
- 支持的时间单位 time_unit 包括 1b(纳秒)、1u(微秒)、1a(毫秒)、1s(秒)、1m(分)、1h(小时)、1d(天)、1w(周)。
|
||||
- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
|
||||
- 输入时间戳的精度由所查询表的精度确定,若未指定表,则精度为毫秒.
|
||||
- 输入时间戳的精度由所查询表的精度确定,若未指定表,则精度为毫秒。
|
||||
- 输入包含不符合时间日期格式的字符串则返回 NULL。
|
||||
- 当使用 1d/1w 作为时间单位对时间戳进行截断时,可通过设置 use_current_timezone 参数指定是否根据当前时区进行截断处理。
|
||||
值 0 表示使用 UTC 时区进行截断,值 1 表示使用当前时区进行截断。
|
||||
例如客户端所配置时区为 UTC+0800, 则 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) 返回结果为东八区时间 '2020-01-01 08:00:00'。
|
||||
例如客户端所配置时区为 UTC+0800,则 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) 返回结果为东八区时间 '2020-01-01 08:00:00'。
|
||||
而使用 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) 时,返回结果为东八区时间 '2020-01-01 00:00:00'。
|
||||
当不指定 use_current_timezone 时,use_current_timezone 默认值为 1。
|
||||
- 当将时间值截断到一周(1w)时,timetruncate 的计算是基于 Unix 时间戳(1970年1月1日00:00:00 UTC)进行的。Unix 时间戳始于星期四,
|
||||
|
@ -1405,7 +1434,7 @@ TIMEZONE()
|
|||
|
||||
**返回结果数据类型**:VARCHAR。
|
||||
|
||||
**应用字段**:无
|
||||
**应用字段**:无。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
@ -1578,7 +1607,7 @@ algo_type: {
|
|||
**适用于**:表和超级表。
|
||||
|
||||
**说明**:
|
||||
- p 值范围是 [0,100],当为 0 时等同 于MIN,为 100 时等同于 MAX。
|
||||
- p 值范围是 [0,100],当为 0 时等同于 MIN,为 100 时等同于 MAX。
|
||||
- algo_type 取值为 "default" 或 "t-digest"。输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用 t-digest 算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。
|
||||
- t-digest 算法的近似结果对于输入数据顺序敏感,对超级表查询时不同的输入排序结果可能会有微小的误差。
|
||||
|
||||
|
@ -1590,12 +1619,14 @@ AVG(expr)
|
|||
|
||||
**功能说明**:统计指定字段的平均值。
|
||||
|
||||
**返回数据类型**:DOUBLE。
|
||||
**返回数据类型**:DOUBLE、DECIMAL。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**说明**:当输入类型为 DECIMAL 类型时,输出类型也为 DECIMAL 类型,输出的 precision 和 scale 符合数据类型章节中的描述规则,结果类型通过 SUM 的结果类型与 UINT64 做除法推导得到,若 SUM 的结果导致 DECIMAL 类型溢出,则报 DECIMAL OVERFLOW 错误。
|
||||
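以下是一个示意查询(假设表 db.quotes 中的 price 列为 DECIMAL(10, 2) 类型,表名与列名为假设):

```sql
-- AVG 作用于 DECIMAL 列时,结果仍为 DECIMAL 类型
SELECT AVG(price) FROM db.quotes;
```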
|
||||
### COUNT
|
||||
|
||||
```sql
|
||||
|
@ -1627,7 +1658,7 @@ ELAPSED(ts_primary_key [, time_unit])
|
|||
|
||||
**适用数据类型**:TIMESTAMP。
|
||||
|
||||
**适用于**:表,超级表,嵌套查询的外层查询
|
||||
**适用于**:表、超级表、嵌套查询的外层查询。
|
||||
|
||||
**说明**:
|
||||
- ts_primary_key 参数只能是表的第一列,即 TIMESTAMP 类型的主键列。
|
||||
|
@ -1669,7 +1700,6 @@ HISTOGRAM(expr,bin_type, bin_description, normalized)
|
|||
生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。
|
||||
- normalized 是否将返回结果归一化到 0~1 之间。有效输入为 0 和 1。
|
||||
|
||||
|
||||
### HYPERLOGLOG
|
||||
|
||||
```sql
|
||||
|
@ -1778,12 +1808,14 @@ SUM(expr)
|
|||
|
||||
**功能说明**:统计表/超级表中某列的和。
|
||||
|
||||
**返回数据类型**:DOUBLE、BIGINT。
|
||||
**返回数据类型**:DOUBLE、BIGINT、DECIMAL。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**说明**:输入类型为 DECIMAL 类型时,输出类型为 DECIMAL(38, scale),precision 为当前支持的最大值,scale 为输入类型的 scale;若 SUM 的结果溢出,则报 DECIMAL OVERFLOW 错误。
|
||||
|
||||
### VAR_POP
|
||||
|
||||
```sql
|
||||
|
@ -1837,8 +1869,8 @@ BOTTOM(expr, k)
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- *k* 值取值范围 1≤*k*≤100;
|
||||
- 系统同时返回该记录关联的时间戳列;
|
||||
- *k* 值取值范围 1≤*k*≤100。
|
||||
- 系统同时返回该记录关联的时间戳列。
|
||||
- 限制:BOTTOM 函数不支持 FILL 子句。
|
||||
|
||||
### FIRST
|
||||
|
@ -1857,10 +1889,11 @@ FIRST(expr)
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*);查询超级表,且 multiResultFunctionStarReturnTags 设置为 0 (默认值) 时,FIRST(\*) 只返回超级表的普通列;设置为 1 时,返回超级表的普通列和标签列。
|
||||
- 如果要返回各个列的首个(时间戳最小)非 NULL 值,可以使用 FIRST(\*) 查询超级表,且 multiResultFunctionStarReturnTags 设置为 0 (默认值) 时,FIRST(\*) 只返回超级表的普通列;设置为 1 时,返回超级表的普通列和标签列。
|
||||
- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;
|
||||
- 如果结果集中所有列全部为 NULL 值,则不返回结果。
|
||||
- 对于存在复合主键的表的查询,若最小时间戳的数据有多条,则只有对应的复合主键最小的数据被返回。
|
||||
|
||||
### LAST
|
||||
|
||||
```sql
|
||||
|
@ -1875,9 +1908,9 @@ LAST(expr)
|
|||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
**使用说明**:
|
||||
|
||||
- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*);查询超级表,且 multiResultFunctionStarReturnTags 设置为 0 (默认值) 时,LAST(\*) 只返回超级表的普通列;设置为 1 时,返回超级表的普通列和标签列。
|
||||
- 如果要返回各个列的最后(时间戳最大)一个非 NULL 值,可以使用 LAST(\*) 查询超级表,且 multiResultFunctionStarReturnTags 设置为 0 (默认值) 时,LAST(\*) 只返回超级表的普通列;设置为 1 时,返回超级表的普通列和标签列。
|
||||
- 如果结果集中的某列全部为 NULL 值,则该列的返回结果也是 NULL;如果结果集中所有列全部为 NULL 值,则不返回结果。
|
||||
- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
|
||||
- 对于存在复合主键的表的查询,若最大时间戳的数据有多条,则只有对应的复合主键最大的数据被返回。
|
||||
|
@ -1897,7 +1930,7 @@ LAST_ROW(expr)
|
|||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
- 如果要返回各个列的最后一条记录(时间戳最大),可以使用 LAST_ROW(\*);查询超级表,且 multiResultFunctionStarReturnTags 设置为 0 (默认值) 时,LAST_ROW(\*) 只返回超级表的普通列;设置为 1 时,返回超级表的普通列和标签列。
|
||||
- 如果要返回各个列的最后一条记录(时间戳最大),可以使用 LAST_ROW(\*) 查询超级表,且 multiResultFunctionStarReturnTags 设置为 0 (默认值) 时,LAST_ROW(\*) 只返回超级表的普通列;设置为 1 时,返回超级表的普通列和标签列。
|
||||
- 在用于超级表时,时间戳完全一样且同为最大的数据行可能有多个,那么会从中随机返回一条,而并不保证多次运行所挑选的数据行必然一致。
|
||||
- 不能与 INTERVAL 一起使用。
|
||||
- 与 LAST 函数一样,对于存在复合主键的表的查询,若最大时间戳的数据有多条,则只有对应的复合主键最大的数据被返回。
|
||||
|
@ -1998,8 +2031,8 @@ TOP(expr, k)
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- *k* 值取值范围 1≤*k*≤100;
|
||||
- 系统同时返回该记录关联的时间戳列;
|
||||
- *k* 值取值范围 1≤*k*≤100。
|
||||
- 系统同时返回该记录关联的时间戳列。
|
||||
- 限制:TOP 函数不支持 FILL 子句。
|
||||
|
||||
### UNIQUE
|
||||
|
@ -2016,6 +2049,27 @@ UNIQUE(expr)
|
|||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
### COLS
|
||||
|
||||
```sql
|
||||
COLS(func(expr), output_expr1, [, output_expr2] ... )
|
||||
```
|
||||
|
||||
**功能说明**:在选择函数 func(expr) 执行结果所在数据行上,执行表达式 output_expr1 [, output_expr2],返回其结果,func(expr) 的结果不输出。
|
||||
|
||||
**返回数据类型**:返回多列数据,每列数据类型为对应表达式返回结果的类型。
|
||||
|
||||
**适用数据类型**:全部类型字段。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
- func 函数类型:必须是单行选择函数(输出结果为一行的选择函数,例如 last 是单行选择函数,但 top 是多行选择函数)。
|
||||
- 主要用于一个 sql 中获取多个选择函数结果关联列的场景,例如 `select cols(max(c0), ts), cols(max(c1), ts) from ...` 可用于获取 c0、c1 列最大值的不同 ts 值。
|
||||
- 注意,函数 func 的结果并没有返回,如需输出 func 结果,可额外增加输出列,如 `select first(ts), cols(first(ts), c1) from ...`。
|
||||
- 输出只有一列时,可以对 cols 函数设置别名。例如 `select cols(first(ts), c1) as c11 from ...`
|
||||
- 输出一列或者多列时,可以对 cols 函数的每个输出列设置命名。例如 `select cols(first(ts), c1 as c11, c2 as c22)`
|
||||
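以下为 COLS 的示意用法(假设子表 d1001 中存在数值列 c0、c1):

```sql
-- 返回 c0 最大值所在行的时间戳与 c1 值,max(c0) 本身不输出
SELECT COLS(MAX(c0), ts, c1) FROM d1001;
```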
|
||||
|
||||
## 时序数据特有函数
|
||||
|
||||
|
@ -2077,11 +2131,11 @@ ignore_option: {
|
|||
}
|
||||
```
|
||||
|
||||
**功能说明**:统计表中特定列与之前行的当前列有效值之差。ignore_option 取值为 0|1|2|3,可以不填,默认值为 0.
|
||||
- `0` 表示不忽略(diff结果)负值不忽略 null 值
|
||||
- `1` 表示(diff结果)负值作为 null 值
|
||||
- `2` 表示不忽略(diff结果)负值但忽略 null 值
|
||||
- `3` 表示忽略(diff结果)负值且忽略 null 值
|
||||
**功能说明**:统计表中特定列与之前行的当前列有效值之差。ignore_option 取值为 0|1|2|3,可以不填,默认值为 0。
|
||||
- `0` 表示 diff 结果不忽略负值也不忽略 null 值
|
||||
- `1` 表示 diff 结果的负值作为 null 值
|
||||
- `2` 表示 diff 结果不忽略负值但忽略 null 值
|
||||
- `3` 表示 diff 结果忽略负值且忽略 null 值
|
||||
- 对于存在复合主键的表的查询,若时间戳相同的数据存在多条,则只有对应的复合主键最小的数据参与运算。
|
||||
|
||||
**返回数据类型**:bool、时间戳及整型数值类型均返回 bigint,浮点类型返回 double,若 diff 结果溢出则返回溢出后的值。
|
||||
|
@ -2093,13 +2147,13 @@ ignore_option: {
|
|||
**使用说明**:
|
||||
|
||||
- diff 是计算本行特定列与同列的前一个有效数据的差值,同列的前一个有效数据:指的是同一列中时间戳较小的最临近的非空值。
|
||||
- 数值类型 diff 结果为对应的算术差值;时间戳类型根据数据库的时间戳精度进行差值计算;bool 类型计算差值时 true 视为 1,false 视为 0
|
||||
- 如当前行数据为 null 或者没有找到同列前一个有效数据时,diff 结果为 null
|
||||
- 忽略负值时(ignore_option 设置为 1 或 3 ),如果 diff 结果为负值,则结果设置为 null,然后根据 null 值过滤规则进行过滤
|
||||
- 当 diff 结果发生溢出时,结果是否是 `应该忽略的负值` 取决于逻辑运算结果是正数还是负数,例如 9223372036854775800 - (-9223372036854775806) 的值超出 BIGINT 的范围,diff 结果会显示溢出值 -10,但并不会被作为负值忽略
|
||||
- 单个语句中可以使用单个或者多个 diff,并且每个 diff 可以指定相同或不同的 ignore_option,当单个语句中存在多个 diff 时当且仅当某行所有 diff 的结果都为 null,并且 ignore_option 都设置为忽略 null 值,该行才从结果集中剔除
|
||||
- 数值类型 diff 结果为对应的算术差值;时间戳类型根据数据库的时间戳精度进行差值计算;bool 类型计算差值时 true 视为 1,false 视为 0。
|
||||
- 如当前行数据为 null 或者没有找到同列前一个有效数据时,diff 结果为 null。
|
||||
- 忽略负值时(ignore_option 设置为 1 或 3 ),如果 diff 结果为负值,则结果设置为 null,然后根据 null 值过滤规则进行过滤。
|
||||
- 当 diff 结果发生溢出时,结果是否是 `应该忽略的负值` 取决于逻辑运算结果是正数还是负数,例如 9223372036854775800 - (-9223372036854775806) 的值超出 BIGINT 的范围,diff 结果会显示溢出值 -10,但并不会被作为负值忽略。
|
||||
- 单个语句中可以使用单个或者多个 diff,并且每个 diff 可以指定相同或不同的 ignore_option,当单个语句中存在多个 diff 时当且仅当某行所有 diff 的结果都为 null,并且 ignore_option 都设置为忽略 null 值,该行才从结果集中剔除。
|
||||
- 可以选择与相关联的列一起使用。例如 `select _rowts, DIFF() from`。
|
||||
- 当没有复合主键时,如果不同的子表有相同时间戳的数据,会提示 "Duplicate timestamps not allowed"
|
||||
- 当没有复合主键时,如果不同的子表有相同时间戳的数据,会提示 "Duplicate timestamps not allowed"。
|
||||
- 当使用复合主键时,不同子表的时间戳和主键组合可能相同,使用哪一行取决于先找到哪一行,这意味着在这种情况下多次运行 diff() 的结果可能会不同。
|
||||
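以下为 DIFF 的示意用法(假设子表 d1001 中存在数值列 current):

```sql
-- ignore_option 取 3:忽略 diff 的负值且忽略 null 值,并同时输出行时间戳
SELECT _rowts, DIFF(current, 3) FROM d1001;
```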
|
||||
### INTERP
|
||||
|
@ -2125,6 +2179,7 @@ ignore_null_values: {
|
|||
- INTERP 用于在指定时间断面获取指定列的记录值,使用时有专用语法(interp_clause),语法介绍[参考链接](../select/#interp) 。
|
||||
- 当指定时间断面不存在符合条件的行数据时,INTERP 函数会根据 [FILL](../distinguished/#fill-子句) 参数的设定进行插值。
|
||||
- INTERP 作用于超级表时,会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
|
||||
- INTERP 在 FILL PREV/NEXT/NEAR 时,行为与窗口查询有所区别,当截面存在数据时,不会进行 FILL,即便当前值为 NULL。
|
||||
- INTERP 可以与伪列 `_irowts` 一起使用,返回插值点所对应的时间戳(v3.0.2.0 以后支持)。
|
||||
- INTERP 可以与伪列 `_isfilled` 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(v3.0.3.0 以后支持)。
|
||||
- 只有在使用 FILL PREV/NEXT/NEAR 模式时才可以使用伪列 `_irowts_origin`,用于返回 `interp` 函数所使用的原始数据的时间戳列。若范围内无值,则返回 NULL。`_irowts_origin` 在 v3.3.4.9 以后支持。
|
||||
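以下为 INTERP 搭配 `_irowts_origin` 的示意用法(假设子表 d1001 中存在数值列 current,时间范围为假设):

```sql
-- 每 1s 取截面值,缺值时用前一个值填充,并输出所用原始数据的时间戳
SELECT _irowts, _irowts_origin, INTERP(current) FROM d1001
  RANGE('2023-01-01 00:00:00', '2023-01-01 00:01:00') EVERY(1s) FILL(PREV);
```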
|
@ -2200,7 +2255,7 @@ STATEDURATION(expr, oper, val, unit)
|
|||
|
||||
**参数范围**:
|
||||
|
||||
- oper:`'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。
|
||||
- oper:`'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用 `''` 包括。
|
||||
- val:数值型
|
||||
- unit:时间长度的单位,可取值:1b(纳秒)、1u(微秒)、1a(毫秒)、1s(秒)、1m(分)、1h(小时)、1d(天)、1w(周)。如果省略,默认为当前数据库精度。
|
||||
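以下为 STATEDURATION 的示意用法(假设子表 d1001 中存在数值列 voltage):

```sql
-- 统计 voltage 持续大于 205 的时长,结果单位为秒
SELECT STATEDURATION(voltage, 'GT', 205, 1s) FROM d1001;
```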
|
||||
|
@ -2214,7 +2269,7 @@ STATEDURATION(expr, oper, val, unit)
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
|
||||
- 不能和窗口操作一起使用,例如 interval、state_window、session_window。
|
||||
|
||||
### TWA
|
||||
|
||||
|
@ -2254,7 +2309,7 @@ SELECT CURRENT_USER();
|
|||
SELECT DATABASE();
|
||||
```
|
||||
|
||||
**说明**:返回当前登录的数据库。如果登录的时候没有指定默认数据库,且没有使用USE命令切换数据库,则返回NULL。
|
||||
**说明**:返回当前登录的数据库。如果登录的时候没有指定默认数据库,且没有使用 USE 命令切换数据库,则返回 NULL。
|
||||
|
||||
### SERVER_STATUS
|
||||
|
||||
|
|
|
@ -46,9 +46,9 @@ TDengine 支持按时间窗口切分方式进行聚合结果查询,比如温
|
|||
```sql
|
||||
window_clause: {
|
||||
SESSION(ts_col, tol_val)
|
||||
| STATE_WINDOW(col)
|
||||
| STATE_WINDOW(col) [TRUE_FOR(true_for_duration)]
|
||||
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [FILL(fill_mod_and_val)]
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
|
||||
| EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition [TRUE_FOR(true_for_duration)]
|
||||
| COUNT_WINDOW(count_val[, sliding_val])
|
||||
}
|
||||
```
|
||||
|
@ -76,11 +76,11 @@ window_clause: {
|
|||
FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填充模式包括以下几种:
|
||||
|
||||
1. 不进行填充:NONE(默认填充模式)。
|
||||
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如 FILL(VALUE, 1.23)。这里需要注意,最终填充的值受由相应列的类型决定,如 FILL(VALUE, 1.23),相应列为 INT 类型,则填充值为 1,若查询列表中有多列需要 FILL,则需要给每一个 FILL 列指定 VALUE,如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`,注意,SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE,如 `_wstart`、`_wstart+1a`、`now`、`1+1` 以及使用 partition by 时的 partition key (如 tbname)都不需要指定 VALUE,如 `timediff(last(ts), _wstart)` 则需要指定 VALUE。
|
||||
3. PREV 填充:使用前一个非 NULL 值填充数据。例如 FILL(PREV)。
|
||||
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如 `FILL(VALUE, 1.23)`。这里需要注意,最终填充的值由相应列的类型决定,如 `FILL(VALUE, 1.23)`,相应列为 INT 类型,则填充值为 1,若查询列表中有多列需要 FILL,则需要给每一个 FILL 列指定 VALUE,如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`,注意,SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE,如 `_wstart`、`_wstart+1a`、`now`、`1+1` 以及使用 `partition by` 时的 `partition key`(如 tbname)都不需要指定 VALUE,如 `timediff(last(ts), _wstart)` 则需要指定 VALUE。
|
||||
3. PREV 填充:使用前一个值填充数据。例如 FILL(PREV)。
|
||||
4. NULL 填充:使用 NULL 填充数据。例如 FILL(NULL)。
|
||||
5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如 FILL(LINEAR)。
|
||||
6. NEXT 填充:使用下一个非 NULL 值填充数据。例如 FILL(NEXT)。
|
||||
6. NEXT 填充:使用下一个值填充数据。例如 FILL(NEXT)。
|
||||
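上述填充模式的一个示意用法如下(假设对表 meters 做 10 分钟窗口聚合,时间范围为假设;建议为使用 FILL 的查询指定时间范围):

```sql
-- 按 10 分钟窗口求平均电流,空窗口以固定值 0 填充
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2023-01-01 00:00:00' AND ts < '2023-01-02 00:00:00'
  INTERVAL(10m) FILL(VALUE, 0);
```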
|
||||
以上填充模式中,除了 NONE 模式默认不填充值之外,其他模式在查询的整个时间范围内如果没有数据 FILL 子句将被忽略,即不产生填充数据,查询结果为空。这种行为在部分模式(PREV、NEXT、LINEAR)下具有合理性,因为在这些模式下没有数据意味着无法产生填充数值。而对另外一些模式(NULL、VALUE)来说,理论上是可以产生填充数值的,至于需不需要输出填充数值,取决于应用的需求。所以为了满足这类需要强制填充数据或 NULL 的应用的需求,同时不破坏现有填充模式的行为兼容性,从 v3.0.3.0 开始,增加了两种新的填充模式:
|
||||
|
||||
|
@ -165,6 +165,12 @@ TDengine 还支持将 CASE 表达式用在状态量,可以表达某个状态
|
|||
SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE 0 END status FROM meters PARTITION BY tbname STATE_WINDOW(CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE 0 END);
|
||||
```
|
||||
|
||||
状态窗口支持使用 TRUE_FOR 参数来设定窗口的最小持续时长。如果某个状态窗口的宽度低于该设定值,则会自动舍弃,不返回任何计算结果。例如,设置最短持续时长为 3s。
|
||||
|
||||
```
|
||||
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status) TRUE_FOR (3s);
|
||||
```
|
||||
|
||||
### 会话窗口
|
||||
|
||||
会话窗口根据记录的时间戳主键的值来确定是否属于同一个会话。如下图所示,如果设置时间戳的连续的间隔小于等于 12 秒,则以下 6 条记录构成 2 个会话窗口,分别是 [2019-04-28 14:22:10,2019-04-28 14:22:30] 和 [2019-04-28 14:23:10,2019-04-28 14:23:30]。因为 2019-04-28 14:22:30 与 2019-04-28 14:23:10 之间的时间间隔是 40 秒,超过了连续时间间隔(12 秒)。
|
||||
|
@ -196,6 +202,12 @@ select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c
|
|||
|
||||

|
||||
|
||||
事件窗口支持使用 TRUE_FOR 参数来设定窗口的最小持续时长。如果某个事件窗口的宽度低于该设定值,则会自动舍弃,不返回任何计算结果。例如,设置最短持续时长为 3s。
|
||||
|
||||
```
|
||||
select _wstart, _wend, count(*) from t event_window start with c1 > 0 end with c2 < 10 true_for (3s);
|
||||
```
|
||||
|
||||
### 计数窗口
|
||||
|
||||
计数窗口按固定的数据行数来划分窗口。默认将数据按时间戳排序,再按照 count_val 的值,将数据划分为多个窗口,然后做聚合计算。count_val 表示每个 count window 包含的最大数据行数,总数据行数不能整除 count_val 时,最后一个窗口的行数会小于 count_val。sliding_val 是常量,表示窗口滑动的数量,类似于 interval 的 SLIDING。
|
||||
|
@ -211,7 +223,7 @@ select _wstart, _wend, count(*) from t count_window(4);
|
|||
|
||||
### 时间戳伪列
|
||||
|
||||
窗口聚合查询结果中,如果 SQL 语句中没有指定输出查询结果中的时间戳列,那么最终结果中不会自动包含窗口的时间列信息。如果需要在结果中输出聚合结果所对应的时间窗口信息,需要在 SELECT 子句中使用时间戳相关的伪列: 时间窗口起始时间 (\_WSTART), 时间窗口结束时间 (\_WEND), 时间窗口持续时间 (\_WDURATION), 以及查询整体窗口相关的伪列:查询窗口起始时间(\_QSTART) 和查询窗口结束时间(\_QEND)。需要注意的是时间窗口起始时间和结束时间均是闭区间,时间窗口持续时间是数据当前时间分辨率下的数值。例如,如果当前数据库的时间分辨率是毫秒,那么结果中 500 就表示当前时间窗口的持续时间是 500毫秒 (500 ms)。
|
||||
窗口聚合查询结果中,如果 SQL 语句中没有指定输出查询结果中的时间戳列,那么最终结果中不会自动包含窗口的时间列信息。如果需要在结果中输出聚合结果所对应的时间窗口信息,需要在 SELECT 子句中使用时间戳相关的伪列:时间窗口起始时间 (\_WSTART),时间窗口结束时间 (\_WEND),时间窗口持续时间 (\_WDURATION),以及查询整体窗口相关的伪列:查询窗口起始时间(\_QSTART)和查询窗口结束时间(\_QEND)。需要注意的是时间窗口起始时间和结束时间均是闭区间,时间窗口持续时间是数据当前时间分辨率下的数值。例如,如果当前数据库的时间分辨率是毫秒,那么结果中 500 就表示当前时间窗口的持续时间是 500 毫秒(500 ms)。
|
||||
|
||||
### 示例
|
||||
|
||||
|
|
|
@ -59,11 +59,11 @@ CREATE TOPIC [IF NOT EXISTS] topic_name [with meta] AS DATABASE db_name;
|
|||
|
||||
## 删除 topic
|
||||
|
||||
如果不再需要订阅数据,可以删除 topic,需要注意:只有当前未在订阅中的 TOPIC 才能被删除。
|
||||
如果不再需要订阅数据,可以删除 topic。如果当前 topic 被消费者订阅,可通过 FORCE 语法强制删除,强制删除后,订阅该 topic 的消费者消费数据时会出错(FORCE 语法自 v3.3.6.0 开始支持)。
|
||||
|
||||
```sql
|
||||
/* 删除 topic */
|
||||
DROP TOPIC [IF EXISTS] topic_name;
|
||||
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
|
||||
```
|
||||
|
||||
此时如果该订阅主题上存在 consumer,则此 consumer 会收到一个错误。
|
||||
|
@ -82,8 +82,10 @@ SHOW TOPICS;
|
|||
|
||||
## 删除消费组
|
||||
|
||||
消费者创建的时候,会给消费者指定一个消费者组,消费者不能显式地删除,但是可以删除消费者组。如果当前消费者组里有消费者正在消费,可通过 FORCE 语法强制删除,强制删除后,订阅的消费者消费数据时会出错(FORCE 语法自 v3.3.6.0 开始支持)。
|
||||
|
||||
```sql
|
||||
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
|
||||
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
|
||||
```
|
||||
|
||||
删除主题 topic_name 上的消费组 cgroup_name。
|
||||
|
|
|
@ -8,13 +8,13 @@ description: 流式计算的相关 SQL 的详细语法
|
|||
## 创建流式计算
|
||||
|
||||
```sql
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery [notification_definition]
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE| CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
FILL_HISTORY [0|1]
|
||||
FILL_HISTORY [0|1] [ASYNC]
|
||||
IGNORE UPDATE [0|1]
|
||||
}
|
||||
|
||||
|
@ -85,6 +85,8 @@ CREATE STREAM streams1 IGNORE EXPIRED 1 WATERMARK 100s INTO streamt1 AS
|
|||
SELECT _wstart, count(*), avg(voltage) from meters PARTITION BY tbname COUNT_WINDOW(10);
|
||||
```
|
||||
|
||||
notification_definition 子句定义了窗口计算过程中,在窗口打开/关闭等指定事件发生时,需要向哪些地址发送通知。详见 [流式计算的事件通知](#流式计算的事件通知)
|
||||
|
||||
## 流式计算的 partition
|
||||
|
||||
可以使用 PARTITION BY TBNAME,tag,普通列或者表达式,对一个流进行多分区的计算,每个分区的时间线与时间窗口是独立的,会各自聚合,并写入到目的表中的不同子表。
|
||||
|
@ -125,6 +127,13 @@ create stream if not exists s1 fill_history 1 into st1 as select count(*) from
|
|||
|
||||
如果该流任务已经彻底过期,并且您不再想让它检测或处理数据,您可以手动删除它,被计算出的数据仍会被保留。
|
||||
|
||||
注意:
|
||||
- 开启 fill_history 时,创建流需要找到历史数据的分界点,如果历史数据很多,可能会导致创建流任务耗时较长,此时可以通过 fill_history 1 async(自 v3.3.6.0 开始支持)语法将创建流的任务放在后台处理,创建流的语句可立即返回,不阻塞后面的操作(用法可参考本节末尾的示意代码)。async 只对 fill_history 1 起效,fill_history 0 时建流很快,不需要异步处理。
|
||||
|
||||
- 通过 show streams 可查看后台建流的进度(ready 状态表示成功,init 状态表示正在建流,failed 状态表示建流失败,失败时 message 列可以查看原因。对于建流失败的情况可以删除流重新建立)。
|
||||
|
||||
- 另外,不要同时异步创建多个流,可能由于事务冲突导致后面创建的流失败。
|
||||
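以下为异步建流的示意写法(沿用上文示例中的流 s1,ASYNC 自 v3.3.6.0 起支持):

```sql
-- 后台异步创建带历史回填的流,语句立即返回
CREATE STREAM IF NOT EXISTS s1 FILL_HISTORY 1 ASYNC INTO st1 AS SELECT COUNT(*) FROM t1 INTERVAL(10s);
-- 查看后台建流进度(ready/init/failed)
SHOW STREAMS;
```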
|
||||
## 删除流式计算
|
||||
|
||||
```sql
|
||||
|
@ -156,8 +165,11 @@ SELECT * from information_schema.`ins_streams`;
|
|||
2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用)
|
||||
|
||||
3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。
|
||||
|
||||
4. FORCE_WINDOW_CLOSE:以操作系统当前时间为准,只计算当前关闭窗口的结果,并推送出去。窗口只会在被关闭的时刻计算一次,后续不会再重复计算。该模式当前只支持 INTERVAL 窗口(不支持滑动);FILL_HISTORY 必须为 0,IGNORE EXPIRED 必须为 1,IGNORE UPDATE 必须为 1;FILL 只支持 PREV、NULL、NONE、VALUE。
|
||||
|
||||
5. CONTINUOUS_WINDOW_CLOSE:窗口关闭时输出结果。修改、删除数据,并不会立即触发重算,每等待 rec_time_val 时长,会进行周期性重算。如果不指定 rec_time_val,那么重算周期是 60 分钟。如果重算的时间长度超过 rec_time_val,在本次重算后,自动开启下一次重算。该模式当前只支持 INTERVAL 窗口。如果使用 FILL,需要配置 adapter 的相关信息:adapterFqdn、adapterPort、adapterToken。adapterToken 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`。
|
||||
|
||||
由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。
|
||||
|
||||
因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY 最小时间是 5s,如果低于 5s,创建流计算时会报错。
|
||||
|
@ -298,4 +310,241 @@ RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
|
|||
CREATE SNODE ON DNODE [id]
|
||||
```
|
||||
其中的 id 是集群中的 dnode 的序号。请注意选择的 dnode,流计算的中间状态将自动在其上进行备份。
|
||||
从 3.3.4.0 版本开始,在多副本环境中创建流会进行 snode 的**存在性检查**,要求首先创建 snode。如果 snode 不存在,无法创建流。
|
||||
从 v3.3.4.0 开始,在多副本环境中创建流会进行 snode 的**存在性检查**,要求首先创建 snode。如果 snode 不存在,无法创建流。
|
||||
|
||||
## 流式计算的事件通知
|
||||
|
||||
### 使用说明
|
||||
|
||||
流式计算支持在窗口打开/关闭时,向外部系统发送相关的事件通知。用户通过 `notification_definition` 来指定需要通知的事件,以及用于接收通知消息的目标地址。
|
||||
|
||||
```sql
|
||||
notification_definition:
|
||||
NOTIFY (url [, url] ...) ON (event_type [, event_type] ...) [notification_options]
|
||||
|
||||
event_type:
|
||||
'WINDOW_OPEN'
|
||||
| 'WINDOW_CLOSE'
|
||||
|
||||
notification_options: {
|
||||
NOTIFY_HISTORY [0|1]
|
||||
ON_FAILURE [DROP|PAUSE]
|
||||
}
|
||||
```
|
||||
|
||||
上述语法中的相关规则含义如下:
|
||||
1. `url`:指定通知的目标地址,必须包括协议、IP 或域名、端口号,并允许包含路径、参数。目前仅支持 websocket 协议。例如:`ws://localhost:8080`、`ws://localhost:8080/notify`、`wss://localhost:8080/notify?key=foo`。
|
||||
1. `event_type`:定义需要通知的事件,支持的事件类型有:
|
||||
1. WINDOW_OPEN:窗口打开事件,所有类型的窗口打开时都会触发。
|
||||
1. WINDOW_CLOSE:窗口关闭事件,所有类型的窗口关闭时都会触发。
|
||||
1. `NOTIFY_HISTORY`:控制是否在计算历史数据时触发通知,默认值为 0,即不触发。
|
||||
1. `ON_FAILURE`:向通知地址发送通知失败时(比如网络不佳场景)是否允许丢弃部分事件,默认值为 `PAUSE`。
|
||||
1. PAUSE 表示发送通知失败时暂停流计算任务。taosd 会重试发送通知,直到发送成功后,任务自动恢复运行。
|
||||
1. DROP 表示发送通知失败时直接丢弃事件信息,流计算任务继续运行,不受影响。
|
||||
|
||||
比如,以下示例创建一个流,计算电表电流的每分钟平均值,并在窗口打开、关闭时向两个通知地址发送通知,计算历史数据时不发送通知,不允许在通知发送失败时丢弃通知:
|
||||
|
||||
```sql
|
||||
CREATE STREAM avg_current_stream FILL_HISTORY 1
|
||||
AS SELECT _wstart, _wend, AVG(current) FROM meters
|
||||
INTERVAL (1m)
|
||||
NOTIFY ('ws://localhost:8080/notify', 'wss://192.168.1.1:8080/notify?key=foo')
|
||||
ON ('WINDOW_OPEN', 'WINDOW_CLOSE')
|
||||
NOTIFY_HISTORY 0
|
||||
ON_FAILURE PAUSE;
|
||||
```
|
||||
|
||||
当触发指定的事件时,taosd 会向指定的 URL 发送 POST 请求,消息体为 JSON 格式。一个请求可能包含若干个流的若干个事件,且事件类型不一定相同。
|
||||
事件信息视窗口类型而定:
|
||||
|
||||
1. 时间窗口:开始时发送起始时间;结束时发送起始时间、结束时间、计算结果。
|
||||
1. 状态窗口:开始时发送起始时间、前一个窗口的状态值、当前窗口的状态值;结束时发送起始时间、结束时间、计算结果、当前窗口的状态值、下一个窗口的状态值。
|
||||
1. 会话窗口:开始时发送起始时间;结束时发送起始时间、结束时间、计算结果。
|
||||
1. 事件窗口:开始时发送起始时间,触发窗口打开的数据值和对应条件编号;结束时发送起始时间、结束时间、计算结果、触发窗口关闭的数据值和对应条件编号。
|
||||
1. 计数窗口:开始时发送起始时间;结束时发送起始时间、结束时间、计算结果。
|
||||
|
||||
通知消息的结构示例如下:
|
||||
|
||||
```json
|
||||
{
|
||||
"messageId": "unique-message-id-12345",
|
||||
"timestamp": 1733284887203,
|
||||
"streams": [
|
||||
{
|
||||
"streamName": "avg_current_stream",
|
||||
"events": [
|
||||
{
|
||||
"tableName": "t_a667a16127d3b5a18988e32f3e76cd30",
|
||||
"eventType": "WINDOW_OPEN",
|
||||
"eventTime": 1733284887097,
|
||||
"windowId": "window-id-67890",
|
||||
"windowType": "Time",
|
||||
"windowStart": 1733284800000
|
||||
},
|
||||
{
|
||||
"tableName": "t_a667a16127d3b5a18988e32f3e76cd30",
|
||||
"eventType": "WINDOW_CLOSE",
|
||||
"eventTime": 1733284887197,
|
||||
"windowId": "window-id-67890",
|
||||
"windowType": "Time",
|
||||
"windowStart": 1733284800000,
|
||||
"windowEnd": 1733284860000,
|
||||
"result": {
|
||||
"_wstart": 1733284800000,
|
||||
"avg(current)": 1.3
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"streamName": "max_voltage_stream",
|
||||
"events": [
|
||||
{
|
||||
"tableName": "t_96f62b752f36e9b16dc969fe45363748",
|
||||
"eventType": "WINDOW_OPEN",
|
||||
"eventTime": 1733284887231,
|
||||
"windowId": "window-id-13579",
|
||||
"windowType": "Event",
|
||||
"windowStart": 1733284800000,
|
||||
"triggerCondition": {
|
||||
"conditionIndex": 0,
|
||||
"fieldValue": {
|
||||
"c1": 10,
|
||||
"c2": 15
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"tableName": "t_96f62b752f36e9b16dc969fe45363748",
|
||||
"eventType": "WINDOW_CLOSE",
|
||||
"eventTime": 1733284887231,
|
||||
"windowId": "window-id-13579",
|
||||
"windowType": "Event",
|
||||
"windowStart": 1733284800000,
|
||||
"windowEnd": 1733284810000,
|
||||
"triggerCondition": {
|
||||
"conditionIndex": 1,
|
||||
"fieldValue": {
|
||||
"c1": 20
|
||||
"c2": 3
|
||||
}
|
||||
},
|
||||
"result": {
|
||||
"_wstart": 1733284800000,
|
||||
"max(voltage)": 220
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
后续小节是通知消息中各个字段的说明。
|
||||
|
||||
### 根级字段说明
|
||||
|
||||
1. messageId:字符串类型,是通知消息的唯一标识符,确保整条消息可以被追踪和去重。
|
||||
1. timestamp:长整型时间戳,表示通知消息生成的时间,精确到毫秒,即:'00:00, Jan 1 1970 UTC' 以来的毫秒数。
|
||||
1. streams:对象数组,包含多个流任务的事件信息。(详细信息见下节)
|
||||
|
||||
### stream 对象的字段说明
|
||||
|
||||
1. streamName:字符串类型,流任务的名称,用于标识事件所属的流。
|
||||
1. events:对象数组,该流任务下的事件列表,包含一个或多个事件对象。(详细信息见下节)
|
||||
|
||||
### event 对象的字段说明
|
||||
|
||||
#### 通用字段
|
||||
|
||||
这部分是所有 event 对象所共有的字段。
|
||||
1. tableName:字符串类型,是对应目标子表的表名。
|
||||
1. eventType:字符串类型,表示事件类型,支持 WINDOW_OPEN、WINDOW_CLOSE、WINDOW_INVALIDATION 三种类型。
|
||||
1. eventTime:长整型时间戳,表示事件生成时间,精确到毫秒,即:'00:00, Jan 1 1970 UTC' 以来的毫秒数。
|
||||
1. windowId:字符串类型,窗口的唯一标识符,确保打开和关闭事件的 ID 一致,便于外部系统将两者关联。如果 taosd 发生故障重启,部分事件可能会重复发送,会保证同一窗口的 windowId 保持不变。
|
||||
1. windowType:字符串类型,表示窗口类型,支持 Time、State、Session、Event、Count 五种类型。
|
||||
|
||||
#### 时间窗口相关字段
|
||||
|
||||
这部分是 windowType 为 Time 时 event 对象才有的字段。
|
||||
1. 如果 eventType 为 WINDOW_OPEN,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. 如果 eventType 为 WINDOW_CLOSE,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. windowEnd:长整型时间戳,表示窗口的结束时间,精度与结果表的时间精度一致。
|
||||
1. result:计算结果,为键值对形式,包含窗口计算的结果列列名及其对应的值。
|
||||
|
||||
#### 状态窗口相关字段
|
||||
|
||||
这部分是 windowType 为 State 时 event 对象才有的字段。
|
||||
1. 如果 eventType 为 WINDOW_OPEN,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. prevState:与状态列的类型相同,表示上一个窗口的状态值。如果没有上一个窗口(即:现在是第一个窗口),则为 NULL。
|
||||
1. curState:与状态列的类型相同,表示当前窗口的状态值。
|
||||
1. 如果 eventType 为 WINDOW_CLOSE,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. windowEnd:长整型时间戳,表示窗口的结束时间,精度与结果表的时间精度一致。
|
||||
1. curState:与状态列的类型相同,表示当前窗口的状态值。
|
||||
1. nextState:与状态列的类型相同,表示下一个窗口的状态值。
|
||||
1. result:计算结果,为键值对形式,包含窗口计算的结果列列名及其对应的值。
|
||||
|
||||
#### 会话窗口相关字段
|
||||
|
||||
这部分是 windowType 为 Session 时 event 对象才有的字段。
|
||||
1. 如果 eventType 为 WINDOW_OPEN,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. 如果 eventType 为 WINDOW_CLOSE,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. windowEnd:长整型时间戳,表示窗口的结束时间,精度与结果表的时间精度一致。
|
||||
1. result:计算结果,为键值对形式,包含窗口计算的结果列列名及其对应的值。
|
||||
|
||||
#### 事件窗口相关字段
|
||||
|
||||
这部分是 windowType 为 Event 时 event 对象才有的字段。
|
||||
1. 如果 eventType 为 WINDOW_OPEN,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. triggerCondition:触发窗口开始的条件信息,包括以下字段:
|
||||
1. conditionIndex:整型,表示满足的触发窗口开始的条件的索引,从 0 开始编号。
|
||||
1. fieldValue:键值对形式,包含条件列列名及其对应的值。
|
||||
1. 如果 eventType 为 WINDOW_CLOSE,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. windowEnd:长整型时间戳,表示窗口的结束时间,精度与结果表的时间精度一致。
|
||||
1. triggerCondition:触发窗口关闭的条件信息,包括以下字段:
|
||||
1. conditionIndex:整型,表示满足的触发窗口关闭的条件的索引,从 0 开始编号。
|
||||
1. fieldValue:键值对形式,包含条件列列名及其对应的值。
|
||||
1. result:计算结果,为键值对形式,包含窗口计算的结果列列名及其对应的值。
|
||||
|
||||
#### 计数窗口相关字段
|
||||
|
||||
这部分是 windowType 为 Count 时 event 对象才有的字段。
|
||||
1. 如果 eventType 为 WINDOW_OPEN,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. 如果 eventType 为 WINDOW_CLOSE,则包含如下字段:
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. windowEnd:长整型时间戳,表示窗口的结束时间,精度与结果表的时间精度一致。
|
||||
1. result:计算结果,为键值对形式,包含窗口计算的结果列列名及其对应的值。
|
||||
|
||||
#### 窗口失效相关字段
|
||||
|
||||
因为流计算过程中会遇到数据乱序、更新、删除等情况,可能造成已生成的窗口被删除,或者结果需要重新计算。此时会向通知地址发送一条 WINDOW_INVALIDATION 的通知,说明哪些窗口已经被删除。
|
||||
|
||||
这部分是 eventType 为 WINDOW_INVALIDATION 时,event 对象才有的字段。
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. windowEnd:长整型时间戳,表示窗口的结束时间,精度与结果表的时间精度一致。
|
||||
|
||||
## 流式计算对虚拟表的支持
|
||||
|
||||
从 v3.3.6.0 开始,流计算能够使用虚拟表(包括虚拟普通表、虚拟子表、虚拟超级表)作为数据源进行计算,语法和非虚拟表完全一致。
|
||||
|
||||
但是虚拟表的行为与非虚拟表存在差异,所以目前在使用流计算对虚拟表进行计算时存在以下限制:
|
||||
|
||||
1. 流计算中涉及的虚拟普通表/虚拟子表的 schema 不允许更改。
|
||||
1. 流计算过程中,如果修改虚拟表某一列对应的数据源,对流计算来说不生效。即:流计算仍只读取老的数据源。
|
||||
1. 流计算过程中,如果虚拟表某一列对应的原始表被删除,之后新建了同名的表和同名的列,流计算不会读取新表的数据。
|
||||
1. 流计算的 watermark 只能是 0,否则创建时就报错。
|
||||
1. 如果流计算的数据源是虚拟超级表,流计算任务启动后新增的子表不参与计算。
|
||||
1. 虚拟表的不同原始表的时间戳不完全一致,数据合并后可能会产生空值,暂不支持插值处理。
|
||||
1. 不处理数据的乱序、更新或删除。即:流创建时不能指定 `ignore update 0` 或者 `ignore expired 0`,否则报错。
|
||||
1. 不支持历史数据计算,即:流创建时不能指定 `fill_history 1`,否则报错。
|
||||
1. 不支持触发模式:MAX_DELAY、FORCE_WINDOW_CLOSE、CONTINUOUS_WINDOW_CLOSE。
|
||||
1. 不支持窗口类型:COUNT_WINDOW。
|
||||
|
|
|
@ -45,7 +45,8 @@ TDengine 支持 `UNION ALL` 和 `UNION` 操作符。UNION ALL 将查询返回的
|
|||
| 9 | LIKE | BINARY、NCHAR 和 VARCHAR | 通配符匹配所指定的模式串 |
|
||||
| 10 | NOT LIKE | BINARY、NCHAR 和 VARCHAR | 通配符不匹配所指定的模式串 |
|
||||
| 11 | MATCH, NMATCH | BINARY、NCHAR 和 VARCHAR | 正则表达式匹配 |
|
||||
| 12 | CONTAINS | JSON | JSON 中是否存在某键 |
|
||||
| 12 | REGEXP, NOT REGEXP | BINARY、NCHAR 和 VARCHAR | 正则表达式匹配 |
|
||||
| 13 | CONTAINS | JSON | JSON 中是否存在某键 |
|
||||
|
||||
LIKE 条件使用通配符字符串进行匹配检查,规则如下:
|
||||
|
||||
|
@ -53,10 +54,10 @@ LIKE 条件使用通配符字符串进行匹配检查,规则如下:
|
|||
- 如果希望匹配字符串中原本就带有的 \_(下划线)字符,那么可以在通配符字符串中写作 \_,即加一个反斜线来进行转义。
|
||||
- 通配符字符串最长不能超过 100 字节。不建议使用太长的通配符字符串,否则将有可能严重影响 LIKE 操作的执行性能。
|
||||
|
||||
MATCH 条件和 NMATCH 条件使用正则表达式进行匹配,规则如下:
|
||||
MATCH/REGEXP 条件和 NMATCH/NOT REGEXP 条件使用正则表达式进行匹配,规则如下:
|
||||
|
||||
- 支持符合 POSIX 规范的正则表达式,具体规范内容可参见 Regular Expressions。
|
||||
- MATCH 和正则表达式匹配时, 返回 TURE. NMATCH 和正则表达式不匹配时, 返回 TRUE.
|
||||
- MATCH 和正则表达式匹配时,返回 TRUE。NMATCH 和正则表达式不匹配时,返回 TRUE。
|
||||
- 只能针对子表名(即 tbname)、字符串类型的标签值进行正则表达式过滤,不支持普通列的过滤。
|
||||
- 正则匹配字符串长度不能超过 128 字节。可以通过参数 maxRegexStringLen 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启客户端才能生效。
|
||||
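以下为正则过滤的示意用法(假设超级表 meters 的子表名形如 d1001;REGEXP 运算符自 v3.3.6.0 起支持):

```sql
-- 仅支持对子表名 tbname 与字符串类型标签做正则过滤
SELECT * FROM meters WHERE tbname REGEXP '^d100[0-9]$';
```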
|
||||
|
@ -64,7 +65,7 @@ MATCH 条件和 NMATCH 条件使用正则表达式进行匹配,规则如下:
|
|||
|
||||
| # | **运算符** | **支持的类型** | **说明** |
|
||||
| --- | :--------: | -------------- | --------------------------------------------------------------------------- |
|
||||
| 1 | AND | BOOL | 逻辑与,如果两个条件均为 TRUE, 则返回 TRUE。如果任一为 FALSE,则返回 FALSE |
|
||||
| 2 | OR | BOOL | 逻辑或,如果任一条件为 TRUE, 则返回 TRUE。如果两者都是 FALSE,则返回 FALSE |
|
||||
| 1 | AND | BOOL | 逻辑与,如果两个条件均为 TRUE,则返回 TRUE。如果任一为 FALSE,则返回 FALSE |
|
||||
| 2 | OR | BOOL | 逻辑或,如果任一条件为 TRUE,则返回 TRUE。如果两者都是 FALSE,则返回 FALSE |
|
||||
|
||||
TDengine 在计算逻辑条件时,会进行短路径优化,即对于 AND,第一个条件为 FALSE,则不再计算第二个条件,直接返回 FALSE;对于 OR,第一个条件为 TRUE,则不再计算第二个条件,直接返回 TRUE。
|
||||
|
|
|
@ -33,7 +33,7 @@ description: 对 JSON 类型如何使用的详细说明
|
|||
|
||||
## 支持的操作
|
||||
|
||||
1. 在 where 条件中时,支持函数 match/nmatch/between and/like/and/or/is null/is not null,不支持 in
|
||||
1. 在 where 条件中时,支持函数 `match`、`nmatch`、`between and`、`like`、`and`、`or`、`is null`、`is not null`,不支持 `in`
|
||||
|
||||
```
|
||||
select * from s1 where info->'k1' match 'v*';
|
||||
|
@ -47,7 +47,7 @@ description: 对 JSON 类型如何使用的详细说明
|
|||
|
||||
2. 支持 json tag 放在 group by、order by、join 子句、union all 以及子查询中,比如 group by json->'key'
|
||||
|
||||
3. 支持 distinct 操作.
|
||||
3. 支持 distinct 操作
|
||||
|
||||
```
|
||||
select distinct info->'k1' from s1
|
||||
|
@ -69,8 +69,8 @@ description: 对 JSON 类型如何使用的详细说明
|
|||
|
||||
3. json 格式限制:
|
||||
|
||||
1. json 输入字符串可以为空("","\t"," "或 null)或 object,不能为非空的字符串,布尔型和数组。
|
||||
2. object 可为{},如果 object 为{},则整个 json 串记为空。key 可为"",若 key 为"",则 json 串中忽略该 k-v 对。
|
||||
1. json 输入字符串可以为空(""、"\t"、" " 或 null)或 object,不能为非空的字符串、布尔型和数组。
|
||||
2. object 可为 {},如果 object 为 {},则整个 json 串记为空。key 可为 "",若 key 为 "",则 json 串中忽略该 k-v 对。
|
||||
3. value 可以为数字(int/double)或字符串或 bool 或 null,暂不可以为数组。不允许嵌套。
|
||||
4. 若 json 字符串中出现两个相同的 key,则第一个生效。
|
||||
5. json 字符串里暂不支持转义。
|
||||
|
|
|
@ -21,8 +21,8 @@ description: TDengine 中使用转义字符的详细规则
|
|||
## 转义字符使用规则
|
||||
|
||||
1. 标识符里有转义字符(数据库名、表名、列名、别名)
|
||||
1. 普通标识符: 直接提示错误的标识符,因为标识符规定必须是数字、字母和下划线,并且不能以数字开头。
|
||||
2. 反引号``标识符: 保持原样,不转义
|
||||
1. 普通标识符:直接提示错误的标识符,因为标识符规定必须是数字、字母和下划线,并且不能以数字开头。
|
||||
2. 反引号 `` 标识符:保持原样,不转义
|
||||
2. 数据里有转义字符
|
||||
1. 遇到上面定义的转义字符会转义(`%`和`_`见下面说明),如果没有匹配的转义字符会忽略掉转义符`\ `(`\x`保持原样)。
|
||||
2. 对于`%`和`_`,因为在`like`里这两个字符是通配符,所以在模式匹配`like`里用`\%`和`\_`表示字符里本身的`%`和`_`,如果在`like`模式匹配上下文之外使用`\%`或`\_`,则它们的计算结果为字符串`\%`和`\_`,而不是`%`和`_`。
|
||||
1. 遇到上面定义的转义字符会转义(`%`和`_`见下面说明),如果没有匹配的转义字符会忽略掉转义符 `\ `(`\x`保持原样)。
|
||||
2. 对于 `%` 和 `_`,因为在 `like` 里这两个字符是通配符,所以在模式匹配 `like` 里用 `\%` 和 `\_` 表示字符里本身的 `%` 和 `_`,如果在 `like` 模式匹配上下文之外使用 `\%` 或 `\_`,则它们的计算结果为字符串 `\%` 和 `\_`,而不是 `%` 和 `_`。
|
||||
|
|
|
@ -9,13 +9,13 @@ description: 合法字符集和命名中的限制规则
|
|||
1. 合法字符:英文字符、数字和下划线。
|
||||
1. 允许英文字符或下划线开头,不允许以数字开头。
|
||||
1. 不区分大小写。
|
||||
1. 不能是[保留关键字](./20-keywords.md)。
|
||||
1. 不能是 [保留关键字](./20-keywords.md)。
|
||||
1. 转义后表(列)名规则:
|
||||
为了兼容支持更多形式的表(列)名,TDengine 引入新的转义符 "`"。使用转义字符以后:
|
||||
- 不再对转义字符中的内容进行大小写统一,即可以保留用户指定表名中的大小写属性,例如:\`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。
|
||||
- 不再对转义字符中的内容进行大小写统一,即可以保留用户指定表名中的大小写属性,例如 \`aBc\` 和 \`abc\` 是不同的表(列)名,但是 abc 和 aBc 是相同的表(列)名。
|
||||
- 可以创建包含字母、数字和下划线以外字符的表(列)名,例如:\`abc@TD\`,但是转义后名称中仍然不能包含`.`,否则会提示`The table name cannot contain '.'`。
|
||||
- 可以创建以数字开头的表(列)名,例如\`1970\`。
|
||||
- 可以创建以[保留关键字](./20-keywords.md)命名的表(列)名,例如\`select\`。
|
||||
- 可以创建以数字开头的表(列)名,例如 \`1970\`。
|
||||
- 可以创建以 [保留关键字](./20-keywords.md) 命名的表(列)名,例如 \`select\`。
|
||||
|
||||
## 密码合法字符集
|
||||
|
||||
|
@ -27,7 +27,7 @@ description: 合法字符集和命名中的限制规则
|
|||
|
||||
- 数据库名最大长度为 64 字节
|
||||
- 表名最大长度为 192 字节,不包括数据库名前缀和分隔符
|
||||
- 每行数据最大长度 48KB(从 3.0.5.0 版本开始为 64KB) (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
- 每行数据最大长度 48KB(从 v3.0.5.0 开始为 64KB)(注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
|
||||
- 列名最大长度为 64 字节
|
||||
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
|
||||
- 标签名最大长度为 64 字节
|
||||
|
@ -37,6 +37,6 @@ description: 合法字符集和命名中的限制规则
|
|||
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
|
||||
- 数据库的副本数只能设置为 1 或 3
|
||||
- 用户名的最大长度是 23 字节
|
||||
- 用户密码的长度范围是 8-16 字节
|
||||
- 用户密码的长度范围是 8-255 字节
|
||||
- 总数据行数取决于可用资源
|
||||
- 单个数据库的虚拟结点数上限为 1024
|
||||
|
|
|
@ -23,11 +23,11 @@ description: TDengine 保留关键字的详细列表
|
|||
| ALIVE | |
|
||||
| ALL | |
|
||||
| ALTER | |
|
||||
| ANALYZE | 3.3.4.3 及后续版本 |
|
||||
| ANALYZE | 3.3.4.3+ |
|
||||
| AND | |
|
||||
| ANODE | 3.3.4.3 及后续版本 |
|
||||
| ANODES | 3.3.4.3 及后续版本 |
|
||||
| ANOMALY_WINDOW | 3.3.4.3 及后续版本 |
|
||||
| ANODE | 3.3.4.3+ |
|
||||
| ANODES | 3.3.4.3+ |
|
||||
| ANOMALY_WINDOW | 3.3.4.3+ |
|
||||
| ANTI | |
|
||||
| APPS | |
|
||||
| ARBGROUPS | |
|
||||
|
@ -35,8 +35,11 @@ description: TDengine 保留关键字的详细列表
|
|||
| AS | |
|
||||
| ASC | |
|
||||
| ASOF | |
|
||||
| ASYNC | 3.3.6.0+ |
|
||||
| AT_ONCE | |
|
||||
| ATTACH | |
|
||||
| AUTO | 3.3.5.0+ |
|
||||
| ASSIGN | 3.3.6.0+ |
|
||||
|
||||
### B
|
||||
|关键字|说明|
|
||||
|
@ -76,12 +79,16 @@ description: TDengine 保留关键字的详细列表
|
|||
| CLIENT_VERSION | |
|
||||
| CLUSTER | |
|
||||
| COLON | |
|
||||
| COLS | 3.3.6.0+ |
|
||||
| COLUMN | |
|
||||
| COMMA | |
|
||||
| COMMENT | |
|
||||
| COMP | |
|
||||
| COMPACT | |
|
||||
| COMPACTS | |
|
||||
| COMPACT_INTERVAL | 3.3.5.0+ |
|
||||
| COMPACT_TIME_OFFSET | 3.3.5.0+ |
|
||||
| COMPACT_TIME_RANGE | 3.3.5.0+ |
|
||||
| CONCAT | |
|
||||
| CONFLICT | |
|
||||
| CONNECTION | |
|
||||
|
@ -90,6 +97,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| CONSUMER | |
|
||||
| CONSUMERS | |
|
||||
| CONTAINS | |
|
||||
| CONTINUOUS_WINDOW_CLOSE | 3.3.6.0+ |
|
||||
| COPY | |
|
||||
| COUNT | |
|
||||
| COUNT_WINDOW | |
|
||||
|
@ -103,7 +111,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| DATABASE | |
|
||||
| DATABASES | |
|
||||
| DBS | |
|
||||
| DECIMAL | |
|
||||
| DECIMAL | 3.3.6.0+ |
|
||||
| DEFERRED | |
|
||||
| DELETE | |
|
||||
| DELETE_MARK | |
|
||||
|
@ -111,6 +119,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| DESC | |
|
||||
| DESCRIBE | |
|
||||
| DETACH | |
|
||||
| DISK_INFO | 3.3.5.0+ |
|
||||
| DISTINCT | |
|
||||
| DISTRIBUTED | |
|
||||
| DIVIDE | |
|
||||
|
@ -143,19 +152,19 @@ description: TDengine 保留关键字的详细列表
|
|||
|关键字|说明|
|
||||
|----------------------|-|
|
||||
| FAIL | |
|
||||
| FHIGH | 3.3.4.3 及后续版本 |
|
||||
| FHIGH | 3.3.4.3+ |
|
||||
| FILE | |
|
||||
| FILL | |
|
||||
| FILL_HISTORY | |
|
||||
| FIRST | |
|
||||
| FLOAT | |
|
||||
| FLOW | 3.3.4.3 及后续版本 |
|
||||
| FLOW | 3.3.4.3+ |
|
||||
| FLUSH | |
|
||||
| FOR | |
|
||||
| FORCE | |
|
||||
| FORCE_WINDOW_CLOSE | 3.3.4.3 及后续版本 |
|
||||
| FORCE_WINDOW_CLOSE | 3.3.4.3+ |
|
||||
| FROM | |
|
||||
| FROWTS | 3.3.4.3 及后续版本 |
|
||||
| FROWTS | 3.3.4.3+ |
|
||||
| FULL | |
|
||||
| FUNCTION | |
|
||||
| FUNCTIONS | |
|
||||
|
@ -201,6 +210,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| INTO | |
|
||||
| IPTOKEN | |
|
||||
| IROWTS | |
|
||||
| IROWTS_ORIGIN | 3.3.5.0+ |
|
||||
| IS | |
|
||||
| IS_IMPORT | |
|
||||
| ISFILLED | |
|
||||
|
@ -231,7 +241,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| LEADER | |
|
||||
| LEADING | |
|
||||
| LEFT | |
|
||||
| LEVEL | 3.3.0.0 到 3.3.2.11 的所有版本 |
|
||||
| LEVEL | 3.3.0.0 - 3.3.2.11 |
|
||||
| LICENCES | |
|
||||
| LIKE | |
|
||||
| LIMIT | |
|
||||
|
@ -252,6 +262,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| MEDIUMBLOB | |
|
||||
| MERGE | |
|
||||
| META | |
|
||||
| META_ONLY | 3.3.6.0+ |
|
||||
| MINROWS | |
|
||||
| MINUS | |
|
||||
| MNODE | |
|
||||
|
@ -269,6 +280,8 @@ description: TDengine 保留关键字的详细列表
|
|||
| NONE | |
|
||||
| NORMAL | |
|
||||
| NOT | |
|
||||
| NOTIFY | 3.3.6.0+ |
|
||||
| NOTIFY_HISTORY | 3.3.6.0+ |
|
||||
| NOTNULL | |
|
||||
| NOW | |
|
||||
| NULL | |
|
||||
|
@ -282,6 +295,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| OFFSET | |
|
||||
| ON | |
|
||||
| ONLY | |
|
||||
| ON_FAILURE | 3.3.6.0+ |
|
||||
| OR | |
|
||||
| ORDER | |
|
||||
| OUTER | |
|
||||
|
@ -329,6 +343,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| RATIO | |
|
||||
| READ | |
|
||||
| RECURSIVE | |
|
||||
| REGEXP | 3.3.6.0+ |
|
||||
| REDISTRIBUTE | |
|
||||
| REM | |
|
||||
| REPLACE | |
|
||||
|
@ -417,6 +432,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| TRANSACTIONS | |
|
||||
| TRIGGER | |
|
||||
| TRIM | |
|
||||
| TRUE_FOR | 3.3.6.0+ |
|
||||
| TSDB_PAGESIZE | |
|
||||
| TSERIES | |
|
||||
| TSMA | |
|
||||
|
@ -475,7 +491,7 @@ description: TDengine 保留关键字的详细列表
|
|||
| WINDOW_OFFSET | |
|
||||
| WITH | |
|
||||
| WRITE | |
|
||||
| WSTART | |
|
||||
| WSTART | |
|
||||
|
||||
### \_
|
||||
|
||||
|
|
|
@ -32,9 +32,9 @@ DROP DNODE dnode_id [force] [unsafe]
|
|||
|
||||
注意删除 dnode 不等于停止相应的进程。实际中推荐先将一个 dnode 删除之后再停止其所对应的进程。
|
||||
|
||||
只有在线节点可以被删除。如果要强制删除离线节点,需要执行强制删除操作, 即指定force选项。
|
||||
只有在线节点可以被删除。如果要强制删除离线节点,需要执行强制删除操作,即指定 force 选项。
|
||||
|
||||
当节点上存在单副本,并且节点处于离线,如果要强制删除该节点,需要执行非安全删除,即制定unsafe,并且数据不可再恢复。
|
||||
当节点上存在单副本,并且节点处于离线状态时,如果要强制删除该节点,需要执行非安全删除,即指定 unsafe,并且数据不可再恢复。
|
||||
|
||||
## 修改数据节点配置
|
||||
|
||||
|
@ -44,27 +44,27 @@ ALTER DNODE dnode_id dnode_option
|
|||
ALTER ALL DNODES dnode_option
|
||||
```
|
||||
|
||||
对于支持动态修改的配置参数,您可以使用 ALTER DNODE 或 ALTER ALL DNODES 语法修改 dnode 中配置参数的值,自 3.3.4.0 后,修改的配置参数将自动持久化,即便数据库服务重启后仍然生效。
|
||||
对于支持动态修改的配置参数,您可以使用 ALTER DNODE 或 ALTER ALL DNODES 语法修改 dnode 中配置参数的值,自 v3.3.4.0 后,修改的配置参数将自动持久化,即便数据库服务重启后仍然生效。
|
||||
|
||||
对于一个配置参数是否支持动态修改,请您参考以下页面:[taosd 参考手册](../01-components/01-taosd.md)
|
||||
对于一个配置参数是否支持动态修改,请您参考 [taosd 参考手册](../01-components/01-taosd.md)
|
||||
|
||||
value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输出级别为 debug:
|
||||
value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输出级别为 debug。
|
||||
|
||||
```sql
|
||||
ALTER DNODE 1 'debugFlag' '143';
|
||||
```
|
||||
|
||||
### 补充说明:
|
||||
配置参数在 dnode 中被分为全局配置参数与局部配置参数,您可以查看 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 中的 category 字段来确认配置参数属于全局配置参数还是局部配置参数:
|
||||
配置参数在 dnode 中被分为全局配置参数与局部配置参数,您可以查看 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 中的 category 字段来确认配置参数属于全局配置参数还是局部配置参数。
|
||||
1. 局部配置参数:您可以使用 ALTER DNODE 或 ALTER ALL DNODES 来更新某一个 dnode 或全部 dnodes 的局部配置参数。
|
||||
2. 全局配置参数:全局配置参数要求各个 dnode 保持一致,所以您只可以使用 ALTER ALL DNODES 来更新全部 dnodes 的全局配置参数。
|
||||
|
||||
配置参数是否可以动态修改,有以下三种情况:
|
||||
1. 支持动态修改 立即生效
|
||||
2. 支持动态修改 重启生效
|
||||
1. 支持动态修改,立即生效
|
||||
2. 支持动态修改,重启生效
|
||||
3. 不支持动态修改
|
||||
|
||||
对于重启后生效的配置参数,您可以通过 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 看到修改后的值,但是需要重启数据库服务才使其生效。
|
||||
对于重启后生效的配置参数,您可以通过 `SHOW VARIABLES` 或 `SHOW DNODE dnode_id VARIABLE` 看到修改后的值,但是需要重启数据库服务才使其生效。
|
||||
|
||||
## 添加管理节点
|
||||
|
||||
|
@ -96,7 +96,7 @@ DROP MNODE ON DNODE dnode_id;
|
|||
CREATE QNODE ON DNODE dnode_id;
|
||||
```
|
||||
|
||||
系统启动默认没有 QNODE,用户可以创建 QNODE 来实现计算和存储的分离。一个 DNODE 上只能创建一个 QNODE。一个 DNODE 的 `supportVnodes` 参数如果不为 0,同时又在其上创建上 QNODE,则在该 dnode 中既有负责存储管理的 vnode 又有负责查询计算的 qnode,如果还在该 dnode 上创建了 mnode,则一个 dnode 上最多三种逻辑节点都可以存在。但通过配置也可以使其彻底分离。将一个 dnode 的`supportVnodes`配置为 0,可以选择在其上创建 mnode 或者 qnode 中的一种,这样可以实现三种逻辑节点在物理上的彻底分离。
|
||||
系统启动默认没有 QNODE,用户可以创建 QNODE 来实现计算和存储的分离。一个 dnode 上只能创建一个 QNODE。一个 dnode 的 `supportVnodes` 参数如果不为 0,同时又在其上创建上 QNODE,则在该 dnode 中既有负责存储管理的 vnode 又有负责查询计算的 qnode,如果还在该 dnode 上创建了 mnode,则一个 dnode 上最多三种逻辑节点都可以存在。但通过配置也可以使其彻底分离。将一个 dnode 的`supportVnodes`配置为 0,可以选择在其上创建 mnode 或者 qnode 中的一种,这样可以实现三种逻辑节点在物理上的彻底分离。
|
||||
|
||||
## 查看查询节点
|
||||
|
||||
|
@ -104,7 +104,7 @@ CREATE QNODE ON DNODE dnode_id;
|
|||
SHOW QNODES;
|
||||
```
|
||||
|
||||
列出集群中所有查询节点,包括 ID,及所在 DNODE。
|
||||
列出集群中所有查询节点,包括 ID 及所在 dnode。
|
||||
|
||||
## 删除查询节点
|
||||
|
||||
|
@ -112,7 +112,7 @@ SHOW QNODES;
|
|||
DROP QNODE ON DNODE dnode_id;
|
||||
```
|
||||
|
||||
删除 ID 为 dnode_id 的 DNODE 上的 QNODE,但并不会影响该 dnode 的状态。
|
||||
删除 ID 为 dnode_id 的 dnode 上的 qnode,但并不会影响该 dnode 的状态。
|
||||
|
||||
## 查询集群状态
|
||||
|
||||
|
@ -120,7 +120,10 @@ DROP QNODE ON DNODE dnode_id;
|
|||
SHOW CLUSTER ALIVE;
|
||||
```
|
||||
|
||||
查询当前集群的状态是否可用,返回值: 0:不可用 1:完全可用 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
查询当前集群的状态是否可用,返回值:
|
||||
- 0:不可用
|
||||
- 1:完全可用
|
||||
- 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
|
||||
## 修改客户端配置
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@ title: 元数据
|
|||
description: Information_Schema 数据库中存储了系统中所有的元数据信息
|
||||
---
|
||||
|
||||
TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... FROM INFORMATION_SCHEMA.tablename 具有以下优点:
|
||||
TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 `SELECT ... FROM INFORMATION_SCHEMA.tablename` 具有以下优点:
|
||||
|
||||
1. 可以使用 USE 语句将 INFORMATION_SCHEMA 设为默认数据库
|
||||
2. 可以使用 SELECT 语句熟悉的语法,只需要学习一些表名和列名
|
||||
|
@ -15,7 +15,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
:::info
|
||||
|
||||
- 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们仍然被保留。
|
||||
- 系统表中的一些列可能是关键字,在查询时需要使用转义符'\`',例如查询数据库 test 有几个 VGROUP:
|
||||
- 系统表中的一些列可能是关键字,在查询时需要使用转义符 '\`',例如查询数据库 test 有几个 VGROUP。
|
||||
```sql
|
||||
select `vgroups` from ins_databases where name = 'test';
|
||||
```
|
||||
|
@ -26,11 +26,11 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_DNODES
|
||||
|
||||
提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。 SYSINFO 为 0 的用户不能查看此表。
|
||||
提供 dnode 的相关信息。也可以使用 SHOW DNODES 来查询这些信息。SYSINFO 为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------------: | ------------ | ----------------------------------------------------------------------------------------------------- |
|
||||
| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数。需要注意,`vnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 1 | vnodes | SMALLINT | dnode 中的实际 vnode 个数。需要注意,`vnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 2 | support_vnodes | SMALLINT | 最多支持的 vnode 个数 |
|
||||
| 3 | status | BINARY(10) | 当前状态 |
|
||||
| 4 | note | BINARY(256) | 离线原因等信息 |
|
||||
|
@ -40,7 +40,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_MNODES
|
||||
|
||||
提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。 SYSINFO 为 0 的用户不能查看此表。
|
||||
提供 mnode 的相关信息。也可以使用 SHOW MNODES 来查询这些信息。SYSINFO 为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ------------------ |
|
||||
|
@ -73,7 +73,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|
||||
## INS_CLUSTER
|
||||
|
||||
存储集群相关信息。 SYSINFO 属性为 0 的用户不能查看此表。
|
||||
存储集群相关信息。SYSINFO 属性为 0 的用户不能查看此表。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :---------: | ------------ | ---------- |
|
||||
|
@ -90,31 +90,31 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 1 | name | VARCHAR(64) | 数据库名 |
|
||||
| 2 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 3 | ntables | INT | 数据库中表的数量,包含子表和普通表但不包含超级表 |
|
||||
| 4 | vgroups | INT | 数据库中有多少个 vgroup。需要注意,`vgroups` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | replica | INT | 副本数。需要注意,`replica` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | vgroups | INT | 数据库中有多少个 vgroup。需要注意,`vgroups` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | replica | INT | 副本数。需要注意,`replica` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | strict | VARCHAR(4) | 废弃参数 |
|
||||
| 8 | duration | VARCHAR(10) | 单文件存储数据的时间跨度。需要注意,`duration` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。内部存储单位为分钟,查询时有可能转换为天或小时展示 |
|
||||
| 9 | keep | VARCHAR(32) | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 内部存储单位为分钟,查询时有可能转换为天或小时展示 |
|
||||
| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB。需要注意,`buffer` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB。需要注意,`pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数。需要注意,`pages` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 13 | minrows | INT | 文件块中记录的最小条数。需要注意,`minrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 14 | maxrows | INT | 文件块中记录的最大条数。需要注意,`maxrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 15 | comp | INT | 数据压缩方式。需要注意,`comp` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 16 | precision | VARCHAR(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | keep | VARCHAR(32) | 数据保留时长。需要注意,`keep` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 内部存储单位为分钟,查询时有可能转换为天或小时展示 |
|
||||
| 10 | buffer | INT | 每个 vnode 写缓存的内存块大小,单位 MB。需要注意,`buffer` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 11 | pagesize | INT | 每个 VNODE 中元数据存储引擎的页大小,单位为 KB。需要注意,`pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 12 | pages | INT | 每个 vnode 元数据存储引擎的缓存页个数。需要注意,`pages` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 13 | minrows | INT | 文件块中记录的最小条数。需要注意,`minrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 14 | maxrows | INT | 文件块中记录的最大条数。需要注意,`maxrows` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 15 | comp | INT | 数据压缩方式。需要注意,`comp` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 16 | precision | VARCHAR(2) | 时间分辨率。需要注意,`precision` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 17 | status | VARCHAR(10) | 数据库状态 |
|
||||
| 18 | retentions | VARCHAR(60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表。需要注意,`single_stable` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 20 | cachemodel | VARCHAR(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小。需要注意,`cachesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 22 | wal_level | INT | WAL 级别。需要注意,`wal_level` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 24 | wal_retention_period | INT | WAL 的保存时长,单位为秒。需要注意,`wal_retention_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 25 | wal_retention_size | INT | WAL 的保存上限。需要注意,`wal_retention_size` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 26 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数。需要注意,`stt_trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 27 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。需要注意,`table_prefix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 28 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。需要注意,`table_suffix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 29 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小。需要注意,`tsdb_pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 18 | retentions | VARCHAR(60) | 数据的聚合周期和保存时长。需要注意,`retentions` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 19 | single_stable | BOOL | 表示此数据库中是否只可以创建一个超级表。需要注意,`single_stable` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 20 | cachemodel | VARCHAR(60) | 表示是否在内存中缓存子表的最近数据。需要注意,`cachemodel` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 21 | cachesize | INT | 表示每个 vnode 中用于缓存子表最近数据的内存大小。需要注意,`cachesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 22 | wal_level | INT | WAL 级别。需要注意,`wal_level` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 23 | wal_fsync_period | INT | 数据落盘周期。需要注意,`wal_fsync_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 24 | wal_retention_period | INT | WAL 的保存时长,单位为秒。需要注意,`wal_retention_period` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 25 | wal_retention_size | INT | WAL 的保存上限。需要注意,`wal_retention_size` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 26 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数。需要注意,`stt_trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 27 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。需要注意,`table_prefix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 28 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。需要注意,`table_suffix` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 29 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小。需要注意,`tsdb_pagesize` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
|
||||
## INS_FUNCTIONS
|
||||
|
||||
|
@ -123,15 +123,15 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :-----------: | ------------- | --------------------------------------------------------------------------------------------- |
|
||||
| 1 | name | VARCHAR(64) | 函数名 |
|
||||
| 2 | comment | VARCHAR(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | aggregate | INT | 是否为聚合函数。需要注意,`aggregate` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 2 | comment | VARCHAR(255) | 补充说明。需要注意,`comment` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | aggregate | INT | 是否为聚合函数。需要注意,`aggregate` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 4 | output_type | VARCHAR(31) | 输出类型 |
|
||||
| 5 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 6 | code_len | INT | 代码长度 |
|
||||
| 7 | bufsize | INT | buffer 大小 |
|
||||
| 8 | func_language | VARCHAR(31) | 自定义函数编程语言 |
|
||||
| 9 | func_body | VARCHAR(16384) | 函数体定义 |
|
||||
| 10 | func_version | INT | 函数版本号。初始版本为0,每次替换更新,版本号加1。 |
|
||||
| 10 | func_version | INT | 函数版本号。初始版本为 0,每次替换更新,版本号加 1。 |
|
||||
|
||||
|
||||
## INS_INDEXES
|
||||
|
@ -145,7 +145,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 3 | index_name | VARCHAR(192) | 索引名 |
|
||||
| 4 | column_name | VARCHAR(64) | 建索引的列的列名 |
|
||||
| 5 | index_type | VARCHAR(10) | 目前有 SMA 和 tag |
|
||||
| 6 | index_extensions | VARCHAR(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。 |
|
||||
| 6 | index_extensions | VARCHAR(256) | 索引的额外信息。对 SMA/tag 类型的索引,是函数名的列表。|
|
||||
|
||||
## INS_STABLES
|
||||
|
||||
|
@ -157,12 +157,12 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 2 | db_name | VARCHAR(64) | 超级表所在的数据库的名称 |
|
||||
| 3 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 4 | columns | INT | 列数目 |
|
||||
| 5 | tags | INT | 标签数目。需要注意,`tags` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 5 | tags | INT | 标签数目。需要注意,`tags` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | last_update | TIMESTAMP | 最后更新时间 |
|
||||
| 7 | table_comment | VARCHAR(1024) | 表注释 |
|
||||
| 8 | watermark | VARCHAR(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | max_delay | VARCHAR(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | rollup | VARCHAR(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | watermark | VARCHAR(64) | 窗口的关闭时间。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | max_delay | VARCHAR(64) | 推送计算结果的最大延迟。需要注意,`max_delay` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 10 | rollup | VARCHAR(128) | rollup 聚合函数。需要注意,`rollup` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
|
||||
## INS_TABLES
|
||||
|
||||
|
@ -177,7 +177,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 5 | stable_name | VARCHAR(192) | 所属的超级表表名 |
|
||||
| 6 | uid | BIGINT | 表 id |
|
||||
| 7 | vgroup_id | INT | vgroup id |
|
||||
| 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 9 | table_comment | VARCHAR(1024) | 表注释 |
|
||||
| 10 | type | VARCHAR(21) | 表类型 |
|
||||
|
||||
|
@ -215,7 +215,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 1 | name | VARCHAR(24) | 用户名 |
|
||||
| 2 | super | TINYINT | 用户是否为超级用户,1:是,0:否 |
|
||||
| 3 | enable | TINYINT | 用户是否启用,1:是,0:否 |
|
||||
| 4 | sysinfo | TINYINT | 用户是否可查看系统信息,1:是, 0:否 |
|
||||
| 4 | sysinfo | TINYINT | 用户是否可查看系统信息,1:是,0:否 |
|
||||
| 5 | create_time | TIMESTAMP | 创建时间 |
|
||||
| 6 | allowed_host | VARCHAR(49152)| IP 白名单 |
|
||||
|
||||
|
@ -227,13 +227,13 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :---------: | ------------ | --------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | version | VARCHAR(9) | 企业版授权说明:official(官方授权的)/trial(试用的) |
|
||||
| 2 | cpu_cores | VARCHAR(9) | 授权使用的 CPU 核心数量 |
|
||||
| 3 | dnodes | VARCHAR(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | streams | VARCHAR(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 5 | users | VARCHAR(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | accounts | VARCHAR(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | storage | VARCHAR(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | connections | VARCHAR(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | databases | VARCHAR(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | dnodes | VARCHAR(10) | 授权使用的 dnode 节点数量。需要注意,`dnodes` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 4 | streams | VARCHAR(10) | 授权创建的流数量。需要注意,`streams` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 5 | users | VARCHAR(10) | 授权创建的用户数量。需要注意,`users` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 6 | accounts | VARCHAR(10) | 授权创建的帐户数量。需要注意,`accounts` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 7 | storage | VARCHAR(21) | 授权使用的存储空间大小。需要注意,`storage` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | connections | VARCHAR(21) | 授权使用的客户端连接数量。需要注意,`connections` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 9 | databases | VARCHAR(11) | 授权使用的数据库数量。需要注意,`databases` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 10 | speed | VARCHAR(9) | 授权使用的数据点每秒写入数量 |
|
||||
| 11 | querytime | VARCHAR(9) | 授权使用的查询总时长 |
|
||||
| 12 | timeseries | VARCHAR(21) | 授权使用的测点数量 |
|
||||
|
@ -248,7 +248,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :-------: | ------------ | ------------------------------------------------------------------------------------------------ |
|
||||
| 1 | vgroup_id | INT | vgroup id |
|
||||
| 2 | db_name | VARCHAR(32) | 数据库名 |
|
||||
| 3 | tables | INT | 此 vgroup 内有多少表。需要注意,`tables` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | tables | INT | 此 vgroup 内有多少表。需要注意,`tables` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
| 4 | status | VARCHAR(10) | 此 vgroup 的状态 |
|
||||
| 5 | v1_dnode | INT | 第一个成员所在的 dnode 的 id |
|
||||
| 6 | v1_status | VARCHAR(10) | 第一个成员的状态 |
|
||||
|
@ -258,7 +258,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 10 | v3_status | VARCHAR(10) | 第三个成员的状态 |
|
||||
| 11 | nfiles | INT | 此 vgroup 中数据/元数据文件的数量 |
|
||||
| 12 | file_size | INT | 此 vgroup 中数据/元数据文件的大小 |
|
||||
| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1: 是, 0: 否 |
|
||||
| 13 | tsma | TINYINT | 此 vgroup 是否专用于 Time-range-wise SMA,1:是,0:否 |
|
||||
|
||||
## INS_CONFIGS
|
||||
|
||||
|
@ -267,7 +267,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
|
||||
| 1 | name | VARCHAR(32) | 配置项名称 |
|
||||
| 2 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 2 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
|
||||
## INS_DNODE_VARIABLES
|
||||
|
||||
|
@ -277,7 +277,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| --- | :------: | ------------ | --------------------------------------------------------------------------------------- |
|
||||
| 1 | dnode_id | INT | dnode 的 ID |
|
||||
| 2 | name | VARCHAR(32) | 配置项名称 |
|
||||
| 3 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 3 | value | VARCHAR(64) | 该配置项的值。需要注意,`value` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
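例如,下面的示意查询(dnode 编号取 1 为假设)读取某个 dnode 的配置项取值,注意 `value` 作为关键字需要转义:

```sql
-- 查看 dnode 1 的全部配置项取值
SELECT name, `value`
FROM information_schema.ins_dnode_variables
WHERE dnode_id = 1;
```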
|
||||
## INS_TOPICS
|
||||
|
||||
|
@ -312,7 +312,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 5 | source_db | VARCHAR(64) | 源数据库 |
|
||||
| 6 | target_db | VARCHAR(64) | 目的数据库 |
|
||||
| 7 | target_table | VARCHAR(192) | 流计算写入的目标表 |
|
||||
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
|
||||
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。|
|
||||
|
||||
## INS_USER_PRIVILEGES
|
||||
|
@ -333,14 +333,14 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
|:----|:-----------|:------------|:--------|
|
||||
| 1 | db_name | VARCHAR(32) | 数据库名称 |
|
||||
| 2 | vgroup_id | INT | vgroup 的 ID |
|
||||
| 3 | wal | BIGINT | wal 文件大小, 单位为 K |
|
||||
| 4 | data1 | BIGINT | 一级存储上数据文件的大小,单位为KB |
|
||||
| 5 | data2 | BIGINT | 二级存储上数据文件的大小,单位为 KB |
|
||||
| 6 | data3 | BIGINT | 三级存储上数据文件的大小, 单位为KB |
|
||||
| 7 | cache_rdb | BIGINT | last/last_row 文件的大小,单位为KB |
|
||||
| 8 | table_meta | BIGINT | meta 文件的大小, 单位为KB |
|
||||
| 9 | s3 | BIGINT | s3 上占用的大小, 单位为KB |
|
||||
| 10 | raw_data | BIGINT | 预估的原始数据的大小, 单位为KB |
|
||||
| 3 | wal | BIGINT | wal 文件大小,单位为 KB |
|
||||
| 4 | data1 | BIGINT | 一级存储上数据文件的大小,单位为 KB |
|
||||
| 5 | data2 | BIGINT | 二级存储上数据文件的大小,单位为 KB |
|
||||
| 6 | data3 | BIGINT | 三级存储上数据文件的大小,单位为 KB |
|
||||
| 7 | cache_rdb | BIGINT | last/last_row 文件的大小,单位为 KB |
|
||||
| 8 | table_meta | BIGINT | meta 文件的大小,单位为 KB |
|
||||
| 9 | s3 | BIGINT | s3 上占用的大小,单位为 KB |
|
||||
| 10 | raw_data | BIGINT | 预估的原始数据的大小,单位为 KB |
|
||||
|
||||
|
||||
## INS_FILESETS
|
||||
|
|
|
@ -14,7 +14,7 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
|
|||
| --- | :----------: | ------------ | ------------------------------- |
|
||||
| 1 | app_id | UBIGINT | 客户端 ID |
|
||||
| 2 | ip | BINARY(16) | 客户端地址 |
|
||||
| 3 | pid | INT | 客户端进程 号 |
|
||||
| 3 | pid | INT | 客户端进程号 |
|
||||
| 4 | name | BINARY(24) | 客户端名称 |
|
||||
| 5 | start_time | TIMESTAMP | 客户端启动时间 |
|
||||
| 6 | insert_req | UBIGINT | insert 请求次数 |
|
||||
|
@ -69,7 +69,7 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
|
|||
| 1 | consumer_id | BIGINT | 消费者的唯一 ID |
|
||||
| 2 | consumer_group | BINARY(192) | 消费者组 |
|
||||
| 3 | client_id | BINARY(192) | 用户自定义字符串,通过创建 consumer 时指定 client_id 来展示 |
|
||||
| 4 | status | BINARY(20) | 消费者当前状态。消费者状态包括:ready(正常可用)、 lost(连接已丢失)、 rebalancing(消费者所属 vgroup 正在分配中)、unknown(未知状态)|
|
||||
| 4 | status | BINARY(20) | 消费者当前状态。消费者状态包括:ready(正常可用)、lost(连接已丢失)、rebalancing(消费者所属 vgroup 正在分配中)、unknown(未知状态)|
|
||||
| 5 | topics | BINARY(204) | 被订阅的 topic。若订阅多个 topic,则展示为多行 |
|
||||
| 6 | up_time | TIMESTAMP | 第一次连接 taosd 的时间 |
|
||||
| 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 |
|
||||
|
|
|
@ -20,7 +20,7 @@ SHOW APPS;
|
|||
SHOW CLUSTER;
|
||||
```
|
||||
|
||||
显示当前集群的信息
|
||||
显示当前集群的信息。
|
||||
|
||||
## SHOW CLUSTER ALIVE
|
||||
|
||||
|
@ -28,17 +28,22 @@ SHOW CLUSTER;
|
|||
SHOW CLUSTER ALIVE;
|
||||
```
|
||||
|
||||
查询当前集群的状态是否可用,返回值: 0:不可用 1:完全可用 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
查询当前集群的状态是否可用,返回值如下
|
||||
- 0:不可用
|
||||
- 1:完全可用
|
||||
- 2:部分可用(集群中部分节点下线,但其它节点仍可以正常使用)
|
||||
|
||||
## SHOW CLUSTER MACHINES
|
||||
|
||||
```sql
|
||||
SHOW CLUSTER MACHINES; // 从 TDengine 3.2.3.0 版本开始支持
|
||||
SHOW CLUSTER MACHINES;
|
||||
```
|
||||
|
||||
显示集群的机器码等信息。
|
||||
|
||||
注:企业版独有
|
||||
备注
|
||||
- 企业版功能
|
||||
- v3.2.3.0 开始支持
|
||||
|
||||
## SHOW CONNECTIONS
|
||||
|
||||
|
@ -70,7 +75,7 @@ SHOW CREATE DATABASE db_name;
|
|||
SHOW CREATE STABLE [db_name.]stb_name;
|
||||
```
|
||||
|
||||
显示 tb_name 指定的超级表的创建语句
|
||||
显示 stb_name 指定的超级表的创建语句。
|
||||
|
||||
## SHOW CREATE TABLE
|
||||
|
||||
|
@ -114,7 +119,8 @@ SHOW GRANTS FULL; // 从 TDengine 3.2.3.0 版本开始支持
|
|||
|
||||
显示企业版许可授权的信息。
|
||||
|
||||
注:企业版独有
|
||||
备注
|
||||
- 企业版功能
|
||||
|
||||
## SHOW INDEXES
|
||||
|
||||
|
@ -128,10 +134,10 @@ SHOW INDEXES FROM [db_name.]tbl_name;
|
|||
## SHOW LOCAL VARIABLES
|
||||
|
||||
```sql
|
||||
SHOW LOCAL VARIABLES;
|
||||
SHOW LOCAL VARIABLES [like pattern];
|
||||
```
|
||||
|
||||
显示当前客户端配置参数的运行值。
|
||||
显示当前客户端配置参数的运行值,可使用 like pattern 根据 name 进行过滤。
|
||||
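例如,下面的示意语句(参数名模式为假设)只显示名称中包含 debug 的客户端参数:

```sql
SHOW LOCAL VARIABLES LIKE '%debug%';
```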
|
||||
## SHOW MNODES
|
||||
|
||||
|
@ -147,7 +153,7 @@ SHOW MNODES;
|
|||
SHOW QNODES;
|
||||
```
|
||||
|
||||
显示当前系统中 QNODE (查询节点)的信息。
|
||||
显示当前系统中 QNODE(查询节点)的信息。
|
||||
|
||||
## SHOW QUERIES
|
||||
|
||||
|
@ -155,7 +161,7 @@ SHOW QNODES;
|
|||
SHOW QUERIES;
|
||||
```
|
||||
|
||||
显示当前系统中正在进行的写入(更新)/查询/删除。(由于内部 API 命名原因,所以统称 QUERIES)
|
||||
显示当前系统中正在进行的写入(更新)、查询、删除。(由于内部 API 命名原因,所以统称 QUERIES)
|
||||
|
||||
## SHOW SCORES
|
||||
|
||||
|
@ -165,7 +171,8 @@ SHOW SCORES;
|
|||
|
||||
显示系统被许可授权的容量的信息。
|
||||
|
||||
注:企业版独有。
|
||||
备注
|
||||
- 企业版功能
|
||||
|
||||
## SHOW STABLES
|
||||
|
||||
|
@ -219,11 +226,11 @@ SHOW TABLE DISTRIBUTED table_name;
|
|||
|
||||
_block_dist: Total_Blocks=[5] Total_Size=[93.65 KB] Average_size=[18.73 KB] Compression_Ratio=[23.98 %]
|
||||
|
||||
Total_Blocks: 表 d0 占用的 block 个数为 5 个
|
||||
Total_Blocks:表 d0 占用的 block 个数为 5 个
|
||||
|
||||
Total_Size: 表 d0 所有 block 在文件中占用的大小为 93.65 KB
|
||||
Total_Size:表 d0 所有 block 在文件中占用的大小为 93.65 KB
|
||||
|
||||
Average_size: 平均每个 block 在文件中占用的空间大小为 18.73 KB
|
||||
Average_size:平均每个 block 在文件中占用的空间大小为 18.73 KB
|
||||
|
||||
Compression_Ratio:数据压缩率 23.98%
|
||||
|
||||
|
@ -232,7 +239,7 @@ Compression_Ratio: 数据压缩率 23.98%
|
|||
|
||||
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
|
||||
|
||||
Total_Rows: 统计表 d0 的存储在磁盘上行数 20000 行(该数值仅供参考,不是精确的行数。获得精确的行数需要使用 count 函数)
|
||||
Total_Rows:统计表 d0 存储在磁盘上的行数为 20000 行(该数值仅供参考,不是精确的行数。获得精确的行数需要使用 count 函数)
|
||||
|
||||
Inmem_Rows:存储在写缓存中的数据行数(没有落盘),0 行表示内存缓存中没有数据
|
||||
|
||||
|
@ -247,7 +254,7 @@ Average_Rows: 每个 BLOCK 中的平均行数,此时为 4000 行
|
|||
|
||||
_block_dist: Total_Tables=[1] Total_Files=[2] Total_Vgroups=[1]
|
||||
|
||||
Total_Tables: 子表的个数,这里为 1
|
||||
Total_Tables:子表的个数,这里为 1
|
||||
|
||||
Total_Files:表数据被分别保存的数据文件数量,这里是 2 个文件
|
||||
|
||||
|
@ -281,7 +288,7 @@ Query OK, 24 row(s) in set (0.002444s)
|
|||
</code></pre>
|
||||
</details>
|
||||
|
||||
上面是块中包含数据行数的块儿分布情况图,这里的 0100 0299 0498 … 表示的是每个块中包含的数据行数,上面的意思就是这个表的 5 个块,分布在 3483 ~3681 行的块有 1 个,占整个块的 20%,分布在 3881 ~ 4096(最大行数)的块数为 4 个,占整个块的 80%, 其它区域内分布块数为 0。
|
||||
上面是块中包含数据行数的块分布情况图,这里的 `0100 0299 0498 …` 表示的是每个块中包含的数据行数,上面的意思就是这个表的 5 个块,分布在 `3483 ~ 3681` 行的块有 1 个,占整个块的 20%,分布在 `3881 ~ 4096`(最大行数)的块数为 4 个,占整个块的 80%,其它区域内分布块数为 0。
|
||||
|
||||
需要注意,这里只会显示 data 文件中数据块的信息,stt 文件中的数据的信息不会被显示。
|
||||
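上述各项指标均来自同一条语句的输出。以本节示例表 d0 为例(假设该表已存在),完整的查询方式如下:

```sql
SHOW TABLE DISTRIBUTED d0;
```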
|
||||
|
@ -306,9 +313,10 @@ SHOW TOPICS;
|
|||
|
||||
```sql
|
||||
SHOW TRANSACTIONS;
|
||||
SHOW TRANSACTION [transaction_id];
|
||||
```
|
||||
|
||||
显示当前系统中正在执行的事务的信息(该事务仅针对除普通表以外的元数据级别)
|
||||
显示当前系统中正在执行的所有或者某一个事务的信息(该事务仅针对除普通表以外的元数据级别)。
|
||||
|
||||
## SHOW USERS
|
||||
|
||||
|
@ -321,11 +329,12 @@ SHOW USERS;
|
|||
## SHOW CLUSTER VARIABLES(3.0.1.6 之前为 SHOW VARIABLES)
|
||||
|
||||
```sql
|
||||
SHOW CLUSTER VARIABLES;
|
||||
SHOW DNODE dnode_id VARIABLES;
|
||||
SHOW CLUSTER VARIABLES [like pattern];
|
||||
SHOW DNODE dnode_id VARIABLES [like pattern];
|
||||
```
|
||||
|
||||
显示当前系统中各节点需要相同的配置参数的运行值,也可以指定 DNODE 来查看其配置参数。
|
||||
可使用 like pattern 根据 name 进行过滤。
|
||||
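例如,下面的示意语句(dnode 编号与参数名模式均为假设)查看 dnode 1 上名称包含 timezone 的配置参数:

```sql
SHOW DNODE 1 VARIABLES LIKE '%timezone%';
```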
|
||||
## SHOW VGROUPS
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
|
|||
|
||||
`SYSINFO` 表示该用户是否能够查看系统信息。`1` 表示可以查看,`0` 表示无权查看。系统信息包括服务配置、dnode、vnode、存储等信息。缺省值为 `1`。
|
||||
|
||||
`CREATEDB` 表示该用户是否能够创建数据库。`1` 表示可以创建,`0` 表示无权创建。缺省值为 `0`。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||
`CREATEDB` 表示该用户是否能够创建数据库。`1` 表示可以创建,`0` 表示无权创建。缺省值为 `0`。从企业版 v3.3.2.0 开始支持。
|
||||
|
||||
在下面的示例中,我们创建一个密码为 `abc123!@#` 且可以查看系统信息的用户。
|
||||
|
||||
|
@ -76,12 +76,12 @@ alter_user_clause: {
|
|||
}
|
||||
```
|
||||
|
||||
- PASS: 修改密码,后跟新密码
|
||||
- ENABLE: 启用或禁用该用户,`1` 表示启用,`0` 表示禁用
|
||||
- SYSINFO: 允许或禁止查看系统信息,`1` 表示允许,`0` 表示禁止
|
||||
- CREATEDB: 允许或禁止创建数据库,`1` 表示允许,`0` 表示禁止。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||
- PASS:修改密码,后跟新密码
|
||||
- ENABLE:启用或禁用该用户,`1` 表示启用,`0` 表示禁用
|
||||
- SYSINFO:允许或禁止查看系统信息,`1` 表示允许,`0` 表示禁止
|
||||
- CREATEDB:允许或禁止创建数据库,`1` 表示允许,`0` 表示禁止。从企业版 v3.3.2.0 开始支持。
|
||||
|
||||
下面的示例禁用了名为 `test` 的用户:
|
||||
下面的示例禁用了名为 `test` 的用户。
|
||||
|
||||
```sql
|
||||
taos> alter user test enable 0;
|
||||
|
|
|
@ -3,7 +3,7 @@ toc_max_heading_level: 4
|
|||
title: 权限管理
|
||||
---
|
||||
|
||||
TDengine 中的权限管理分为[用户管理](../user)、数据库授权管理以及消息订阅授权管理,本节重点说明数据库授权和订阅授权。
|
||||
TDengine 中的权限管理分为 [用户管理](../user)、数据库授权管理以及消息订阅授权管理,本节重点说明数据库授权和订阅授权。
|
||||
授权管理仅在 TDengine 企业版中可用,请联系 TDengine 销售团队。授权语法在社区版可用,但不起作用。
|
||||
|
||||
## 数据库访问授权
|
||||
|
@ -33,19 +33,18 @@ priv_level : {
|
|||
对数据库的访问权限包含读和写两种权限,它们可以被分别授予,也可以被同时授予。
|
||||
|
||||
说明
|
||||
|
||||
- priv_level 格式中 "." 之前为数据库名称, "." 之后为表名称,意思为表级别的授权控制。如果 "." 之后为 "\*" ,意为 "." 前所指定的数据库中的所有表
|
||||
- "dbname.\*" 意思是名为 "dbname" 的数据库中的所有表
|
||||
- "\*.\*" 意思是所有数据库名中的所有表
|
||||
- priv_level 格式中 "." 之前为数据库名称,"." 之后为表名称,意思为表级别的授权控制。如果 "." 之后为 "\*",意为 "." 前所指定的数据库中的所有表
|
||||
- "dbname.\*" 意思是名为 "dbname" 的数据库中的所有表
|
||||
- "\*.\*" 意思是所有数据库名中的所有表
|
||||
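按照上述 priv_level 的书写规则,一个最简的授权示意如下(库名 db1、用户名 user1 均为假设):

```sql
-- 授予 user1 对 db1 中所有表的读权限
GRANT READ ON db1.* TO user1;
```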
|
||||
### 数据库权限说明
|
||||
|
||||
对 root 用户和普通用户的权限的说明如下表
|
||||
|
||||
| 用户 | 描述 | 权限说明 |
|
||||
| -------- | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 超级用户 | 只有 root 是超级用户 | DB 外部 所有操作权限,例如user、dnode、udf、qnode等的CRUD DB 权限,包括 创建 删除 更新,例如修改 Option,移动 Vgruop等 读 写 Enable/Disable 用户 |
|
||||
| 普通用户 | 除 root 以外的其它用户均为普通用户 | 在可读的 DB 中,普通用户可以进行读操作 select describe show subscribe 在可写 DB 的内部,用户可以进行写操作: 创建、删除、修改 超级表 创建、删除、修改 子表 创建、删除、修改 topic 写入数据 被限制系统信息时,不可进行如下操作 show dnode、mnode、vgroups、qnode、snode 修改用户包括自身密码 show db时只能看到自己的db,并且不能看到vgroups、副本、cache等信息 无论是否被限制系统信息,都可以 管理 udf 可以创建 DB 自己创建的 DB 具备所有权限 非自己创建的 DB ,参照读、写列表中的权限 |
|
||||
| 用户 | 描述 | 权限说明 |
|
||||
| -------- | --------------------------------- | -- |
|
||||
| 超级用户 | 只有 root 是超级用户 |<br/>DB 外部:所有操作权限,例如 user、dnode、udf、qnode 等的 CRUD <br/>DB 权限:包括创建、删除、修改 Option、移动 Vgroup、读、写、Enable/Disable 用户 |
|
||||
| 普通用户 | 除 root 以外的其它用户均为普通用户 | <br/>在可读的 DB 中:普通用户可以进行读操作 select、describe、show、subscribe <br/>在可写 DB 的内部,用户可以进行写操作:创建、删除、修改超级表,创建、删除、修改子表,创建、删除、修改 topic,写入数据 <br/>被限制系统信息时,不可进行如下操作:show dnode、mnode、vgroups、qnode、snode,修改用户包括自身密码,`show db` 时只能看到自己的 db,并且不能看到 vgroups、副本、cache 等信息 <br/>无论是否被限制系统信息,都可以管理 udf,可以创建 DB,自己创建的 DB 具备所有权限,非自己创建的 DB,参照读、写列表中的权限 |
|
||||
|
||||
## 消息订阅授权
|
||||
|
||||
|
@ -61,7 +60,7 @@ REVOKE SUBSCRIBE ON topic_name FROM user_name
|
|||
|
||||
## 基于标签的授权(表级授权)
|
||||
|
||||
从 TDengine 3.0.5.0 开始,我们支持按标签授权某个超级表中部分特定的子表。具体的 SQL 语法如下。
|
||||
从 v3.0.5.0 开始,我们支持按标签授权某个超级表中部分特定的子表。具体的 SQL 语法如下。
|
||||
|
||||
```sql
|
||||
GRANT privileges ON priv_level [WITH tag_condition] TO user_name
|
||||
|
@ -110,11 +109,11 @@ priv_level : {
|
|||
|
||||
下表列出了在不同的数据库授权和表级授权的组合下产生的实际权限。
|
||||
|
||||
| | **表无授权** | **表读授权** | **表读授权有标签条件** | **表写授权** | **表写授权有标签条件** |
|
||||
| ---------------- | ---------------- | ---------------------------------------- | ------------------------------------------------------------ | ---------------------------------------- | ---------------------------------------------------------- |
|
||||
| **数据库无授权** | 无授权 | 对此表有读权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表无权限 | 对此表有写权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有写权限,对数据库下的其他表无权限 |
|
||||
| **数据库读授权** | 对所有表有读权限 | 对所有表有读权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表有读权限 | 对此表有写权限,对所有表有读权限 | 对此表符合标签权限的子表有写权限,所有表有读权限 |
|
||||
| **数据库写授权** | 对所有表有写权限 | 对此表有读权限,对所有表有写权限 | 对此表符合标签权限的子表有读权限,对所有表有写权限 | 对所有表有写权限 | 对此表符合标签权限的子表有写权限,数据库下的其他表有写权限 |
|
||||
| | **表无授权** | **表读授权** | **表读授权有标签条件** | **表写授权** | **表写授权有标签条件** |
|
||||
| -------------- | ------------- | --------------------------------- | ------------------------------------------------- | ---------------------------------- | -------------------- |
|
||||
| **数据库无授权** | 无授权 | 对此表有读权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表无权限 | 对此表有写权限,对数据库下的其他表无权限 | 对此表符合标签权限的子表有写权限,对数据库下的其他表无权限 |
|
||||
| **数据库读授权** | 对所有表有读权限 | 对所有表有读权限 | 对此表符合标签权限的子表有读权限,对数据库下的其他表有读权限 | 对此表有写权限,对所有表有读权限 | 对此表符合标签权限的子表有写权限,所有表有读权限 |
|
||||
| **数据库写授权** | 对所有表有写权限 | 对此表有读权限,对所有表有写权限 | 对此表符合标签权限的子表有读权限,对所有表有写权限 | 对所有表有写权限 | 对此表符合标签权限的子表有写权限,数据库下的其他表有写权限 |
|
||||
|
||||
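结合上表,下面给出一个带标签条件的表级授权示意(库名 db1、超级表 stb1、标签 groupid 及其取值均为假设):

```sql
-- 仅授权 user1 读取 stb1 中 groupid 为 1 的子表
GRANT READ ON db1.stb1 WITH groupid = 1 TO user1;
```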
|
||||
## 查看用户授权
|
||||
|
@ -127,7 +126,7 @@ show user privileges
|
|||
|
||||
## 撤销授权
|
||||
|
||||
1. 撤销数据库访问的授权
|
||||
1. 撤销数据库访问的授权
|
||||
|
||||
```sql
|
||||
REVOKE privileges ON priv_level FROM user_name
|
||||
|
@ -149,7 +148,7 @@ priv_level : {
|
|||
}
|
||||
```
|
||||
|
||||
2. 撤销数据订阅的授权
|
||||
2. 撤销数据订阅的授权
|
||||
|
||||
```sql
|
||||
REVOKE privileges ON priv_level FROM user_name
|
||||
|
|
|
@ -4,10 +4,10 @@ title: 自定义函数
|
|||
description: 使用 UDF 的详细指南
|
||||
---
|
||||
|
||||
除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。
|
||||
除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入 TDengine 系统中。
|
||||
## 创建 UDF
|
||||
|
||||
用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。
|
||||
用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 mnode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。
|
||||
|
||||
在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。
|
||||
|
||||
|
@ -15,11 +15,11 @@ description: 使用 UDF 的详细指南
|
|||
```sql
|
||||
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
|
||||
```
|
||||
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
||||
- function_name:标量函数未来在 SQL 中被调用时的函数名;
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言。 如果这个从句忽略,编程语言是C语言
|
||||
- library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;
|
||||
- output_type:此函数计算结果的数据类型名称;
|
||||
- OR REPLACE:如果函数已经存在,会修改已有的函数属性。
|
||||
- function_name:标量函数未来在 SQL 中被调用时的函数名。
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持 C 语言和 Python 语言。如果这个从句忽略,编程语言是 C 语言。
|
||||
- library_path:如果编程语言是 C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是 Python,路径是包含 UDF 函数实现的 Python 文件路径。这个路径需要用英文单引号或英文双引号括起来。
|
||||
- output_type:此函数计算结果的数据类型名称。
|
||||
|
||||
例如,如下语句可以把 libbitand.so 创建为系统中可用的 UDF:
|
||||
|
||||
|
@ -36,19 +36,19 @@ CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_typ
|
|||
```sql
|
||||
CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ] [LANGUAGE 'C|Python'];
|
||||
```
|
||||
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
||||
- OR REPLACE:如果函数已经存在,会修改已有的函数属性。
|
||||
- function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致。
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言(v3.7+)。
|
||||
- library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;;
|
||||
- LANGUAGE 'C|Python':函数编程语言,目前支持 C 语言和 Python 语言(v3.7+)。
|
||||
- library_path:如果编程语言是 C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 `.so` 文件)。如果编程语言是 Python,路径是包含 UDF 函数实现的 Python 文件路径。这个路径需要用英文单引号或英文双引号括起来。
|
||||
- output_type:此函数计算结果的数据类型名称。
|
||||
- buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
|
||||
|
||||
例如,如下语句可以把 libl2norm.so 创建为系统中可用的 UDF:
|
||||
例如,如下语句可以把 libl2norm.so 创建为系统中可用的 UDF。
|
||||
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
|
||||
```
|
||||
例如,使用以下语句可以修改已经定义的 l2norm 函数的缓冲区大小为64。
|
||||
例如,使用以下语句可以修改已经定义的 l2norm 函数的缓冲区大小为 64。
|
||||
```sql
|
||||
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
|
||||
```
|
||||
|
@ -57,25 +57,26 @@ CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE
|
|||
|
||||
## 管理 UDF
|
||||
|
||||
- 删除指定名称的用户定义函数:
|
||||
- 删除指定名称的用户定义函数。
|
||||
```
|
||||
DROP FUNCTION function_name;
|
||||
```
|
||||
|
||||
- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如bit_and, l2norm
|
||||
- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如 bit_and、l2norm。
|
||||
```sql
|
||||
DROP FUNCTION bit_and;
|
||||
```
|
||||
- 显示系统中当前可用的所有 UDF:
|
||||
|
||||
- 显示系统中当前可用的所有 UDF。
|
||||
```sql
|
||||
SHOW FUNCTIONS;
|
||||
```
|
||||
|
||||
## 调用 UDF
|
||||
|
||||
在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如:
|
||||
在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。
|
||||
```sql
|
||||
SELECT bit_and(c1,c2) FROM table;
|
||||
```
|
||||
|
||||
表示对表 table 上名为 c1, c2 的数据列调用名为 bit_and 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
|
||||
表示对表 table 上名为 c1、c2 的数据列调用名为 bit_and 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
|
||||
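例如,结合 WHERE 条件的一个最简调用示意如下(表名 table 与列名 c1、c2 沿用上文示例):

```sql
-- 仅对 c1 大于 0 的行计算 bit_and
SELECT bit_and(c1, c2) FROM table WHERE c1 > 0;
```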
|
|
|
@ -4,36 +4,38 @@ title: 窗口预聚集
|
|||
description: 窗口预聚集使用说明
|
||||
---
|
||||
|
||||
在大数据量场景下, 经常需要查询某段时间内的汇总结果, 当历史数据变多或者时间范围变大时, 查询时间也会相应增加. 通过预聚集的方式可以将计算结果提前存储下来, 后续查询可以直接读取聚集结果, 而不需要扫描原始数据, 如当前Block内的SMA (Small Materialized Aggregates)信息.
|
||||
Block内的SMA信息粒度较小, 若查询时间范围是日,月甚至年时, Block的数量将会很多, 因此TSMA (Time-Range Small Materialized Aggregates)支持用户指定时间窗口进行预聚集. 通过对固定时间窗口内的数据进行预计算, 并将计算结果存储下来, 查询时通过查询预计算结果以提高查询性能。
|
||||
在大数据量场景下,经常需要查询某段时间内的汇总结果,当历史数据变多或者时间范围变大时,查询时间也会相应增加。通过预聚集的方式可以将计算结果提前存储下来,后续查询可以直接读取聚集结果,而不需要扫描原始数据,如当前 Block 内的 SMA(Small Materialized Aggregates)信息。
|
||||
|
||||
Block 内的 SMA 信息粒度较小,若查询时间范围是日、月甚至年时,Block 的数量将会很多,因此 TSMA(Time-Range Small Materialized Aggregates)支持用户指定时间窗口进行预聚集。通过对固定时间窗口内的数据进行预计算,并将计算结果存储下来,查询时通过查询预计算结果以提高查询性能。
|
||||
|
||||

|
||||
|
||||
## 创建TSMA
|
||||
## 创建 TSMA
|
||||
|
||||
```sql
|
||||
-- 创建基于超级表或普通表的tsma
|
||||
-- 创建基于超级表或普通表的 tsma
|
||||
CREATE TSMA tsma_name ON [dbname.]table_name FUNCTION (func_name(func_param) [, ...] ) INTERVAL(time_duration);
|
||||
-- 创建基于小窗口tsma的大窗口tsma
|
||||
|
||||
-- 创建基于小窗口 tsma 的大窗口 tsma
|
||||
CREATE RECURSIVE TSMA tsma_name ON [db_name.]tsma_name1 INTERVAL(time_duration);
|
||||
|
||||
time_duration:
|
||||
number unit
|
||||
```
|
||||
|
||||
创建 TSMA 时需要指定 TSMA 名字, 表名字, 函数列表以及窗口大小. 当基于一个已经存在的 TSMA 创建新的 TSMA 时, 需要使用 `RECURSIVE` 关键字但不能指定 `FUNCTION()`, 新创建的 TSMA 已有 TSMA 拥有相同的函数列表, 且此种情况下所指定的 INTERVAL 必须至少为所基于的 TSMA 窗口长度的整数倍, 并且天不能基于2h或3h建立, 只能基于1h建立, 月也只能基于1d而非2d,3d建立。
|
||||
创建 TSMA 时需要指定 TSMA 名字、表名字、函数列表以及窗口大小。当基于一个已经存在的 TSMA 创建新的 TSMA 时,需要使用 `RECURSIVE` 关键字但不能指定 `FUNCTION()`,新创建的 TSMA 与已有 TSMA 拥有相同的函数列表,且此种情况下所指定的 INTERVAL 必须至少为所基于的 TSMA 窗口长度的整数倍,并且天不能基于 2h 或 3h 建立,只能基于 1h 建立,月也只能基于 1d 而非 2d、3d 建立。
|
||||
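按照上述规则,一个最简的创建示意如下(库名 db1、超级表 stable1、列 c1 均为假设):

```sql
-- 先创建 1h 窗口的 TSMA
CREATE TSMA tsma_1h ON db1.stable1 FUNCTION(COUNT(ts), AVG(c1)) INTERVAL(1h);
-- 基于 1h 的 TSMA 递归创建 1d 窗口的 TSMA(1d 为 1h 的整数倍)
CREATE RECURSIVE TSMA tsma_1d ON db1.tsma_1h INTERVAL(1d);
```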
|
||||
其中 TSMA 命名规则与表名字类似, 长度最大限制为表名长度限制减去输出表后缀长度, 表名长度限制为193, 输出表后缀为`_tsma_res_stb_`, TSMA 名字最大长度为178.
|
||||
其中 TSMA 命名规则与表名字类似,长度最大限制为表名长度限制减去输出表后缀长度,表名长度限制为 193,输出表后缀为 `_tsma_res_stb_`,TSMA 名字最大长度为 178。
|
||||
|
||||
TSMA只能基于超级表和普通表创建, 不能基于子表创建.
|
||||
TSMA 只能基于超级表和普通表创建,不能基于子表创建。
|
||||
|
||||
函数列表中只能指定支持的聚集函数(见下文), 并且函数参数必须为1个, 即使当前函数支持多个参数, 函数参数内必须为普通列名, 不能为标签列. 函数列表中完全相同的函数和列会被去重, 如同时创建两个avg(c1), 则只会计算一个输出. TSMA 计算时将会把所有`函数中间结果`都输出到另一张超级表中, 输出超级表还包含了原始表的所有tag列. 函数列表中函数个数最多支持创建表最大列个数(包括tag列)减去 TSMA 计算附加的四列, 分别为`_wstart`, `_wend`, `_wduration`, 以及一个新增tag列 `tbname`, 再减去原始表的tag列数. 若列个数超出限制, 会报`Too many columns`错误.
|
||||
函数列表中只能指定支持的聚集函数(见下文),并且函数参数必须为 1 个,即使当前函数支持多个参数,函数参数内必须为普通列名,不能为标签列。函数列表中完全相同的函数和列会被去重,如同时创建两个 avg(c1),则只会计算一个输出。TSMA 计算时将会把所有 `函数中间结果` 都输出到另一张超级表中,输出超级表还包含了原始表的所有 tag 列。函数列表中函数个数最多支持创建表最大列个数(包括 tag 列)减去 TSMA 计算附加的四列,分别为 `_wstart`、`_wend`、`_wduration`,以及一个新增 tag 列 `tbname`,再减去原始表的 tag 列数。若列个数超出限制,会报 `Too many columns` 错误。
|
||||
|
||||
由于TSMA输出为一张超级表, 因此输出表的行长度受最大行长度限制, 不同函数的`中间结果`大小各异, 一般都大于原始数据大小, 若输出表的行长度大于最大行长度限制, 将会报`Row length exceeds max length`错误. 此时需要减少函数个数或者将常用的函数进行分组拆分到多个TSMA中.
|
||||
由于 TSMA 输出为一张超级表,因此输出表的行长度受最大行长度限制,不同函数的 `中间结果` 大小各异,一般都大于原始数据大小,若输出表的行长度大于最大行长度限制,将会报 `Row length exceeds max length` 错误。此时需要减少函数个数或者将常用的函数进行分组拆分到多个 TSMA 中。
|
||||
|
||||
窗口大小的限制为[1m ~ 1y/12n]. INTERVAL 的单位与查询中INTERVAL子句相同, 如 a (毫秒), b (纳秒), h (小时), m (分钟), s (秒), u (微秒), d (天), w(周), n(月), y(年).
|
||||
窗口大小的限制为 [1m ~ 1y/12n]。INTERVAL 的单位与查询中 INTERVAL 子句相同,如 a(毫秒)、b(纳秒)、h(小时)、m(分钟)、s(秒)、u(微秒)、d(天)、w(周)、n(月)、y(年)。
|
||||
|
||||
TSMA为库内对象, 但名字全局唯一. 集群内一共可创建TSMA个数受参数`maxTsmaNum`限制, 参数默认值为3, 范围: [0-3]. 注意, 由于TSMA后台计算使用流计算, 因此每创建一条TSMA, 将会创建一条流, 因此能够创建的TSMA条数也受当前已经存在的流条数和最大可创建流条数限制.
|
||||
TSMA 为库内对象,但名字全局唯一。集群内一共可创建 TSMA 个数受参数 `maxTsmaNum` 限制,参数默认值为 3,范围:[0-3]。注意,由于 TSMA 后台计算使用流计算,因此每创建一条 TSMA,将会创建一条流,因此能够创建的 TSMA 条数也受当前已经存在的流条数和最大可创建流条数限制。
|
||||
|
||||
## 支持的函数列表
|
||||
| 函数 | 备注 |
|
||||
|
@ -44,65 +46,64 @@ TSMA为库内对象, 但名字全局唯一. 集群内一共可创建TSMA个数
|
|||
|first||
|
||||
|last||
|
||||
|avg||
|
||||
|count| 若想使用count(*), 则应创建count(ts)函数|
|
||||
|count| 若想使用 count(*),则应创建 count(ts) 函数|
|
||||
|spread||
|
||||
|stddev||
|
||||
|||
|
||||
|
||||
## 删除TSMA
|
||||
## 删除 TSMA
|
||||
```sql
|
||||
DROP TSMA [db_name.]tsma_name;
|
||||
```
|
||||
若存在其他TSMA基于当前被删除TSMA创建, 则删除操作报`Invalid drop base tsma, drop recursive tsma first`错误. 因此需先删除 所有Recursive TSMA.
|
||||
若存在其他 TSMA 基于当前被删除 TSMA 创建,则删除操作报 `Invalid drop base tsma, drop recursive tsma first` 错误。因此需先删除所有 Recursive TSMA。
|
||||
|
||||
## TSMA的计算
|
||||
TSMA的计算结果为与原始表相同库下的一张超级表, 此表用户不可见. 不可删除, 在`DROP TSMA`时自动删除. TSMA的计算是通过流计算完成的, 此过程为后台异步过程, TSMA的计算结果不保证实时性, 但可以保证最终正确性.
|
||||
## TSMA 的计算
|
||||
TSMA 的计算结果为与原始表相同库下的一张超级表,此表用户不可见、不可删除,会在 `DROP TSMA` 时自动删除。TSMA 的计算是通过流计算完成的,此过程为后台异步过程,TSMA 的计算结果不保证实时性,但可以保证最终正确性。
|
||||
|
||||
TSMA计算时若原始子表内没有数据, 则可能不会创建对应的输出子表, 因此在count查询中, 即使配置了`countAlwaysReturnValue`, 也不会返回该表的结果.
|
||||
TSMA 计算时若原始子表内没有数据,则可能不会创建对应的输出子表,因此在 count 查询中,即使配置了 `countAlwaysReturnValue`,也不会返回该表的结果。
|
||||
|
||||
当存在大量历史数据时, 创建TSMA之后, 流计算将会首先计算历史数据, 此期间新创建的TSMA不会被使用. 数据更新删除或者过期数据到来时自动重新计算影响部分数据。 在重新计算期间 TSMA 查询结果不保证实时性。若希望查询实时数据, 可以通过在 SQL 中添加 hint `/*+ skip_tsma() */` 或者关闭参数`querySmaOptimize`从原始数据查询。
|
||||
当存在大量历史数据时,创建 TSMA 之后,流计算将会首先计算历史数据,此期间新创建的 TSMA 不会被使用。数据更新删除或者过期数据到来时自动重新计算影响部分数据。在重新计算期间 TSMA 查询结果不保证实时性。若希望查询实时数据,可以通过在 SQL 中添加 hint `/*+ skip_tsma() */` 或者关闭参数 `querySmaOptimize` 从原始数据查询。
|
||||
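例如,下面的示意查询(表名 stable1 为假设)通过 hint 强制跳过 TSMA,从原始数据计算:

```sql
SELECT /*+ skip_tsma() */ COUNT(*) FROM stable1;
```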
|
||||
## TSMA的使用与限制
|
||||
## TSMA 的使用与限制
|
||||
|
||||
客户端配置参数: `querySmaOptimize`, 用于控制查询时是否使用TSMA, `True`为使用, `False`为不使用即从原始数据查询.
|
||||
- 客户端配置参数:`querySmaOptimize`,用于控制查询时是否使用 TSMA,`True` 为使用,`False` 为不使用即从原始数据查询。
|
||||
- 客户端配置参数:`maxTsmaCalcDelay`,单位为秒,用于控制用户可以接受的 TSMA 计算延迟,若 TSMA 的计算进度与最新时间差距在此范围内,则该 TSMA 将会被使用,若超出该范围,则不使用,默认值:600(10 分钟),最小值:600(10 分钟),最大值:86400(1 天)。
|
||||
- 客户端配置参数:`tsmaDataDeleteMark`,单位毫秒,与流计算参数 `deleteMark` 一致,用于控制流计算中间结果的保存时间,默认值为 1d,最小值为 1h。因此那些距最后一条数据时间大于配置参数的历史数据将不保存流计算中间结果,因此若修改这些时间窗口内的数据,TSMA 的计算结果中将不包含更新的结果。即与查询原始数据结果将不一致。
|
||||
|
||||
客户端配置参数:`maxTsmaCalcDelay`,单位 s,用于控制用户可以接受的 TSMA 计算延迟,若 TSMA 的计算进度与最新时间差距在此范围内, 则该 TSMA 将会被使用, 若超出该范围, 则不使用, 默认值: 600(10 分钟), 最小值: 600(10 分钟), 最大值: 86400(1 天).
|
||||
### 查询时使用 TSMA
|
||||
|
||||
客户端配置参数: `tsmaDataDeleteMark`, 单位毫秒, 与流计算参数`deleteMark`一致, 用于控制流计算中间结果的保存时间, 默认值为: 1d, 最小值为1h. 因此那些距最后一条数据时间大于配置参数的历史数据将不保存流计算中间结果, 因此若修改这些时间窗口内的数据, TSMA的计算结果中将不包含更新的结果. 即与查询原始数据结果将不一致.
|
||||
已在 TSMA 中定义的 agg 函数在大部分查询场景下都可直接使用,若存在多个可用的 TSMA,优先使用大窗口的 TSMA,未闭合窗口通过查询小窗口 TSMA 或者原始数据计算。同时也有某些场景不能使用 TSMA(见下文)。不可用时整个查询将使用原始数据进行计算。
|
||||
|
||||
### 查询时使用TSMA
|
||||
未指定窗口大小的查询语句默认优先使用包含所有查询聚合函数的最大窗口 TSMA 进行数据的计算。如 `SELECT COUNT(*) FROM stable GROUP BY tbname` 将会使用包含 count(ts) 且窗口最大的 TSMA。因此若使用聚合查询频率高时,应当尽可能创建大窗口的 TSMA。
|
||||
|
||||
已在 TSMA 中定义的 agg 函数在大部分查询场景下都可直接使用, 若存在多个可用的 TSMA, 优先使用大窗口的 TSMA, 未闭合窗口通过查询小窗口TSMA或者原始数据计算。 同时也有某些场景不能使用 TSMA(见下文)。 不可用时整个查询将使用原始数据进行计算。
|
||||
指定窗口大小时即 `INTERVAL` 语句,使用最大的可整除窗口 TSMA,即窗口大小可被查询语句的 `INTERVAL`、`OFFSET`、`SLIDING` 整除的 TSMA。窗口查询中,`INTERVAL` 的窗口大小、`OFFSET` 以及 `SLIDING` 都影响能使用的 TSMA 窗口大小。因此若使用窗口查询较多时,需要考虑经常查询的窗口大小,以及 offset、sliding 大小来创建 TSMA。
|
||||
|
||||
未指定窗口大小的查询语句默认优先使用包含所有查询聚合函数的最大窗口 TSMA 进行数据的计算。 如`SELECT COUNT(*) FROM stable GROUP BY tbname`将会使用包含count(ts)且窗口最大的TSMA。因此若使用聚合查询频率高时, 应当尽可能创建大窗口的TSMA.
|
||||
|
||||
指定窗口大小时即 `INTERVAL` 语句,使用最大的可整除窗口 TSMA。 窗口查询中, `INTERVAL` 的窗口大小, `OFFSET` 以及 `SLIDING` 都影响能使用的 TSMA 窗口大小, 可整 除窗口 TSMA 即 TSMA 窗口大小可被查询语句的 `INTERVAL, OFFSET, SLIDING` 整除的窗口。因此若使用窗口查询较多时, 需要考虑经常查询的窗口大小, 以及 offset, sliding大小来创建TSMA.
|
||||
|
||||
例 1. 如 创建 TSMA 窗口大小 `5m` 一条, `10m` 一条, 查询时 `INTERVAL(30m)`, 那么优先使用 `10m` 的 TSMA, 若查询为 `INTERVAL(30m, 10m) SLIDING(5m)`, 那么仅可使用 `5m` 的 TSMA 查询。
|
||||
例如,创建 TSMA 窗口大小 `5m` 一条、`10m` 一条,查询时 `INTERVAL(30m)`,那么优先使用 `10m` 的 TSMA,若查询为 `INTERVAL(30m, 10m) SLIDING(5m)`,那么仅可使用 `5m` 的 TSMA 查询。
|
||||
|
||||
|
||||
### 查询限制
|
||||
|
||||
在开启了参数`querySmaOptimize`并且无`skip_tsma()` hint时, 以下查询场景无法使用TSMA:
|
||||
在开启了参数 `querySmaOptimize` 并且无 `skip_tsma()` hint 时,以下查询场景无法使用 TSMA。
|
||||
|
||||
- 某个TSMA 中定义的 agg 函数不能覆盖当前查询的函数列表时
|
||||
- 某个 TSMA 中定义的 agg 函数不能覆盖当前查询的函数列表时
|
||||
- 非 `INTERVAL` 的其他窗口,或者 `INTERVAL` 查询窗口大小(包括 `INTERVAL,SLIDING,OFFSET`)不是定义窗口的整数倍,如定义窗口为 2m,查询使用 5 分钟窗口,但若存在 1m 的窗口,则可以使用。
|
||||
- 查询 `WHERE` 条件中包含任意普通列(非主键时间列)的过滤。
|
||||
- `PARTITION BY` 或者 `GROUP BY` 包含任意普通列或其表达式时
|
||||
- 可以使用其他更快的优化逻辑时, 如last cache优化, 若符合last优化的条件, 则先走last 优化, 无法走last时, 再判断是否可以走tsma优化
|
||||
- 可以使用其他更快的优化逻辑时,如 last cache 优化,若符合 last 优化的条件,则先走 last 优化,无法走 last 时,再判断是否可以走 tsma 优化
|
||||
- 当前 TSMA 计算进度延迟大于配置参数 `maxTsmaCalcDelay` 时
|
||||
|
||||
下面是一些例子:
|
||||
下面是一些例子:
|
||||
|
||||
```sql
|
||||
SELECT agg_func_list [, pseudo_col_list] FROM stable WHERE exprs [GROUP/PARTITION BY [tbname] [, tag_list]] [HAVING ...] [INTERVAL(time_duration, offset) SLIDING(duration)]...;
|
||||
|
||||
-- 创建
|
||||
CREATE TSMA tsma1 ON stable FUNCTION(COUNT(ts), SUM(c1), SUM(c3), MIN(c1), MIN(c3), AVG(c1)) INTERVAL(1m);
|
||||
|
||||
-- 查询
|
||||
SELECT COUNT(*), SUM(c1) + SUM(c3) FROM stable; ---- use tsma1
|
||||
SELECT COUNT(*), AVG(c1) FROM stable GROUP/PARTITION BY tbname, tag1, tag2; --- use tsma1
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(1h); ---use tsma1
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(1h); --- use tsma1
|
||||
SELECT COUNT(*), MIN(c1), SPREAD(c1) FROM stable INTERVAL(1h); ----- can't use, spread func not defined, although SPREAD can be calculated by MIN and MAX which are defined.
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(30s); ----- can't use tsma1, time_duration not fit. Normally, query_time_duration should be multiple of create_duration.
|
||||
SELECT COUNT(*), MIN(c1) FROM stable where c2 > 0; ---- can't use tsma1, can't do c2 filtering
|
||||
|
@ -113,10 +114,10 @@ SELECT MIN(c3), MIN(c2) FROM stable INTERVAL(1m); ---- can't use tsma1, c2 is no
|
|||
CREATE RECURSIVE TSMA tsma2 on tsma1 INTERVAL(1h);
|
||||
SELECT COUNT(*), SUM(c1) FROM stable; ---- use tsma2
|
||||
SELECT COUNT(*), AVG(c1) FROM stable GROUP/PARTITION BY tbname, tag1, tag2; --- use tsma2
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(2h); ---use tsma2
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(2h); --- use tsma2
|
||||
SELECT COUNT(*), MIN(c1) FROM stable WHERE ts < '2023-01-01 10:10:10' INTERVAL(30m); --use tsma1
|
||||
SELECT COUNT(*), MIN(c1) + MIN(c3) FROM stable INTERVAL(30m); ---use tsma1
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(1h) SLIDING(30m); ---use tsma1
|
||||
SELECT COUNT(*), MIN(c1) + MIN(c3) FROM stable INTERVAL(30m); --- use tsma1
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(1h) SLIDING(30m); --- use tsma1
|
||||
SELECT COUNT(*), MIN(c1), SPREAD(c1) FROM stable INTERVAL(1h); ----- can't use tsma1 or tsma2, spread func not defined
|
||||
SELECT COUNT(*), MIN(c1) FROM stable INTERVAL(30s); ----- can't use tsma1 or tsma2, time_duration not fit. Normally, query_time_duration should be multiple of create_duration.
|
||||
SELECT COUNT(*), MIN(c1) FROM stable where c2 > 0; ---- can't use tsma1 or tsma2, can't do c2 filtering
|
||||
|
@ -124,15 +125,15 @@ SELECT COUNT(*), MIN(c1) FROM stable where c2 > 0; ---- can't use tsma1 or tsam2
|
|||
|
||||
### 使用限制
|
||||
|
||||
创建TSMA之后, 对原始超级表的操作有以下限制:
|
||||
创建 TSMA 之后,对原始超级表的操作有以下限制:
|
||||
|
||||
- 必须删除该表上的所有TSMA才能删除该表.
|
||||
- 原始表所有tag列不能删除, 也不能修改tag列名或子表的tag值, 必须先删除TSMA, 才能删除tag列.
|
||||
- 若某些列被TSMA使用了, 则这些列不能被删除, 必须先删除TSMA. 添加列不受影响, 但是新添加的列不在任何TSMA中, 因此若要计算新增列, 需要新创建其他的TSMA.
|
||||
- 必须删除该表上的所有 TSMA 才能删除该表。
|
||||
- 原始表所有 tag 列不能删除,也不能修改 tag 列名或子表的 tag 值,必须先删除 TSMA,才能删除 tag 列。
|
||||
- 若某些列被 TSMA 使用了,则这些列不能被删除,必须先删除 TSMA。添加列不受影响,但是新添加的列不在任何 TSMA 中,因此若要计算新增列,需要新创建其他的 TSMA。
|
||||
|
||||
## 查看 TSMA
|
||||
```sql
|
||||
SHOW [db_name.]TSMAS;
|
||||
SELECT * FROM information_schema.ins_tsma;
|
||||
```
|
||||
若创建时指定的较多的函数, 且列名较长, 在显示函数列表时可能会被截断(目前最大支持输出256KB).
|
||||
若创建时指定了较多的函数,且列名较长,在显示函数列表时可能会被截断(目前最大支持输出 256KB)。
|
||||
|
|
|
@ -9,15 +9,15 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| # | **元素** | **<div style={{width: 60}}>差异性</div>** | **说明** |
|
||||
| - | :------- | :-------- | :------- |
|
||||
| 1 | VARCHAR | 新增 | BINARY 类型的别名。
|
||||
| 2 | TIMESTAMP字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。
|
||||
| 3 | _ROWTS伪列 | 新增 | 表示时间戳主键。是_C0伪列的别名。
|
||||
| 4 | _IROWTS伪列 | 新增 | 用于返回 interp 函数插值结果对应的时间戳列。
|
||||
| 5 | INFORMATION_SCHEMA | 新增 | 包含各种SCHEMA定义的系统数据库。
|
||||
| 2 | TIMESTAMP 字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。
|
||||
| 3 | _ROWTS 伪列 | 新增 | 表示时间戳主键。是 _C0 伪列的别名。
|
||||
| 4 | _IROWTS 伪列 | 新增 | 用于返回 interp 函数插值结果对应的时间戳列。
|
||||
| 5 | INFORMATION_SCHEMA | 新增 | 包含各种 SCHEMA 定义的系统数据库。
|
||||
| 6 | PERFORMANCE_SCHEMA | 新增 | 包含运行信息的系统数据库。
|
||||
| 7 | 连续查询 | 废除 | 不再支持连续查询。相关的各种语法和接口废除。
|
||||
| 8 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT的各个子句均全面支持符合语法语义的混合运算。
|
||||
| 8 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT 的各个子句均全面支持符合语法语义的混合运算。
|
||||
| 9 | 标签运算 | 新增 | 在查询中,标签列可以像普通列一样参与各种运算,用于各种子句。
|
||||
| 10 | 时间线子句和时间函数用于超级表查询 | 增强 |没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 10 | 时间线子句和时间函数用于超级表查询 | 增强 | 没有 PARTITION BY 时,超级表的数据会被合并成一条时间线。
|
||||
| 11 | GEOMETRY | 新增 | 几何类型。
|
||||
|
||||
## SQL 语句变更
|
||||
|
@ -26,15 +26,15 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
|
||||
| # | **语句** | **<div style={{width: 60}}>差异性</div>** | **说明** |
|
||||
| - | :------- | :-------- | :------- |
|
||||
| 1 | ALTER ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 2 | ALTER ALL DNODES | 新增 | 修改所有DNODE的参数。
|
||||
| 3 | ALTER DATABASE | 调整 | <p>废除</p><ul><li>QUORUM:写入需要的副本确认数。3.0 版本默认行为是强一致性,且不支持修改为弱一致性。</li><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>COMP:3.0版本暂不支持修改。</li></ul><p>新增</p><ul><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:3.0.4.0版本新增,wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:3.0.4.0版本新增,wal文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 4 | ALTER STABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增</li><li>RENAME TAG:代替原CHANGE TAG子句。</li><li>COMMENT:修改超级表的注释。</li></ul>
|
||||
| 5 | ALTER TABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增</li><li>RENAME TAG:代替原CHANGE TAG子句。</li><li>COMMENT:修改表的注释。</li><li>TTL:修改表的生命周期。</li></ul>
|
||||
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增</li><li>ENABLE:启用或停用此用户。</li><li>SYSINFO:修改用户是否可查看系统信息。</li></ul>
|
||||
| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
|
||||
| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD:代替原FSYNC参数。</li><li>WAL_LEVEL:代替原WAL参数。</li><li>WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0版本新增支持带单位的设置方式。</li></ul>
|
||||
| 1 | ALTER ACCOUNT | 废除 | 2.x 中为企业版功能,3.0 不再支持。语法暂时保留,执行报 “This statement is no longer supported” 错误。
|
||||
| 2 | ALTER ALL DNODES | 新增 | 修改所有 DNODE 的参数。
|
||||
| 3 | ALTER DATABASE | 调整 | <p>废除</p><ul><li>QUORUM:写入需要的副本确认数。3.0 版本默认行为是强一致性,且不支持修改为弱一致性。</li><li>BLOCKS:VNODE使用的内存块数。3.0 版本使用 BUFFER 来表示 VNODE 写入内存池的大小。</li><li>UPDATE:更新操作的支持模式。3.0 版本所有数据库都支持部分列更新。</li><li>CACHELAST:缓存最新一行数据的模式。3.0 版本用 CACHEMODEL 代替。</li><li>COMP:3.0 版本暂不支持修改。</li></ul><p>新增</p><ul><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>WAL_FSYNC_PERIOD:代替原 FSYNC 参数。</li><li>WAL_LEVEL:代替原 WAL 参数。</li><li>WAL_RETENTION_PERIOD:v3.0.4.0 新增,WAL 文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:v3.0.4.0 新增,WAL 文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0 版本新增支持带单位的设置方式。</li></ul>
|
||||
| 4 | ALTER STABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0 版本使用 RENAME TAG 代替。<br/>新增</li><li>RENAME TAG:代替原 CHANGE TAG 子句。</li><li>COMMENT:修改超级表的注释。</li></ul>
|
||||
| 5 | ALTER TABLE | 调整 | 废除<ul><li>CHANGE TAG:修改标签列的名称。3.0 版本使用 RENAME TAG 代替。<br/>新增</li><li>RENAME TAG:代替原 CHANGE TAG 子句。</li><li>COMMENT:修改表的注释。</li><li>TTL:修改表的生命周期。</li></ul>
|
||||
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE:修改用户权限。3.0 版本使用 GRANT 和 REVOKE 来授予和回收权限。<br/>新增</li><li>ENABLE:启用或停用此用户。</li><li>SYSINFO:修改用户是否可查看系统信息。</li></ul>
|
||||
| 7 | COMPACT VNODES | 暂不支持 | 整理指定 VNODE 的数据。
|
||||
| 8 | CREATE ACCOUNT | 废除 | 2.x 中为企业版功能,3.0 不再支持。语法暂时保留,执行报 “This statement is no longer supported” 错误。
|
||||
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKS:VNODE 使用的内存块数。3.0 版本使用 BUFFER 来表示 VNODE 写入内存池的大小。</li><li>CACHE:VNODE 使用的内存块的大小。3.0 版本使用 BUFFER 来表示 VNODE 写入内存池的大小。</li><li>CACHELAST:缓存最新一行数据的模式。3.0 版本用 CACHEMODEL 代替。</li><li>DAYS:数据文件存储数据的时间跨度。3.0 版本使用 DURATION 代替。</li><li>FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0 版本使用 WAL_FSYNC_PERIOD 代替。</li><li>QUORUM:写入需要的副本确认数。3.0 版本使用 STRICT 来指定强一致还是弱一致。</li><li>UPDATE:更新操作的支持模式。3.0 版本所有数据库都支持部分列更新。</li><li>WAL:WAL 级别。3.0 版本使用 WAL_LEVEL 代替。</li></ul><p>新增</p><ul><li>BUFFER:一个 VNODE 写入内存池大小。</li><li>CACHEMODEL:表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE:表示缓存子表最近数据的内存大小。</li><li>DURATION:代替原 DAYS 参数。新增支持带单位的设置方式。</li><li>PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS:表示数据的聚合周期和保存时长。</li><li>STRICT:表示数据同步的一致性要求。</li><li>SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS:数据库中初始 VGROUP 的数目。</li><li>WAL_FSYNC_PERIOD:代替原 FSYNC 参数。</li><li>WAL_LEVEL:代替原 WAL 参数。</li><li>WAL_RETENTION_PERIOD:WAL 文件的额外保留策略,用于数据订阅。</li><li>WAL_RETENTION_SIZE:WAL 文件的额外保留策略,用于数据订阅。</li></ul><p>调整</p><ul><li>KEEP:3.0 版本新增支持带单位的设置方式。</li></ul>
|
||||
| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
|
||||
| 11 | CREATE INDEX | 新增 | 创建 SMA 索引。
|
||||
| 12 | CREATE MNODE | 新增 | 创建管理节点。
|
||||
|
@ -43,7 +43,7 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 15 | CREATE STREAM | 新增 | 创建流。
|
||||
| 16 | CREATE TABLE | 调整 | 新增表参数语法<ul><li>COMMENT:表注释。</li><li>WATERMARK:指定窗口的关闭时间。</li><li>MAX_DELAY:用于控制推送计算结果的最大延迟。</li><li>ROLLUP:指定的聚合函数,提供基于多层级的降采样聚合结果。</li><li>SMA:提供基于数据块的自定义预计算功能。</li><li>TTL:用来指定表的生命周期的参数。</li></ul>
|
||||
| 17 | CREATE TOPIC | 新增 | 创建订阅主题。
|
||||
| 18 | DROP ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 18 | DROP ACCOUNT | 废除 | 2.x 中为企业版功能,3.0 不再支持。语法暂时保留,执行报 “This statement is no longer supported” 错误。
|
||||
| 19 | DROP CONSUMER GROUP | 新增 | 删除消费组。
|
||||
| 20 | DROP INDEX | 新增 | 删除索引。
|
||||
| 21 | DROP MNODE | 新增 | 创建管理节点。
|
||||
|
@ -54,52 +54,52 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 26 | EXPLAIN | 新增 | 查看查询语句的执行计划。
|
||||
| 27 | GRANT | 新增 | 授予用户权限。
|
||||
| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。
|
||||
| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。
|
||||
| 29 | KILL STREAM | 废除 | 终止连续查询。3.0 版本不再支持连续查询,而是用更通用的流计算来代替。
|
||||
| 31 | REVOKE | 新增 | 回收用户权限。
|
||||
| 32 | SELECT | 调整 | <ul><li>SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。</li><li>DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。</li><li>JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。</li><li>FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。</li><li>WHERE后可以使用任意的标量表达式。</li><li>GROUP BY功能增强。支持任意标量表达式及其组合的分组。</li><li>SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。</li><li>新增PARTITION BY语法。替代原来的GROUP BY tags。</li></ul>
|
||||
| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
| 32 | SELECT | 调整 | <ul><li>SELECT 关闭隐式结果列,输出列均需要由 SELECT 子句来指定。</li><li>DISTINCT 功能全面支持。2.x 版本只支持对标签列去重,并且不可以和 JOIN、GROUP BY 等子句混用。</li><li>JOIN 功能增强。增加支持:JOIN 后 WHERE 条件中有 OR 条件;JOIN 后的多表运算;JOIN 后的多表 GROUP BY。</li><li>FROM 后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和 UNION ALL 混合使用;移除其他一些之前版本的语法限制。</li><li>WHERE 后可以使用任意的标量表达式。</li><li>GROUP BY 功能增强。支持任意标量表达式及其组合的分组。</li><li>SESSION 可以用于超级表。之前版本,超级表的数据会被合并成一条时间线。</li><li>STATE_WINDOW 可以用于超级表。之前版本,超级表的数据会被合并成一条时间线。</li><li>ORDER BY 功能大幅增强。不再必须和 GROUP BY 子句一起使用;不再有排序表达式个数的限制;增加支持 NULLS FIRST/LAST 语法功能;支持符合语法语义的任意表达式。</li><li>新增 PARTITION BY 语法。替代原来的 GROUP BY tags。</li></ul>
|
||||
| 33 | SHOW ACCOUNTS | 废除 | 2.x 中为企业版功能,3.0 不再支持。语法暂时保留,执行报 “This statement is no longer supported” 错误。
|
||||
| 34 | SHOW APPS |新增 | 显示接入集群的应用(客户端)信息。
|
||||
| 35 | SHOW CONSUMERS | 新增 | 显示当前数据库下所有活跃的消费者的信息。
|
||||
| 36 | SHOW DATABASES | 调整 | 3.0版本只显示数据库名。
|
||||
| 37 | SHOW FUNCTIONS | 调整 | 3.0版本只显示自定义函数名。
|
||||
| 38 | SHOW LICENCE | 新增 | 和SHOW GRANTS 命令等效。
|
||||
| 36 | SHOW DATABASES | 调整 | 3.0 版本只显示数据库名。
|
||||
| 37 | SHOW FUNCTIONS | 调整 | 3.0 版本只显示自定义函数名。
|
||||
| 38 | SHOW LICENCE | 新增 | 和 SHOW GRANTS 命令等效。
|
||||
| 39 | SHOW INDEXES | 新增 | 显示已创建的索引。
|
||||
| 40 | SHOW LOCAL VARIABLES | 新增 | 显示当前客户端配置参数的运行值。
|
||||
| 41 | SHOW MODULES | 废除 | 显示当前系统中所安装的组件的信息。
|
||||
| 42 | SHOW QNODES | 新增 | 显示当前系统中 QNODE 的信息。
|
||||
| 43 | SHOW STABLES | 调整 | 3.0版本只显示超级表名。
|
||||
| 44 | SHOW STREAMS | 调整 | 2.x版本此命令显示系统中已创建的连续查询的信息。3.0版本废除了连续查询,用流代替。此命令显示已创建的流。
|
||||
| 43 | SHOW STABLES | 调整 | 3.0 版本只显示超级表名。
|
||||
| 44 | SHOW STREAMS | 调整 | 2.x 版本此命令显示系统中已创建的连续查询的信息。3.0 版本废除了连续查询,用流代替。此命令显示已创建的流。
|
||||
| 45 | SHOW SUBSCRIPTIONS | 新增 | 显示当前数据库下的所有订阅关系。
|
||||
| 46 | SHOW TABLES | 调整 | 3.0版本只显示表名。
|
||||
| 47 | SHOW TABLE DISTRIBUTED | 新增 | 显示表的数据分布信息。代替2.x版本中的SELECT _block_dist() FROM \{ tb_name | stb_name }方式。
|
||||
| 46 | SHOW TABLES | 调整 | 3.0 版本只显示表名。
|
||||
| 47 | SHOW TABLE DISTRIBUTED | 新增 | 显示表的数据分布信息。代替 2.x 版本中的 `SELECT _block_dist() FROM tb_name` 方式。
|
||||
| 48 | SHOW TOPICS | 新增 | 显示当前数据库下的所有订阅主题。
|
||||
| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。
|
||||
| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。
|
||||
| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。
|
||||
| 50 | SHOW DNODE VARIABLES | 新增 | 显示指定 DNODE 的配置参数。
|
||||
| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中 VNODE 的信息。
|
||||
| 52 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
|
||||
| 53 | REDISTRIBUTE VGROUP | 新增 | 调整VGROUP中VNODE的分布。
|
||||
| 54 | BALANCE VGROUP | 新增 | 自动调整VGROUP中VNODE的分布。
|
||||
| 53 | REDISTRIBUTE VGROUP | 新增 | 调整 VGROUP 中 VNODE 的分布。
|
||||
| 54 | BALANCE VGROUP | 新增 | 自动调整 VGROUP 中 VNODE 的分布。
|
||||
|
||||
## SQL 函数变更
|
||||
|
||||
| # | **函数** | ** <div style={{width: 60}}>差异性</div> ** | **说明** |
|
||||
| - | :------- | :-------- | :------- |
|
||||
| 1 | TWA | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 2 | IRATE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 3 | LEASTSQUARES | 增强 | 可以用于超级表了。
|
||||
| 4 | ELAPSED | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 5 | DIFF | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 6 | DERIVATIVE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 7 | CSUM | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 8 | MAVG | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 9 | SAMPLE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 10 | STATECOUNT | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 11 | STATEDURATION | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
|
||||
| 12 | TIMETRUNCATE | 增强 | 增加ignore_timezone参数,可选是否使用,默认值为1.
|
||||
| 1 | TWA | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 2 | IRATE | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 3 | LEASTSQUARES | 增强 | 可以用于超级表。
|
||||
| 4 | ELAPSED | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 5 | DIFF | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 6 | DERIVATIVE | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 7 | CSUM | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 8 | MAVG | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 9 | SAMPLE | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 10 | STATECOUNT | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 11 | STATEDURATION | 增强 | 可以直接用于超级表。之前版本,超级表的数据会被合并成一条时间线。
|
||||
| 12 | TIMETRUNCATE | 增强 | 增加 ignore_timezone 参数,可选是否使用,默认值为 1。
|
||||
|
||||
|
||||
## SCHEMALESS 变更
|
||||
|
||||
| # | **元素** | **<div style={{width: 60}}>差异性</div>** | **说明** |
|
||||
| - | :------- | :-------- | :------- |
|
||||
| 1 | 主键ts 变更为 _ts | 变更 | schemaless自动建的列名用 _ 开头,不同于2.x。
|
||||
| 1 | 主键 ts 变更为 _ts | 变更 | schemaless 自动建的列名用 `_` 开头,不同于 2.x。
|
||||
|
|