Merge branch '3.0' of github.com:taosdata/TDengine into test/cover1

@@ -74,6 +74,11 @@ jobs:
            snappy \
            zlib

      - name: prepare install path
        run: |
          sudo mkdir -p /usr/local/lib
          sudo mkdir -p /usr/local/include

      - name: Build and install TDengine
        run: |
          mkdir debug && cd debug

@@ -0,0 +1,114 @@
name: TDengine Release Build

on:
  push:
    branches:
      - 'main'
      - '3.*'
    paths-ignore:
      - 'docs/**'
      - 'packaging/**'
      - 'tests/**'
      - '**/*.md'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build:
    name: Run on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os:
          - ubuntu-22.04
          - macos-14

    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.18

      - name: Install dependencies on Linux
        if: runner.os == 'Linux'
        run: |
          sudo apt update -y
          sudo apt install -y \
            build-essential \
            cmake \
            gawk \
            libgeos-dev \
            libjansson-dev \
            liblzma-dev \
            libsnappy-dev \
            libssl-dev \
            libz-dev \
            pkg-config \
            zlib1g

      - name: Install dependencies on macOS
        if: runner.os == 'macOS'
        run: |
          brew update
          brew install \
            argp-standalone \
            gawk \
            gflags \
            geos \
            jansson \
            openssl \
            pkg-config \
            snappy \
            zlib

      - name: Build and install TDengine
        run: |
          mkdir debug && cd debug
          cmake .. -DBUILD_TOOLS=true \
            -DBUILD_KEEPER=true \
            -DBUILD_HTTP=false \
            -DBUILD_TEST=true \
            -DWEBSOCKET=true \
            -DCMAKE_BUILD_TYPE=Release \
            -DBUILD_DEPENDENCY_TESTS=false
          make -j 4
          sudo make install
          which taosd
          which taosadapter
          which taoskeeper

      - name: Statistics ldd
        run: |
          find ${{ github.workspace }}/debug/build/lib -type f -name "*.so" -print0 | xargs -0 ldd || true
          find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ldd || true

      - name: Statistics size
        run: |
          find ${{ github.workspace }}/debug/build/lib -type f -print0 | xargs -0 ls -lhrS
          find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ls -lhrS

      - name: Start taosd
        run: |
          cp /etc/taos/taos.cfg ./
          sudo echo "supportVnodes 256" >> taos.cfg
          nohup sudo taosd -c taos.cfg &

      - name: Start taosadapter
        run: nohup sudo taosadapter &

      - name: Run tests with taosBenchmark
        run: |
          taosBenchmark -t 10 -n 10 -y
          taos -s "select count(*) from test.meters"

      - name: Clean up
        if: always()
        run: |
          if pgrep taosd; then sudo pkill taosd; fi
          if pgrep taosadapter; then sudo pkill taosadapter; fi

@@ -6,6 +6,7 @@ on:
      - 'main'
      - '3.0'
      - '3.1'
      - '3.3.6'
    paths-ignore:
      - 'packaging/**'
      - 'docs/**'

README.md

@@ -8,7 +8,7 @@
</a>
</p>

[![TDengine Test](https://github.com/taosdata/TDengine/actions/workflows/tdengine-test.yml/badge.svg?branch=3.0)](https://github.com/taosdata/TDengine/actions/workflows/tdengine-test.yml)
[![TDengine Release Build](https://github.com/taosdata/TDengine/actions/workflows/tdengine-release-build.yml/badge.svg)](https://github.com/taosdata/TDengine/actions/workflows/tdengine-release-build.yml)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[![Commit activity](https://img.shields.io/github/commit-activity/m/taosdata/tdengine)](https://github.com/feici02/TDengine/commits/main/)
<br />

@@ -82,7 +82,9 @@ For contributing/building/testing TDengine Connectors, please check the followin

# 3. Prerequisites

At the moment, the TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect to the taosd service. TDengine supports X64/ARM64 CPUs, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support building in a cross-compiling environment.
At the moment, the TDengine server supports running on Linux/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect to the taosd service. TDengine supports X64/ARM64 CPUs, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support building in a cross-compiling environment.

Starting from version 3.1.0.0, TDengine supports Windows exclusively in its Enterprise edition.

If you want to compile taosAdapter or taosKeeper, you need to install Go 1.18 or above.

@@ -127,13 +129,7 @@ brew install argp-standalone gflags pkgconfig

## 3.3 Prerequisites on Windows

<details>

<summary>Install required tools on Windows</summary>

Work in Progress.

</details>
Not available for community edition.

## 3.4 Clone the repo

@@ -212,40 +208,7 @@ If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` opt

## 4.3 Build on Windows

<details>

<summary>Detailed steps to build on Windows</summary>

If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.

```cmd
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```

If you use Visual Studio 2019 or 2017:

Please open a command window by executing "cmd.exe".
Please specify "x64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.

```cmd
mkdir debug && cd debug
"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```

Or, you can simply open a command window by clicking Windows Start -> "Visual Studio < 2019 | 2017 >" folder -> "x64 Native Tools Command Prompt for VS < 2019 | 2017 >" or "x86 Native Tools Command Prompt for VS < 2019 | 2017 >" depending on what architecture your Windows is, then execute commands as follows:

```cmd
mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
</details>
Not available for community edition.

# 5. Packaging

@@ -285,17 +248,7 @@ sudo make install

## 6.3 Install on Windows

<details>

<summary>Detailed steps to install on Windows</summary>

After building successfully, TDengine can be installed by:

```cmd
nmake install
```

</details>
Not available for community edition.

# 7. Running

@@ -360,25 +313,7 @@ If TDengine CLI connects the server successfully, welcome messages and version i

## 7.3 Run TDengine on Windows

<details>

<summary>Detailed steps to run on Windows</summary>

You can start the TDengine server on the Windows platform with the commands below:

```cmd
.\build\bin\taosd.exe -c test\cfg
```

In another terminal, use the TDengine CLI to connect to the server:

```cmd
.\build\bin\taos.exe -c test\cfg
```

The option "-c test/cfg" specifies the system configuration file directory.

</details>
Not available for community edition.

# 8. Testing

@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
  GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
  GIT_TAG 3.0
  GIT_TAG 3.3.6
  SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
  BINARY_DIR ""
  #BUILD_IN_SOURCE TRUE

@@ -151,15 +151,22 @@ Using docker-compose, configure Grafana Provisioning for automated setup, and ex

services:
  tdengine:
    image: tdengine/tdengine:3.3.0.0
    image: tdengine/tdengine:latest
    container_name: tdengine
    hostname: tdengine
    environment:
      TAOS_FQDN: tdengine
      MONITOR_FQDN: tdengine
      EXPLORER_CLUSTER: http://tdengine:6041
      TAOS_KEEPER_TDENGINE_HOST: tdengine
    volumes:
      - tdengine-data:/var/lib/taos/
    ports:
      - 6060:6060
  grafana:
    image: grafana/grafana:9.3.6
    image: grafana/grafana:latest
    volumes:
      - ./tdengine.yml/:/etc/grafana/provisioning/tdengine.yml
      - ./tdengine.yml:/etc/grafana/provisioning/tdengine.yml
      - grafana-data:/var/lib/grafana
    environment:
      # install tdengine plugin at start

@@ -169,6 +176,7 @@ Using docker-compose, configure Grafana Provisioning for automated setup, and ex
      TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
    ports:
      - 3000:3000

volumes:
  grafana-data:
  tdengine-data:
@@ -532,6 +532,24 @@ These fields are present only when "windowType" is "Count".
#### Fields for Window Invalidation

Due to scenarios such as data disorder, updates, or deletions during stream computing, windows that have already been generated might be removed or their results might need to be recalculated. In such cases, a notification with the eventType "WINDOW_INVALIDATION" is sent to inform which windows have been invalidated.

For events with "eventType" as "WINDOW_INVALIDATION", the following fields are included:
1. "windowStart": A long integer timestamp representing the start time of the window.
1. "windowEnd": A long integer timestamp representing the end time of the window.

## Support for Virtual Tables in Stream Computing

Starting with v3.3.6.0, stream computing can use virtual tables (including virtual regular tables, virtual sub-tables, and virtual super tables) as data sources for computation. The syntax is identical to that for non-virtual tables.

However, because the behavior of virtual tables differs from that of non-virtual tables, the following restrictions apply when using stream computing (see the sketch after this list):

1. The schema of virtual regular tables/virtual sub-tables involved in stream computing cannot be modified.
1. During stream computing, if the data source corresponding to a column in a virtual table is changed, the stream computation will not pick up the change; it will still read from the old data source.
1. During stream computing, if the original table corresponding to a column in a virtual table is deleted and later a new table with the same name and a column with the same name is created, the stream computation will not read data from the new table.
1. The watermark for stream computing must be 0; otherwise, an error will occur during creation.
1. If the data source for stream computing is a virtual super table, sub-tables that are added after the stream computing task starts will not participate in the computation.
1. The timestamps of different underlying tables in a virtual table may not be completely consistent; merging the data might produce null values, and interpolation is currently not supported.
1. Out-of-order data, updates, or deletions are not handled. In other words, when creating a stream, you cannot specify `ignore update 0` or `ignore expired 0`; otherwise, an error will be reported.
1. Historical data computation is not supported. That is, when creating a stream, you cannot specify `fill_history 1`; otherwise, an error will be reported.
1. The trigger modes MAX_DELAY, CONTINUOUS_WINDOW_CLOSE, and FORCE_WINDOW_CLOSE are not supported.
1. The COUNT_WINDOW type is not supported.
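
A minimal sketch of such a stream (assuming the virtual super table `meters_v` from the data-modeling docs; the stream and target-table names are hypothetical), staying within the restrictions above:

```sql
-- Hypothetical sketch: per-table one-minute average current over a virtual
-- super table. WATERMARK stays at its default of 0, and neither FILL_HISTORY
-- nor a count window is used, per the restrictions above.
CREATE STREAM avg_current_vstream
  INTO avg_current_vtb
  AS SELECT _wstart, AVG(current) AS avg_current
     FROM meters_v
     PARTITION BY tbname
     INTERVAL(1m);
```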

@@ -558,6 +558,20 @@ This document details the server error codes that may be encountered when using
| 0x80004017 | Invalid status, please subscribe topic first | tmq status invalid | Data was polled directly without calling subscribe first |
| 0x80004100 | Stream task not exist | The stream computing task does not exist | Check the server-side error logs |

## TDgpt

| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
| ---------- | --------------------- | -------------------------------------------------------------------------------- | ------------------------------ |
| 0x80000440 | Analysis service response is NULL | The response content is empty | Check taosanode.app.log for detailed response information |
| 0x80000441 | Analysis service can't access | The service is not working correctly, or the network is broken | Check the status of taosanode and the network |
| 0x80000442 | Analysis algorithm is missing | The algorithm to use for the analysis is not specified | Add the "algo" parameter in the forecast function or anomaly_window clause |
| 0x80000443 | Analysis algorithm not loaded | The specified algorithm is not available | Check the specified algorithm |
| 0x80000444 | Analysis invalid buffer type | The buffered data type is invalid | Check taosanode.app.log for more details |
| 0x80000445 | Analysis failed since anode return error | The anode responded with an error message | Check taosanode.app.log for more details |
| 0x80000446 | Analysis failed since too many input rows for anode | Too many rows of input data | Reduce the input data to below the threshold |
| 0x80000447 | white-noise data not processed | White-noise data is not processed | Skip the white-noise check or use different input data |
| 0x80000448 | Analysis internal error, not processed | An internal error occurred | Check taosanode.app.log for more details |


## virtual table

@@ -570,4 +584,4 @@ This document details the server error codes that may be encountered when using
| 0x80006204 | Virtual table not support decimal type | A virtual table was created using the decimal type | Create the virtual table without the decimal type |
| 0x80006205 | Virtual table not support in STMT query and STMT insert | A virtual table was used in an STMT query or insert | Do not use virtual tables in STMT queries or inserts |
| 0x80006206 | Virtual table not support in Topic | A virtual table was used in a topic | Do not use virtual tables in topics |
| 0x80006206 | Virtual super table query not support origin table from different databases | A virtual super table's child tables have origin tables from different databases | Make sure a virtual super table's child tables' origin tables come from the same database |
| 0x80006207 | Virtual super table query not support origin table from different databases | A virtual super table's child tables have origin tables from different databases | Make sure a virtual super table's child tables' origin tables come from the same database |
@@ -182,7 +182,7 @@ def test_json_to_taos(consumer: Consumer):
                       'voltage': 105,
                       'phase': 0.02027, }),
                   partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
                   serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
                   serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
    ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
                   value=json.dumps({'table_name': 'd1',
                                     'ts': '2022-12-06 15:13:39.643',
@@ -190,7 +190,7 @@ def test_json_to_taos(consumer: Consumer):
                       'voltage': 102,
                       'phase': 0.02027, }),
                   partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
                   serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
                   serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
        ]
    ]

@@ -203,11 +203,11 @@ def test_line_to_taos(consumer: Consumer):
        ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
                       value="d0 values('2023-01-01 00:00:00.001', 3.49, 109, 0.02737)".encode('utf-8'),
                       partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
                       serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
                       serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
        ConsumerRecord(checksum=None, headers=None, offset=1, key=None,
                       value="d1 values('2023-01-01 00:00:00.002', 6.19, 112, 0.09171)".encode('utf-8'),
                       partition=1, topic='test', serialized_key_size=None, serialized_header_size=None,
                       serialized_value_size=None, timestamp=time.time(), timestamp_type=None),
                       serialized_value_size=None, timestamp=time.time(), timestamp_type=None, leader_epoch=0),
        ]
    ]
    consumer._line_to_taos(messages=records)
@@ -77,6 +77,22 @@ toc_max_heading_level: 4

![Data model](./classification-smart-meters-data-model.png "Data model")

### Virtual Tables

The "one table per device" design solves most time-series data management and analysis problems in industrial and IoT scenarios, but in more complex scenarios it is challenged by device complexity. The root cause is that a device cannot simply be described or managed by one data collection point or one group of them, while business analysis often needs to combine data from multiple collection points or groups. Taking a car or a wind turbine as an example, the whole device contains a very large number of sensors (data collection points) whose outputs and sampling frequencies vary widely. A super table can only describe one kind of sensor, so when data from multiple sensors must be combined for analysis, this can only be done through multi-level join queries, which often causes usability and performance problems.

To solve this problem, TDengine introduces the virtual table (VTable). A virtual table stores no actual data but can be used for analysis and computation; its data comes from other sub-tables and regular tables that actually store data, generated by sorting, aligning, and merging the different columns by timestamp. Like real tables, virtual tables are divided into virtual super tables, virtual sub-tables, and virtual regular tables. A virtual super table can be the complete data set of one device or of one group needed for analysis, and each virtual sub-table can reference the same or different columns as needed, so it can be defined flexibly according to business needs, achieving a "thousand tables, thousand faces" effect. Virtual tables cannot have data written to or deleted from them; for queries they behave essentially like real tables, and any query on virtual super tables, virtual sub-tables, or virtual regular tables is supported. The only difference is that a virtual table's data is generated dynamically at query time: only the columns referenced in a query are merged into the virtual table, so the same virtual table may present different data in different queries.

The main features of virtual super tables include:
1. Column selection and composition <br />
   Users can select specified columns from multiple source tables and combine them as needed into one virtual table, forming a unified data view.
2. Timestamp-based alignment <br />
   Data is aligned by timestamp. If multiple tables have data at the same timestamp, the values of the corresponding columns are combined into one row; if some tables have no data at that timestamp, the corresponding columns are filled with NULL.
3. Dynamic updates <br />
   A virtual table updates automatically as the source tables' data changes, ensuring data freshness. Virtual tables need no actual storage; computation happens dynamically at generation time.

By introducing virtual tables, TDengine can manage larger and more complex device data with ease. No matter how each collection point is modeled (single column or multiple columns), and no matter whether the data of these collection points is distributed across one or more databases, data sources can be specified arbitrarily across databases and tables by defining virtual sub-tables, and cross-device, cross-analysis aggregation can be done through virtual super tables. "One table per device" thus finally becomes a full reality.

### Databases

A database in TDengine is a collection used to manage a group of tables. TDengine allows one running instance to contain multiple databases, each of which can be configured with its own storage strategy. Since different kinds of data collection points usually have different data characteristics, such as sampling frequency, retention period, replica count, and data block size, it is recommended to create super tables with different data characteristics in different databases, so that TDengine works at maximum efficiency in every scenario.

@@ -85,7 +101,7 @@ toc_max_heading_level: 4

### Timestamps

Timestamps play a crucial role in time-series data processing, and the issue becomes even more complicated when applications need to access the database from multiple time zones. Before diving into how TDengine handles timestamps and time zones, let us first introduce a few basic concepts.
Timestamps play a crucial role in time-series data processing, and the issue becomes even more complicated when applications need to access the database from multiple time zones. Before diving into how TDengine handles timestamps and time zones, a few basic concepts are introduced first.
- Local date-time: the local time of a particular region, usually represented as a string in the format yyyy-MM-dd hh:mm:ss.SSS. This representation carries no time zone information, e.g., "2021-07-21 12:00:00.000".
- Time zone: the standard time of different geographic locations on Earth. Universal Time Coordinated (UTC), or Greenwich time, is the international time standard; other time zones are usually expressed as offsets from UTC, e.g., "UTC+8" for the East Eight zone. UTC timestamp: the number of milliseconds elapsed since the UNIX epoch (i.e., 00:00 on January 1, 1970 UTC). For example, "1700000000000" corresponds to the date-time "2023-11-14 22:13:20 (UTC+0)". When TDengine stores time-series data, it actually stores UTC timestamps. When writing data, TDengine handles timestamps in the following two ways.
- RFC-3339 format: with this format, TDengine can correctly parse time strings carrying time zone information into UTC timestamps. For example, "2018-10-03T14:38:05.000+08:00" is converted to a UTC timestamp (a sketch follows).
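
As a minimal illustration (assuming a smart-meter sub-table d1001 with columns ts, current, voltage, and phase), the two hypothetical inserts below store the same UTC timestamp, because the RFC-3339 string carries an explicit +08:00 offset:

```sql
-- Hypothetical sketch: both rows store the same UTC timestamp.
INSERT INTO d1001 VALUES ('2018-10-03T14:38:05.000+08:00', 10.3, 219, 0.31);
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31);
```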

@@ -95,7 +111,7 @@ toc_max_heading_level: 4

## Data Modeling

This section uses smart meters as an example to briefly introduce the basic operations of creating databases, super tables, and tables with SQL in TDengine.
Taking smart meters as the example, this section introduces the basic operations of creating databases, super tables, and tables with SQL in TDengine.

### Creating a Database

@@ -215,3 +231,177 @@ TDengine supports flexible data model design, including multi-column and single-column models
Although TDengine recommends the multi-column model, which is usually better in write and storage efficiency, the single-column model can be more suitable in certain scenarios. For example, when the kinds of quantities collected at a data collection point change frequently, the multi-column model would require frequent changes to the super table's schema, increasing application complexity. In that case the single-column model simplifies application design and management, since it allows each measured quantity's super table to be managed and extended independently.

In short, TDengine offers flexible data model options; users can choose the most suitable model for their actual needs and scenarios, to optimize performance and keep complexity manageable.

### Creating Virtual Tables

Whichever model you choose, single-column or multi-column, TDengine can perform cross-table computation through virtual tables. Taking smart meters as the example, two usage scenarios for virtual tables are introduced here:

1. Single-source multi-dimensional time-series aggregation
2. Cross-source metric comparison analysis

#### Single-Source Multi-Dimensional Time-Series Aggregation
In this scenario, "single source" means not a single physical table but multiple single-column time-series tables belonging to the **same data collection point**. The data was split into multiple single-column tables due to business needs or other constraints, yet it stays logically consistent through device tags and a common time base. The virtual table's role here is to restore the collection point's "vertically" split data back to its complete "horizontal" state.
For example, say the single-column model was used at modeling time, creating three super tables for the three quantities current, voltage, and phase. In this scenario, a virtual table can aggregate these three quantities into one table for unified querying and analysis.

The SQL for creating the single-column-model super tables is as follows:

```sql

CREATE STABLE current_stb (
  ts timestamp,
  current float
) TAGS (
  device_id varchar(64),
  location varchar(64),
  group_id int
);

CREATE STABLE voltage_stb (
  ts timestamp,
  voltage int
) TAGS (
  device_id varchar(64),
  location varchar(64),
  group_id int
);

CREATE STABLE phase_stb (
  ts timestamp,
  phase float
) TAGS (
  device_id varchar(64),
  location varchar(64),
  group_id int
);
```

Suppose there are four devices, d1001, d1002, d1003, and d1004. Create sub-tables for each device's current, voltage, and phase with the following SQL:

```sql
create table current_d1001 using current_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
create table current_d1002 using current_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
create table current_d1003 using current_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
create table current_d1004 using current_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);

create table voltage_d1001 using voltage_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
create table voltage_d1002 using voltage_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
create table voltage_d1003 using voltage_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
create table voltage_d1004 using voltage_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);

create table phase_d1001 using phase_stb(device_id, location, group_id) tags("d1001", "California.SanFrancisco", 2);
create table phase_d1002 using phase_stb(device_id, location, group_id) tags("d1002", "California.SanFrancisco", 3);
create table phase_d1003 using phase_stb(device_id, location, group_id) tags("d1003", "California.LosAngeles", 3);
create table phase_d1004 using phase_stb(device_id, location, group_id) tags("d1004", "California.LosAngeles", 2);
```

A virtual super table can then aggregate the three quantities into one table. The SQL for creating the virtual super table is:

```sql
CREATE STABLE meters_v (
  ts timestamp,
  current float,
  voltage int,
  phase float
) TAGS (
  location varchar(64),
  group_id int
) VIRTUAL 1;
```

Also create virtual sub-tables for the four devices d1001, d1002, d1003, and d1004, with the following SQL:

```sql
CREATE VTABLE d1001_v (
  current from current_d1001.current,
  voltage from voltage_d1001.voltage,
  phase from phase_d1001.phase
)
USING meters_v
TAGS (
  "California.SanFrancisco",
  2
);

CREATE VTABLE d1002_v (
  current from current_d1002.current,
  voltage from voltage_d1002.voltage,
  phase from phase_d1002.phase
)
USING meters_v
TAGS (
  "California.SanFrancisco",
  3
);

CREATE VTABLE d1003_v (
  current from current_d1003.current,
  voltage from voltage_d1003.voltage,
  phase from phase_d1003.phase
)
USING meters_v
TAGS (
  "California.LosAngeles",
  3
);

CREATE VTABLE d1004_v (
  current from current_d1004.current,
  voltage from voltage_d1004.voltage,
  phase from phase_d1004.phase
)
USING meters_v
TAGS (
  "California.LosAngeles",
  2
);
```

Taking device d1001 as the example, suppose its current, voltage, and phase data are as follows:

![data-model](./pic/virtual-table-data-model.png "Data of tables current_d1001, voltage_d1001, and phase_d1001")

The data in virtual table d1001_v is then:

| Timestamp | Current | Voltage | Phase |
|:--------------:|:-------:|:---------:|:-------:|
| 1538548685000 | 10.3 | 219 | 0.31 |
| 1538548695000 | 12.6 | 218 | 0.33 |
| 1538548696800 | 12.3 | 221 | 0.31 |
| 1538548697100 | 12.1 | 220 | NULL |
| 1538548697200 | NULL | NULL | 0.32 |
| 1538548697700 | 11.8 | NULL | NULL |
| 1538548697800 | NULL | 222 | 0.33 |

#### Cross-Source Metric Comparison Analysis

In cross-source metric comparison analysis, "cross-source" means the data comes from **different data collection points**. Quantities with comparable semantics are extracted from different collection points, aligned and merged by timestamp through a virtual table, and then compared.
For example, current data from different devices can be aggregated into one virtual table for comparative analysis of current.

Taking the analysis of the current data of the four devices d1001, d1002, d1003, and d1004 as the example, the virtual table is created as follows:

```sql
CREATE VTABLE current_v (
  ts timestamp,
  d1001_current float from current_d1001.current,
  d1002_current float from current_d1002.current,
  d1003_current float from current_d1003.current,
  d1004_current float from current_d1004.current
);
```

Suppose the current data of d1001, d1002, d1003, and d1004 is as follows:

![data](./pic/virtual-table-data.png "Current data of d1001, d1002, d1003, and d1004")

The data in virtual table current_v is then:

| Timestamp | d1001_current | d1002_current | d1003_current | d1004_current |
|:--------------:|:-------------:|:-------------:|:-------------:|:-------------:|
| 1538548685000 | 10.3 | 11.7 | 11.2 | 12.4 |
| 1538548695000 | 12.6 | 11.9 | 10.8 | 11.3 |
| 1538548696800 | 12.3 | 12.4 | 12.3 | 10.1 |
| 1538548697100 | 12.1 | NULL | 11.1 | NULL |
| 1538548697200 | NULL | 12.2 | NULL | 11.7 |
| 1538548697700 | 11.8 | 11.4 | NULL | NULL |
| 1538548697800 | NULL | NULL | 12.1 | 12.6 |

@@ -9,14 +9,14 @@ import wndata from './pic/white-noise-data.png'
### Analysis Workflow
Time-series data needs preprocessing before analysis. To lighten the load on the analysis algorithms, TDgpt preprocesses the data before handing it to a specific algorithm for analysis; the overall workflow is shown in the figure below.

<img src={activity} width="560" alt="Preprocessing workflow" />
<img src={activity} width="320" alt="Preprocessing workflow" />

TDgpt first runs a white-noise check on the input data. Once the check passes, for forecasting the (historical) input data is additionally resampled and timestamp-aligned (anomaly detection skips the resampling and alignment steps).
After preprocessing, the forecast or anomaly detection itself is performed. Preprocessing is not part of the forecasting or anomaly-detection logic.

### White-Noise Check

<img src={wndata} width="430" alt="white-noise-data"/>
<img src={wndata} width="344" alt="white-noise-data"/>

White-noise time-series data can simply be thought of as a time series of random numbers (such as the normally distributed random sequence shown above). A time series of random numbers has no analytical value and is therefore returned directly. The white-noise check uses the classic `Ljung-Box` statistic, whose computation traverses the whole input series. If you are certain the input series is not white noise, you can add the parameter `wncheck=0` to force the platform to skip the check and save compute.
TDgpt does not currently provide a standalone white-noise detection function for time series.
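
For instance, a minimal sketch that reuses the table `foo` from the forecasting section below and skips the check explicitly:

```sql
-- Minimal sketch: skip the white-noise check for a series known not to be noise.
SELECT _frowts, FORECAST(i32, "algo=arima,wncheck=0") FROM foo;
```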

@@ -3,19 +3,18 @@ title: Forecasting Algorithms
description: Forecasting algorithms
---

import fc_result from '../pic/fc.png';
import fc_result_figure from '../pic/fc-result.png';
import fc_result from '../pic/fc-result.png';

Time-series forecasting takes a continuous period of time-series data as its input and predicts the trend of the series over the next continuous time range. The number of output (forecast) data points can be specified by the user, so the number of result rows is not fixed. TDengine therefore provides time-series forecasting through the new SQL function `FORECAST`. The base data (the historical time series used for forecasting) is the function's input, and the forecast result is its output. The `FORECAST` function lets users invoke the forecasting services offered by the Anode.

In the following sections, the time-series table `foo` is used as the example to show how the forecasting and anomaly-detection algorithms are used; the schema of `foo` is:

| Column | Type      | Description                                  |
| ------ | --------- | -------------------------------------------- |
| ts     | timestamp | primary timestamp column                     |
| i32    | int32     | 4-byte integer, the device's measured metric |

```bash
```sql
taos> select * from foo;
          ts          |    i32    |
========================================
@@ -30,6 +29,7 @@ taos> select * from foo;
```

### Syntax

```SQL
FORECAST(column_expr, option_expr)

@@ -42,21 +42,21 @@ algo=expr1
[,start=start_ts_val]
[,expr2]
"}

```

1. `column_expr`: the time-series column to forecast. Only numeric-type columns are supported.
2. `options`: the parameters of the forecast function, a string in which the algorithm and its parameters are given as comma-separated K=V pairs. The strings need no single quotes, double quotes, or escape characters, and must not contain Chinese or other wide characters. Forecasting supports the control parameters `conf`, `every`, `rows`, and `start`, with the following meanings:

### Parameter Description

| Parameter | Meaning                                                 | Default                                                         |
| --------- | ------------------------------------------------------- | --------------------------------------------------------------- |
| algo      | forecasting algorithm to use                             | holtwinters                                                      |
| wncheck   | white-noise data check                                   | 1 by default; 0 disables the check                               |
| conf      | confidence interval of the forecast, in [0, 100]         | 95                                                               |
| every     | sampling interval of the forecast data                   | the sampling interval of the input data                          |
| start     | start timestamp of the forecast results                  | the last timestamp of the input data plus one sampling interval  |
| rows      | number of forecast records                               | 10                                                               |

1. Forecast queries add three pseudo-columns: `_FROWTS`, the forecast result's timestamp; `_FLOW`, the lower bound of the confidence interval; and `_FHIGH`, the upper bound of the confidence interval. For algorithms without confidence intervals, the interval equals the forecast value.
2. Changing the parameter `START` changes the start time of the returned forecast results; it does not change the forecast values, only their start time.

@@ -74,7 +74,8 @@ FROM foo;
SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10,wncheck=0")
FROM foo;
```

```sql
taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
        _flow |       _fhigh |          _frowts | forecast(i32) |
========================================================================================
@@ -90,8 +91,8 @@ taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
 -1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.000 |            69 |
```

## Built-in Forecasting Algorithms

- [arima](./02-arima.md)
- [holtwinters](./03-holtwinters.md)
- CES (Complex Exponential Smoothing)

@@ -111,6 +112,7 @@ taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
- TimesNet

## Algorithm Effectiveness Evaluation Tool

TDgpt ships with `analytics_compare`, a tool for evaluating forecast effectiveness. Invoked with suitable parameters, it uses data stored in TDengine for backtesting and evaluates how effective different forecasting algorithms are, or one algorithm under different parameters or trained models. Effectiveness is judged by the `MSE` and `MAE` metrics; the `MAPE` metric will be added later.

```ini

@@ -134,12 +136,12 @@ res_start_time = 1730000000000
gen_figure = true
```

When the comparison run completes, an fc-results.xlsx file is generated containing the forecast errors, execution times, and invocation parameters of the compared algorithms, as shown below:

<img src={fc_result} width="760" alt="Forecast comparison results" />

| algorithm   | params                                                                      | MSE     | elapsed_time(ms.) |
| ----------- | --------------------------------------------------------------------------- | ------- | ----------------- |
| holtwinters | `{"trend":"add", "seasonal":"add"}`                                          | 351.622 | 125.1721          |
| arima       | `{"time_step":3600000, "start_p":0, "max_p":10, "start_q":0, "max_q":10}`    | 433.709 | 45577.9187        |

If `gen_figure` is set to true, the analysis result also includes plotted forecast figures (as shown below).

<img src={fc_result_figure} width="540" alt="Forecast comparison results" />
<img src={fc_result} width="360" alt="Forecast comparison results" />
@@ -5,7 +5,6 @@ description: Anomaly detection algorithms

import ad from '../pic/anomaly-detection.png';
import ad_result from '../pic/ad-result.png';
import ad_result_figure from '../pic/ad-result-figure.png';

TDengine defines anomaly (state) windows to provide anomaly-detection services. An anomaly window can be treated as a special **event window**: the time window covering consecutive anomalous time-series data, as determined by the anomaly-detection algorithm. It differs from a regular event window in that the window's start and end times are decided by the analysis algorithm rather than by user-supplied predicate expressions. The `ANOMALY_WINDOW` keyword in the `WHERE` clause therefore invokes the time-series anomaly-detection service, and the window pseudo-columns (`_WSTART`, `_WEND`, `_WDURATION`) describe an anomaly window's start time, end time, and duration just as for other time windows. For example:

@@ -40,13 +39,15 @@ algo=expr1
4. The input data undergoes a white-noise check by default; if the input is white noise, no (anomaly) window information is returned.

### Parameter Description

| Parameter | Meaning                                                           | Default |
| --------- | ----------------------------------------------------------------- | ------- |
| algo      | anomaly-detection algorithm to invoke                              | iqr     |
| wncheck   | whether to run the white-noise check on the input column, 0 or 1   | 1       |


### Example

```SQL
--- Use the iqr algorithm for anomaly detection on column i32.
SELECT _wstart, _wend, SUM(i32)

@@ -65,11 +66,12 @@ taos> SELECT _wstart, _wend, count(*) FROM foo ANOMALY_WINDOW(i32);
Query OK, 1 row(s) in set (0.028946s)
```


### Built-in Anomaly-Detection Algorithms

The platform has six built-in anomaly-detection models in three categories: [statistics-based algorithms](./02-statistics-approach.md), [data-density-based algorithms](./03-data-density.md), and [machine-learning-based algorithms](./04-machine-learning.md). If no detection method is specified, IQR is used by default.

### Anomaly-Detection Effectiveness Comparison Tool

TDgpt provides an automated tool that compares the detection effectiveness of different algorithms on different data sets, using recall and precision as the two metrics of effectiveness.
By setting the options below in the configuration file (analysis.ini), you can select the anomaly-detection algorithms to invoke, the time range of the test data, whether to generate annotated result figures, and the algorithms' parameters.
Before running the comparison, the anomaly-detection data set must be annotated manually: set the [anno_res] option, listing in the array which points are anomalies. In the test set below, the 9th point is an anomaly, so the annotated result is [9].

@@ -93,14 +95,21 @@ anno_res = [9]
ksigma={"k": 2}
iqr={}
grubbs={}
lof={"algo":"auto", "n_neighbor": 3}
lof={"algorithm":"auto", "n_neighbor": 3}
```

After the comparison finishes, a file named `ad_result.xlsx` is generated automatically. Its first sheet holds the algorithm results (shown below): algorithm name, invocation parameters, recall, precision, and execution time.

<img src={ad_result} width="760" alt="Anomaly-detection comparison results" />

| algorithm | params                                 | precision(%) | recall(%) | elapsed_time(ms.) |
| --------- | -------------------------------------- | ------------ | --------- | ----------------- |
| ksigma    | `{"k":2}`                              | 100          | 100       | 0.453             |
| iqr       | `{}`                                   | 100          | 100       | 2.727             |
| grubbs    | `{}`                                   | 100          | 100       | 2.811             |
| lof       | `{"algorithm":"auto", "n_neighbor":3}` | 0            | 0         | 4.660             |


If `gen_figure` is set to `true`, the comparison also renders each algorithm's result as a figure (below, the annotated anomaly-detection result of ksigma).

<img src={ad_result_figure} width="540" alt="Anomaly-detection annotation figure" />
<img src={ad_result} width="540" alt="Anomaly-detection annotation figure" />
@@ -138,15 +138,22 @@ docker run -d \

services:
  tdengine:
    image: tdengine/tdengine:3.3.0.0
    image: tdengine/tdengine:latest
    container_name: tdengine
    hostname: tdengine
    environment:
      TAOS_FQDN: tdengine
      MONITOR_FQDN: tdengine
      EXPLORER_CLUSTER: http://tdengine:6041
      TAOS_KEEPER_TDENGINE_HOST: tdengine
    volumes:
      - tdengine-data:/var/lib/taos/
    ports:
      - 6060:6060
  grafana:
    image: grafana/grafana:9.3.6
    image: grafana/grafana:latest
    volumes:
      - ./tdengine.yml/:/etc/grafana/provisioning/tdengine.yml
      - ./tdengine.yml:/etc/grafana/provisioning/tdengine.yml
      - grafana-data:/var/lib/grafana
    environment:
      # install tdengine plugin at start

@@ -156,6 +163,7 @@ docker run -d \
      TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
    ports:
      - 3000:3000

volumes:
  grafana-data:
  tdengine-data:
@@ -22,6 +22,7 @@ table_option: {
  COMMENT 'string_value'
  | SMA(col_name [, col_name] ...)
  | KEEP value
  | VIRTUAL {0 | 1}
}
```

@@ -36,6 +37,7 @@ table_option: {
4. For the use of `ENCODE` and `COMPRESS`, see [Per-Column Compression](../compress).
5. For the parameters in table_option, see [CREATE TABLE SQL Description](../table).
6. The keep parameter in table_option takes effect only for super tables; see the [database description](02-database.md) for details on keep. The only difference is that a super table's keep does not affect query results immediately; it takes effect only after compact.
7. The virtual parameter in table_option takes effect only for super tables. A value of 1 creates a virtual super table, 0 a regular super table; the default is 0. When creating a virtual super table, column_definition supports only the type_name option; extra primary-key columns and compression options cannot be defined. A minimal sketch follows.
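
A minimal sketch of the `VIRTUAL` option above (the table and tag names are hypothetical):

```sql
-- Hypothetical sketch: VIRTUAL 1 in table_option creates a virtual super table.
CREATE STABLE meters_v (
  ts TIMESTAMP,
  current FLOAT
) TAGS (
  group_id INT
) VIRTUAL 1;
```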

## Viewing Super Tables

@@ -527,6 +527,24 @@ CREATE STREAM avg_current_stream FILL_HISTORY 1
#### Window Invalidation Fields

During stream computing, out-of-order data, updates, or deletions may cause already-generated windows to be removed or their results to require recalculation. In that case a WINDOW_INVALIDATION notification is sent to the notification address, indicating which windows have been deleted.

The following fields exist on the event object only when eventType is WINDOW_INVALIDATION:
1. windowStart: a long-integer timestamp for the window's start time, with the same precision as the result table.
1. windowEnd: a long-integer timestamp for the window's end time, with the same precision as the result table.

## Virtual Table Support in Stream Computing

Starting from v3.3.6.0, stream computing can use virtual tables (including virtual regular tables, virtual sub-tables, and virtual super tables) as data sources, with exactly the same syntax as for non-virtual tables.

However, virtual tables behave differently from non-virtual tables, so the following restrictions currently apply when stream computing uses them:

1. The schema of virtual regular tables/virtual sub-tables involved in stream computing must not be changed.
1. If the data source of a virtual table column is changed during stream computing, the change does not take effect for the stream; it keeps reading the old data source.
1. If the original table behind a virtual table column is deleted during stream computing and a table with the same name and a same-named column is created later, the stream will not read the new table's data.
1. The stream's watermark must be 0; otherwise creation fails with an error.
1. If the stream's data source is a virtual super table, sub-tables added after the stream task starts do not participate in the computation.
1. The timestamps of a virtual table's different original tables are not necessarily consistent; merged data may contain null values, and interpolation is not yet supported.
1. Out-of-order data, updates, and deletions are not handled; that is, `ignore update 0` or `ignore expired 0` cannot be specified when creating the stream, otherwise an error is reported.
1. Historical data computation is not supported; that is, `fill_history 1` cannot be specified when creating the stream, otherwise an error is reported.
1. The trigger modes MAX_DELAY, FORCE_WINDOW_CLOSE, and CONTINUOUS_WINDOW_CLOSE are not supported.
1. The COUNT_WINDOW window type is not supported.

@@ -151,18 +151,19 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to data...

Provides information about user-created super tables.

| #  | **Column**    | **Data Type** | **Description**                                                  |
|----|:-------------:|---------------|------------------------------------------------------------------|
| 1  | stable_name   | VARCHAR(192)  | super table name                                                  |
| 2  | db_name       | VARCHAR(64)   | name of the database the super table belongs to                   |
| 3  | create_time   | TIMESTAMP     | creation time                                                     |
| 4  | columns       | INT           | number of columns                                                 |
| 5  | tags          | INT           | number of tags. Note that `tags` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 6  | last_update   | TIMESTAMP     | last update time                                                  |
| 7  | table_comment | VARCHAR(1024) | table comment                                                     |
| 8  | watermark     | VARCHAR(64)   | window close time. Note that `watermark` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9  | max_delay     | VARCHAR(64)   | maximum delay for pushing computed results. Note that `max_delay` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 10 | rollup        | VARCHAR(128)  | rollup aggregate function. Note that `rollup` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 11 | virtual       | BOOL          | whether the super table is a virtual super table.                 |

## INS_TABLES

@@ -194,17 +195,18 @@ TDengine has a built-in database named `INFORMATION_SCHEMA` that provides access to data...

## INS_COLUMNS

| #  | **Column**    | **Data Type** | **Description**                                                |
|----|:-------------:|--------------|-----------------------------------------------------------------|
| 1  | table_name    | VARCHAR(192) | table name                                                      |
| 2  | db_name       | VARCHAR(64)  | name of the database the table belongs to                       |
| 3  | table_type    | VARCHAR(21)  | table type                                                      |
| 4  | col_name      | VARCHAR(64)  | column name                                                     |
| 5  | col_type      | VARCHAR(32)  | column type                                                     |
| 6  | col_length    | INT          | column length                                                   |
| 7  | col_precision | INT          | column precision                                                |
| 8  | col_scale     | INT          | column scale                                                    |
| 9  | col_nullable  | INT          | whether the column is nullable                                  |
| 10 | col_source    | VARCHAR(322) | the column's data source, as db_name.table_name.col_name. Only virtual table columns have this value, indicating the virtual table's data source. (See the query sketch below.) |
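
A minimal query sketch against this system table (assuming the virtual sub-table d1001_v from the concepts docs exists):

```sql
-- Hypothetical sketch: list each column of a virtual table with its data source.
SELECT col_name, col_source
  FROM information_schema.ins_columns
  WHERE table_name = 'd1001_v';
```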

## INS_USERS

@@ -0,0 +1,311 @@
---
sidebar_label: Virtual Tables
title: Virtual Tables
description: Management operations on virtual tables
---
## Creating Virtual Tables

The `CREATE VTABLE` statement creates virtual regular tables and creates virtual sub-tables using a virtual super table as the template.

### Creating a Virtual Super Table

See the `VIRTUAL` parameter in [Create a Super Table](./04-stable.md#创建超级表).

### Creating a Virtual Regular Table
```sql
CREATE VTABLE [IF NOT EXISTS] [db_name].vtb_name
  ts_col_name timestamp,
  (create_definition[ ,create_definition] ...)

create_definition:
  vtb_col_name column_definition

column_definition:
  type_name [FROM [db_name.]table_name.col_name]

```

### Creating a Virtual Sub-table
```sql
CREATE VTABLE [IF NOT EXISTS] [db_name].vtb_name
  (create_definition[ ,create_definition] ...)
  USING [db_name.]stb_name
  [(tag_name [, tag_name] ...)]
  TAGS (tag_value [, tag_value] ...)

create_definition:
  [stb_col_name FROM] [db_name.]table_name.col_name
tag_value:
  const_value
```

**Usage notes**

1. For virtual table (and column) naming rules, see [Name Rules](./19-limit.md#名称命名规则).
2. The maximum table-name length is 192.
3. The table's first column must be TIMESTAMP, and the system automatically makes it the primary key.
4. Each table row may not exceed 64 KB (note: every VARCHAR/NCHAR/GEOMETRY column takes an extra 2 bytes of storage).
5. VARCHAR/NCHAR/GEOMETRY types must specify their maximum byte length, e.g. VARCHAR(20) for 20 bytes.
6. When creating a virtual table, FROM specifies a column's data source; a source in another database can be named via db_name. Without db_name, the currently used database is assumed; if no db_name is given and no database is in use, an error is reported.
7. The data source of the ts column is not specified explicitly when creating a virtual table; its values are the merged primary-key timestamps of the original tables behind all columns referenced in a given query.
8. Under a virtual super table only virtual sub-tables can be created, and virtual sub-tables can only be created from a virtual super table template.
9. The data types of the virtual table's columns and tags must match those of the specified source columns and tags; otherwise an error is reported.
10. Within one database, virtual table names must be unique, and a virtual table may not share a name with a table. A virtual table may share a name with a view (not recommended); when a view and a virtual table share a name, write, query, grant, and revoke operations prefer the table of that name.
11. When creating virtual sub-tables and virtual regular tables, a column's FROM data source can only be a regular sub-table or regular table, not a super table, a view, or another virtual table, and not a table with a composite primary key. (A sketch follows this list.)
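
A minimal sketch of note 11 (names are hypothetical, modeled on the smart-meter example in the concepts docs):

```sql
-- Hypothetical sketch: a virtual sub-table under virtual super table
-- power.meters_v, each column sourced from a physical sub-table.
CREATE VTABLE power.d1001_v (
  current FROM power.current_d1001.current,
  voltage FROM power.voltage_d1001.voltage
) USING power.meters_v TAGS ("California.SanFrancisco", 2);
```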

## Querying Virtual Tables

Virtual tables are no different from regular tables in query syntax or query scope. The difference is that the data set a virtual table presents may vary from query to query; see the data generation rules below.

### Virtual Table Data Generation Rules

1. A virtual table aligns the data of multiple original tables by timestamp.
2. If several original tables have data at the same timestamp, the values of the corresponding columns are combined into one row; columns with no data at that timestamp are filled with NULL.
3. A virtual table's timestamps are the union of the timestamps of the original tables behind all columns included in the query, so the result set may have a different row count when a different query selects different columns.
4. Users can combine arbitrary columns from multiple tables; unselected columns do not appear in the virtual table.

**Example**

Suppose tables t1, t2, and t3 have the following structure and data:

![virtual-table-origin-table](./pic/virtual-table-origin-table.png)

And a virtual regular table v1 is created as follows:

```sql
create vtable v1 (
  ts timestamp,
  c1 int from t1.value,
  c2 int from t2.value,
  c3 int from t3.value1,
  c4 int from t3.value2);
```

Then, following the virtual table's rules for integrating multi-table data, running the query:

```sql
select * from v1;
```

produces the following result:

![virtual-table-query-res](./pic/virtual-table-query-res.png)

If only some columns are selected rather than all of them, the result contains only the timestamps of the original tables behind the selected columns. For example, running:

```sql
select c1, c2 from v1;
```

yields the result shown below:

![virtual-table-query-res-part](./pic/virtual-table-query-res-part.png)

Because t1 and t2, the original tables behind columns c1 and c2, have no data at timestamp 0:00:03, that timestamp does not appear in the result.

**Usage restrictions**

1. When querying a virtual super table, virtual sub-tables whose data sources come from different databases are not yet supported.

## Altering a Virtual Regular Table

```sql
ALTER VTABLE [db_name.]vtb_name alter_table_clause

alter_table_clause: {
  ADD COLUMN vtb_col_name vtb_column_type [FROM table_name.col_name]
  | DROP COLUMN vtb_col_name
  | ALTER COLUMN vtb_col_name SET {table_name.col_name | NULL }
  | MODIFY COLUMN col_name column_type
  | RENAME COLUMN old_col_name new_col_name
}
```

**Usage notes**
The following modifications can be made to a virtual regular table (see the sketch after this list):

1. ADD COLUMN: add a column.
2. DROP COLUMN: drop a column.
3. MODIFY COLUMN: modify a column definition. If the column's type is variable-length, this command can change its width; the width can only be increased, never decreased. If the column already has a data source, changing the width fails because the new width no longer matches the source column's width; set the data source to NULL first, then change the width.
4. RENAME COLUMN: rename a column.
5. ALTER COLUMN .. SET: change a column's data source. SET NULL clears the column's data source.
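
A minimal sketch of note 3 (reusing v1 from above, with a hypothetical variable-length column c5 sourced from a hypothetical t3.name):

```sql
-- Hypothetical sketch: detach the source, widen the column, re-attach.
ALTER VTABLE v1 ALTER COLUMN c5 SET NULL;
ALTER VTABLE v1 MODIFY COLUMN c5 VARCHAR(64);
ALTER VTABLE v1 ALTER COLUMN c5 SET t3.name;
```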

### Adding a Column

```sql
ALTER VTABLE vtb_name ADD COLUMN vtb_col_name vtb_col_type [FROM [db_name].table_name.col_name]
```

### Dropping a Column

```sql
ALTER VTABLE vtb_name DROP COLUMN vtb_col_name
```

### Changing Column Width

```sql
ALTER VTABLE vtb_name MODIFY COLUMN vtb_col_name data_type(length);
```

### Renaming a Column

```sql
ALTER VTABLE vtb_name RENAME COLUMN old_col_name new_col_name
```

### Changing a Column's Data Source

```sql
ALTER VTABLE vtb_name ALTER COLUMN vtb_col_name SET {[db_name.]table_name.col_name | NULL}
```

## Altering a Virtual Sub-table

```sql
ALTER VTABLE [db_name.]vtb_name alter_table_clause

alter_table_clause: {
  ALTER COLUMN vtb_col_name SET table_name.col_name
  | SET TAG tag_name = new_tag_value
}
```

**Usage notes**

1. Apart from changing tag values, modifications to a virtual sub-table's columns and tags must be made through its virtual super table.

### Changing a Virtual Sub-table's Tag Values

```sql
ALTER VTABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...;
```
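
For instance, a hypothetical sketch reusing the d1001_v sub-table and its group_id tag from the concepts docs:

```sql
-- Hypothetical sketch: move virtual sub-table d1001_v to tag group 3.
ALTER VTABLE d1001_v SET TAG group_id=3;
```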

### Changing a Column's Data Source

```sql
ALTER VTABLE vtb_name ALTER COLUMN vtb_col_name SET {[db_name.]table_name.col_name | NULL}
```

## Dropping a Virtual Table

```sql
DROP VTABLE [IF EXISTS] [dbname].vtb_name;
```

## Viewing Virtual Table Information

### Listing All Virtual Tables in a Database

The following SQL lists all virtual table names in the current database.

```sql
SHOW [NORMAL | CHILD] [db_name.]VTABLES [LIKE 'pattern'];
```

**Usage notes**

1. If db_name is not specified, information about all virtual regular tables and virtual sub-tables in the current database is shown. If no database is in use and no db_name is given, the error "database not specified" is reported. LIKE can be used for fuzzy matching on table names. NORMAL restricts the output to virtual regular tables, CHILD to virtual sub-tables.

### Showing a Virtual Table's Creation Statement

```sql
SHOW CREATE VTABLE [db_name.]vtable_name;
```

Shows the creation statement of the virtual table named vtable_name. Works for virtual regular tables and virtual sub-tables, and is commonly used for database migration: for an existing virtual table it returns the creation statement, and executing that statement in another cluster creates a virtual table with the identical structure.

### Getting a Virtual Table's Structure

```sql
DESCRIBE [db_name.]vtb_name;
```

### Viewing All Virtual Table Information

```sql
SELECT ... FROM information_schema.ins_tables where type = 'VIRTUAL_NORMAL_TABLE' or type = 'VIRTUAL_CHILD_TABLE';
```

## Writing to Virtual Tables

Writing data to a virtual table is not supported, nor is deleting data from one. A virtual table is a logical table, the computed result over its original tables, so it can only be queried; data cannot be written to or deleted from it.


## Virtual Tables vs. Views

Virtual tables and views look similar but differ in many ways:

| Attribute | Virtual Table | View |
|----------------|------------------------|-----------|
| Definition | A dynamic data structure that generates a logical table from multiple tables' columns, combined by timestamp rules. | A virtualized table structure based on an SQL query, storing the definition of the query logic. |
| Data source | Multiple original tables; columns can be selected dynamically and aligned by timestamp. | The result of a query over one or more tables, usually a complex SQL query. |
| Data storage | Stores no actual data; everything is generated dynamically at query time. | Stores no actual data; only the SQL query logic. |
| Timestamp handling | Integrates columns of different tables onto one time axis via timestamp alignment. | No timestamp alignment; the data is determined directly by the query logic. |
| Update mechanism | Updates dynamically; changes in the original tables are reflected in real time. | Updates dynamically, but follows the view's query logic; no alignment or data integration. |
| Features | Supports null filling and interpolation (e.g., prev, next, linear). | No built-in filling or interpolation; must be implemented in the query logic. |
| Use cases | Time-series alignment, cross-table data integration, cross-source comparison analysis, etc. | Simplifying complex query logic, restricting user access, encapsulating business logic, etc. |
| Performance | Query complexity can be high due to multi-table alignment and null handling, especially on large data volumes. | Usually depends on the complexity of the view's query; similar to single-table query performance. |

Conversion between virtual tables and views is not supported, such as creating a view from a virtual table or a virtual table from a view.

## Virtual Table Permissions

### Permission Description

Virtual table permissions are READ and WRITE. Queries require READ permission; deleting or modifying the virtual table itself requires WRITE permission.

### Syntax

#### Grant

```sql
GRANT privileges ON [db_name.]vtable_name TO user_name
privileges: {
    ALL,
  | priv_type [, priv_type] ...
}
priv_type: {
    READ
  | WRITE
}
```

#### Revoke

```sql
REVOKE privileges ON [db_name.]vtable_name FROM user_name
privileges: {
    ALL,
  | priv_type [, priv_type] ...
}
priv_type: {
    READ
  | WRITE
}
```

### Permission Rules

1. The creator of a virtual table and the root user have all permissions by default.
2. A user can be granted, or have revoked, read/write permission on a specific virtual table (including virtual super tables and virtual regular tables) via dbname.vtbname; granting or revoking directly on virtual sub-tables is not supported.
3. Virtual sub-tables and virtual super tables do not support tag-based (table-level) authorization; virtual sub-tables inherit their virtual super table's permissions.
4. Granting and revoking permissions for other users is done with the GRANT and REVOKE statements, which only the root user may execute.
5. The detailed permission rules are summarized below; a usage sketch follows the table:

| # | Operation | Permission required |
|------|------|----------------------------------------------------------|
| 1 | CREATE VTABLE | The user has WRITE permission on the database the virtual table belongs to, and<br /> READ permission on the original tables serving as the virtual table's data sources. |
| 2 | DROP/ALTER VTABLE | The user has WRITE permission on the virtual table; to set a column's data source, the user also needs READ permission on that source's original table. |
| 3 | SHOW VTABLES | None |
| 4 | SHOW CREATE VTABLE | None |
| 5 | DESCRIBE VTABLE | None |
| 6 | System table queries | None |
| 7 | SELECT FROM VTABLE | The user has READ permission on the virtual table |
| 8 | GRANT/REVOKE | Only the root user may do this |
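
A minimal usage sketch of the grant/revoke syntax above (user and table names are hypothetical), executed as root:

```sql
-- Hypothetical sketch: grant, then revoke, read access on a virtual table.
GRANT READ ON power.meters_v TO user1;
REVOKE READ ON power.meters_v FROM user1;
```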

## Usage Scenarios

| SQL Query | SQL Write | STMT Query | STMT Write | Subscription | Stream Computing |
|-----------|-----------|------------|------------|--------------|------------------|
| Supported | Not supported | Not supported | Not supported | Not supported | Supported |

@@ -591,3 +591,17 @@ description: List and detailed descriptions of TDengine server-side error codes
| 0x80006206 | Virtual table not support in Topic | Virtual tables are not supported in subscriptions | Do not use virtual tables in subscriptions |
| 0x80006207 | Virtual super table query not support origin table from different databases | A virtual super table's sub-tables may not have data sources in different databases | Make sure all of a virtual super table's sub-tables' data sources come from the same database |


## TDgpt

| Error Code | Description | Possible Scenarios or Reasons | Recommended Actions |
| ---------- | --------------------- | -------------------------------------------------------------------------------- | ------------------------------ |
| 0x80000440 | Analysis service response is NULL | The analysis service returned an error | Check the server-side logs to confirm the returned information |
| 0x80000441 | Analysis service can't access | The analysis service is unavailable | Check whether the anode service is available |
| 0x80000442 | Analysis algorithm is missing | No analysis algorithm name was specified | Add the algorithm name |
| 0x80000443 | Analysis algorithm not loaded | The specified algorithm is not loaded | Check the specified algorithm |
| 0x80000444 | Analysis invalid buffer type | The cached data format is wrong | Check the server-side error logs |
| 0x80000445 | Analysis failed since anode return error | The anode returned an error message | Check the server-side logs to identify the cause |
| 0x80000446 | Analysis failed since too many input rows for anode | Too much input data | Reduce the size of the analysis input |
| 0x80000447 | white-noise data not processed | White-noise data is not analyzed | |
| 0x80000448 | Analysis internal error, not processed | An internal anode error occurred | Check the server-side log (taosanode.app.log) |

@@ -49,10 +49,16 @@ extern "C" {
  } \
  } while (0)

#define HAS_BIND_VALUE       ((uint8_t)0x1)
#define IS_FIXED_VALUE       ((uint8_t)0x2)
#define USING_CLAUSE         ((uint8_t)0x4)
#define IS_FIXED_TAG         ((uint8_t)0x8)
#define NO_DATA_USING_CLAUSE ((uint8_t)0x7)

typedef struct SStmtCallback {
  TAOS_STMT* pStmt;
  int32_t (*getTbNameFn)(TAOS_STMT*, char**);
  int32_t (*setInfoFn)(TAOS_STMT*, STableMeta*, void*, SName*, bool, SHashObj*, SHashObj*, const char*, bool);
  int32_t (*setInfoFn)(TAOS_STMT*, STableMeta*, void*, SName*, bool, SHashObj*, SHashObj*, const char*, uint8_t);
  int32_t (*getExecInfoFn)(TAOS_STMT*, SHashObj**, SHashObj**);
} SStmtCallback;

@@ -175,7 +181,7 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c
int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen,
                                int32_t colIdx, int32_t rowNum, void* charsetCxt);
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, bool hasCtbName, int32_t* fieldNum,
int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, uint8_t tbNameFlag, int32_t* fieldNum,
                               TAOS_FIELD_ALL** fields);
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,

@@ -273,9 +273,12 @@ function install_lib() {
  ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
  ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || :
  ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || :
  ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
  ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
  #${csudo}rm -rf ${v15_java_app_dir} || :
  ${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*

  #link lib/link_dir
  ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
  ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
  ${csudo}ln -sf ${install_main_dir}/driver/libtaosnative.* ${lib_link_dir}/libtaosnative.so.1

@@ -283,13 +286,14 @@ function install_lib() {

  [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :

  #link lib64/link_dir
  if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
    ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
    ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
    ${csudo}ln -sf ${install_main_dir}/driver/libtaosnative.* ${lib64_link_dir}/libtaosnative.so.1 || :
    ${csudo}ln -sf ${lib64_link_dir}/libtaosnative.so.1 ${lib64_link_dir}/libtaosnative.so || :

    [ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
    [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
  fi

  ${csudo}ldconfig
@@ -312,15 +312,37 @@ function install_avro() {

function install_lib() {
  # Remove links
  ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
  ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || :
  [ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
  remove_links() {
    local dir=$1
    find ${dir} -name "libtaos.*" -exec ${csudo}rm -f {} \; || :
    find ${dir} -name "libtaosnative.so" -exec ${csudo}rm -f {} \; || :
    find ${dir} -name "libtaosws.so" -exec ${csudo}rm -f {} \; || :
  }

  remove_links ${lib_link_dir}

  if [ "$osType" != "Darwin" ]; then
    ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
    ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || :
    [ -f ${lib64_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.so || :
    remove_links ${lib64_link_dir}
  fi

  # Copy and set permissions for libraries
  copy_and_set_permissions() {
    local src=$1
    local dest=$2
    if [ "$osType" != "Darwin" ]; then
      ${csudo}cp ${src} ${dest} && ${csudo}chmod 777 ${dest}
    else
      ${csudo}cp -Rf ${src} ${dest} && ${csudo}chmod 777 ${dest}
    fi
  }

  # Create symbolic links
  create_symlink() {
    local target=$1
    local link_name=$2
    ${csudo}ln -sf ${target} ${link_name}
  }

  if [ "$osType" != "Darwin" ]; then
    ${csudo}cp ${binary_dir}/build/lib/libtaos.so.${verNumber} \
      ${install_main_dir}/driver &&
@@ -179,13 +179,13 @@ remove_bin() {

function clean_lib() {
  # Remove link
  ${csudo}rm -f ${lib_link_dir}/libtaos.* || :
  [ -f ${lib_link_dir}/libtaosnative.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || :
  [ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
  ${csudo}find ${lib_link_dir} -name "libtaos.*" -exec ${csudo}rm -f {} \; || :
  ${csudo}find ${lib_link_dir} -name "libtaosnative.*" -exec ${csudo}rm -f {} \; || :
  ${csudo}find ${lib_link_dir} -name "libtaosws.*" -exec ${csudo}rm -f {} \; || :

  ${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
  [ -f ${lib64_link_dir}/libtaosnative.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || :
  [ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
  ${csudo}find ${lib64_link_dir} -name "libtaos.*" -exec ${csudo}rm -f {} \; || :
  ${csudo}find ${lib64_link_dir} -name "libtaosnative.*" -exec ${csudo}rm -f {} \; || :
  ${csudo}find ${lib64_link_dir} -name "libtaosws.*" -exec ${csudo}rm -f {} \; || :
  #${csudo}rm -rf ${v15_java_app_dir} || :
}

@@ -338,4 +338,4 @@ fi
command -v systemctl >/dev/null 2>&1 && ${csudo}systemctl daemon-reload >/dev/null 2>&1 || true
echo
echo "${productName} is removed successfully!"
echo
|
@ -70,15 +70,16 @@ function clean_bin() {
|
|||
}
|
||||
|
||||
function clean_lib() {
|
||||
# Remove link
|
||||
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
|
||||
[ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
|
||||
[ -f ${lib_link_dir}/libtaosnative.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosnative.* || :
|
||||
# Remove link
|
||||
${csudo}find ${lib_link_dir} -name "libtaos.*" -exec ${csudo}rm -f {} \; || :
|
||||
${csudo}find ${lib_link_dir} -name "libtaosnative.*" -exec ${csudo}rm -f {} \; || :
|
||||
${csudo}find ${lib_link_dir} -name "libtaosws.*" -exec ${csudo}rm -f {} \; || :
|
||||
|
||||
${csudo}find ${lib64_link_dir} -name "libtaos.*" -exec ${csudo}rm -f {} \; || :
|
||||
${csudo}find ${lib64_link_dir} -name "libtaosnative.*" -exec ${csudo}rm -f {} \; || :
|
||||
${csudo}find ${lib64_link_dir} -name "libtaosws.*" -exec ${csudo}rm -f {} \; || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
|
||||
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
|
||||
[ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
|
||||
[ -f ${lib64_link_dir}/libtaosnative.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosnative.* || :
|
||||
#${csudo}rm -rf ${v15_java_app_dir} || :
|
||||
}
|
||||
|
||||
function clean_header() {
|
||||
|
|
|
@ -64,7 +64,7 @@ typedef struct SStmtBindInfo {
  int32_t sBindLastIdx;
  int8_t  tbType;
  bool    tagsCached;
  bool    preCtbname;
  uint8_t tbNameFlag;
  void   *boundTags;
  char    tbName[TSDB_TABLE_FNAME_LEN];
  char    tbFName[TSDB_TABLE_FNAME_LEN];

@ -75,7 +75,7 @@ static int32_t smlProcessTagTelnet(SSmlHandle *info, char *data, char *sqlEnd){
  const char *sql = data;
  while (sql < sqlEnd) {
    JUMP_SPACE(sql, sqlEnd)
    if (unlikely(*sql == '\0')) break;
    if (unlikely(*sql == '\0' || *sql == '\n')) break;

    const char *key = sql;
    size_t      keyLen = 0;
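Editor's note: the `smlProcessTagTelnet` change makes the telnet tag scanner stop at a newline as well as at the terminator, which is what lets one raw buffer carry several records (see the `smlProcess_34114_Test` case near the end of this diff). A minimal standalone sketch of that scanning rule — the record split is simplified; only the terminator logic mirrors the patch:

    #include <stdio.h>
    #include <string.h>

    // Length of the first telnet record in buf: scanning stops at '\0' or
    // '\n', so "m1 ...\nm2 ..." yields one record per line.
    static size_t telnetRecordLen(const char *buf, size_t bufLen) {
      size_t i = 0;
      while (i < bufLen && buf[i] != '\0' && buf[i] != '\n') i++;
      return i;
    }

    int main(void) {
      const char *raw = "sys.if.bytes.out 1479496100 1.3E0 host=web01\n"
                        "sys.if.bytes.out 1479496101 1.3E1 host=web01";
      size_t len = telnetRecordLen(raw, strlen(raw));
      printf("first record: %.*s\n", (int)len, raw);  // stops before the newline
      return 0;
    }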
@ -238,7 +238,7 @@ int32_t stmtRestoreQueryFields(STscStmt* pStmt) {
}
*/
int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, const char* sTableName,
                           bool autoCreateTbl) {
                           bool autoCreateTbl, uint8_t tbNameFlag) {
  STscStmt* pStmt = (STscStmt*)stmt;
  char      tbFName[TSDB_TABLE_FNAME_LEN];
  int32_t   code = tNameExtractFullName(tbName, tbFName);

@ -256,6 +256,7 @@ int32_t stmtUpdateBindInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags,
  pStmt->bInfo.tbType = pTableMeta->tableType;
  pStmt->bInfo.boundTags = tags;
  pStmt->bInfo.tagsCached = false;
  pStmt->bInfo.tbNameFlag = tbNameFlag;
  tstrncpy(pStmt->bInfo.stbFName, sTableName, sizeof(pStmt->bInfo.stbFName));

  return TSDB_CODE_SUCCESS;

@ -271,10 +272,10 @@ int32_t stmtUpdateExecInfo(TAOS_STMT* stmt, SHashObj* pVgHash, SHashObj* pBlockH
}

int32_t stmtUpdateInfo(TAOS_STMT* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, bool autoCreateTbl,
                       SHashObj* pVgHash, SHashObj* pBlockHash, const char* sTableName, bool preCtbname) {
                       SHashObj* pVgHash, SHashObj* pBlockHash, const char* sTableName, uint8_t tbNameFlag) {
  STscStmt* pStmt = (STscStmt*)stmt;

  STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl));
  STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl, tbNameFlag));
  STMT_ERR_RET(stmtUpdateExecInfo(stmt, pVgHash, pBlockHash));

  pStmt->sql.autoCreateTbl = autoCreateTbl;

@ -1729,7 +1730,10 @@ int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
  STMT_ERRI_JRET(stmtFetchTagFields(stmt, nums, fields));

_return:

  // compatible with previous versions
  if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST && (pStmt->bInfo.tbNameFlag & NO_DATA_USING_CLAUSE) == 0x0) {
    code = TSDB_CODE_TSC_STMT_TBNAME_ERROR;
  }
  pStmt->errCode = preCode;

  return code;
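Editor's note: the hunks above (and the parser hunks later in this diff) replace the single `preCtbname` boolean with a `tbNameFlag` bit set recording how the table name and tags were supplied in the INSERT statement. The real bit values are not shown in this diff; the sketch below uses hypothetical assignments purely to illustrate checks such as `(tbNameFlag & NO_DATA_USING_CLAUSE) == 0x0` and `(tbNameFlag & IS_FIXED_VALUE) == 0`:

    #include <stdint.h>
    #include <stdio.h>

    // Hypothetical bit assignments -- the real values live in the parser headers.
    #define USING_CLAUSE    (1 << 0)  // statement has a "USING stb" clause
    #define HAS_BIND_VALUE  (1 << 1)  // tbname came from a bound parameter
    #define IS_FIXED_VALUE  (1 << 2)  // tbname written literally in the SQL
    #define IS_FIXED_TAG    (1 << 3)  // all tags are literal values, none bound
    // Mask used by the compatibility checks above (assumed to combine the
    // using-clause and bind-value bits).
    #define NO_DATA_USING_CLAUSE (USING_CLAUSE | HAS_BIND_VALUE)

    int main(void) {
      uint8_t tbNameFlag = 0;
      tbNameFlag |= USING_CLAUSE;    // "INSERT INTO ? USING stb ..."
      tbNameFlag |= HAS_BIND_VALUE;  // tbname supplied via a bind parameter

      // tbname is reported as a bindable field only when it is not fixed in
      // the SQL text and a USING clause is present (see buildStbBoundFields).
      int needTbnameField =
          (tbNameFlag & IS_FIXED_VALUE) == 0 && (tbNameFlag & USING_CLAUSE) != 0;
      printf("report tbname field: %s\n", needTbnameField ? "yes" : "no");
      return 0;
    }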
@ -193,7 +193,7 @@ static int32_t stmtGetTbName(TAOS_STMT2* stmt, char** tbName) {
}

static int32_t stmtUpdateBindInfo(TAOS_STMT2* stmt, STableMeta* pTableMeta, void* tags, SName* tbName,
                                  const char* sTableName, bool autoCreateTbl, bool preCtbname) {
                                  const char* sTableName, bool autoCreateTbl, int8_t tbNameFlag) {
  STscStmt2* pStmt = (STscStmt2*)stmt;
  char       tbFName[TSDB_TABLE_FNAME_LEN];
  int32_t    code = tNameExtractFullName(tbName, tbFName);

@ -217,7 +217,7 @@ static int32_t stmtUpdateBindInfo(TAOS_STMT2* stmt, STableMeta* pTableMeta, void

  pStmt->bInfo.boundTags = tags;
  pStmt->bInfo.tagsCached = false;
  pStmt->bInfo.preCtbname = preCtbname;
  pStmt->bInfo.tbNameFlag = tbNameFlag;
  tstrncpy(pStmt->bInfo.stbFName, sTableName, sizeof(pStmt->bInfo.stbFName));

  return TSDB_CODE_SUCCESS;

@ -233,10 +233,10 @@ static int32_t stmtUpdateExecInfo(TAOS_STMT2* stmt, SHashObj* pVgHash, SHashObj*
}

static int32_t stmtUpdateInfo(TAOS_STMT2* stmt, STableMeta* pTableMeta, void* tags, SName* tbName, bool autoCreateTbl,
                              SHashObj* pVgHash, SHashObj* pBlockHash, const char* sTableName, bool preCtbname) {
                              SHashObj* pVgHash, SHashObj* pBlockHash, const char* sTableName, uint8_t tbNameFlag) {
  STscStmt2* pStmt = (STscStmt2*)stmt;

  STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl, preCtbname));
  STMT_ERR_RET(stmtUpdateBindInfo(stmt, pTableMeta, tags, tbName, sTableName, autoCreateTbl, tbNameFlag));
  STMT_ERR_RET(stmtUpdateExecInfo(stmt, pVgHash, pBlockHash));

  pStmt->sql.autoCreateTbl = autoCreateTbl;

@ -1233,7 +1233,8 @@ static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIEL
  }

  STMT_ERRI_JRET(
      qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields));
      qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.tbNameFlag, fieldNum, fields));

  if (pStmt->bInfo.tbType == TSDB_SUPER_TABLE && cleanStb) {
    pStmt->bInfo.needParse = true;
    qDestroyStmtDataBlock(*pDataBlock);

@ -2022,7 +2023,9 @@ int stmtParseColFields2(TAOS_STMT2* stmt) {
      STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
    pStmt->bInfo.needParse = false;
  }

  if (pStmt->sql.stbInterlaceMode && pStmt->sql.siInfo.pDataCtx != NULL) {
    pStmt->bInfo.needParse = false;
  }
  if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) {
    taos_free_result(pStmt->exec.pRequest);
    pStmt->exec.pRequest = NULL;

@ -2036,6 +2039,10 @@ int stmtParseColFields2(TAOS_STMT2* stmt) {
  }

_return:
  // compatible with previous versions
  if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST && (pStmt->bInfo.tbNameFlag & NO_DATA_USING_CLAUSE) == 0x0) {
    code = TSDB_CODE_TSC_STMT_TBNAME_ERROR;
  }

  pStmt->errCode = preCode;

@ -36,10 +36,17 @@ TARGET_LINK_LIBRARIES(
#)

ADD_EXECUTABLE(userOperTest ../../../tests/script/api/passwdTest.c)
if (TD_WINDOWS)
    TARGET_LINK_LIBRARIES(
        userOperTest
        PUBLIC ${TAOS_NATIVE_LIB}
        PUBLIC ${TAOS_NATIVE_LIB_STATIC}
    )
else()
    TARGET_LINK_LIBRARIES(
        userOperTest
        PUBLIC ${TAOS_NATIVE_LIB}
    )
endif()

ADD_EXECUTABLE(stmt2Test stmt2Test.cpp)
TARGET_LINK_LIBRARIES(

@ -129,4 +136,4 @@ add_test(
add_test(
  NAME userOperTest
  COMMAND userOperTest
)
)
@ -534,7 +534,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) {
  printf("support case \n");
  // case 1 : test child table already exist
  {
    const char* sql = "INSERT INTO stmt2_testdb_3.t0(ts,b)using stmt2_testdb_3.stb (t1,t2) TAGS(?,?) VALUES (?,?)";
    const char* sql = "INSERT INTO stmt2_testdb_3.t0(ts,b)using stmt2_testdb_3.stb (t1,t2) TAGS(?,?) VALUES(?,?)";
    TAOS_FIELD_ALL expectedFields[4] = {{"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG},
                                        {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG},
                                        {"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL},

@ -612,7 +612,7 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) {

  // case 8 : 'db' 'stb'
  {
    const char* sql = "INSERT INTO 'stmt2_testdb_3'.? using 'stmt2_testdb_3'.'stb' (t1,t2) TAGS(?,?) (ts,b)VALUES(?,?)";
    const char* sql = "INSERT INTO 'stmt2_testdb_3'.? using 'stmt2_testdb_3'.'stb' (t1,t2) TAGS(?,?)(ts,b)VALUES(?,?)";
    TAOS_FIELD_ALL expectedFields[5] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME},
                                        {"t1", TSDB_DATA_TYPE_INT, 0, 0, 4, TAOS_FIELD_TAG},
                                        {"t2", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_TAG},

@ -634,9 +634,20 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) {
    printf("case 9 : %s\n", sql);
    getFieldsSuccess(taos, sql, expectedFields, 5);
  }
  // case 11: TD-34097
  {
    do_query(taos, "use stmt2_testdb_3");
    const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(1,'abc') (ts,b)VALUES(?,?)";
    TAOS_FIELD_ALL expectedFields[3] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME},
                                        {"ts", TSDB_DATA_TYPE_TIMESTAMP, 2, 0, 8, TAOS_FIELD_COL},
                                        {"b", TSDB_DATA_TYPE_BINARY, 0, 0, 12, TAOS_FIELD_COL}};
    printf("case 11 : %s\n", sql);
    getFieldsSuccess(taos, sql, expectedFields, 3);
  }

  // case 10 : test all types
  {
    do_query(taos, "use stmt2_testdb_3");
    const char* sql =
        "insert into ? using all_stb tags(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)";
    TAOS_FIELD_ALL expectedFields[33] = {{"tbname", TSDB_DATA_TYPE_BINARY, 0, 0, 271, TAOS_FIELD_TBNAME},

@ -711,7 +722,27 @@ TEST(stmt2Case, insert_ctb_using_get_fields_Test) {
    printf("case 5 : %s\n", sql);
    getFieldsError(taos, sql, TSDB_CODE_TSC_SQL_SYNTAX_ERROR);
  }

  // case 6 : mix value and ?
  {
    do_query(taos, "use stmt2_testdb_3");
    const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(1,?) (ts,b)VALUES(?,?)";
    printf("case 6 : %s\n", sql);
    getFieldsError(taos, sql, TSDB_CODE_TSC_SQL_SYNTAX_ERROR);
  }
  // case 7 : mix value and ?
  {
    do_query(taos, "use stmt2_testdb_3");
    const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(?,?) (ts,b)VALUES(15910606280001,?)";
    printf("case 7 : %s\n", sql);
    getFieldsError(taos, sql, TSDB_CODE_TSC_INVALID_OPERATION);
  }
  // case 8 : mix value and ?
  {
    do_query(taos, "use stmt2_testdb_3");
    const char* sql = "INSERT INTO ? using stb (t1,t2) TAGS(?,?) (ts,b)VALUES(15910606280001,'abc')";
    printf("case 8 : %s\n", sql);
    getFieldsError(taos, sql, TSDB_CODE_TSC_INVALID_OPERATION);
  }
  do_query(taos, "drop database if exists stmt2_testdb_3");
  taos_close(taos);
}

@ -1002,6 +1033,15 @@ TEST(stmt2Case, stmt2_insert_non_statndard) {
    printf("stmt2 [%s] : %s\n", "less params", sql);
    int code = taos_stmt2_prepare(stmt, sql, 0);
    checkError(stmt, code);
    // test get fields
    int fieldNum = 0;
    TAOS_FIELD_ALL* pFields = NULL;
    code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields);
    checkError(stmt, code);
    ASSERT_EQ(fieldNum, 2);
    ASSERT_STREQ(pFields[0].name, "tbname");
    ASSERT_STREQ(pFields[1].name, "ts");

    int total_affect_rows = 0;

    int t64_len[2] = {sizeof(int64_t), sizeof(int64_t)};

@ -1024,11 +1064,22 @@ TEST(stmt2Case, stmt2_insert_non_statndard) {
      code = taos_stmt2_bind_param(stmt, &bindv, -1);
      checkError(stmt, code);

      code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields);
      checkError(stmt, code);
      ASSERT_EQ(fieldNum, 2);
      ASSERT_STREQ(pFields[0].name, "tbname");
      ASSERT_STREQ(pFields[1].name, "ts");

      int affected_rows;
      taos_stmt2_exec(stmt, &affected_rows);
      total_affect_rows += affected_rows;

      checkError(stmt, code);

      code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields);
      checkError(stmt, code);
      ASSERT_EQ(fieldNum, 2);
      ASSERT_STREQ(pFields[0].name, "tbname");
      ASSERT_STREQ(pFields[1].name, "ts");
    }

    ASSERT_EQ(total_affect_rows, 12);

@ -1959,27 +2010,27 @@ void stmt2_async_test(std::atomic<bool>& stop_task) {
  stop_task = true;
}

TEST(stmt2Case, async_order) {
  std::atomic<bool> stop_task(false);
  std::thread t(stmt2_async_test, std::ref(stop_task));
// TEST(stmt2Case, async_order) {
//   std::atomic<bool> stop_task(false);
//   std::thread t(stmt2_async_test, std::ref(stop_task));

  // wait up to 60 seconds
  auto start_time = std::chrono::steady_clock::now();
  while (!stop_task) {
    auto elapsed_time = std::chrono::steady_clock::now() - start_time;
    if (std::chrono::duration_cast<std::chrono::seconds>(elapsed_time).count() > 100) {
      if (t.joinable()) {
        t.detach();
      }
      FAIL() << "Test[stmt2_async_test] timed out";
      break;
    }
    std::this_thread::sleep_for(std::chrono::seconds(1));  // check once per second
  }
  if (t.joinable()) {
    t.join();
  }
}
//   // wait up to 60 seconds
//   auto start_time = std::chrono::steady_clock::now();
//   while (!stop_task) {
//     auto elapsed_time = std::chrono::steady_clock::now() - start_time;
//     if (std::chrono::duration_cast<std::chrono::seconds>(elapsed_time).count() > 100) {
//       if (t.joinable()) {
//         t.detach();
//       }
//       FAIL() << "Test[stmt2_async_test] timed out";
//       break;
//     }
//     std::this_thread::sleep_for(std::chrono::seconds(1));  // check once per second
//   }
//   if (t.joinable()) {
//     t.join();
//   }
// }

TEST(stmt2Case, rowformat_bind) {
  TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0);
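Editor's note: the tests above repeatedly exercise the prepare → get_fields → bind → exec round trip. A hedged sketch of that flow, assuming the usual `taos_stmt2_init`/`taos_stmt2_close` entry points and option struct, which are not shown in this diff; only the calls that appear in the tests are taken from it:

    // Sketch only: error handling and the TAOS_STMT2_BINDV payload are elided.
    #include "taos.h"

    void stmt2_flow(TAOS *taos) {
      TAOS_STMT2_OPTION option = {0};                 // assumed default options
      TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option);

      const char *sql = "INSERT INTO ? USING stb (t1,t2) TAGS(?,?) VALUES(?,?)";
      taos_stmt2_prepare(stmt, sql, 0);

      // get_fields may be called before or after bind; the tests assert the
      // same field list (tbname first) is reported both times.
      int fieldNum = 0;
      TAOS_FIELD_ALL *fields = NULL;
      taos_stmt2_get_fields(stmt, &fieldNum, &fields);

      // ... fill a TAOS_STMT2_BINDV and call taos_stmt2_bind_param(stmt, &bindv, -1) ...

      int affected = 0;
      taos_stmt2_exec(stmt, &affected);
      taos_stmt2_close(stmt);
    }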
@ -39,7 +39,7 @@ EDriverType tsDriverType = DRIVER_NATIVE;

void *tsDriver = NULL;

static int32_t tossGetDevelopPath(char *driverPath, const char *driverName) {
static int32_t taosGetDevelopPath(char *driverPath, const char *driverName) {
  char    appPath[PATH_MAX] = {0};
  int32_t ret = taosAppPath(appPath, PATH_MAX);
  if (ret == 0) {

@ -67,7 +67,7 @@ int32_t taosDriverInit(EDriverType driverType) {
    driverName = DRIVER_WSBSOCKET_NAME;
  }

  if (tsDriver == NULL && tossGetDevelopPath(driverPath, driverName) == 0) {
  if (tsDriver == NULL && taosGetDevelopPath(driverPath, driverName) == 0) {
    tsDriver = taosLoadDll(driverPath);
  }

@ -19,6 +19,9 @@
static TdThreadOnce tsDriverOnce = PTHREAD_ONCE_INIT;
volatile int32_t    tsDriverOnceRet = 0;

static TdThreadOnce tsInitOnce = PTHREAD_ONCE_INIT;
volatile int32_t    tsInitOnceRet = 0;

#define ERR_VOID(code) \
  terrno = code;       \
  return;

@ -89,21 +92,25 @@ setConfRet taos_set_config(const char *config) {
  return (*fp_taos_set_config)(config);
}

static void taos_init_wrapper(void) {
static void taos_init_driver(void) {
  tsDriverOnceRet = taosDriverInit(tsDriverType);
  if (tsDriverOnceRet != 0) return;

  tsDriverOnceRet = 0;
}
static void taos_init_wrapper(void) {
  if (fp_taos_init == NULL) {
    terrno = TSDB_CODE_DLL_FUNC_NOT_LOAD;
    tsDriverOnceRet = -1;
    tsInitOnceRet = -1;
  } else {
    tsDriverOnceRet = (*fp_taos_init)();
    tsInitOnceRet = (*fp_taos_init)();
  }
}

int taos_init(void) {
  (void)taosThreadOnce(&tsDriverOnce, taos_init_wrapper);
  return tsDriverOnceRet;
  (void)taosThreadOnce(&tsDriverOnce, taos_init_driver);
  (void)taosThreadOnce(&tsInitOnce, taos_init_wrapper);
  return tsInitOnceRet;
}

void taos_cleanup(void) {
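Editor's note: the hunk above splits client start-up into two independent once-guards — the first loads the driver library, the second runs the loaded `taos_init` — so a caller that only needs the driver (such as `taos_options` in the next hunk) can trigger just the first stage. A minimal standalone sketch of the pattern with plain pthreads; the stage bodies are stand-ins, not the real loader:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_once_t driverOnce = PTHREAD_ONCE_INIT;
    static pthread_once_t initOnce   = PTHREAD_ONCE_INIT;
    static volatile int driverRet = 0;
    static volatile int initRet   = 0;

    static void load_driver(void) { driverRet = 0; /* dlopen() the driver here */ }
    static void run_init(void)    { initRet = driverRet ? -1 : 0; /* call loaded init */ }

    // Safe to call from many threads; each stage runs exactly once.
    int client_init(void) {
      pthread_once(&driverOnce, load_driver);
      pthread_once(&initOnce, run_init);
      return initRet;
    }

    int main(void) { printf("init: %d\n", client_init()); return 0; }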
@ -126,11 +133,7 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) {
    terrno = TSDB_CODE_REPEAT_INIT;
    return -1;
  }

  if (taos_init() != 0) {
    terrno = TSDB_CODE_DLL_NOT_LOAD;
    return -1;
  }
  (void)taosThreadOnce(&tsDriverOnce, taos_init_driver);

  CHECK_INT(fp_taos_options);
  return (*fp_taos_options)(option, arg);

@ -143,7 +146,7 @@ int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const voi

TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port) {
  if (taos_init() != 0) {
    terrno = TSDB_CODE_DLL_NOT_LOAD;
    //terrno = TSDB_CODE_DLL_NOT_LOAD;
    return NULL;
  }

@ -646,6 +649,7 @@ TAOS_RES *taos_schemaless_insert_ttl_with_reqid_tbname_key(TAOS *taos, char *lin
}

tmq_conf_t *tmq_conf_new() {
  taos_init();
  CHECK_PTR(fp_tmq_conf_new);
  return (*fp_tmq_conf_new)();
}

@ -666,6 +670,7 @@ void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *para
}

tmq_list_t *tmq_list_new() {
  taos_init();
  CHECK_PTR(fp_tmq_list_new);
  return (*fp_tmq_list_new)();
}

@ -691,6 +696,7 @@ char **tmq_list_to_c_array(const tmq_list_t *tlist) {
}

tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen) {
  taos_init();
  CHECK_PTR(fp_tmq_consumer_new);
  return (*fp_tmq_consumer_new)(conf, errstr, errstrLen);
}

@ -860,11 +866,13 @@ TSDB_SERVER_STATUS taos_check_server_status(const char *fqdn, int port, char *de
}

void taos_write_crashinfo(int signum, void *sigInfo, void *context) {
  taos_init();
  CHECK_VOID(fp_taos_write_crashinfo);
  (*fp_taos_write_crashinfo)(signum, sigInfo, context);
}

char *getBuildInfo() {
  taos_init();
  CHECK_PTR(fp_getBuildInfo);
  return (*fp_getBuildInfo)();
}

@ -153,7 +153,7 @@ int metaUpdateMetaRsp(tb_uid_t uid, char *tbName, SSchemaWrapper *pSchema, STabl
    return terrno;
  }

  pMetaRsp->pSchemaExt = taosMemoryMalloc(pSchema->nCols * sizeof(SSchemaExt));
  pMetaRsp->pSchemaExt = taosMemoryCalloc(1, pSchema->nCols * sizeof(SSchemaExt));
  if (pMetaRsp->pSchemaExt == NULL) {
    taosMemoryFree(pMetaRsp->pSchemas);
    return terrno;

@ -692,13 +692,13 @@ static int32_t tsdbDoS3Migrate(SRTNer *rtner) {
  int32_t r = taosStatFile(fobj->fname, &size, &mtime, NULL);
  if (size > chunksize && mtime < rtner->now - tsS3UploadDelaySec) {
    if (pCfg->s3Compact && lcn < 0) {
      extern int32_t tsdbAsyncCompact(STsdb * tsdb, const STimeWindow *tw, bool sync);
      extern int32_t tsdbAsyncCompact(STsdb * tsdb, const STimeWindow *tw, bool sync, bool s3Migrate);

      STimeWindow win = {0};
      tsdbFidKeyRange(fset->fid, rtner->tsdb->keepCfg.days, rtner->tsdb->keepCfg.precision, &win.skey, &win.ekey);

      tsdbInfo("vgId:%d, async compact begin lcn: %d.", TD_VID(rtner->tsdb->pVnode), lcn);
      code = tsdbAsyncCompact(rtner->tsdb, &win, pCfg->sttTrigger == 1);
      code = tsdbAsyncCompact(rtner->tsdb, &win, pCfg->sttTrigger == 1, true);
      tsdbInfo("vgId:%d, async compact end lcn: %d.", TD_VID(rtner->tsdb->pVnode), lcn);
      goto _exit;
      return code;
@ -32,7 +32,7 @@ typedef struct SInsertParseContext {
  bool    needTableTagVal;
  bool    needRequest;  // whether or not request server
  bool    isStmtBind;   // whether is stmt bind
  bool    preCtbname;
  uint8_t stmtTbNameFlag;
} SInsertParseContext;

typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param);

@ -993,6 +993,10 @@ static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt
        code = buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", token.z);
        break;
      }
      if (pTagVals->size != 0) {
        code = buildSyntaxErrMsg(&pCxt->msg, "no mix usage for ? and tag values", token.z);
        break;
      }

      continue;
    }

@ -1026,6 +1030,10 @@ static int32_t parseTagsClauseImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt
    pTag = NULL;
  }

  if (code == TSDB_CODE_SUCCESS && !isParseBindParam) {
    pCxt->stmtTbNameFlag |= IS_FIXED_TAG;
  }

_exit:
  for (int32_t i = 0; i < taosArrayGetSize(pTagVals); ++i) {
    STagVal* p = (STagVal*)TARRAY_GET_ELEM(pTagVals, i);

@ -1416,6 +1424,7 @@ static int32_t parseUsingTableName(SInsertParseContext* pCxt, SVnodeModifyOpStmt
    return getTargetTableSchema(pCxt, pStmt);
  }
  pStmt->usingTableProcessing = true;
  pCxt->stmtTbNameFlag |= USING_CLAUSE;
  // pStmt->pSql -> stb_name [(tag1_name, ...)
  pStmt->pSql += index;
  int32_t code = parseDuplicateUsingClause(pCxt, pStmt, &pCxt->usingDuplicateTable);

@ -1465,7 +1474,7 @@ static int32_t getTableDataCxt(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS

  char    tbFName[TSDB_TABLE_FNAME_LEN];
  int32_t code = 0;
  if (pCxt->preCtbname) {
  if ((pCxt->stmtTbNameFlag & NO_DATA_USING_CLAUSE) == USING_CLAUSE) {
    tstrncpy(pStmt->targetTableName.tname, pStmt->usingTableName.tname, sizeof(pStmt->targetTableName.tname));
    tstrncpy(pStmt->targetTableName.dbname, pStmt->usingTableName.dbname, sizeof(pStmt->targetTableName.dbname));
    pStmt->targetTableName.type = TSDB_SUPER_TABLE;

@ -2764,6 +2773,7 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
  }

  if (TK_NK_QUESTION == pTbName->type) {
    pCxt->stmtTbNameFlag &= ~IS_FIXED_VALUE;
    pCxt->isStmtBind = true;
    if (NULL == pCxt->pComCxt->pStmtCb) {
      return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z);

@ -2772,14 +2782,15 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif
    char*   tbName = NULL;
    int32_t code = (*pCxt->pComCxt->pStmtCb->getTbNameFn)(pCxt->pComCxt->pStmtCb->pStmt, &tbName);
    if (TSDB_CODE_SUCCESS == code) {
      pCxt->stmtTbNameFlag |= HAS_BIND_VALUE;
      pTbName->z = tbName;
      pTbName->n = strlen(tbName);
    } else if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR) {
      pCxt->preCtbname = true;
      code = TSDB_CODE_SUCCESS;
    } else {
      return code;
    }
    if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR) {
      pCxt->stmtTbNameFlag &= ~HAS_BIND_VALUE;
      code = TSDB_CODE_SUCCESS;
    }
    return code;
  }

  if (TK_NK_ID != pTbName->type && TK_NK_STRING != pTbName->type && TK_NK_QUESTION != pTbName->type) {

@ -2788,26 +2799,34 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif

  // db.? situation, ensure that the only thing following the '.' mark is '?'
  char* tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true);
  if ((tbNameAfterDbName != NULL) && (*(tbNameAfterDbName + 1) == '?')) {
    char* tbName = NULL;
    if (NULL == pCxt->pComCxt->pStmtCb) {
      return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z);
    }
    int32_t code = (*pCxt->pComCxt->pStmtCb->getTbNameFn)(pCxt->pComCxt->pStmtCb->pStmt, &tbName);
    if (code != TSDB_CODE_SUCCESS) {
      pCxt->preCtbname = true;
  if (tbNameAfterDbName != NULL) {
    if (*(tbNameAfterDbName + 1) == '?') {
      pCxt->stmtTbNameFlag &= ~IS_FIXED_VALUE;
      char* tbName = NULL;
      if (NULL == pCxt->pComCxt->pStmtCb) {
        return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z);
      }
      int32_t code = (*pCxt->pComCxt->pStmtCb->getTbNameFn)(pCxt->pComCxt->pStmtCb->pStmt, &tbName);
      if (TSDB_CODE_SUCCESS == code) {
        pCxt->stmtTbNameFlag |= HAS_BIND_VALUE;
        pTbName->z = tbName;
        pTbName->n = strlen(tbName);
      }
      if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR) {
        pCxt->stmtTbNameFlag &= ~HAS_BIND_VALUE;
        code = TSDB_CODE_SUCCESS;
      }
    } else {
      pTbName->z = tbName;
      pTbName->n = strlen(tbName);
    }
  }

  if (pCxt->isStmtBind) {
    if (TK_NK_ID == pTbName->type || (tbNameAfterDbName != NULL && *(tbNameAfterDbName + 1) != '?')) {
      // In SQL statements, the table name has already been specified.
      pCxt->stmtTbNameFlag |= IS_FIXED_VALUE;
      parserWarn("QID:0x%" PRIx64 ", table name is specified in sql, ignore the table name in bind param",
                 pCxt->pComCxt->requestId);
      *pHasData = true;
    }
    return TSDB_CODE_SUCCESS;
  }

  if (TK_NK_ID == pTbName->type) {
    pCxt->stmtTbNameFlag |= IS_FIXED_VALUE;
  }

  *pHasData = true;

@ -2824,7 +2843,7 @@ static int32_t setStmtInfo(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt)
  SStmtCallback* pStmtCb = pCxt->pComCxt->pStmtCb;
  int32_t        code = (*pStmtCb->setInfoFn)(pStmtCb->pStmt, pStmt->pTableMeta, tags, &pStmt->targetTableName,
                                              pStmt->usingTableProcessing, pStmt->pVgroupsHashObj, pStmt->pTableBlockHashObj,
                                              pStmt->usingTableName.tname, pCxt->preCtbname);
                                              pStmt->usingTableName.tname, pCxt->stmtTbNameFlag);

  memset(&pCxt->tags, 0, sizeof(pCxt->tags));
  pStmt->pVgroupsHashObj = NULL;

@ -2880,9 +2899,6 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS
    if (TSDB_CODE_SUCCESS == code && hasData) {
      code = parseInsertTableClause(pCxt, pStmt, &token);
    }
    if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code && pCxt->preCtbname) {
      code = TSDB_CODE_TSC_STMT_TBNAME_ERROR;
    }
  }

  if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
@ -1070,10 +1070,11 @@ int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSc
}

int32_t buildStbBoundFields(SBoundColInfo boundColsInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_ALL** fields,
                            STableMeta* pMeta, void* boundTags, bool preCtbname) {
                            STableMeta* pMeta, void* boundTags, uint8_t tbNameFlag) {
  SBoundColInfo* tags = (SBoundColInfo*)boundTags;
  bool           hastag = tags != NULL;
  int32_t        numOfBound = boundColsInfo.numOfBound + (preCtbname ? 1 : 0);
  bool           hastag = (tags != NULL) && !(tbNameFlag & IS_FIXED_TAG);
  int32_t        numOfBound =
      boundColsInfo.numOfBound + ((tbNameFlag & IS_FIXED_VALUE) == 0 && (tbNameFlag & USING_CLAUSE) != 0 ? 1 : 0);
  if (hastag) {
    numOfBound += tags->mixTagsCols ? 0 : tags->numOfBound;
  }

@ -1084,7 +1085,7 @@ int32_t buildStbBoundFields(SBoundColInfo boundColsInfo, SSchema* pSchema, int32
    return terrno;
  }

  if (preCtbname && numOfBound != boundColsInfo.numOfBound) {
  if ((tbNameFlag & IS_FIXED_VALUE) == 0 && (tbNameFlag & USING_CLAUSE) != 0) {
    (*fields)[idx].field_type = TAOS_FIELD_TBNAME;
    tstrncpy((*fields)[idx].name, "tbname", sizeof((*fields)[idx].name));
    (*fields)[idx].type = TSDB_DATA_TYPE_BINARY;

@ -1188,7 +1189,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel
  return TSDB_CODE_SUCCESS;
}

int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, bool preCtbname, int32_t* fieldNum,
int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, uint8_t tbNameFlag, int32_t* fieldNum,
                               TAOS_FIELD_ALL** fields) {
  STableDataCxt* pDataBlock = (STableDataCxt*)pBlock;
  SSchema*       pSchema = getTableColumnSchema(pDataBlock->pMeta);

@ -1202,7 +1203,7 @@ int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, bool preCtbname, i
  }

  CHECK_CODE(buildStbBoundFields(pDataBlock->boundColsInfo, pSchema, fieldNum, fields, pDataBlock->pMeta, boundTags,
                                 tbNameFlag));

  return TSDB_CODE_SUCCESS;
}
@ -3134,7 +3134,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfKe
      taosMemoryFree(err);                                                                                           \
      code = TSDB_CODE_THIRDPARTY_ERROR;                                                                             \
    } else {                                                                                                         \
      stInfo("[InternalERR] write streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d, %p", toString,  \
      stDebug("[InternalERR] write streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d, %p", toString, \
              funcname, vLen, ttlVLen, wrapper);                                                                     \
    }                                                                                                                \
    taosMemoryFree(ttlV);                                                                                            \

@ -4593,7 +4593,7 @@ int32_t streamStatePutBatchOptimize(SStreamState* pState, int32_t cfIdx, rocksdb
  int32_t klen = ginitDict[cfIdx].enFunc((void*)key, buf);

  ginitDict[cfIdx].toStrFunc((void*)key, toString);
  qInfo("[InternalERR] write cfIdx:%d key:%s vlen:%d", cfIdx, toString, vlen);
  stDebug("[InternalERR] write cfIdx:%d key:%s vlen:%d", cfIdx, toString, vlen);

  char*   ttlV = tmpBuf;
  int32_t ttlVLen = ginitDict[cfIdx].enValueFunc(dst, size, ttl, &ttlV);

@ -1029,7 +1029,7 @@ void mpUpdateSystemAvailableMemorySize() {

  atomic_store_64(&tsCurrentAvailMemorySize, sysAvailSize);

  uDebug("system available memory size: %" PRId64, sysAvailSize);
  uTrace("system available memory size: %" PRId64, sysAvailSize);
}

void mpSchedTrim(int64_t* loopTimes) {

@ -729,7 +729,7 @@ int32_t mptGetMemPoolMaxMemSize(void* pHandle, int64_t* maxSize) {
  }

  if (TSDB_CODE_SUCCESS != code) {
    uError("get system avaiable memory size failed, error: 0x%x", code);
    uError("get system available memory size failed, error: 0x%x", code);
    return code;
  }
@ -19,6 +19,9 @@ This manual is intended to give developers a comprehensive guidance to test TDen
> - The commands and scripts below are verified on Linux (Ubuntu 18.04/20.04/22.04).
> - [taos-connector-python](https://github.com/taosdata/taos-connector-python) is used by tests written in Python, which requires Python 3.7+.
> - The commands and steps described below are to run the tests on a single host.
> - The testing framework is currently compatible with Python versions 3.8 through 3.10.
> - A virtual environment is advised when setting up the environment; please refer to [venv](https://docs.python.org/3/library/venv.html) for details. A minimal setup sketch follows these notes.
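A minimal sketch of that setup, assuming the Python connector is published on PyPI as `taospy` (the package name is an assumption; the repository linked above is authoritative):

    # Create and activate an isolated environment for the test framework
    python3 -m venv .venv            # requires Python 3.8-3.10 per the note above
    . .venv/bin/activate
    pip3 install --upgrade pip
    pip3 install taospy              # assumed PyPI name of taos-connector-python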

# 2. Prerequisites
@ -263,7 +263,7 @@ class TDTestCase(TBase):
            ['-a "abc"', "[0x80000357]"],
        ]
        for arg in args:
            rlist = self.taos("Z 0 " + arg[0])
            rlist = self.taos("-Z 0 " + arg[0])
            if arg[1] != None:
                self.checkListString(rlist, arg[1])

@ -340,9 +340,14 @@ class TDTestCase(TBase):
        self.checkExcept(taos + " -s 'show dnodes;' " + option)

    def checkModeVersion(self):

        # check default conn mode
        #DEFAULT_CONN = "WebSocket"
        DEFAULT_CONN = "Native"

        # results
        results = [
            "WebSocket Client Version",
            f"{DEFAULT_CONN} Client Version",
            "2022-10-01 00:01:39.000",
            "Query OK, 100 row(s) in set"
        ]

@ -351,8 +356,10 @@ class TDTestCase(TBase):
        cmd = f"-s 'select ts from test.d0'"
        rlist = self.taos(cmd, checkRun = True)
        self.checkManyString(rlist, results)

        # websocket
        cmd = f"-Z 1 -s 'select ts from test.d0'"
        results[0] = "WebSocket Client Version"
        rlist = self.taos(cmd, checkRun = True)
        self.checkManyString(rlist, results)

@ -196,9 +196,9 @@ class TBase:
        tdSql.checkFirstValue(sql, expect)

        # order by desc limit 1 with last
        sql = f"select first({col}) from {self.db}.{self.db}."
        sql = f"select first({col}) from {self.db}.{self.stb}"
        expect = tdSql.getFirstValue(sql)
        sql = f"select {col} from {self.db}.{self.db}. order by _c0 asc limit 1"
        sql = f"select {col} from {self.db}.{self.stb} order by _c0 asc limit 1"
        tdSql.checkFirstValue(sql, expect)
@ -1,52 +0,0 @@
[02/10 13:52:16.164959] SUCC: created database (test)
[02/10 13:52:16.182024] INFO: start creating 1000 table(s) with 8 thread(s)
[02/10 13:52:16.396337] SUCC: Spent 0.2140 seconds to create 1000 table(s) with 8 thread(s) speed: 4673 tables/s, already exist 0 table(s), actual 1000 table(s) pre created, 0 table(s) will be auto created
[02/10 13:53:05.155428] SUCC: thread[2] progressive mode, completed total inserted rows: 12500000, 339193.01 records/second
[02/10 13:53:05.160652] SUCC: thread[7] progressive mode, completed total inserted rows: 12500000, 341816.65 records/second
[02/10 13:53:05.207601] SUCC: thread[0] progressive mode, completed total inserted rows: 12500000, 340556.51 records/second
[02/10 13:53:05.215370] SUCC: thread[4] progressive mode, completed total inserted rows: 12500000, 338804.97 records/second
[02/10 13:53:05.224077] SUCC: thread[5] progressive mode, completed total inserted rows: 12500000, 338596.28 records/second
[02/10 13:53:05.249786] SUCC: thread[1] progressive mode, completed total inserted rows: 12500000, 339208.40 records/second
[02/10 13:53:05.256970] SUCC: thread[3] progressive mode, completed total inserted rows: 12500000, 339174.04 records/second
[02/10 13:53:05.274900] SUCC: thread[6] progressive mode, completed total inserted rows: 12500000, 339551.12 records/second
[02/10 13:53:05.275900] SUCC: Spent 48.867685 (real 36.806958) seconds to insert rows: 100000000 with 8 thread(s) into test 2046342.08 (real 2716877.61) records/second
[02/10 13:53:05.275909] SUCC: insert delay, min: 11.2580ms, avg: 29.4456ms, p90: 32.7750ms, p95: 34.1120ms, p99: 39.5900ms, max: 70.3780ms
[02/12 15:46:06.469780] SUCC: created database (test)
[02/12 15:46:06.499844] INFO: start creating 10000 table(s) with 8 thread(s)
[02/12 15:46:08.185009] SUCC: Spent 1.6860 seconds to create 10000 table(s) with 8 thread(s) speed: 5931 tables/s, already exist 0 table(s), actual 10000 table(s) pre created, 0 table(s) will be auto created
[02/12 15:46:57.356674] SUCC: thread[0] progressive mode, completed total inserted rows: 12500000, 339076.93 records/second
[02/12 15:46:57.434553] SUCC: thread[1] progressive mode, completed total inserted rows: 12500000, 338528.52 records/second
[02/12 15:46:57.452522] SUCC: thread[2] progressive mode, completed total inserted rows: 12500000, 339844.37 records/second
[02/12 15:46:57.452921] SUCC: thread[5] progressive mode, completed total inserted rows: 12500000, 339349.90 records/second
[02/12 15:46:57.463726] SUCC: thread[4] progressive mode, completed total inserted rows: 12500000, 339986.37 records/second
[02/12 15:46:57.466467] SUCC: thread[3] progressive mode, completed total inserted rows: 12500000, 339785.50 records/second
[02/12 15:46:57.499118] SUCC: thread[6] progressive mode, completed total inserted rows: 12500000, 339326.86 records/second
[02/12 15:46:57.501694] SUCC: thread[7] progressive mode, completed total inserted rows: 12500000, 338309.30 records/second
[02/12 15:46:57.502535] SUCC: Spent 49.309586 (real 36.843268) seconds to insert rows: 100000000 with 8 thread(s) into test 2028003.24 (real 2714200.05) records/second
[02/12 15:46:57.502546] SUCC: insert delay, min: 10.9580ms, avg: 29.4746ms, p90: 32.6960ms, p95: 33.8290ms, p99: 36.8390ms, max: 77.9940ms
[02/14 15:27:32.543409] SUCC: created database (test)
[02/14 15:27:32.568881] INFO: start creating 10000 table(s) with 8 thread(s)
[02/14 15:27:34.249759] SUCC: Spent 1.6810 seconds to create 10000 table(s) with 8 thread(s) speed: 5949 tables/s, already exist 0 table(s), actual 10000 table(s) pre created, 0 table(s) will be auto created
[02/14 15:28:26.165699] SUCC: thread[0] progressive mode, completed total inserted rows: 12500000, 321266.73 records/second
[02/14 15:28:26.281188] SUCC: thread[4] progressive mode, completed total inserted rows: 12500000, 319863.00 records/second
[02/14 15:28:26.326975] SUCC: thread[5] progressive mode, completed total inserted rows: 12500000, 321802.51 records/second
[02/14 15:28:26.328615] SUCC: thread[6] progressive mode, completed total inserted rows: 12500000, 321804.13 records/second
[02/14 15:28:26.379189] SUCC: thread[7] progressive mode, completed total inserted rows: 12500000, 320719.22 records/second
[02/14 15:28:26.400891] SUCC: thread[1] progressive mode, completed total inserted rows: 12500000, 321512.59 records/second
[02/14 15:28:26.470912] SUCC: thread[2] progressive mode, completed total inserted rows: 12500000, 319026.94 records/second
[02/14 15:28:26.565079] SUCC: thread[3] progressive mode, completed total inserted rows: 12500000, 317248.21 records/second
[02/14 15:28:26.566013] SUCC: Spent 52.307623 (real 39.013939) seconds to insert rows: 100000000 with 8 thread(s) into test 1911767.24 (real 2563186.45) records/second
[02/14 15:28:26.566024] SUCC: insert delay, min: 11.1290ms, avg: 31.2112ms, p90: 35.4900ms, p95: 37.0580ms, p99: 41.5180ms, max: 68.5900ms
[02/17 14:09:42.181835] SUCC: created database (test)
[02/17 14:09:42.210373] INFO: start creating 10000 table(s) with 8 thread(s)
[02/17 14:09:44.199467] SUCC: Spent 1.9890 seconds to create 10000 table(s) with 8 thread(s) speed: 5028 tables/s, already exist 0 table(s), actual 10000 table(s) pre created, 0 table(s) will be auto created
[02/17 14:10:32.845475] SUCC: thread[3] progressive mode, completed total inserted rows: 12500000, 338184.62 records/second
[02/17 14:10:32.872586] SUCC: thread[4] progressive mode, completed total inserted rows: 12500000, 338445.48 records/second
[02/17 14:10:32.873271] SUCC: thread[1] progressive mode, completed total inserted rows: 12500000, 339256.73 records/second
[02/17 14:10:32.938231] SUCC: thread[5] progressive mode, completed total inserted rows: 12500000, 338737.29 records/second
[02/17 14:10:32.947655] SUCC: thread[2] progressive mode, completed total inserted rows: 12500000, 338938.99 records/second
[02/17 14:10:32.952985] SUCC: thread[0] progressive mode, completed total inserted rows: 12500000, 338652.89 records/second
[02/17 14:10:32.962370] SUCC: thread[6] progressive mode, completed total inserted rows: 12500000, 338890.00 records/second
[02/17 14:10:32.998729] SUCC: thread[7] progressive mode, completed total inserted rows: 12500000, 339216.19 records/second
[02/17 14:10:32.999680] SUCC: Spent 48.790057 (real 36.896020) seconds to insert rows: 100000000 with 8 thread(s) into test 2049597.93 (real 2710319.43) records/second
[02/17 14:10:32.999696] SUCC: insert delay, min: 10.7720ms, avg: 29.5168ms, p90: 32.6910ms, p95: 33.8370ms, p99: 36.6750ms, max: 76.0590ms
@ -37,9 +37,6 @@ import taos
import taosrest
import taosws

from taos.cinterface import *
taos.taos_options(6, "native")

def checkRunTimeError():
    import win32gui
    timeCount = 0

@ -261,9 +258,8 @@ if __name__ == "__main__":
    #
    # do exeCmd command
    #
    taosAdapter = True  # default is websocket, so must start taosAdapter
    if not execCmd == "":
        if taosAdapter or restful or websocket:
        if taosAdapter or taosAdapter or restful or websocket:
            tAdapter.init(deployPath)
        else:
            tdDnodes.init(deployPath)

@ -68,14 +68,10 @@ class TDTestCase(TBase):
        tdSql.checkData(0, 0, 10*10000)

        # add normal table
        cmd = "%s -N -I sml -t 2 -n 10000 -y" % binPath
        tdLog.info("%s" % cmd)
        os.system("%s" % cmd)

        tdSql.query("select count(*) from test.d0")
        tdSql.checkData(0, 0, 1*10000)
        tdSql.query("select count(*) from test.d1")
        tdSql.checkData(0, 0, 1*10000)
        cmd = "-N -I sml -t 2 -n 10000 -y"
        rlist = self.benchmark(cmd, checkRun = False)
        # expect failed
        self.checkListString(rlist, "schemaless cannot work without stable")

    def stop(self):
        tdSql.close()
@ -68,12 +68,15 @@ class TDTestCase(TBase):
        os.environ['TDENGINE_CLOUD_DSN'] = ""

    def checkCommandLine(self):
        # default CONN_MODE
        DEFAULT_CONN_MODE = "Native"

        # modes
        modes = ["", "-Z 1 -B 1", "-Z websocket", "-Z 0", "-Z native -B 2"]
        # result
        Rows = "insert rows: 9990"
        results1 = [
            ["Connect mode is : WebSocket", Rows],
            [f"Connect mode is : {DEFAULT_CONN_MODE}", Rows],
            ["Connect mode is : WebSocket", Rows],
            ["Connect mode is : WebSocket", Rows],
            ["Connect mode is : Native", Rows],

@ -112,7 +115,7 @@ class TDTestCase(TBase):
        # command
        #
        self.benchmarkCmd("-h 127.0.0.1", 5, 100, 10, ["insert rows: 500"])
        self.benchmarkCmd("-h 127.0.0.1 -P 6041 -uroot -ptaosdata", 5, 100, 10, ["insert rows: 500"])
        self.benchmarkCmd("-h 127.0.0.1 -uroot -ptaosdata", 5, 100, 10, ["insert rows: 500"])
        self.benchmarkCmd("-Z 0 -h 127.0.0.1 -P 6030 -uroot -ptaosdata", 5, 100, 10, ["insert rows: 500"])

        #

@ -120,7 +123,7 @@ class TDTestCase(TBase):
        #

        # 6041 is default
        options = "-h 127.0.0.1 -P 6041 -uroot -ptaosdata"
        options = "-Z 1 -h 127.0.0.1 -P 6041 -uroot -ptaosdata"
        json = "tools/benchmark/basic/json/connModePriorityErrHost.json"
        self.insertBenchJson(json, options, True)

@ -221,7 +221,7 @@ class TDTestCase(TBase):
    def checkTmqJson(self, benchmark, json):
        OK_RESULT = "Consumed total msgs: 30, total rows: 300000"
        cmd = benchmark + " -f " + json
        output,error = frame.eos.run(cmd, 600)
        output, error, code = frame.eos.run(cmd, 600)
        if output.find(OK_RESULT) != -1:
            tdLog.info(f"succ: {cmd} found '{OK_RESULT}'")
        else:
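Editor's note: the `-Z` matrix in the test above maps directly onto the CLI. A quick sketch of the same mode selection from a shell — host, port, and credentials here are the test defaults, not requirements:

    # Default mode (Native in this build), WebSocket, and Native explicitly
    taosBenchmark -t 5 -n 100 -y
    taosBenchmark -Z 1 -t 5 -n 100 -y                 # 1 / "websocket" selects WebSocket
    taosBenchmark -Z 0 -h 127.0.0.1 -P 6030 -uroot -ptaosdata -t 5 -n 100 -y   # 0 / "native"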
@ -29,7 +29,7 @@ class TDTestCase(TBase):

    def run(self):
        binPath = etool.benchMarkFile()
        cmd = "%s -t 1 -n 1 -y -W http://localhost:6041 -D 30" % binPath
        cmd = "%s -t 1 -n 1 -y -W http://localhost:6041 " % binPath
        tdLog.info("%s" % cmd)
        os.system("%s" % cmd)
        tdSql.execute("reset query cache")

@ -158,7 +158,7 @@ class TDTestCase(TBase):
    def basicCommandLine(self, tmpdir):
        # command and check result
        checkItems = [
            [f"-h 127.0.0.1 -P 6041 -uroot -ptaosdata -A -N -o {tmpdir}", ["OK: Database test dumped"]],
            [f"-Z 0 -h 127.0.0.1 -P 6030 -uroot -ptaosdata -A -N -o {tmpdir}", ["OK: Database test dumped"]],
            [f"-r result -a -e test d0 -o {tmpdir}", ["OK: table: d0 dumped", "OK: 100 row(s) dumped out!"]],
            [f"-n -D test -o {tmpdir}", ["OK: Database test dumped", "OK: 205 row(s) dumped out!"]],
            [f"-Z 0 -P 6030 -n -D test -o {tmpdir}", ["OK: Database test dumped", "OK: 205 row(s) dumped out!"]],

@ -348,19 +348,12 @@ class TDTestCase(TBase):
        self.exceptCommandLine(taosdump, db, stb, tmpdir)
        tdLog.info("4. except command line ................................. [Passed]")

        json = "./tools/taosdump/native/json/insertOther.json"
        # insert
        db, stb, childCount, insertRows = self.insertData(json)
        # dump in/out
        self.dumpInOutMode("", db , json, tmpdir)
        tdLog.info("5. native varbinary geometry ........................... [Passed]")

        #
        # check connMode
        #

        self.checkConnMode(db, stb, childCount, insertRows, tmpdir)
        tdLog.info("6. check conn mode ..................................... [Passed]")
        tdLog.info("5. check conn mode ..................................... [Passed]")


    def stop(self):

@ -38,8 +38,6 @@ from util.taosadapter import *
import taos
import taosrest

from taos.cinterface import *
taos.taos_options(6, "native")

def checkRunTimeError():
    import win32gui
@ -126,7 +126,7 @@ python3 mockdatasource.py
python3 fast_write_example.py

# 20
pip3 install kafka-python
pip3 install kafka-python==2.1.2
python3 kafka_example_consumer.py

# 21

@ -196,4 +196,5 @@ check_transactions || exit 1
reset_cache || exit 1
python3 tmq_websocket_example.py

python3 stmt2_native.py
python3 stmt2_native.py

@ -108,7 +108,6 @@
,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/custom_col_tag.py

,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/default_json.py
,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/default_tmq_json.py
,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/demo.py

,,y,army,./pytest.sh python3 ./test.py -f tools/benchmark/basic/csv-export.py
@ -9,6 +9,9 @@
set +e
#set -x

export ASAN_OPTIONS=detect_odr_violation=0
echo "forbid check ODR violation."

FILE_NAME=
VALGRIND=0
TEST=0

@ -40,9 +40,6 @@ import taos
import taosrest
import taosws

from taos.cinterface import *
taos.taos_options(6, "native")

def checkRunTimeError():
    import win32gui
    timeCount = 0

@ -73,6 +70,7 @@ def get_local_classes_in_order(file_path):
def dynamicLoadModule(fileName):
    moduleName = fileName.replace(".py", "").replace(os.sep, ".")
    return importlib.import_module(moduleName, package='..')

#
# run case on previous cluster
#

@ -254,9 +252,8 @@ if __name__ == "__main__":
    #
    # do exeCmd command
    #
    taosAdapter = True  # default is websocket, so must start taosAdapter
    if not execCmd == "":
        if taosAdapter or restful or websocket:
        if restful or websocket:
            tAdapter.init(deployPath)
        else:
            tdDnodes.init(deployPath)

@ -295,7 +292,7 @@ if __name__ == "__main__":
        if valgrind:
            time.sleep(2)

        if taosAdapter or restful or websocket:
        if restful or websocket:
            toBeKilled = "taosadapter"

            # killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled

@ -391,7 +388,7 @@ if __name__ == "__main__":
            tdDnodes.deploy(1,updateCfgDict)
            tdDnodes.start(1)
            tdCases.logSql(logSql)
            if taosAdapter or restful or websocket:
            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()

@ -431,7 +428,7 @@ if __name__ == "__main__":
                tdDnodes.starttaosd(dnode.index)
            tdCases.logSql(logSql)

            if taosAdapter or restful or websocket:
            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()

@ -553,7 +550,7 @@ if __name__ == "__main__":
        except:
            pass

        if taosAdapter or restful or websocket:
        if restful or websocket:
            tAdapter.init(deployPath, masterIp)
            tAdapter.stop(force_kill=True)

@ -563,7 +560,7 @@ if __name__ == "__main__":
            tdDnodes.start(1)
            tdCases.logSql(logSql)

            if taosAdapter or restful or websocket:
            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()

@ -618,7 +615,7 @@ if __name__ == "__main__":
                tdDnodes.starttaosd(dnode.index)
            tdCases.logSql(logSql)

            if taosAdapter or restful or websocket:
            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()
@ -17,6 +17,7 @@
#define PUB_H_

#include <stdio.h>
#include <stdbool.h>
#include <ctype.h>
#include <inttypes.h>
#include <stdint.h>

@ -72,10 +73,11 @@ int8_t getConnMode(char *arg);
char*   strToLowerCopy(const char *str);
int32_t parseDsn(char* dsn, char **host, char **port, char **user, char **pwd, char* error);

int32_t setConnMode(int8_t connMode, char *dsn);
int32_t setConnMode(int8_t connMode, char *dsn, bool show);

uint16_t defaultPort(int8_t connMode, char *dsn);

int8_t defaultMode(int8_t connMode, char *dsn);
// working connect mode
int8_t workingMode(int8_t connMode, char *dsn);

#endif  // PUB_H_
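Editor's note: the header change threads a `show` flag through `setConnMode` and renames `defaultMode` to `workingMode`. A hedged sketch of how a tool consumes the pair — the banner text mirrors the tests elsewhere in this diff; the sentinel value and everything else here are illustrative assumptions:

    #include <stdbool.h>
    #include <stdio.h>
    // declared in pub.h, per the hunk above
    int32_t setConnMode(int8_t connMode, char *dsn, bool show);
    int8_t  workingMode(int8_t connMode, char *dsn);

    #define CONN_MODE_INVALID (-1)  // assumed sentinel for "not set"

    int tool_start(int8_t cliMode, char *dsn, bool interactive) {
      // Interactive tools pass show=true so "Connect mode is : ..." prints once;
      // the shell passes false and builds its own banner from workingMode().
      if (setConnMode(cliMode, dsn, interactive) != 0) return -1;
      printf("working mode resolved to %d\n", workingMode(cliMode, dsn));
      return 0;
    }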
@ -219,9 +219,6 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) {
      break;
#if defined(LINUX)
    case 'o':
      printf(" -o need todo optins.\n");
      // need todo pass tsLogOutput to engine
      /*
      if (strlen(arg) >= PATH_MAX) {
        printf("failed to set log output since length overflow, max length is %d\r\n", PATH_MAX);
        return TSDB_CODE_INVALID_CFG;

@ -235,7 +232,6 @@ static int32_t shellParseSingleOpt(int32_t key, char *arg) {
        printf("failed to expand log output: '%s' since %s\r\n", arg, tstrerror(terrno));
        return terrno;
      }
      */
      break;
#endif
    case 'E':

@ -1355,17 +1355,22 @@ TAOS* createConnect(SShellArgs *pArgs) {
  }

  // connect main
  TAOS *taos = NULL;
  if (pArgs->auth) {
    return taos_connect_auth(host, user, pArgs->auth, pArgs->database, port);
    taos = taos_connect_auth(host, user, pArgs->auth, pArgs->database, port);
  } else {
    return taos_connect(host, user, pwd, pArgs->database, port);
    taos = taos_connect(host, user, pwd, pArgs->database, port);
  }

  // host and user point into the dsnc buffer, so free it only after connecting
  free(dsnc);
  return taos;
}

int32_t shellExecute(int argc, char *argv[]) {
  int32_t code = 0;
  printf(shell.info.clientVersion, shell.info.cusName,
         defaultMode(shell.args.connMode, shell.args.dsn) == CONN_MODE_NATIVE ? STR_NATIVE : STR_WEBSOCKET,
         workingMode(shell.args.connMode, shell.args.dsn) == CONN_MODE_NATIVE ? STR_NATIVE : STR_WEBSOCKET,
         taos_get_client_info(), shell.info.cusName);
  fflush(stdout);

@ -101,7 +101,7 @@ int main(int argc, char *argv[]) {
    return -1;
  }

  if (setConnMode(shell.args.connMode, shell.args.dsn)) {
  if (setConnMode(shell.args.connMode, shell.args.dsn, false)) {
    return -1;
  }

@ -91,7 +91,7 @@
}

// set conn mode
int32_t setConnMode(int8_t connMode, char *dsn) {
int32_t setConnMode(int8_t connMode, char *dsn, bool show) {
  // check default
  if (connMode == CONN_MODE_INVALID) {
    if (dsn && dsn[0] != 0) {

@ -109,11 +109,16 @@ int32_t setConnMode(int8_t connMode, char *dsn) {
    fprintf(stderr, "failed to load driver. since %s [0x%08X]\r\n", taos_errstr(NULL), taos_errno(NULL));
    return code;
  }

  if (show) {
    fprintf(stdout, "\nConnect mode is : %s\n\n", strMode);
  }

  return 0;
}

// default mode
int8_t defaultMode(int8_t connMode, char *dsn) {
int8_t workingMode(int8_t connMode, char *dsn) {
  int8_t mode = connMode;
  if (connMode == CONN_MODE_INVALID) {
    // no input from command line or config

@ -129,10 +134,15 @@ int8_t defaultMode(int8_t connMode, char *dsn) {

// get default port
uint16_t defaultPort(int8_t connMode, char *dsn) {
  // port 0 is default
  return 0;

  /*
  // consistent with setConnMode
  int8_t mode = defaultMode(connMode, dsn);
  int8_t mode = workingMode(connMode, dsn);

  // default port
  return mode == CONN_MODE_NATIVE ? DEFAULT_PORT_NATIVE : DEFAULT_PORT_WS_LOCAL;
  */
}
@ -256,13 +256,13 @@ char* genPrepareSql(SSuperTable *stbInfo, char* tagData, uint64_t tableSeq, char
                 "INSERT INTO ? USING `%s`.`%s` TAGS (%s) %s VALUES(?,%s)",
                 db, stbInfo->stbName, tagQ, ttl, colQ);
  } else {
    if (g_arguments->connMode == CONN_MODE_NATIVE) {
    if (workingMode(g_arguments->connMode, g_arguments->dsn) == CONN_MODE_NATIVE) {
      // native
      n = snprintf(prepare + len, TSDB_MAX_ALLOWED_SQL_LEN - len,
                   "INSERT INTO ? VALUES(?,%s)", colQ);
    } else {
      // websocket
      bool ntb = stbInfo->tags == NULL || stbInfo->tags->size == 0;  // nomral table
      bool ntb = stbInfo->tags == NULL || stbInfo->tags->size == 0;  // normal table
      colNames = genColNames(stbInfo->cols, !ntb);
      n = snprintf(prepare + len, TSDB_MAX_ALLOWED_SQL_LEN - len,
                   "INSERT INTO `%s`.`%s`(%s) VALUES(%s,%s)", db, stbInfo->stbName, colNames,

@ -168,7 +168,7 @@ int main(int argc, char* argv[]) {
  }

  // conn mode
  if (setConnMode(g_arguments->connMode, g_arguments->dsn) != 0) {
  if (setConnMode(g_arguments->connMode, g_arguments->dsn, true) != 0) {
    exitLog();
    return -1;
  }

@ -150,6 +150,7 @@ static struct argp_option options[] = {
    {"inspect", 'I', 0, 0,
     "inspect avro file content and print on screen", 10},
    {"no-escape", 'n', 0, 0, "No escape char '`'. Default is using it.", 10},
    {"restful", 'R', 0, 0, "Use RESTful interface to connect server", 11},
    {"cloud", 'C', "CLOUD_DSN", 0, OLD_DSN_DESC, 11},
    {"timeout", 't', "SECONDS", 0, "The timeout seconds for "
     "websocket to interact."},

@ -691,7 +692,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
      }
      g_args.thread_num = atoi((const char *)arg);
      break;

    case 'R':
      warnPrint("%s\n", "'-R' is not supported, ignore this options.");
      break;
    case 'C':
    case 'X':
      if (arg) {

@ -10910,7 +10913,7 @@ int main(int argc, char *argv[]) {
  }

  // conn mode
  if (setConnMode(g_args.connMode, g_args.dsn) != 0) {
  if (setConnMode(g_args.connMode, g_args.dsn, true) != 0) {
    return -1;
  }
@ -400,6 +400,7 @@ function install_anode_venv() {
    ${csudo}${venvDir}/bin/pip3 install uwsgi
    ${csudo}${venvDir}/bin/pip3 install torch --index-url https://download.pytorch.org/whl/cpu
    ${csudo}${venvDir}/bin/pip3 install --upgrade keras
    ${csudo}${venvDir}/bin/pip3 install requests

    echo -e "Install python library for venv completed!"
}
@ -87,7 +87,8 @@ int smlProcess_telnet_Test() {
  const char *sql1[] = {"sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0",
                        "sys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 ",
                        "sys.if.bytes.out 1479496102 1.3E3 network=tcp",
                        " sys.procs.running 1479496100 42 host=web01 "};
                        " sys.procs.running 1479496100 42 host=web01 ",
                        " newline 1479496100 42 host=web\n01 t=fsb\n "};

  // for(int i = 0; i < 4; i++){
  //   strncpy(sql[i], sql1[i], 128);

@ -2355,12 +2356,35 @@ int sml_td17324_Test() {
  return code;
}

int smlProcess_34114_Test() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);

  TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_34114_db schemaless 1");
  taos_free_result(pRes);

  pRes = taos_query(taos, "use sml_34114_db");
  taos_free_result(pRes);

  char *sql = {"sys.if.bytes.out 1479496100 1.3E0 host=web01 interface=eth0 \nsys.if.bytes.out 1479496101 1.3E1 interface=eth0 host=web01 "};
  int32_t totalRows = 0;
  pRes = taos_schemaless_insert_raw(taos, sql, strlen(sql), &totalRows, TSDB_SML_TELNET_PROTOCOL,
                                    TSDB_SML_TIMESTAMP_NANO_SECONDS);
  printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
  int code = taos_errno(pRes);
  taos_free_result(pRes);
  taos_close(taos);

  return code;
}

int main(int argc, char *argv[]) {
  if (argc == 2) {
    taos_options(TSDB_OPTION_CONFIGDIR, argv[1]);
  }

  int ret = smlProcess_json0_Test();
  int ret = smlProcess_34114_Test();
  ASSERT(!ret);
  ret = smlProcess_json0_Test();
  ASSERT(!ret);
  ret = sml_ts5528_test();
  ASSERT(!ret);