Merge branch '3.0' into merge/3.0to3.3.6

This commit is contained in:
Simon Guan 2025-03-20 19:45:31 +08:00
commit 3dc4731be3
380 changed files with 12830 additions and 8141 deletions

View File

@ -81,6 +81,8 @@ jobs:
-DBUILD_KEEPER=true \
-DBUILD_HTTP=false \
-DBUILD_TEST=true \
-DWEBSOCKET=true \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_DEPENDENCY_TESTS=false
make -j 4
sudo make install
@ -88,6 +90,16 @@ jobs:
which taosadapter
which taoskeeper
- name: Statistics ldd
run: |
find ${{ github.workspace }}/debug/build/lib -type f -name "*.so" -print0 | xargs -0 ldd || true
find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ldd || true
- name: Statistics size
run: |
find ${{ github.workspace }}/debug/build/lib -type f -print0 | xargs -0 ls -lhrS
find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ls -lhrS
- name: Start taosd
run: |
cp /etc/taos/taos.cfg ./

View File

@ -34,7 +34,9 @@ on:
type: string
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}-${{ github.event.inputs.specified_target_branch }}-${{ github.event.inputs.specified_pr_number }}-TDengine
group: ${{ github.workflow }}-${{ github.event_name }}-
${{ github.event_name == 'pull_request' && github.event.pull_request.base.ref || inputs.specified_target_branch }}-
${{ github.event_name == 'pull_request' && github.event.pull_request.number || inputs.specified_pr_number }}-TDengine
cancel-in-progress: true
env:
@ -43,27 +45,24 @@ env:
jobs:
run-tests-on-linux:
uses: taosdata/.github/.github/workflows/run-tests-on-linux.yml@main
if: ${{ github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch'}}
with:
tdinternal: false
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_source_branch }}
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_target_branch }}
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_pr_number }}
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }}
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_target_branch }}
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_pr_number }}
run-tests-on-mac:
uses: taosdata/.github/.github/workflows/run-tests-on-macos.yml@main
if: ${{ github.event_name == 'pull_request' }}
with:
tdinternal: false
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_source_branch }}
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_target_branch }}
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_pr_number }}
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }}
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_target_branch }}
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_pr_number }}
run-tests-on-windows:
uses: taosdata/.github/.github/workflows/run-tests-on-windows.yml@main
if: ${{ github.event_name == 'pull_request' }}
with:
tdinternal: false
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_source_branch }}
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_target_branch }}
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || github.event.inputs.specified_pr_number }}
specified_source_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_source_branch }}
specified_target_branch: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_target_branch }}
specified_pr_number: ${{ github.event_name == 'pull_request' && 'unavailable' || inputs.specified_pr_number }}

View File

@ -1,3 +1,5 @@
# Run unit-test and system-test cases for TDgpt when TDgpt code is changed.
name: TDgpt Test
on:

View File

@ -1,3 +1,5 @@
# Scheduled updates for the TDgpt service.
name: TDgpt Update Service
on:

.gitignore vendored
View File

@ -132,7 +132,6 @@ tools/THANKS
tools/NEWS
tools/COPYING
tools/BUGS
tools/taos-tools
tools/taosws-rs
tags
.clangd

View File

@ -80,7 +80,7 @@ TDengine can currently be installed and run on Linux, Windows, macOS, and other platforms.
### Ubuntu 18.04, 20.04, 22.04
```bash
sudo apt-get udpate
sudo apt-get update
sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
libsnappy-dev liblzma-dev zlib1g-dev pkg-config
```
@ -188,7 +188,7 @@ cmake .. && cmake --build .
If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.
If you want to compile taosKeeper, you need to add the `--DBUILD_KEEPER=true` option.
If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
</details>

View File

@ -8,7 +8,7 @@
</a>
</p>
[![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/taosdata/tdengine/taosd-ci-build.yml)](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
[![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/taosdata/tdengine/tdengine-test.yml)](https://github.com/taosdata/TDengine/actions/workflows/tdengine-test.yml)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=3.0)](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
[![GitHub commit activity](https://img.shields.io/github/commit-activity/m/taosdata/tdengine)](https://github.com/feici02/TDengine/commits/main/)
<br />
@ -174,7 +174,7 @@ make
If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.
If you want to compile taosKeeper, you need to add the `--DBUILD_KEEPER=true` option.
If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
You can use Jemalloc as memory allocator instead of glibc:
@ -206,7 +206,7 @@ cmake .. && cmake --build .
If you want to compile taosAdapter, you need to add the `-DBUILD_HTTP=false` option.
If you want to compile taosKeeper, you need to add the `--DBUILD_KEEPER=true` option.
If you want to compile taosKeeper, you need to add the `-DBUILD_KEEPER=true` option.
</details>

View File

@ -191,7 +191,7 @@ INTERVAL(interval_val [, interval_offset])
The time window clause includes 3 sub-clauses:
- INTERVAL clause: used to generate windows of equal time periods, where interval_val specifies the size of each time window, and interval_offset specifies;
- INTERVAL clause: used to generate windows of equal time periods, where interval_val specifies the size of each time window, and interval_offset specifies its starting offset. By default, windows begin at Unix time 0 (1970-01-01 00:00:00 UTC). If interval_offset is specified, the windows start from "Unix time 0 + interval_offset";
- SLIDING clause: used to specify the time the window slides forward;
- FILL: used to specify the filling mode of data in case of missing data in the window interval.
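As a sketch of how these sub-clauses combine, assuming a hypothetical table `meters` with a numeric column `current`:

```sql
-- 10-minute windows sliding forward every 5 minutes; gaps filled with the previous value
SELECT _wstart, AVG(current) FROM meters
  WHERE ts >= '2025-01-01 00:00:00' AND ts < '2025-01-02 00:00:00'
  INTERVAL(10m) SLIDING(5m) FILL(PREV);
```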

View File

@ -69,10 +69,10 @@ This statement creates a subscription that includes all table data in the databa
## Delete Topic
If you no longer need to subscribe to the data, you can delete the topic. Note that only topics that are not currently subscribed can be deleted.
If you no longer need to subscribe to the data, you can delete the topic. If the topic is currently subscribed to by a consumer, it can be forcibly deleted using the FORCE syntax; after a forced deletion, the subscribed consumers will receive errors when consuming data (FORCE syntax supported from version 3.3.6.0).
```sql
DROP TOPIC [IF EXISTS] topic_name;
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
```
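For example, a topic that still has an active subscriber can be dropped forcibly; the topic name below is hypothetical:

```sql
DROP TOPIC IF EXISTS FORCE topic_meters;
```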
## View Topics
@ -99,10 +99,10 @@ Displays information about all consumers in the current database, including the
### Delete Consumer Group
When creating a consumer, a consumer group is assigned to the consumer. Consumers cannot be explicitly deleted, but the consumer group can be deleted with the following statement when there are no consumers in the group:
When creating a consumer, a consumer group is assigned to the consumer. Consumers cannot be explicitly deleted, but the consumer group can be deleted. If consumers in the group are currently consuming, the FORCE syntax can be used to force deletion; after a forced deletion, the subscribed consumers will receive errors when consuming data (FORCE syntax supported from version 3.3.6.0).
```sql
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
```
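For example, assuming a consumer group `group1` is still consuming on a hypothetical topic `topic_meters`, it can be force-deleted as follows:

```sql
DROP CONSUMER GROUP IF EXISTS FORCE group1 ON topic_meters;
```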
## Data Subscription
@ -137,6 +137,7 @@ If the following 3 data entries were written, then during replay, the first entr
When using the data subscription's replay feature, note the following:
- Enable the replay function by setting the consumption parameter enable.replay to true
- The replay function of data subscription only supports data playback for query subscriptions; supertable and database subscriptions do not support playback.
- Replay does not support progress saving.
- Because data playback itself requires processing time, there is a precision error of several tens of milliseconds in playback.

View File

@ -26,11 +26,11 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name
SUBTABLE(expression) AS subquery
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
FILL_HISTORY [0|1] [ASYNC]
IGNORE UPDATE [0|1]
}
@ -109,7 +109,7 @@ Under normal circumstances, stream computation tasks will not process data that
By enabling the fill_history option, the created stream computation task will be capable of processing data written before, during, and after the creation of the stream. This means that data written either before or after the creation of the stream will be included in the scope of stream computation, thus ensuring data integrity and consistency. This setting provides users with greater flexibility, allowing them to flexibly handle historical and new data according to actual needs.
Tips:
- When enabling fill_ristory, creating a stream requires finding the boundary point of historical data. If there is a lot of historical data, it may cause the task of creating a stream to take a long time. In this case, the parameter streamRunHistorySync (supported since version 3.3.6.0) can be configured to 1 (default is 0), and the task of creating a stream can be processed in the background. The statement of creating a stream can be returned immediately without blocking subsequent operations.
- When enabling fill_history, creating a stream requires finding the boundary point of historical data. If there is a lot of historical data, this may cause the stream creation task to take a long time. In this case, you can use fill_history 1 async (supported since version 3.3.6.0), so that the stream creation task is processed in the background and the CREATE STREAM statement returns immediately without blocking subsequent operations. async only takes effect with fill_history 1; creating a stream with fill_history 0 is very fast and does not require asynchronous processing.
- Show streams can be used to view the progress of background stream creation (ready status indicates success, init status indicates stream creation in progress, failed status indicates that the stream creation has failed, and the message column can be used to view the reason for the failure. In the case of failed stream creation, the stream can be deleted and rebuilt).
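A minimal sketch of asynchronous history backfill, assuming a `power.meters` supertable; the stream and target table names are hypothetical:

```sql
CREATE STREAM IF NOT EXISTS s_hist FILL_HISTORY 1 ASYNC INTO st_hist AS
  SELECT _wstart, COUNT(*) FROM power.meters INTERVAL(10s);
-- check the background creation progress (status and message columns)
SHOW STREAMS;
```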
@ -142,8 +142,12 @@ When creating a stream, you can specify the trigger mode of stream computing thr
1. AT_ONCE: Triggered immediately upon writing.
2. WINDOW_CLOSE: Triggered when the window closes (the closing of the window is determined by the event time, can be used in conjunction with watermark).
3. MAX_DELAY time: If the window closes, computation is triggered. If the window has not closed, and the duration since it has not closed exceeds the time specified by max delay, computation is triggered.
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only the results of the currently closed window are calculated and pushed out. The window is only calculated once at the moment of closure, and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (does not support sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE.
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only the results of the currently closed window are calculated and pushed out. The window is only calculated once at the moment of closure and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (sliding is supported); in this mode, FILL_HISTORY is automatically set to 0, IGNORE EXPIRED is automatically set to 1, and IGNORE UPDATE is automatically set to 1; FILL only supports PREV, NULL, NONE, VALUE.
- This mode can be used to implement continuous queries, such as creating a stream that, every 1 second, queries the number of data entries in the window of the past 10 seconds. The SQL is as follows:
```sql
create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s)
```
5. CONTINUOUS_WINDOW_CLOSE: Results are output when the window is closed. Modifying or deleting data does not immediately trigger a recalculation. Instead, periodic recalculations are performed every rec_time_val duration. If rec_time_val is not specified, the recalculation period is 60 minutes. If the recalculation time exceeds rec_time_val, the next recalculation will be automatically initiated after the current one is completed. Currently, this mode only supports INTERVAL windows. If the FILL clause is used, relevant information of the adapter needs to be configured, including adapterFqdn, adapterPort, and adapterToken. The adapterToken is a string obtained by Base64-encoding `{username}:{password}`. For example, after encoding `root:taosdata`, the result is `cm9vdDp0YW9zZGF0YQ==`.
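A minimal sketch of this trigger mode, assuming a `power.meters` supertable; the stream name, target table, and 10-minute recalculation period are hypothetical choices:

```sql
CREATE STREAM IF NOT EXISTS s_cwc TRIGGER CONTINUOUS_WINDOW_CLOSE RECALCULATE 10m
  INTO st_cwc AS
  SELECT _wstart, AVG(voltage) FROM power.meters INTERVAL(1m);
```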
The closing of the window is determined by the event time, such as when the event stream is interrupted or continuously delayed, at which point the event time cannot be updated, possibly leading to outdated computation results.
Therefore, stream computing provides the MAX_DELAY trigger mode that combines event time with processing time: MAX_DELAY mode triggers computation immediately when the window closes, and its unit can be specified, specific units: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). Additionally, when data is written, if the time that triggers computation exceeds the time specified by MAX_DELAY, computation is triggered immediately.

View File

@ -146,9 +146,19 @@ Not supported
```
</TabItem>
<TabItem label="C" value="c">
The example code for binding parameters with stmt2 (TDengine v3.3.5.0 or higher is required) is as follows:
```c
{{#include docs/examples/c/stmt2_insert_demo.c}}
```
The example code for binding parameters with stmt is as follows:
```c
{{#include docs/examples/c/stmt_insert_demo.c}}
```
</TabItem>
<TabItem label="REST API" value="rest">
Not supported

View File

@ -298,13 +298,53 @@ select max_vol(vol1, vol2, vol3, deviceid) from battery;
</details>
#### Aggregate Function Example 3 Split string and calculate average value [extract_avg](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/extract_avg.c)
The `extract_avg` function converts a comma-separated string sequence into a set of numerical values, counts the results of all rows, and calculates the final average. Note when implementing:
- `interBuf->numOfResult` needs to return 1 or 0 and cannot be used for count.
- Count can use additional caches, such as the `SumCount` structure.
- Use `varDataVal` to obtain the string.
Create table:
```shell
create table scores(ts timestamp, varStr varchar(128));
```
Create custom function:
```shell
create aggregate function extract_avg as '/root/udf/libextract_avg.so' outputtype double bufsize 16 language 'C';
```
Use custom function:
```shell
select extract_avg(varStr) from scores;
```
Generate `.so` file
```bash
gcc -g -O0 -fPIC -shared extract_avg.c -o libextract_avg.so
```
<details>
<summary>extract_avg.c</summary>
```c
{{#include tests/script/sh/extract_avg.c}}
```
</details>
## Developing UDFs in Python Language
### Environment Setup
The specific steps to prepare the environment are as follows:
- Step 1, prepare the Python runtime environment.
- Step 1, prepare the Python runtime environment. If you compile and install Python locally, be sure to enable the `--enable-shared` option; otherwise, the subsequent installation of taospyudf will fail because no shared library is generated.
- Step 2, install the Python package taospyudf. The command is as follows.
```shell

View File

@ -72,8 +72,16 @@ TDengine Enterprise implements incremental backup and recovery of data by using
7. **Directory:** Enter the full path of the directory in which you want to store backup files.
8. **Backup file max size:** Enter the maximum size of a single backup file. If the total size of your backup exceeds this number, the backup is split into multiple files.
9. **Compression level:** Select **fastest** for the fastest performance but lowest compression ratio, **best** for the highest compression ratio but slowest performance, or **balanced** for a combination of performance and compression.
4. Click **Confirm** to create the backup plan.
4. Users can enable S3 dumping to upload backup files to the S3 storage service. To enable S3 dumping, the following information needs to be provided:
1. **Endpoint**: The address of the S3 endpoint.
2. **Access Key ID**: The access key ID for authentication.
3. **Secret Access Key**: The secret access key for authentication.
4. **Bucket**: The name of the target bucket.
5. **Region**: The region where the bucket is located.
6. **Object Prefix**: A prefix for backup file objects, similar to a directory path on S3.
7. **Backup Retention Period**: The retention duration for local backups. All files older than `current time - backup_retention_period` are uploaded to S3.
8. **Backup Retention Count**: The number of local backups to retain. Only the latest `backup_retention_size` backup files are kept locally.
5. Click **Confirm** to create the backup plan.
You can view your backup plans and modify, clone, or delete them using the buttons in the **Operation** columns. Click **Refresh** to update the status of your plans. Note that you must stop a backup plan before you can delete it. You can also click **View** in the **Backup File** column to view the backup record points and files created by each plan.

View File

@ -55,7 +55,7 @@ When network I/O and other processing resources are not bottlenecks, by optimizi
Generally, when TDengine needs to select a mount point from the same level to create a new data file, it uses a round-robin strategy for selection. However, in reality, each disk may have different capacities, or the same capacity but different amounts of data written, leading to an imbalance in available space on each disk. In practice, this may result in selecting a disk with very little remaining space.
To address this issue, starting from 3.1.1.0, a new configuration minDiskFreeSize was introduced. When the available space on a disk is less than or equal to this threshold, that disk will no longer be selected for generating new data files. The unit of this configuration item is bytes, and its value should be greater than 2GB, i.e., mount points with less than 2GB of available space will be skipped.
To address this issue, starting from 3.1.1.0, a new configuration minDiskFreeSize was introduced. When the available space on a disk is less than or equal to this threshold, that disk will no longer be selected for generating new data files. The unit of this configuration item is bytes. For example, if its value is set to 2GB, mount points with less than 2GB of available space will be skipped.
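Since the parameter can be modified dynamically, the threshold can be raised at runtime; a sketch using the standard `ALTER DNODE` configuration syntax (the dnode ID and value are examples):

```sql
-- raise minDiskFreeSize to 2GB (value in bytes) on dnode 1
ALTER DNODE 1 'minDiskFreeSize' '2147483648';
```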
Starting from version 3.3.2.0, a new configuration `disable_create_new_file` has been introduced to control the prohibition of generating new files on a certain mount point. The default value is `false`, which means new files can be generated on each mount point by default.

View File

@ -0,0 +1,278 @@
---
sidebar_label: Security Configuration
title: Security Configuration
toc_max_heading_level: 4
---
import Image from '@theme/IdealImage';
import imgEcosys from '../assets/tdengine-components-01.png';
## Background
The distributed and multi-component nature of TDengine makes its security configuration a concern in production systems. This document aims to explain the security issues of various TDengine components and different deployment methods, and provide deployment and configuration suggestions to support the security of user data.
## Components Involved in Security Configuration
TDengine includes multiple components:
- `taosd`: Core component.
- `taosc`: Client library.
- `taosAdapter`: REST API and WebSocket service.
- `taosKeeper`: Monitoring service component.
- `taosX`: Data pipeline and backup recovery component.
- `taosxAgent`: Auxiliary component for external data source access.
- `taosExplorer`: Web visualization management interface.
In addition to TDengine deployment and applications, there are also the following components:
- Applications that access and use the TDengine database through various connectors.
- External data sources: Other data sources that access TDengine, such as MQTT, OPC, Kafka, etc.
The relationship between the components is as follows:
<figure>
<Image img={imgEcosys} alt="TDengine ecosystem"/>
<figcaption>TDengine ecosystem</figcaption>
</figure>
## TDengine Security Settings
### `taosd`
The `taosd` cluster uses TCP connections based on its own protocol for data exchange, which has low risk, but the transmission process is not encrypted, so there is still some security risk.
Enabling compression may help with TCP data obfuscation.
- **compressMsgSize**: Whether to compress RPC messages. Integer, optional: -1: Do not compress any messages; 0: Compress all messages; N (N>0): Only compress messages larger than N bytes.
To ensure the traceability of database operations, it is recommended to enable the audit function.
- **audit**: Audit function switch, 0 is off, 1 is on. Default is on.
- **auditInterval**: Reporting interval, in milliseconds. Default is 5000.
- **auditCreateTable**: Whether to enable the audit function for creating sub-tables. 0 is off, 1 is on. Default is on.
To ensure the security of data files, database encryption can be enabled.
- **encryptAlgorithm**: Data encryption algorithm.
- **encryptScope**: Data encryption scope.
Enabling the whitelist can restrict access addresses and further enhance privacy.
- **enableWhiteList**: Whitelist function switch, 0 is off, 1 is on; default is off.
### `taosc`
Users and other components use the native client library (`taosc`) and its own protocol to connect to `taosd`, which has low data security risk, but the transmission process is still not encrypted, so there is some security risk.
### `taosAdapter`
`taosAdapter` uses the native client library (`taosc`) and its own protocol to connect to `taosd`, and also supports RPC message compression, so there is no data security issue.
Applications and other components connect to `taosAdapter` through various language connectors. By default, the connection is based on HTTP 1.1 and is not encrypted. To ensure the security of data transmission between `taosAdapter` and other components, SSL encrypted connections need to be configured. Modify the following configuration in the `/etc/taos/taosadapter.toml` configuration file:
```toml
[ssl]
enable = true
certFile = "/path/to/certificate-file"
keyFile = "/path/to/private-key"
```
Configure HTTPS/SSL access in the connector to complete encrypted access.
To further enhance security, the whitelist function can be enabled, and configured in `taosd`, which also applies to the `taosAdapter` component.
### `taosX`
`taosX` includes REST API and gRPC interfaces, where the gRPC interface is used for `taos-agent` connections.
- The REST API interface is based on HTTP 1.1 and is not encrypted, posing a security risk.
- The gRPC interface is based on HTTP 2 and is not encrypted, posing a security risk.
To ensure data security, it is recommended that the `taosX` API interface is limited to internal access only. Modify the following configuration in the `/etc/taos/taosx.toml` configuration file:
```toml
[serve]
listen = "127.0.0.1:6050"
grpc = "127.0.0.1:6055"
```
Starting from TDengine 3.3.6.0, `taosX` supports HTTPS connections. Add the following configuration in the `/etc/taos/taosx.toml` file:
```toml
[serve]
ssl_cert = "/path/to/server.pem"
ssl_key = "/path/to/server.key"
ssl_ca = "/path/to/ca.pem"
```
And modify the API address to HTTPS connection in Explorer:
```toml
# Local connection to taosX API
x_api = "https://127.0.0.1:6050"
# Public IP or domain address
grpc = "https://public.domain.name:6055"
```
### `taosExplorer`
Similar to the `taosAdapter` component, the `taosExplorer` component provides HTTP services for external access. Modify the following configuration in the `/etc/taos/explorer.toml` configuration file:
```toml
[ssl]
# SSL certificate file
certificate = "/path/to/ca.file"
# SSL certificate private key
certificate_key = "/path/to/key.file"
```
Then, use HTTPS to access Explorer, such as [https://192.168.12.34:6060](https://192.168.12.34:6060).
### `taosxAgent`
After `taosX` enables HTTPS, the `Agent` component and `taosX` use HTTP 2 encrypted connections, using Arrow-Flight RPC for data exchange. The transmission content is in binary format, and only registered `Agent` connections are valid, ensuring data security.
It is recommended to always enable HTTPS connections for `Agent` services in insecure or public network environments.
### `taosKeeper`
`taosKeeper` uses WebSocket connections to communicate with `taosAdapter`, writing monitoring information reported by other components into TDengine.
The current version of `taosKeeper` has security risks:
- The listening address cannot be restricted to the local machine. By default, it listens on all addresses on port 6043, posing a risk of network attacks. This risk can be ignored when deploying with Docker or Kubernetes without exposing the `taosKeeper` port.
- The configuration file contains plaintext passwords, so the visibility of the configuration file needs to be reduced. In `/etc/taos/taoskeeper.toml`:
```toml
[tdengine]
host = "localhost"
port = 6041
username = "root"
password = "taosdata"
usessl = false
```
## Security Enhancements
We recommend using TDengine within a local area network.
If you must provide access outside the local area network, consider adding the following configurations:
### Load Balancing
Use load balancing to provide `taosAdapter` services externally.
Take Nginx as an example to configure multi-node load balancing:
```nginx
http {
    # map $http_upgrade to the $connection_upgrade value used below (standard NGINX WebSocket pattern)
    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }

    server {
        listen 6041;

        location / {
            proxy_pass http://websocket;
            # Headers for websocket compatibility
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
            # Forwarded headers
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-Server $hostname;
            proxy_set_header X-Real-IP $remote_addr;
        }
    }

    upstream websocket {
        server 192.168.11.61:6041;
        server 192.168.11.62:6041;
        server 192.168.11.63:6041;
    }
}
```
If the `taosAdapter` component is not configured with SSL secure connections, SSL needs to be configured to ensure secure access. SSL can be configured at a higher-level API Gateway or in Nginx; if you have stronger security requirements for the connections between components, you can configure SSL in all components. The Nginx configuration is as follows:
```nginx
http {
    server {
        listen 443 ssl;
        ssl_certificate /path/to/your/certificate.crt;
        ssl_certificate_key /path/to/your/private.key;
    }
}
```
### Security Gateway
In modern internet production systems, the use of security gateways is also very common. [traefik](https://traefik.io/) is a good open-source choice. We take traefik as an example to explain the security configuration in the API gateway.
Traefik provides various security configurations through middleware, including:
1. Authentication: Traefik provides multiple authentication methods such as BasicAuth, DigestAuth, custom authentication middleware, and OAuth 2.0.
2. IP Whitelist: Restrict the allowed client IPs.
3. Rate Limit: Control the number of requests sent to the service.
4. Custom Headers: Add configurations such as `allowedHosts` through custom headers to improve security.
A common middleware example is as follows:
```yaml
labels:
- "traefik.enable=true"
- "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)"
- "traefik.http.routers.tdengine.entrypoints=https"
- "traefik.http.routers.tdengine.tls.certresolver=default"
- "traefik.http.routers.tdengine.service=tdengine"
- "traefik.http.services.tdengine.loadbalancer.server.port=6041"
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
- "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue"
- "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true"
- "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
- "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist"
```
The above example completes the following configurations:
- TLS authentication uses the `default` configuration, which can be configured in the configuration file or traefik startup parameters, as follows:
```yaml
traefik:
  image: "traefik:v2.3.2"
  hostname: "traefik"
  networks:
    - traefik
  command:
    - "--log.level=INFO"
    - "--api.insecure=true"
    - "--providers.docker=true"
    - "--providers.docker.exposedbydefault=false"
    - "--providers.docker.swarmmode=true"
    - "--providers.docker.network=traefik"
    - "--providers.docker.watch=true"
    - "--entrypoints.http.address=:80"
    - "--entrypoints.https.address=:443"
    - "--certificatesresolvers.default.acme.dnschallenge=true"
    - "--certificatesresolvers.default.acme.dnschallenge.provider=alidns"
    - "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com"
    - "--certificatesresolvers.default.acme.email=linhehuo@gmail.com"
    - "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json"
```
The above startup parameters configure the `default` TLS certificate resolver and automatic ACME authentication (automatic certificate application and renewal).
- Middleware `redirect-to-https`: Configure redirection from HTTP to HTTPS, forcing the use of secure connections.
```yaml
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
```
- Middleware `check-header`: Configure custom header checks. External access must add custom headers and match header values to prevent unauthorized access. This is a very simple and effective security mechanism when providing API access.
- Middleware `tdengine-ipwhitelist`: Configure IP whitelist. Only allow specified IPs to access, using CIDR routing rules for matching, and can set internal and external IP addresses.
## Summary
Data security is a key indicator of the TDengine product. These measures are designed to protect TDengine deployments from unauthorized access and data breaches while maintaining performance and functionality. However, the security configuration of TDengine itself is not the only guarantee in production. It is more important to develop solutions that better match customer needs in combination with the user's business system.

View File

@ -0,0 +1,180 @@
---
sidebar_label: Perspective
title: Integration With Perspective
toc_max_heading_level: 4
---
Perspective is an open-source and powerful data visualization library developed by [Prospective.co](https://www.perspective.co/). Leveraging the technologies of WebAssembly and Web Workers, it enables interactive real-time data analysis in web applications and provides high-performance visualization capabilities on the browser side. With its help, developers can build dashboards, charts, etc. that update in real time, and users can easily interact with the data, filtering, sorting, and exploring it as needed. It boasts high flexibility, adapting to various data formats and business scenarios. It is also fast, ensuring smooth interaction even when dealing with large-scale data. Moreover, it has excellent usability, allowing both beginners and professional developers to quickly build visualization interfaces.
In terms of data connection, Perspective, through the Python connector of TDengine, perfectly supports TDengine data sources. It can efficiently retrieve various types of data, such as massive time-series data, from TDengine. Additionally, it offers real-time functions including the display of complex charts, in-depth statistical analysis, and trend prediction, helping users gain insights into the value of the data and providing strong support for decision-making. It is an ideal choice for building applications with high requirements for real-time data visualization and analysis.
![perspective-architecture](./perspective/prsp_architecture.webp)
## Prerequisites
Perform the following installation operations in the Linux system:
- TDengine is installed and running normally (both Enterprise and Community versions are available).
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
- Python version 3.10 or higher has been installed (if not installed, please refer to [Python Installation](https://docs.python.org/)).
- Download or clone the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project. After entering the root directory of the project, run the "install.sh" script to download and install the TDengine client library and related dependencies locally.
## Data Analysis
**Step 1**, Run the "run.sh" script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project to start the Perspective service. This service will retrieve data from the TDengine database every 300 milliseconds and transmit the data in a streaming form to the web-based `Perspective Viewer`.
```shell
sh run.sh
```
**Step 2**, Start a static web service, then open the prsp-viewer.html page in the browser to display the visualized data.
```shell
python -m http.server 8081
```
![perspective-viewer](./perspective/prsp_view.webp)
## Instructions for use
### Write Data to TDengine
The `producer.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project can periodically insert data into the TDengine database with the help of the TDengine Python connector. This script will generate random data and insert it into the database, thus simulating the process of writing real-time data. The specific execution steps are as follows:
1. Establish a connection to TDengine.
2. Create the `power` database and the `meters` table.
3. Generate random data every 300 milliseconds and write it into the TDengine database.
For detailed instructions on writing using the Python connector, please refer to [Python Parameter Binding](../../../tdengine-reference/client-libraries/python/#parameter-binding).
### Load Data from TDengine
The `perspective_server.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project will start a Perspective server. This server will read data from TDengine and stream the data to a Perspective table via the Tornado WebSocket.
1. Start a Perspective server.
2. Establish a connection to TDengine.
3. Create a Perspective table (the table structure needs to match the type of the table in the TDengine database).
4. Call the `Tornado.PeriodicCallback` function to start a scheduled task, thereby achieving the update of the data in the Perspective table. The sample code is as follows:
```python
def perspective_thread(perspective_server: perspective.Server, tdengine_conn: taosws.Connection):
    """
    Create a new Perspective table and update it with new data every 50ms
    """
    # create a new Perspective table
    client = perspective_server.new_local_client()
    schema = {
        "timestamp": datetime,
        "location": str,
        "groupid": int,
        "current": float,
        "voltage": int,
        "phase": float,
    }
    # define the table schema
    table = client.table(
        schema,
        limit=1000,  # maximum number of rows in the table
        name=PERSPECTIVE_TABLE_NAME,  # table name. Use this with perspective-viewer on the client side
    )
    logger.info("Created new Perspective table")

    # update with new data
    def updater():
        data = read_tdengine(tdengine_conn)
        table.update(data)
        logger.debug(f"Updated Perspective table: {len(data)} rows")

    logger.info(f"Starting tornado ioloop update loop every {PERSPECTIVE_REFRESH_RATE} milliseconds")
    # start the periodic callback to update the table data
    callback = tornado.ioloop.PeriodicCallback(callback=updater, callback_time=PERSPECTIVE_REFRESH_RATE)
    callback.start()
```
### HTML Page Configuration
The `prsp-viewer.html` file in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project embeds the `Perspective Viewer` into the HTML page. It connects to the Perspective server via a WebSocket and displays real-time data according to the chart configuration.
- Configure the displayed charts and the rules for data analysis.
- Establish a WebSocket connection with the Perspective server.
- Import the Perspective library, connect to the Perspective server via a WebSocket, and load the `meters_values` table to display dynamic data.
```html
<script type="module">
// import the Perspective library
import perspective from "https://unpkg.com/@finos/perspective@3.1.3/dist/cdn/perspective.js";
document.addEventListener("DOMContentLoaded", async function () {
// an asynchronous function for loading the view
async function load_viewer(viewerId, config) {
try {
const table_name = "meters_values";
const viewer = document.getElementById(viewerId);
// connect Perspective WebSocket server
const websocket = await perspective.websocket("ws://localhost:8085/websocket");
// open server table
const server_table = await websocket.open_table(table_name);
// load the table into the view
await viewer.load(server_table);
// use view configuration
await viewer.restore(config);
} catch (error) {
console.error(`Failed to get data from ${table_name}, err: ${error}`);
}
}
// configuration of the view
const config = {
"version": "3.3.1", // Perspective library version (compatibility identifier)
"plugin": "Datagrid", // View mode: Datagrid (table) or D3FC (chart)
"plugin_config": { // Plugin-specific configuration
"columns": {
"current": {
"width": 150 // Column width in pixels
}
},
"edit_mode": "READ_ONLY", // Edit mode: READ_ONLY (immutable) or EDIT (editable)
"scroll_lock": false // Whether to lock scroll position
},
"columns_config": {}, // Custom column configurations (colors, formatting, etc.)
"settings": true, // Whether to show settings panel (true/false)
"theme": "Power Meters", // Custom theme name (must be pre-defined)
"title": "Meters list data", // View title
"group_by": ["location", "groupid"], // Row grouping fields (equivalent to `row_pivots`)
"split_by": [], // Column grouping fields (equivalent to `column_pivots`)
"columns": [ // Columns to display (in order)
"timestamp",
"location",
"current",
"voltage",
"phase"
],
"filter": [], // Filter conditions (triplet format array)
"sort": [], // Sorting rules (format: [field, direction])
"expressions": {}, // Custom expressions (e.g., calculated columns)
"aggregates": { // Aggregation function configuration
"timestamp": "last", // Aggregation: last (takes the latest value)
"voltage": "last", // Aggregation: last
"phase": "last", // Aggregation: last
"current": "last" // Aggregation: last
}
};
// load the first view
await load_viewer("prsp-viewer-1", config1);
});
</script>
<!-- Define the HTML Structure of the Dashboard -->
<div id="dashboard">
<div class="viewer-container">
<perspective-viewer id="prsp-viewer-1" theme="Pro Dark"></perspective-viewer>
</div>
</div>
```
## Reference Materials
- [Perspective Docs](https://perspective.finos.org/)
- [TDengine Python Connector](../../../tdengine-reference/client-libraries/python/)
- [TDengine Stream Processing](../../../advanced-features/stream-processing/)

Binary file not shown.


Binary file not shown.


View File

@ -170,7 +170,7 @@ The effective value of charset is UTF-8.
|tempDir | |Not supported |Specifies the directory for generating temporary files during system operation, default value /tmp|
|minimalDataDirGB | |Not supported |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|minimalTmpDirGB | |Not supported |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-2199023255552, default value 52428800; Enterprise parameter|
|s3MigrateIntervalSec|After 3.3.4.3|Supported, effective immediately |Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|s3MigrateEnabled |After 3.3.4.3|Supported, effective immediately |Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|s3Accesskey |After 3.3.4.3|Supported, effective after restart|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
@ -246,6 +246,9 @@ The effective value of charset is UTF-8.
| streamSinkDataRate | |Supported, effective after restart| Internal parameter, used to control the write speed of stream computing results |
| streamNotifyMessageSize | After 3.3.6.0 | Not supported | Internal parameter, controls the message size for event notifications, default value is 8192 |
| streamNotifyFrameSize | After 3.3.6.0 | Not supported | Internal parameter, controls the underlying frame size when sending event notification messages, default value is 256 |
| adapterFqdn | After 3.3.6.0 | Not supported | Internal parameter, the address of the taosAdapter service, default value is localhost |
| adapterPort | After 3.3.6.0 | Not supported | Internal parameter, the port of the taosAdapter service, default value is 6041 |
| adapterToken | After 3.3.6.0 | Not supported | Internal parameter, the string obtained by Base64-encoding `{username}:{password}`, default value is `cm9vdDp0YW9zZGF0YQ==` |
### Log Related

View File

@ -72,12 +72,6 @@ The TDengine client driver provides all the APIs needed for application programm
| tempDir | |Supported, effective immediately | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
| minimalTmpDirGB | |Supported, effective immediately | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
### Stream Related
| Parameter Name |Supported Version|Dynamic Modification| Description |
|-----------------------|----------|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| streamRunHistoryAsync | 3.3.6.0 |Supported, effective immediately | When creating a stream with the fill_history parameter, should the stream statement be executed asynchronously. Boolean value, async if true, sync if false. default is false |
### Log Related
|Parameter Name|Supported Version|Dynamic Modification|Description|

View File

@ -379,6 +379,7 @@ Specify the configuration parameters for tag and data columns in `super_tables`
`query_times` specifies the number of times to run the query, numeric type.
**Note: from version 3.3.5.6 and beyond, simultaneous configuration of `specified_table_query` and `super_table_query` in a JSON file is no longer supported.**
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)
@ -508,6 +509,15 @@ Note: Data types in the taosBenchmark configuration file must be in lowercase to
</details>
<details>
<summary>queryStb.json</summary>
```json
{{#include /TDengine/tools/taos-tools/example/queryStb.json}}
```
</details>
#### Subscription Example
<details>

View File

@ -43,6 +43,7 @@ In TDengine, the following data types can be used in the data model of basic tab
| 16 | VARCHAR | Custom | Alias for BINARY type |
| 17 | GEOMETRY | Custom | Geometry type, supported starting from version 3.1.0.0 |
| 18 | VARBINARY | Custom | Variable-length binary data, supported starting from version 3.1.1.0 |
| 19 | DECIMAL | 8 or 16 | High-precision numeric type. The range of values depends on the precision and scale specified in the type. Supported starting from version 3.3.6. See the description below. |
:::note
@ -61,6 +62,18 @@ In TDengine, the following data types can be used in the data model of basic tab
- VARBINARY is a data type for storing binary data, with a maximum length of 65,517 bytes for data columns and 16,382 bytes for label columns. Binary data can be written via SQL or schemaless methods (needs to be converted to a string starting with \x), or through stmt methods (can use binary directly). Displayed as hexadecimal starting with \x.
:::
### DECIMAL Data Type
The `DECIMAL` data type is used for high-precision numeric storage and is supported starting from version 3.3.6. The definition syntax is: `DECIMAL(18, 2)`, `DECIMAL(38, 10)`, where two parameters must be specified: `precision` and `scale`. `Precision` refers to the maximum number of significant digits supported, and `scale` refers to the maximum number of decimal places. For example, `DECIMAL(8, 4)` represents a range of `[-9999.9999, 9999.9999]`. When defining the `DECIMAL` data type, the range of `precision` is `[1, 38]`, and the range of `scale` is `[0, precision]`. If `scale` is 0, it represents integers only. You can also omit `scale`, in which case it defaults to 0. For example, `DECIMAL(18)` is equivalent to `DECIMAL(18, 0)`.
When the `precision` value is less than or equal to 18, 8 bytes of storage (DECIMAL64) are used internally. When the `precision` is in the range `(18, 38]`, 16 bytes of storage (DECIMAL) are used. When writing `DECIMAL` type data in SQL, numeric values can be written directly. If the value exceeds the maximum representable value for the type, a `DECIMAL_OVERFLOW` error will be reported. If the value does not exceed the maximum representable value but the number of decimal places exceeds the `scale`, it will be automatically rounded. For example, if the type is defined as `DECIMAL(10, 2)` and the value `10.987` is written, the actual stored value will be `10.99`.
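A short illustration of the rounding behavior described above, using a hypothetical table:

```sql
CREATE TABLE payments (ts TIMESTAMP, amount DECIMAL(10, 2));
INSERT INTO payments VALUES (NOW, 10.987); -- stored as 10.99 (rounded to scale 2)
```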
The `DECIMAL` type only supports regular columns and does not currently support tag columns. The `DECIMAL` type supports SQL-based writes only and does not currently support `stmt` or schemaless writes.
When performing operations between integer types and the `DECIMAL` type, the integer type is converted to the `DECIMAL` type before the calculation. When the `DECIMAL` type is involved in calculations with `DOUBLE`, `FLOAT`, `VARCHAR`, or `NCHAR` types, it is converted to `DOUBLE` type for computation.
When querying `DECIMAL` type expressions, if the intermediate result of the calculation exceeds the maximum value that the current type can represent, a `DECIMAL_OVERFLOW` error is reported.
## Constants

View File

@ -1186,6 +1186,7 @@ CAST(expr AS type_name)
1) Invalid character situations when converting string types to numeric types, e.g., "a" might convert to 0, but will not throw an error.
2) When converting to numeric types, if the value exceeds the range that `type_name` can represent, it will overflow, but will not throw an error.
3) When converting to string types, if the converted length exceeds the length specified in `type_name`, it will be truncated, but will not throw an error.
- The DECIMAL type does not support conversion to or from JSON, VARBINARY, or GEOMETRY types.
#### TO_ISO8601
@ -1691,12 +1692,14 @@ AVG(expr)
**Function Description**: Calculates the average value of the specified field.
**Return Data Type**: DOUBLE.
**Return Data Type**: DOUBLE, DECIMAL.
**Applicable Data Types**: Numeric types.
**Applicable to**: Tables and supertables.
**Description**: When the input type is DECIMAL, the output type is also DECIMAL. The precision and scale of the output conform to the rules described in the data type section. The result type is obtained by dividing the SUM result type by a UINT64 count. If the SUM result causes a DECIMAL type overflow, a DECIMAL OVERFLOW error is reported.
### COUNT
```sql
@ -1847,12 +1850,14 @@ SUM(expr)
**Function Description**: Calculates the sum of a column in a table/supertable.
**Return Data Type**: DOUBLE, BIGINT.
**Return Data Type**: DOUBLE, BIGINT, DECIMAL.
**Applicable Data Types**: Numeric types.
**Applicable to**: Tables and supertables.
**Description**: When the input type is DECIMAL, the output type is DECIMAL(38, scale), where precision is the maximum value currently supported, and scale is the scale of the input type. If the SUM result overflows, a DECIMAL OVERFLOW error is reported.
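For example, assuming a column `amount` of type `DECIMAL(10, 2)` in a hypothetical table `payments`, `SUM(amount)` returns `DECIMAL(38, 2)`:

```sql
SELECT SUM(amount), AVG(amount) FROM payments;
```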
### HYPERLOGLOG
```sql
@ -2254,6 +2259,7 @@ ignore_null_values: {
- INTERP is used to obtain the record value of a specified column at the specified time slice. It has a dedicated syntax (interp_clause) when used. For syntax introduction, see [reference link](../query-data/#interp).
- When there is no row data that meets the conditions at the specified time slice, the INTERP function will interpolate according to the settings of the [FILL](../time-series-extensions/#fill-clause) parameter.
- When INTERP is applied to a supertable, it will sort all the subtable data under that supertable by primary key column and perform interpolation calculations, and can also be used with PARTITION BY tbname to force the results to a single timeline.
- When using INTERP with FILL PREV/NEXT/NEAR modes, its behavior differs from window queries. If data exists at the slice, no FILL operation will be performed, even if the current value is NULL.
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported from version 3.0.2.0).
- INTERP can be used with the pseudocolumn _isfilled to display whether the return result is from the original record or generated by the interpolation algorithm (supported from version 3.0.3.0).
- INTERP can only use the pseudocolumn `_irowts_origin` when using FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported from version 3.3.4.9.
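A sketch of INTERP with a FILL mode, assuming a hypothetical table `meters` with a `current` column:

```sql
SELECT _irowts, _isfilled, INTERP(current) FROM meters
  RANGE('2025-01-01 00:00:00', '2025-01-01 01:00:00') EVERY(10m) FILL(PREV);
```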

View File

@ -84,10 +84,10 @@ The FILL statement specifies the filling mode when data is missing in a window i
1. No filling: NONE (default filling mode).
2. VALUE filling: Fixed value filling, where the fill value must be specified. For example: FILL(VALUE, 1.23). Note that the final fill value is determined by the type of the corresponding column, such as FILL(VALUE, 1.23), if the corresponding column is of INT type, then the fill value is 1. If multiple columns in the query list need FILL, then each FILL column must specify a VALUE, such as `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note, only ordinary columns in the SELECT expression need to specify FILL VALUE, such as `_wstart`, `_wstart+1a`, `now`, `1+1` and the partition key (like tbname) used with partition by do not need to specify VALUE, like `timediff(last(ts), _wstart)` needs to specify VALUE.
3. PREV filling: Fill data using the previous non-NULL value. For example: FILL(PREV).
3. PREV filling: Fill data using the previous value. For example: FILL(PREV).
4. NULL filling: Fill data with NULL. For example: FILL(NULL).
5. LINEAR filling: Perform linear interpolation filling based on the nearest non-NULL values before and after. For example: FILL(LINEAR).
6. NEXT filling: Fill data using the next non-NULL value. For example: FILL(NEXT).
6. NEXT filling: Fill data using the next value. For example: FILL(NEXT).
Among these filling modes, except for the NONE mode which does not fill by default, other modes will be ignored if there is no data in the entire query time range, resulting in no fill data and an empty query result. This behavior is reasonable under some modes (PREV, NEXT, LINEAR) because no data means no fill value can be generated. For other modes (NULL, VALUE), theoretically, fill values can be generated, and whether to output fill values depends on the application's needs. To meet the needs of applications that require forced filling of data or NULL, without breaking the compatibility of existing filling modes, two new filling modes have been added starting from version 3.0.3.0:
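A sketch of per-column VALUE filling as described in item 2, using hypothetical table and column names:

```sql
SELECT _wstart, MIN(c1), MAX(c1) FROM tb1
  WHERE ts >= '2025-01-01 00:00:00' AND ts < '2025-01-01 01:00:00'
  INTERVAL(10m) FILL(VALUE, 0, 0);
```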
@ -112,7 +112,7 @@ The differences between NULL, NULL_F, VALUE, VALUE_F filling modes for different
Time windows can be divided into sliding time windows and tumbling time windows.
The INTERVAL clause is used to generate windows of equal time periods, and SLIDING is used to specify the time the window slides forward. Each executed query is a time window, and the time window slides forward as time flows. When defining continuous queries, it is necessary to specify the size of the time window (time window) and the forward sliding times for each execution. As shown, [t0s, t0e], [t1s, t1e], [t2s, t2e] are the time window ranges for three continuous queries, and the sliding time range is indicated by sliding time. Query filtering, aggregation, and other operations are performed independently for each time window. When SLIDING is equal to INTERVAL, the sliding window becomes a tumbling window.
The INTERVAL clause is used to generate windows of equal time periods, and SLIDING is used to specify the time the window slides forward. Each executed query is a time window, and the time window slides forward as time flows. When defining continuous queries, it is necessary to specify the size of the time window (time window) and the forward sliding times for each execution. As shown, [t0s, t0e], [t1s, t1e], [t2s, t2e] are the time window ranges for three continuous queries, and the sliding time range is indicated by sliding time. Query filtering, aggregation, and other operations are performed independently for each time window. When SLIDING is equal to INTERVAL, the sliding window becomes a tumbling window. By default, windows begin at Unix time 0 (1970-01-01 00:00:00 UTC). If interval_offset is specified, the windows start from "Unix time 0 + interval_offset".
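For example (hypothetical table), hourly windows shifted 5 minutes from Unix time 0:

```sql
SELECT _wstart, COUNT(*) FROM meters INTERVAL(1h, 5m);
```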
<figure>
<Image img={imgStep01} alt=""/>

View File

@ -58,11 +58,11 @@ Note: Subscriptions to supertables and databases are advanced subscription modes
## Delete topic
If you no longer need to subscribe to data, you can delete the topic, but note: only TOPICS that are not currently being subscribed to can be deleted.
If you no longer need to subscribe to the data, you can delete the topic. If the topic is currently subscribed to by a consumer, it can be forcibly deleted using the FORCE syntax; after a forced deletion, the subscribed consumers will receive errors when consuming data (FORCE syntax supported from version 3.3.6.0).
```sql
/* Delete topic */
DROP TOPIC [IF EXISTS] topic_name;
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
```
At this point, if there are consumers on this subscription topic, they will receive an error.
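A minimal example of the extended syntax (topic name hypothetical):
```sql
DROP TOPIC IF EXISTS FORCE topic_meters;
```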
@ -81,8 +81,10 @@ Consumer groups can only be created through the TDengine client driver or APIs p
## Delete consumer group
When a consumer is created, it is assigned to a consumer group. Consumers cannot be deleted explicitly, but the consumer group can be deleted. If consumers in the current group are still consuming, the FORCE syntax can be used to force deletion; after forced deletion, the subscribed consumers will receive errors when consuming data (FORCE syntax supported since version 3.3.6.0).
```sql
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
```
Deletes the consumer group `cgroup_name` on the topic `topic_name`.
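For example, forcibly deleting a consumer group that still has active consumers (names hypothetical):
```sql
DROP CONSUMER GROUP IF EXISTS FORCE cgroup_meters ON topic_meters;
```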

View File

@ -11,11 +11,11 @@ import imgStream from './assets/stream-processing-01.png';
```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery [notification_definition]
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
FILL_HISTORY [0|1] [ASYNC]
IGNORE UPDATE [0|1]
}
@ -127,6 +127,13 @@ create stream if not exists s1 fill_history 1 into st1 as select count(*) from
If the stream task is completely outdated and you no longer want it to monitor or process data, you can manually delete it. The computed data will still be retained.
Tips:
- When fill_history is enabled, creating a stream requires finding the boundary point of historical data; if there is a lot of historical data, stream creation may take a long time. In this case, you can use fill_history 1 async (supported since version 3.3.6.0) so that stream creation is processed in the background; the CREATE STREAM statement returns immediately without blocking subsequent operations (see the sketch after this list). async takes effect only with fill_history 1; creating a stream with fill_history 0 is fast and needs no asynchronous processing.
- SHOW STREAMS can be used to view the progress of background stream creation: ready indicates success, init indicates creation in progress, and failed indicates the creation has failed, in which case the message column shows the reason; a failed stream can be deleted and recreated.
- Also, do not create multiple streams asynchronously at the same time, as transaction conflicts may cause subsequent stream creations to fail.
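A hedged sketch of the workflow above (stream, target table, and source table names hypothetical):
```sql
-- Return immediately and scan historical data in the background
create stream if not exists s1 fill_history 1 async into st1 as
  select count(*) from t1 interval(10s);
-- Poll the creation progress: status moves from init to ready on success
show streams;
```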
## Deleting Stream Computing
```sql
@ -157,6 +164,7 @@ For non-window computations, the trigger of stream computing is real-time; for w
2. WINDOW_CLOSE: Triggered when the window closes (window closure is determined by event time and can be used in conjunction with watermark).
3. MAX_DELAY time: Triggers computation when the window closes; if the window has not closed but has stayed open longer than the time specified by max delay, computation is also triggered.
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only computes and pushes the results of the currently closed window. The window is computed only once at the moment of closure and is not recomputed later. This mode currently only supports INTERVAL windows (sliding is not supported); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE.
5. CONTINUOUS_WINDOW_CLOSE: Results are output when the window closes. Modifying or deleting data does not immediately trigger a recalculation; instead, a periodic recalculation is performed every rec_time_val. If rec_time_val is not specified, the recalculation period is 60 minutes. If a recalculation takes longer than rec_time_val, the next recalculation starts automatically after the current one completes. This mode currently only supports INTERVAL windows (see the sketch below). If the FILL clause is used, the relevant taosAdapter parameters must be configured: adapterFqdn, adapterPort, and adapterToken. adapterToken is the Base64 encoding of `{username}:{password}`; for example, `root:taosdata` encodes to `cm9vdDp0YW9zZGF0YQ==`.
Since window closure is determined by event time, if the event stream is interrupted or continuously delayed, the event time cannot advance, which may prevent the latest computation results from being obtained.
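As a hedged sketch of the CONTINUOUS_WINDOW_CLOSE mode (names assumed; recalculation every 10 minutes):
```sql
create stream if not exists s2 trigger continuous_window_close recalculate 10m
  into st2 as select _wstart, count(*) from power.meters interval(1m);
```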

View File

@ -35,6 +35,7 @@ The list of keywords is as follows:
| AS | |
| ASC | |
| ASOF | |
| ASYNC | 3.3.6.0+ |
| AT_ONCE | |
| ATTACH | |
| AUTO | 3.3.5.0+ |

View File

@ -29,6 +29,17 @@ SELECT a.* FROM meters a LEFT ASOF JOIN meters b ON timetruncate(a.ts, 1s) < tim
### Main Join Condition
As a time-series database, all join queries in TDengine revolve around the primary key timestamp column. Therefore, all join queries (except ASOF/Window Join) must include an equality condition on the primary key column, and the first primary key column equality condition that appears in the join conditions will be considered the main join condition. ASOF Join's main join condition can include non-equality conditions, while Window Join's main join condition is specified through `WINDOW_OFFSET`.
Starting from version 3.3.6.0, TDengine supports constant timestamps in subqueries (including constant functions that return timestamps, such as today() and now(), as well as constant timestamps and the results of adding durations to or subtracting durations from them) acting as equivalent primary key columns that can appear in the main join condition. For example:
```sql
SELECT * from d1001 a JOIN (SELECT today() as ts1, * from d1002 WHERE ts = '2025-03-19 10:00:00.000') b ON timetruncate(a.ts, 1d) = b.ts1;
```
The SQL above joins all of today's records in table d1001 with the record at a specific time in table d1002. Note that a constant time string appearing in SQL is not treated as a timestamp by default; for example, "2025-03-19 10:00:00.000" is treated only as a string, not as a timestamp. When it needs to be treated as a constant timestamp, you can mark the constant string as a timestamp type using the `timestamp` prefix. For example:
```sql
SELECT * from d1001 a JOIN (SELECT timestamp '2025-03-19 10:00:00.000' as ts1, * from d1002 WHERE ts = '2025-03-19 10:00:00.000') b ON timetruncate(a.ts, 1d) = b.ts1;
```
Apart from Window Join, TDengine supports the `timetruncate` function in the main join condition, such as `ON timetruncate(a.ts, 1s) = timetruncate(b.ts, 1s)`, but does not support any other functions or scalar operations.
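For example (tables borrowed from the earlier examples), truncating both sides to one-second precision:
```sql
SELECT a.*, b.* FROM d1001 a JOIN d1002 b
  ON timetruncate(a.ts, 1s) = timetruncate(b.ts, 1s);
```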
@ -38,7 +49,7 @@ The characteristic ASOF/Window Join of time-series databases supports grouping t
### Primary Key Timeline
As a time-series database, TDengine requires each table (subtable) to have a primary key timestamp column, which will serve as the primary key timeline for many time-related operations. The result of a subquery or the result of a Join operation also needs to clearly identify which column will be considered the primary key timeline for subsequent time-related operations. In subqueries, the first appearing ordered primary key column (or its operation) or a pseudocolumn equivalent to the primary key column (`_wstart`/`_wend`) will be considered the primary key timeline of the output table. The selection of the primary key timeline in Join output results follows these rules:
As a time-series database, TDengine requires each table (subtable) to have a primary key timestamp column, which will serve as the primary key timeline for many time-related operations. The result of a subquery or the result of a Join operation also needs to clearly identify which column will be considered the primary key timeline for subsequent time-related operations. In subqueries, the first appearing ordered primary key column (or its operation) or a pseudocolumn equivalent to the primary key column (`_wstart`/`_wend`) will be considered the primary key timeline of the output table. In addition, starting with version 3.3.6.0, TDengine also supports constant timestamp columns in subquery results as the primary key timeline for the output table. The selection of the primary key timeline in Join output results follows these rules:
- In the Left/Right Join series, the primary key column of the driving table (subquery) will be used as the primary key timeline for subsequent queries; additionally, within the Window Join window, since both tables are ordered, any table's primary key column can be used as the primary key timeline, with a preference for the primary key column of the same table.
- Inner Join can use the primary key column of any table as the primary key timeline, but when there are grouping conditions similar to tag column equality conditions related by `AND` with the main join condition, it will not produce a primary key timeline.

View File

@ -36,6 +36,7 @@ In this document, it specifically refers to the internal levels of the second-le
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
| decimal | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
## SQL Syntax

View File

@ -830,6 +830,12 @@ This section introduces APIs that are all synchronous interfaces. After being ca
- res: [Input] Result set.
- **Return Value**: Non-`NULL`: successful, returns a pointer to a TAOS_FIELD structure, each element representing the metadata of a column. `NULL`: failure.
- `TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res)`
- **Interface Description**: Retrieves the attributes of each column in the query result set (column name, data type, column length). Used in conjunction with `taos_num_fields()`, it can be used to parse the data of a tuple (a row) returned by `taos_fetch_row()`. In addition to the basic information provided by TAOS_FIELD, TAOS_FIELD_E also includes `precision` and `scale` information for the data type.
- **Parameter Description**:
- res: [Input] Result set.
- **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_FIELD_E structure, where each element represents the metadata of a column. `NULL`: Failure.
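A minimal usage sketch, assuming an existing connection `taos` and the TAOS_FIELD_E members `name`, `type`, `precision`, and `scale` (see taos.h for the authoritative definition):
```c
TAOS_RES *res = taos_query(taos, "SELECT * FROM power.meters");
int num_fields = taos_num_fields(res);
TAOS_FIELD_E *fields = taos_fetch_fields_e(res);
for (int i = 0; i < num_fields; i++) {
  // precision/scale carry extra information for types such as DECIMAL and TIMESTAMP
  printf("col %d: name=%s type=%d precision=%d scale=%d\n",
         i, fields[i].name, fields[i].type, fields[i].precision, fields[i].scale);
}
taos_free_result(res);
```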
- `void taos_stop_query(TAOS_RES *res)`
- **Interface Description**: Stops the execution of the current query.
- **Parameter Description**:
@ -1121,10 +1127,14 @@ In addition to using SQL or parameter binding APIs to insert data, you can also
- conf: [Input] Pointer to a valid tmq_conf_t structure, representing a TMQ configuration object.
- key: [Input] Configuration item key name.
- value: [Input] Configuration item value.
- **Return Value**: Returns a tmq_conf_res_t enum value, indicating the result of the configuration setting.
- TMQ_CONF_OK: Successfully set the configuration item.
- TMQ_CONF_INVALID_KEY: Invalid key value.
- TMQ_CONF_UNKNOWN: Invalid key name.
- **Return Value**: Returns a tmq_conf_res_t enum value, indicating the result of the configuration setting. tmq_conf_res_t defined as follows:
```c
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2, // invalid key
TMQ_CONF_INVALID = -1, // invalid value
TMQ_CONF_OK = 0, // success
} tmq_conf_res_t;
```
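For example, a sketch that checks the return value when setting a consumer parameter:
```c
tmq_conf_t *conf = tmq_conf_new();
if (tmq_conf_set(conf, "group.id", "cgrp1") != TMQ_CONF_OK) {
  // key or value rejected; fix the configuration before creating the consumer
  fprintf(stderr, "failed to set group.id\n");
}
// ... set remaining parameters, create the consumer, then tmq_conf_destroy(conf)
```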
- `void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param)`
- **Interface Description**: Sets the auto-commit callback function in the TMQ configuration object.

View File

@ -121,6 +121,7 @@ Please refer to the specific error codes:
| 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. |
| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. |
| 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer, due to the Rebalance mechanism causing the Consumer and VGroup to be unbound. |
| 0x2390 | background thread write error in Efficient Writing | In the event of an efficient background thread write error, you can stop writing and rebuild the connection. |
- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
@ -148,6 +149,7 @@ TDengine currently supports timestamp, numeric, character, boolean types, and th
| JSON | java.lang.String | only supported in tags |
| VARBINARY | byte[] | |
| GEOMETRY | byte[] | |
| DECIMAL | java.math.BigDecimal | |
**Note**: Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended. Please use VARBINARY type instead.
GEOMETRY type is binary data in little endian byte order, complying with the WKB standard. For more details, please refer to [Data Types](../../sql-manual/data-types/)
@ -319,7 +321,15 @@ The configuration parameters in properties are as follows:
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Disable SSL certificate validation. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.
- TSDBDriver.PROPERTY_KEY_APP_NAME: App name, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is java.
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is empty.
- TSDBDriver.PROPERTY_KEY_ASYNC_WRITE: Efficient Writing mode. Currently, only the `stmt` method is supported. Effective only when using WebSocket connections. Default value is empty, meaning Efficient Writing mode is not enabled.
- TSDBDriver.PROPERTY_KEY_BACKEND_WRITE_THREAD_NUM: In Efficient Writing mode, this refers to the number of background write threads. Effective only when using WebSocket connections. Default value is 10.
- TSDBDriver.PROPERTY_KEY_BATCH_SIZE_BY_ROW: In Efficient Writing mode, this is the batch size for writing data, measured in rows. Effective only when using WebSocket connections. Default value is 1000.
- TSDBDriver.PROPERTY_KEY_CACHE_SIZE_BY_ROW: In Efficient Writing mode, this is the cache size, measured in rows. Effective only when using WebSocket connections. Default value is 10000.
- TSDBDriver.PROPERTY_KEY_COPY_DATA: In Efficient Writing mode, this determines whether to copy the binary data passed by the application through the `addBatch` method. Effective only when using WebSocket connections. Default value is false.
- TSDBDriver.PROPERTY_KEY_STRICT_CHECK: In Efficient Writing mode, this determines whether to validate the length of table names and variable-length data types. Effective only when using WebSocket connections. Default value is false.
- TSDBDriver.PROPERTY_KEY_RETRY_TIMES: In Efficient Writing mode, this is the number of retry attempts for failed write operations. Effective only when using WebSocket connections. Default value is 3.
Additionally, for native JDBC connections, other parameters such as log level and SQL length can be specified via the URL and Properties.
**Priority of Configuration Parameters**

View File

@ -25,6 +25,7 @@ Support all platforms that can run Node.js.
| Node.js Connector Version | Major Changes | TDengine Version |
| ------------------------- | ------------------------------------------------------------------------ | --------------------------- |
| 3.1.5 | Password supports special characters. | - |
| 3.1.4 | Modified the readme. | - |
| 3.1.3 | Upgraded the es5-ext version to address vulnerabilities in the lower version. | - |
| 3.1.2 | Optimized data protocol and parsing, significantly improved performance. | - |

View File

@ -168,6 +168,7 @@ This document details the server error codes that may be encountered when using
| 0x8000038B | Index not exist | Does not exist | Confirm if the operation is correct |
| 0x80000396 | Database in creating status | Database is being created | Retry |
| 0x8000039A | Invalid system table name | Internal error | Report issue |
| 0x8000039F | No VGroup's leader need to be balanced | A balance leader operation was performed on VGroups | There is no VGroup leader that needs to be balanced |
| 0x800003A0 | Mnode already exists | Already exists | Confirm if the operation is correct |
| 0x800003A1 | Mnode not there | Already exists | Confirm if the operation is correct |
| 0x800003A2 | Qnode already exists | Already exists | Confirm if the operation is correct |
@ -558,9 +559,10 @@ This document details the server error codes that may be encountered when using
## virtual table
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|-------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------|
| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling |
| 0x80006201 | Virtual table scan invalid downstream operator type | The incorrect execution plan generated causes the downstream operator type of the virtual table scan operator to be incorrect. | Check error logs, contact development for handling |
| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. | Check error logs, contact development for handling |
| 0x80006203 | Create virtual child table must use virtual super table | Create virtual child table using non-virtual super table | create virtual child table using virtual super table |
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------|
| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling |
| 0x80006201 | Virtual table scan invalid downstream operator type | The incorrect execution plan generated causes the downstream operator type of the virtual table scan operator to be incorrect. | Check error logs, contact development for handling |
| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. | Check error logs, contact development for handling |
| 0x80006203 | Create virtual child table must use virtual super table | Create virtual child table using non-virtual super table | create virtual child table using virtual super table |
| 0x80006204 | Virtual table not support decimal type | Create virtual table using decimal type | create virtual table without using decimal type |

View File

@ -0,0 +1,297 @@
---
title: Usage of Special Characters in Passwords
description: Usage of special characters in user passwords in TDengine
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
TDengine user passwords must meet the following rules:
1. The username must not exceed 23 bytes.
2. The password length must be between 8 and 255 characters.
3. The range of password characters:
1. Uppercase letters: `A-Z`
2. Lowercase letters: `a-z`
3. Numbers: `0-9`
4. Special characters: `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`
4. When the strong password policy is enabled (EnableStrongPassword 1, enabled by default), the password must contain at least three of the following categories: uppercase letters, lowercase letters, numbers, and special characters. When disabled, there are no restrictions on character types.
## Usage Guide for Special Characters in Different Components
Take the username `user1` and password `Ab1!@#$%^&*()-_+=[]{}` as an example.
```sql
CREATE USER user1 PASS 'Ab1!@#$%^&*()-_+=[]{}';
```
<Tabs defaultValue="shell" groupId="component">
<TabItem label="CLI" value="shell">
In the [TDengine Command Line Interface (CLI)](../../tdengine-reference/tools/tdengine-cli/), note the following:
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
- If the `-p` parameter is used with a password that contains special characters, single quotes or escaping must be used.
Login with user `user1`:
```shell
taos -u user1 -p'Ab1!@#$%^&*()-_+=[]{}'
taos -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\}
```
</TabItem>
<TabItem label="taosdump" value="taosdump">
In [taosdump](../../tdengine-reference/tools/taosdump/), note the following:
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used.
Backup database `test` with user `user1`:
```shell
taosdump -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -D test
taosdump -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} -D test
```
</TabItem>
<TabItem label="Benchmark" value="benchmark">
In [taosBenchmark](../../tdengine-reference/tools/taosbenchmark/), note the following:
- If the `-p` parameter is used without a password, you will be prompted to enter a password, and any acceptable characters can be entered.
- If the `-p` parameter is used with a password, and the password contains special characters, single quotes or escaping must be used.
Example of data write test with user `user1`:
```shell
taosBenchmark -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -d test -y
```
When using `taosBenchmark -f <JSON>`, there are no restrictions on the password in the JSON file.
</TabItem>
<TabItem label="taosX" value="taosx">
[taosX](../../tdengine-reference/components/taosx/) uses DSN to represent TDengine connections, in the format: `(taos|tmq)[+ws]://<user>:<pass>@<ip>:<port>`, where `<pass>` can contain special characters, such as: `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`.
Example of exporting data with user `user1`:
```shell
taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' \
-t 'csv:./test.csv'
```
Note that if the password can be URL decoded, the URL decoded result will be used as the password. For example: `taos+ws://user1:Ab1%21%40%23%24%25%5E%26%2A%28%29-_%2B%3D%5B%5D%7B%7D@localhost:6041` is equivalent to `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041`.
No special handling is required in [Explorer](../../tdengine-reference/components/taosexplorer/), just use it directly.
</TabItem>
<TabItem label="Java" value="java">
When using special character passwords in JDBC, the password needs to be URL encoded, as shown below:
```java
package com.taosdata.example;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import com.taosdata.jdbc.TSDBDriver;
public class JdbcPassDemo {
public static void main(String[] args) throws Exception {
String password = "Ab1!@#$%^&*()-_+=[]{}";
String encodedPassword = URLEncoder.encode(password, StandardCharsets.UTF_8.toString());
String jdbcUrl = "jdbc:TAOS-WS://localhost:6041";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "user1");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, encodedPassword);
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) {
System.out.println("Connected to " + jdbcUrl + " successfully.");
// you can use the connection to execute SQL here
} catch (Exception ex) {
// please refer to the JDBC specifications for detailed exceptions info
System.out.printf("Failed to connect to %s, %sErrMessage: %s%n",
jdbcUrl,
ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
ex.getMessage());
// Print stack trace for context in examples. Use logging in production.
ex.printStackTrace();
throw ex;
}
}
}
```
</TabItem>
<TabItem label="Python" value="python">
No special handling is required for special character passwords in Python, as shown below:
```python
import taos
import taosws
def create_connection():
host = "localhost"
port = 6030
return taos.connect(
user="user1",
password="Ab1!@#$%^&*()-_+=[]{}",
host=host,
port=port,
)
def create_ws_connection():
host = "localhost"
port = 6041
return taosws.connect(
user="user1",
password="Ab1!@#$%^&*()-_+=[]{}",
host=host,
port=port,
)
def show_databases(conn):
cursor = conn.cursor()
cursor.execute("show databases")
print(cursor.fetchall())
cursor.close()
if __name__ == "__main__":
print("Connect with native protocol")
conn = create_connection()
show_databases(conn)
print("Connect with websocket protocol")
conn = create_ws_connection()
show_databases(conn)
```
</TabItem>
<TabItem label="Go" value="go">
Starting from version 3.6.0, the Go connector supports passwords containing special characters, which must be URL-encoded (for example, with url.QueryEscape).
```go
package main
import (
"database/sql"
"fmt"
"log"
"net/url"
_ "github.com/taosdata/driver-go/v3/taosWS"
)
func main() {
var user = "user1"
var password = "Ab1!@#$%^&*()-_+=[]{}"
var encodedPassword = url.QueryEscape(password)
var taosDSN = user + ":" + encodedPassword + "@ws(localhost:6041)/"
taos, err := sql.Open("taosWS", taosDSN)
if err != nil {
log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error())
}
fmt.Println("Connected to " + taosDSN + " successfully.")
defer taos.Close()
}
```
</TabItem>
<TabItem label="Rust" value="rust">
In Rust, DSN is used to represent TDengine connections, in the format: `(taos|tmq)[+ws]://<user>:<pass>@<ip>:<port>`, where `<pass>` can contain special characters, such as: `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`.
```rust
let dsn = "taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041";
let connection = TaosBuilder::from_dsn(&dsn)?.build().await?;
```
</TabItem>
<TabItem label="Node.js" value="node">
Starting from version 3.1.5, the Node.js connector supports passwords containing all valid characters.
```js
const taos = require("@tdengine/websocket");
let dsn = 'ws://localhost:6041';
async function createConnect() {
try {
let conf = new taos.WSConfig(dsn);
conf.setUser('user1');
conf.setPwd('Ab1!@#$%^&*()-_+=[]{}');
conf.setDb('test');
conn = await taos.sqlConnect(conf);
console.log("Connected to " + dsn + " successfully.");
return conn;
} catch (err) {
console.log("Connection failed with code: " + err.code + ", message: " + err.message);
throw err;
}
}
createConnect()
```
</TabItem>
<TabItem label="C#" value="csharp">
When using passwords in C#, note that connection strings do not support semicolons (as semicolons are delimiters). In this case, you can construct the `ConnectionStringBuilder` without a password, and then set the username and password.
As shown below:
```csharp
var builder = new ConnectionStringBuilder("host=localhost;port=6030");
builder.Username = "user1";
builder.Password = "Ab1!@#$%^&*()-_+=[]{}";
using (var client = DbDriver.Open(builder)){}
```
</TabItem>
<TabItem label="C" value="c">
There are no restrictions on passwords in C.
```c
TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6030);
```
</TabItem>
<TabItem label="REST" value="rest">
When using passwords in REST API, note the following:
- Passwords use Basic Auth, in the format `Authorization: Basic base64(<user>:<pass>)`.
- Passwords containing colons `:` are not supported.
The following two methods are equivalent:
```shell
curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' \
-d 'show databases' http://localhost:6041/rest/sql
curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' \
-d 'show databases' http://localhost:6041/rest/sql
```
</TabItem>
</Tabs>

View File

@ -9,6 +9,7 @@ TARGETS = connect_example \
with_reqid_demo \
sml_insert_demo \
stmt_insert_demo \
stmt2_insert_demo \
tmq_demo
SOURCES = connect_example.c \
@ -18,6 +19,7 @@ SOURCES = connect_example.c \
with_reqid_demo.c \
sml_insert_demo.c \
stmt_insert_demo.c \
stmt2_insert_demo.c \
tmq_demo.c
LIBS = -ltaos -lpthread
@ -31,4 +33,4 @@ $(TARGETS):
$(CC) $(CFLAGS) -o $@ $(wildcard $(@F).c) $(LIBS)
clean:
rm -f $(TARGETS)
rm -f $(TARGETS)

View File

@ -0,0 +1,204 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// TAOS standard API example. The same syntax as MySQL, but only a subset
// to compile: gcc -o stmt2_insert_demo stmt2_insert_demo.c -ltaos
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "taos.h"
#define NUM_OF_SUB_TABLES 10
#define NUM_OF_ROWS 10
/**
* @brief Executes an SQL query and checks for errors.
*
* @param taos Pointer to TAOS connection.
* @param sql SQL query string.
*/
void executeSQL(TAOS *taos, const char *sql) {
TAOS_RES *res = taos_query(taos, sql);
int code = taos_errno(res);
if (code != 0) {
fprintf(stderr, "Error: %s\n", taos_errstr(res));
taos_free_result(res);
taos_close(taos);
exit(EXIT_FAILURE);
}
taos_free_result(res);
}
/**
* @brief Checks return status and exits if an error occurs.
*
* @param stmt2 Pointer to TAOS_STMT2.
* @param code Error code.
* @param msg Error message prefix.
*/
void checkErrorCode(TAOS_STMT2 *stmt2, int code, const char *msg) {
if (code != 0) {
fprintf(stderr, "%s. Code: %d, Error: %s\n", msg, code, taos_stmt2_error(stmt2));
taos_stmt2_close(stmt2);
exit(EXIT_FAILURE);
}
}
/**
* @brief Prepares data bindings for batch insertion.
*
* @param table_name Pointer to store allocated table names.
* @param tags Pointer to store allocated tag bindings.
* @param params Pointer to store allocated parameter bindings.
*/
void prepareBindData(char ***table_name, TAOS_STMT2_BIND ***tags, TAOS_STMT2_BIND ***params) {
*table_name = (char **)malloc(NUM_OF_SUB_TABLES * sizeof(char *));
*tags = (TAOS_STMT2_BIND **)malloc(NUM_OF_SUB_TABLES * sizeof(TAOS_STMT2_BIND *));
*params = (TAOS_STMT2_BIND **)malloc(NUM_OF_SUB_TABLES * sizeof(TAOS_STMT2_BIND *));
for (int i = 0; i < NUM_OF_SUB_TABLES; i++) {
// Allocate and assign table name
(*table_name)[i] = (char *)malloc(20 * sizeof(char));
sprintf((*table_name)[i], "d_bind_%d", i);
// Allocate memory for tags data
int *gid = (int *)malloc(sizeof(int));
int *gid_len = (int *)malloc(sizeof(int));
*gid = i;
*gid_len = sizeof(int);
char *location = (char *)malloc(20 * sizeof(char));
int *location_len = (int *)malloc(sizeof(int));
*location_len = sprintf(location, "location_%d", i);
(*tags)[i] = (TAOS_STMT2_BIND *)malloc(2 * sizeof(TAOS_STMT2_BIND));
(*tags)[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, gid, gid_len, NULL, 1};
(*tags)[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_BINARY, location, location_len, NULL, 1};
// Allocate memory for columns data
(*params)[i] = (TAOS_STMT2_BIND *)malloc(4 * sizeof(TAOS_STMT2_BIND));
int64_t *ts = (int64_t *)malloc(NUM_OF_ROWS * sizeof(int64_t));
float *current = (float *)malloc(NUM_OF_ROWS * sizeof(float));
int *voltage = (int *)malloc(NUM_OF_ROWS * sizeof(int));
float *phase = (float *)malloc(NUM_OF_ROWS * sizeof(float));
int32_t *ts_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
int32_t *current_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
int32_t *voltage_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
int32_t *phase_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
(*params)[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_TIMESTAMP, ts, ts_len, NULL, NUM_OF_ROWS};
(*params)[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_FLOAT, current, current_len, NULL, NUM_OF_ROWS};
(*params)[i][2] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, voltage, voltage_len, NULL, NUM_OF_ROWS};
(*params)[i][3] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_FLOAT, phase, phase_len, NULL, NUM_OF_ROWS};
for (int j = 0; j < NUM_OF_ROWS; j++) {
struct timeval tv;
gettimeofday(&tv, NULL);
ts[j] = tv.tv_sec * 1000LL + tv.tv_usec / 1000 + j;
current[j] = (float)rand() / RAND_MAX * 30;
voltage[j] = rand() % 300;
phase[j] = (float)rand() / RAND_MAX;
ts_len[j] = sizeof(int64_t);
current_len[j] = sizeof(float);
voltage_len[j] = sizeof(int);
phase_len[j] = sizeof(float);
}
}
}
/**
* @brief Frees allocated memory for binding data.
*
* @param table_name Pointer to allocated table names.
* @param tags Pointer to allocated tag bindings.
* @param params Pointer to allocated parameter bindings.
*/
void freeBindData(char ***table_name, TAOS_STMT2_BIND ***tags, TAOS_STMT2_BIND ***params) {
for (int i = 0; i < NUM_OF_SUB_TABLES; i++) {
free((*table_name)[i]);
for (int j = 0; j < 2; j++) {
free((*tags)[i][j].buffer);
free((*tags)[i][j].length);
}
free((*tags)[i]);
for (int j = 0; j < 4; j++) {
free((*params)[i][j].buffer);
free((*params)[i][j].length);
}
free((*params)[i]);
}
free(*table_name);
free(*tags);
free(*params);
}
/**
* @brief Inserts data using the TAOS stmt2 API.
*
* @param taos Pointer to TAOS connection.
*/
void insertData(TAOS *taos) {
TAOS_STMT2_OPTION option = {0, false, false, NULL, NULL};
TAOS_STMT2 *stmt2 = taos_stmt2_init(taos, &option);
if (!stmt2) {
fprintf(stderr, "Failed to initialize TAOS statement.\n");
exit(EXIT_FAILURE);
}
// stmt2 prepare sql
checkErrorCode(stmt2, taos_stmt2_prepare(stmt2, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", 0),
"Statement preparation failed");
char **table_name;
TAOS_STMT2_BIND **tags, **params;
prepareBindData(&table_name, &tags, &params);
// stmt2 bind batch
TAOS_STMT2_BINDV bindv = {NUM_OF_SUB_TABLES, table_name, tags, params};
checkErrorCode(stmt2, taos_stmt2_bind_param(stmt2, &bindv, -1), "Parameter binding failed");
// stmt2 exec batch
int affected;
checkErrorCode(stmt2, taos_stmt2_exec(stmt2, &affected), "Execution failed");
printf("Successfully inserted %d rows.\n", affected);
// free and close
freeBindData(&table_name, &tags, &params);
taos_stmt2_close(stmt2);
}
int main() {
const char *host = "localhost";
const char *user = "root";
const char *password = "taosdata";
uint16_t port = 6030;
TAOS *taos = taos_connect(host, user, password, NULL, port);
if (taos == NULL) {
fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL),
taos_errstr(NULL));
taos_cleanup();
exit(EXIT_FAILURE);
}
// create database and table
executeSQL(taos, "CREATE DATABASE IF NOT EXISTS power");
executeSQL(taos, "USE power");
executeSQL(taos,
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
"(groupId INT, location BINARY(24))");
insertData(taos);
taos_close(taos);
taos_cleanup();
}

View File

@ -4,7 +4,7 @@
"main": "index.js",
"license": "MIT",
"dependencies": {
"@tdengine/websocket": "^3.1.2"
"@tdengine/websocket": "^3.1.5"
},
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"

View File

@ -1,4 +1,3 @@
const { sleep } = require("@tdengine/websocket");
const taos = require("@tdengine/websocket");
// ANCHOR: create_consumer
@ -52,6 +51,12 @@ async function prepare() {
await wsSql.close();
}
const delay = function(ms) {
return new Promise(function(resolve) {
setTimeout(resolve, ms);
});
};
async function insert() {
let conf = new taos.WSConfig('ws://localhost:6041');
conf.setUser('root');
@ -60,7 +65,7 @@ async function insert() {
let wsSql = await taos.sqlConnect(conf);
for (let i = 0; i < 50; i++) {
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
await sleep(100);
await delay(100);
}
await wsSql.close();
}

View File

@ -1,4 +1,3 @@
const { sleep } = require("@tdengine/websocket");
const taos = require("@tdengine/websocket");
const db = 'power';

View File

@ -182,7 +182,7 @@ INTERVAL(interval_val [, interval_offset])
```
The time window clause consists of 3 sub-clauses:
- INTERVAL clause: generates windows of equal time periods, where interval_val specifies the size of each time window and interval_offset specifies the window offset;
- INTERVAL clause: generates windows of equal time periods, where interval_val specifies the size of each time window and interval_offset specifies the window offset; by default, windows are divided starting from Unix time 0 (1970-01-01 00:00:00 UTC); if interval_offset is set, window division starts from "Unix time 0 + interval_offset";
- SLIDING clause: specifies the time by which the window slides forward;
- FILL: specifies the fill mode for missing data in the window interval.
@ -688,4 +688,4 @@ select a.* from meters a left asof join meters b on timetruncate(a.ts, 1s) < tim
The restrictions on query result ordering include the following.
- For ordinary tables, subtables, and subqueries without grouping conditions or sorting, query results are output in the order of the driving table's primary key column.
- For supertable queries, Full Joins, or queries with grouping conditions but no sorting, there is no fixed output order; therefore, when ordering is required and the output has no fixed order, an explicit sort operation is needed. Some functions that depend on a timeline may fail to execute because no valid timeline is output.
- For supertable queries, Full Joins, or queries with grouping conditions but no sorting, there is no fixed output order; therefore, when ordering is required and the output has no fixed order, an explicit sort operation is needed. Some functions that depend on a timeline may fail to execute because no valid timeline is output.

View File

@ -64,10 +64,10 @@ CREATE TOPIC [IF NOT EXISTS] topic_name [with meta] AS DATABASE db_name;
## Deleting a Topic
If you no longer need to subscribe to the data, you can delete the topic. Note that only topics that are not currently subscribed to can be deleted
If you no longer need to subscribe to the data, you can delete the topic. If the topic is currently subscribed to by a consumer, it can be forcibly deleted with the FORCE syntax; after forced deletion, the subscribed consumers will receive errors when consuming data (FORCE syntax supported since v3.3.6.0)
```sql
DROP TOPIC [IF EXISTS] topic_name;
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
```
## Viewing Topics
@ -94,9 +94,9 @@ SHOW CONSUMERS;
### Deleting a Consumer Group
When a consumer is created, it is assigned a consumer group. Consumers cannot be deleted explicitly, but a consumer group with no consumers in it can be deleted with the following statement:
When a consumer is created, it is assigned a consumer group. Consumers cannot be deleted explicitly, but the consumer group can be deleted. If consumers in the current group are still consuming, the FORCE syntax can be used to force deletion; after forced deletion, the subscribed consumers will receive errors when consuming data (FORCE syntax supported since v3.3.6.0).
```sql
DROP CONSUMER GROUP [IF EXISTS] cgroup_name ON topic_name;
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
```
## Data Subscription
@ -129,6 +129,7 @@ TDengine's data subscription feature supports replay, allowing users to
```
Note the following when using the replay feature of data subscription:
- Enable replay by setting the consumer parameter enable.replay to true.
- Only query subscriptions support data replay; supertable and database subscriptions do not.
- Replay does not support saving progress.
- Because data replay itself takes processing time, replay precision has an error of a few dozen milliseconds.

View File

@ -23,11 +23,11 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name
SUBTABLE(expression) AS subquery
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
FILL_HISTORY [0|1] [ASYNC]
IGNORE UPDATE [0|1]
}
@ -102,7 +102,7 @@ In the PARTITION clause, an alias tname is defined for tbname; in the PARTITION
By enabling the fill_history option, the created stream computing task can process data written before, during, and after its creation. This means that data written either before or after the stream is created falls within the scope of stream computing, ensuring data integrity and consistency. This setting gives users greater flexibility to handle historical and new data according to actual needs.
Note:
- When fill_history is enabled, creating a stream requires finding the boundary point of historical data; if there is a lot of historical data, stream creation may take a long time. In this case, you can set the parameter streamRunHistoryAsync (supported since version 3.3.6.0) to 1 (default 0) to process stream creation in the background; the CREATE STREAM statement then returns immediately without blocking subsequent operations.
- When fill_history is enabled, creating a stream requires finding the boundary point of historical data; if there is a lot of historical data, stream creation may take a long time. In this case, you can use the fill_history 1 async syntax (supported since v3.3.6.0) to process stream creation in the background; the CREATE STREAM statement then returns immediately without blocking subsequent operations. async takes effect only with fill_history 1; creating a stream with fill_history 0 is fast and needs no asynchronous processing.
- SHOW STREAMS can be used to view the progress of background stream creation (ready indicates success, init indicates creation in progress, failed indicates creation failed, in which case the message column shows the reason; a failed stream can be deleted and recreated).
@ -131,7 +131,12 @@ create stream if not exists count_history_s fill_history 1 into count_history as
1. AT_ONCE: triggered immediately on write.
2. WINDOW_CLOSE: triggered when the window closes (window closure is determined by event time and can be used together with watermark).
3. MAX_DELAY time: triggers computation when the window closes; if the window has not closed but has stayed open longer than the time specified by max delay, computation is also triggered.
4. FORCE_WINDOW_CLOSE: based on the current time of the operating system, only the result of the currently closed window is computed and pushed out. The window is computed only once at the moment of closure and is not recomputed afterwards. This mode currently only supports INTERVAL windows (sliding is not supported); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE.
4. FORCE_WINDOW_CLOSE: based on the current time of the operating system, only the result of the currently closed window is computed and pushed out. The window is computed only once at the moment of closure and is not recomputed afterwards. This mode currently only supports INTERVAL windows (sliding is supported); in this mode, FILL_HISTORY is automatically set to 0, IGNORE EXPIRED is automatically set to 1, IGNORE UPDATE is automatically set to 1; FILL only supports PREV, NULL, NONE, VALUE.
- This mode can be used to implement continuous queries, for example, creating a stream that queries the number of rows in the past 10s window every 1s. The SQL is as follows:
```sql
create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s)
```
5. CONTINUOUS_WINDOW_CLOSE: results are output when the window closes. Modifying or deleting data does not immediately trigger a recalculation; instead, a periodic recalculation is performed every rec_time_val. If rec_time_val is not specified, the recalculation period is 60 minutes. If a recalculation takes longer than rec_time_val, the next recalculation starts automatically after the current one completes. This mode currently only supports INTERVAL windows. If FILL is used, the relevant taosAdapter parameters must be configured: adapterFqdn, adapterPort, and adapterToken. adapterToken is the Base64 encoding of `{username}:{password}`; for example, `root:taosdata` encodes to `cm9vdDp0YW9zZGF0YQ==`.
Window closure is determined by event time; if the event stream is interrupted or continuously delayed, the event time cannot advance, which may prevent the latest computation results from being obtained.

View File

@ -97,7 +97,6 @@ Each Row in the CSV file configures one OPC data point. The rules for a Row are as follows:
(1) It corresponds to the columns in the Header as follows
| No. | Column in Header | Value Type | Value Range | Required | Default |
|----|-------------------------| -------- |----------------------------------------------------------------------------------------------------------------------------------------------------------------------|------|--------------|
| 1 | tag_name | String | A string such as `root.parent.temperature` that conforms to the OPC DA ID specification | Yes | |

View File

@ -9,9 +9,15 @@
> Discard: ignore the abnormal data and do not write it to the target database
> Report error: the task reports an error
- **Target database connection timeout** The connection to the target database failed. Available strategies: archive, discard, report error, cache
> Cache: when the target database is in an abnormal state (connection error, insufficient resources, etc.), data is written to a cache file (default path `${data_dir}/tasks/_id/.datetime`) and re-ingested after the target database recovers
- **Target database does not exist** The write reports that the target database does not exist. Available strategies: archive, discard, report error
- **Table does not exist** The write reports that the table does not exist. Available strategies: archive, discard, report error, auto-create table
> Auto-create table: create the table automatically and retry once creation succeeds
- **Primary key timestamp overflow** Check whether the timestamp in the first column of the data is within the valid range (now - keep1, now + 100y). Available strategies: archive, discard, report error
- **Primary key timestamp empty** Check whether the timestamp in the first column of the data is empty. Available strategies: archive, discard, report error, use current time
> Use current time: fill the empty timestamp field with the current time
- **Composite primary key empty** The write reports an empty composite primary key. Available strategies: archive, discard, report error
- **Table name length overflow** Check whether the subtable name length exceeds the limit (maximum 192 characters). Available strategies: archive, discard, report error, truncate, truncate and archive
> Truncate: take the first 192 characters of the original table name as the new table name
> Truncate and archive: take the first 192 characters of the original table name as the new table name, and write this row to the archive file
@ -20,4 +26,20 @@
- **Table name template variable empty** Check whether a variable in the subtable name template is empty. Available strategies: discard, leave blank, replace variable with a specified string
> Leave blank: no special handling at the variable position, e.g. `a_{x}` becomes `a_`
> Replace variable with specified string: use the string from the input box at the variable position, e.g. `a_{x}` becomes `a_b`
- **Column name length overflow** Check whether the column name length exceeds the limit (maximum 64 characters). Available strategies: archive, discard, report error
- **Column does not exist** The write reports that the column does not exist. Available strategies: archive, discard, report error, automatically add missing column
> Automatically add missing column: modify the table schema automatically to add the column based on the data, and retry once the modification succeeds
- **Column name length overflow** Check whether the column name length exceeds the limit (maximum 64 characters). Available strategies: archive, discard, report error
- **Automatic column expansion** A switch option; when enabled, if column data exceeds the column length, the table schema is modified automatically and the write is retried
- **Column length overflow** The write reports a column length overflow. Available strategies: archive, discard, report error, truncate, truncate and archive
> Truncate: keep the first n characters of the data that fit within the length limit
> Truncate and archive: keep the first n characters within the length limit, and write this row to the archive file
- **Data exception** Handling strategy for other data exceptions (not listed above). Available strategies: archive, discard, report error
- **Connection timeout** Configure the target database connection timeout, in seconds, range 1-600
- **Temporary storage file location** Configure the location of cache files; the effective location is `$DATA_DIR/tasks/:id/{location}`
- **Archived data retention days** A non-negative integer; 0 means unlimited
- **Archived data available space** 0-65535, where 0 means unlimited
- **Archived data file location** Configure the location of archive files; the effective location is `$DATA_DIR/tasks/:id/{location}`
- **Archive failure handling strategy** Strategy when writing the archive file fails. Available strategies: delete old files, discard, report error and stop the task
> Delete old files: delete old files; if writing still fails after deletion, report an error and stop the task
> Discard: discard the data about to be archived
> Report error and stop task: report an error and stop the current task

Binary file not shown.

Before

Width:  |  Height:  |  Size: 43 KiB

After

Width:  |  Height:  |  Size: 98 KiB

View File

@ -0,0 +1,31 @@
---
title: "LSTM"
sidebar_label: "LSTM"
---
This section describes how to use the LSTM model.
## Feature Overview
The LSTM (Long Short-Term Memory) model is a special kind of recurrent neural network well suited to tasks such as time-series processing and natural language processing. Through its unique gating mechanism, it can effectively capture long-term dependencies
and mitigate the vanishing-gradient problem of traditional RNNs, enabling accurate prediction on sequential data; however, it does not directly provide confidence interval results for its computations.
The complete SQL statement for invoking it is as follows:
```SQL
SELECT _frowts, FORECAST(i32, "algo=lstm,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5") from foo
```
```json5
{
"rows": fc_rows, // number of rows in the result
"period": period, // periodicity of the result, same as the input
"alpha": alpha, // confidence interval of the result, same as the input
"algo": "lstm", // algorithm used for the result
"mse": mse, // minimum mean squared error (MSE) of the model fitted to the input time series
"res": res // result in column format
}
```
### References
- [1] Hochreiter S. Long Short-term Memory[J]. Neural Computation MIT-Press, 1997.

View File

@ -0,0 +1,35 @@
---
title: "MLP"
sidebar_label: "MLP"
---
This section describes how to use the MLP model.
## Feature Overview
MLP (Multilayer Perceptron) is a classic neural network model that can learn the nonlinear relationships in historical data,
capture patterns in time series, and predict future values. It performs feature extraction and mapping through multiple fully connected layers,
generating prediction results from the input historical data. Because it does not directly model trend or seasonal variation,
it usually needs to be combined with data preprocessing to improve results, and is well suited to nonlinear and complex time-series problems.
The complete SQL statement for invoking it is as follows:
```SQL
SELECT _frowts, FORECAST(i32, "algo=mlp") from foo
```
```json5
{
"rows": fc_rows, // number of rows in the result
"period": period, // periodicity of the result, same as the input
"alpha": alpha, // confidence interval of the result, same as the input
"algo": "mlp", // algorithm used for the result
"mse": mse, // minimum mean squared error (MSE) of the model fitted to the input time series
"res": res // result in column format
}
```
### References
- [1]Rumelhart D E, Hinton G E, Williams R J. Learning representations by back-propagating errors[J]. nature, 1986, 323(6088): 533-536.
- [2]Rosenblatt F. The perceptron: a probabilistic model for information storage and organization in the brain[J]. Psychological review, 1958, 65(6): 386.
- [3]LeCun Y, Bottou L, Bengio Y, et al. Gradient-based learning applied to document recognition[J]. Proceedings of the IEEE, 1998, 86(11): 2278-2324.

View File

@ -3,7 +3,9 @@ title: "Machine Learning Algorithms"
sidebar_label: "Machine Learning Algorithms"
---
Autoencoder<sup>[1]</sup>: TDgpt has a built-in anomaly detection algorithm based on autoencoders, which works well on periodic time-series data. To use this model, pre-train it on the input time-series data and save the trained model to the service directory `ad_autoencoder`; then specify the model in the SQL statement to invoke it.
Autoencoder<sup>[1]</sup>: TDgpt has a built-in anomaly detection algorithm based on autoencoders,
which works well on periodic time-series data. To use this model, pre-train it on the input time-series data,
save the trained model to the service directory `ad_autoencoder`, and then specify the model in the SQL statement to invoke it.
```SQL
--- Add the model name in options: ad_autoencoder_foo, the autoencoder-based anomaly detection model trained on the foo dataset (table), performs the anomaly detection

View File

@ -141,9 +141,20 @@ Sample code for stmt parameter binding is as follows:
```
</TabItem>
<TabItem label="C" value="c">
Sample code for stmt2 parameter binding is as follows (requires TDengine v3.3.5.0 or later):
```c
{{#include docs/examples/c/stmt2_insert_demo.c}}
```
Sample code for stmt parameter binding is as follows:
```c
{{#include docs/examples/c/stmt_insert_demo.c}}
```
</TabItem>
<TabItem label="REST API" value="rest">
Not supported

View File

@ -237,7 +237,7 @@ typedef struct SUdfInterBuf {
#### Scalar Function Example [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
bit_add implements bitwise AND across multiple columns. If there is only one column, that column is returned. bit_add ignores null values.
bit_and implements bitwise AND across multiple columns. If there is only one column, that column is returned. bit_and ignores null values.
<details>
<summary>bit_and.c</summary>
@ -287,12 +287,46 @@ select max_vol(vol1, vol2, vol3, deviceid) from battery;
</details>
#### Aggregate Function Example 3: computing an average over split strings [extract_avg](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/extract_avg.c)
The `extract_avg` function converts a comma-separated string of numbers into a set of values, aggregates the results over all rows, and computes the final average. Note the following when implementing it:
- `interBuf->numOfResult` must return 1 or 0; it cannot be used as a count.
- Counting can use extra buffering, such as the `SumCount` struct.
- Strings must be retrieved with `varDataVal`.
Create the table:
```sql
create table scores(ts timestamp, varStr varchar(128));
```
Create the user-defined function:
```sql
create aggregate function extract_avg as '/root/udf/libextract_avg.so' outputtype double bufsize 16 language 'C';
```
Use the user-defined function:
```sql
select extract_avg(varStr) from scores;
```
Generate the `.so` file:
```bash
gcc -g -O0 -fPIC -shared extract_avg.c -o libextract_avg.so
```
<details>
<summary>extract_avg.c</summary>
```c
{{#include tests/script/sh/extract_avg.c}}
```
</details>
## Developing UDFs in Python
### Preparing the Environment
The specific steps to prepare the environment are as follows:
- Step 1: prepare a Python runtime environment.
- Step 1: prepare a Python runtime environment. If you build and install Python locally, be sure to enable the `--enable-shared` option; otherwise the later installation of taospyudf will fail because a shared library cannot be generated.
- Step 2: install the Python package taospyudf. The command is as follows.
```shell
pip3 install taospyudf

View File

@ -56,7 +56,7 @@ dataDir /mnt/data6 2 0
Normally, when TDengine selects one of the mount points at the same tier for generating a new data file, it uses a round-robin strategy. In reality, however, disks may differ in capacity, or have the same capacity but different amounts of written data, which leads to imbalanced free space across disks; in practice this can result in choosing a disk with very little space left.
To solve this problem, a new configuration, minDiskFreeSize, was introduced in 3.1.1.0. When the available space on a disk is less than or equal to this threshold, that disk is no longer selected for generating new data files. The unit of this configuration item is bytes; its value should be greater than 2GB, i.e., mount points with less than 2GB of available space are skipped.
To solve this problem, a new configuration, minDiskFreeSize, was introduced in 3.1.1.0. When the available space on a disk is less than or equal to this threshold, that disk is no longer selected for generating new data files. The unit of this configuration item is bytes; if the configured value is greater than 2GB, mount points with less than 2GB of available space are skipped.
Starting from version 3.3.2.0, a new configuration, disable_create_new_file, was introduced to control whether generating new files on a given mount point is prohibited. Its default value is false, i.e., new files can be generated on every mount point by default.
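A hedged taos.cfg sketch (threshold value assumed for illustration):
```
# Skip same-tier mount points whose free space is at or below 10 GB
minDiskFreeSize 10737418240
```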

View File

@ -0,0 +1,274 @@
---
sidebar_label: Security Configuration
title: Security Configuration
toc_max_heading_level: 4
---
## Background
TDengine's distributed, multi-component nature makes its security configuration a significant concern in production systems. This document describes the security issues of the individual TDengine components under different deployment modes, and provides deployment and configuration recommendations to help keep users' data secure.
## Components Involved in Security Configuration
TDengine consists of multiple components, including:
- `taosd`: the core component.
- `taosc`: the client library.
- `taosAdapter`: the REST API and WebSocket service.
- `taosKeeper`: the monitoring service component.
- `taosX`: the data pipeline and backup/restore component.
- `taosxAgent`: the auxiliary component for ingesting data from external data sources.
- `taosExplorer`: the web-based visual management interface.
In addition, related to TDengine deployment and applications, the following components may also be present:
- Applications that connect to and use the TDengine database through the various connectors.
- External data sources: other data sources ingested into TDengine, such as MQTT, OPC, Kafka, etc.
The relationships among the components are as follows:
![TDengine product ecosystem topology](./tdengine-topology.png)
For a detailed introduction to each component, see [Component Introduction](./intro).
## TDengine Security Settings
### `taosd`
The taosd cluster uses TCP connections based on its own protocol for data exchange between nodes, which is relatively low-risk, but the transport is not encrypted, so some security risk remains.
Enabling compression may help obfuscate the TCP data.
- **compressMsgSize**: whether to compress RPC messages. Integer; options: -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed.
To keep database operations traceable, enabling the audit feature is recommended.
- **audit**: audit switch; 0 is off, 1 is on. On by default.
- **auditInterval**: reporting interval, in milliseconds. Default 5000.
- **auditCreateTable**: whether to enable auditing for subtable creation. 0 is off, 1 is on. On by default.
To keep data files secure, database encryption can be enabled (a sketch follows the parameters below).
- **encryptAlgorithm**: the data encryption algorithm.
- **encryptScope**: the scope of data encryption.
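A hedged taos.cfg sketch (values assumed; consult the configuration reference for the supported algorithms and scopes):
```
encryptAlgorithm sm4
encryptScope     tsdb
```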
Enabling the whitelist restricts the addresses that may access the system, further enhancing privacy.
- **enableWhiteList**: whitelist switch; 0 is off, 1 is on. Off by default.
### `taosc`
Users and other components connect to `taosd` using the native client library (taosc) and its own protocol; the data security risk is low, but the transport is still not encrypted, so some security risk remains.
### `taosAdapter`
taosAdapter connects to taosd using the native client library (taosc) and its own protocol, and likewise supports RPC message compression, so it does not create a data security problem.
Applications and other components connect to taosAdapter through the various language connectors. By default, the connection is based on HTTP 1.1 and is not encrypted. To secure data transfer between taosAdapter and other components, SSL-encrypted connections must be configured. Modify the following configuration in the `/etc/taos/taosadapter.toml` configuration file:
```toml
[ssl]
enable = true
certFile = "/path/to/certificate-file"
keyFile = "/path/to/private-key"
```
Configure the HTTPS/SSL access method in the connector to complete encrypted access.
To further enhance security, the whitelist feature can be enabled; configure it in `taosd`, and it also takes effect for the taosAdapter component.
### `taosX`
`taosX` exposes a REST API and a gRPC interface, where the gRPC interface is used for taos-agent connections.
- The REST API is based on HTTP 1.1 and is not encrypted, which is a security risk.
- The gRPC interface is based on HTTP 2 and is not encrypted, which is a security risk.
To keep data secure, it is recommended that the taosX API be restricted to internal access only. Modify the following configuration in the `/etc/taos/taosx.toml` configuration file:
```toml
[serve]
listen = "127.0.0.1:6050"
grpc = "127.0.0.1:6055"
```
Starting from TDengine 3.3.6.0, taosX supports HTTPS connections. Add the following configuration to the `/etc/taos/taosx.toml` file:
```toml
[serve]
ssl_cert = "/path/to/server.pem"
ssl_key = "/path/to/server.key"
ssl_ca = "/path/to/ca.pem"
```
And change the API address in Explorer to an HTTPS connection:
```toml
# taosX API local connection
x_api = "https://127.0.01:6050"
# public IP or domain name
grpc = "https://public.domain.name:6055"
```
### `taosExplorer`
`taosAdapter` 组件相似,`taosExplorer` 组件提供 HTTP 服务对外访问。在 `/etc/taos/explorer.toml` 配置文件中修改如下配置:
```toml
[ssl]
# SSL certificate file
certificate = "/path/to/ca.file"
# SSL certificate private key
certificate_key = "/path/to/key.file"
```
After that, use HTTPS to access Explorer, e.g. [https://192.168.12.34](https://192.168.12.34:6060).
### `taosxAgent`
After taosX enables HTTPS, the Agent component communicates with taosX over an HTTP 2 encrypted connection, using Arrow-Flight RPC for data exchange; the transferred content is in binary format, and only registered Agent connections are valid, ensuring data security.
It is recommended to always enable the HTTPS connection for Agent services running in insecure or public network environments.
### `taosKeeper`
taosKeeper uses a WebSocket connection to communicate with taosAdapter and writes the monitoring information reported by other components into TDengine.
The current version of `taosKeeper` has security risks:
- The monitoring address cannot be restricted to the local host; by default it listens on port 6043 of all addresses, which carries a risk of network attack. This risk can be ignored when deploying with Docker or Kubernetes without exposing the taosKeeper port.
- The configuration file contains a plaintext password, so the visibility of the configuration file needs to be reduced. In `/etc/taos/taoskeeper.toml`:
```toml
[tdengine]
host = "localhost"
port = 6041
username = "root"
password = "taosdata"
usessl = false
```
## Security Enhancements
We recommend using TDengine within a local area network.
If access must be provided outside the local network, consider adding the following configurations:
### Load Balancing
Use load balancing to provide the taosAdapter service externally.
Taking Nginx as an example, configure multi-node load balancing as follows:
```nginx
http {
server {
listen 6041;
location / {
proxy_pass http://websocket;
# Headers for WebSocket compatibility
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# Forwarded headers
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Server $hostname;
proxy_set_header X-Real-IP $remote_addr;
}
}
upstream websocket {
server 192.168.11.61:6041;
server 192.168.11.62:6041;
server 192.168.11.63:6041;
}
}
```
If the taosAdapter component is not configured with SSL secure connections, SSL still needs to be configured to ensure secure access. SSL can be configured at a higher-level API gateway or in Nginx; if you have stronger security requirements for connections between components, you can configure SSL in all components. The Nginx configuration is as follows:
```nginx
http {
server {
listen 443 ssl;
ssl_certificate /path/to/your/certificate.crt;
ssl_certificate_key /path/to/your/private.key;
}
}
```
### Security Gateway
In modern internet production systems, the use of security gateways is also common. [traefik](https://traefik.io/) is a good open-source choice; we take traefik as an example to explain the security configuration in an API gateway.
Traefik provides a variety of security configurations through middleware, including:
1. Authentication: Traefik provides multiple authentication methods such as BasicAuth, DigestAuth, custom authentication middleware, and OAuth 2.0.
2. IP whitelist (IPWhitelist): restricts the client IPs allowed to access.
3. Rate limiting (RateLimit): controls the number of requests sent to the service.
4. Custom headers: adds configurations such as `allowedHosts` via custom headers to improve security.
A common middleware example is as follows:
```yaml
labels:
- "traefik.enable=true"
- "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)"
- "traefik.http.routers.tdengine.entrypoints=https"
- "traefik.http.routers.tdengine.tls.certresolver=default"
- "traefik.http.routers.tdengine.service=tdengine"
- "traefik.http.services.tdengine.loadbalancer.server.port=6041"
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
- "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue"
- "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true"
- "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
- "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist"
```
The example above completes the following configuration:
- TLS authentication uses the `default` configuration, which can be set in the configuration file or in the traefik startup parameters, as follows:
```yaml
traefik:
image: "traefik:v2.3.2"
hostname: "traefik"
networks:
- traefik
command:
- "--log.level=INFO"
- "--api.insecure=true"
- "--providers.docker=true"
- "--providers.docker.exposedbydefault=false"
- "--providers.docker.swarmmode=true"
- "--providers.docker.network=traefik"
- "--providers.docker.watch=true"
- "--entrypoints.http.address=:80"
- "--entrypoints.https.address=:443"
- "--certificatesresolvers.default.acme.dnschallenge=true"
- "--certificatesresolvers.default.acme.dnschallenge.provider=alidns"
- "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com"
- "--certificatesresolvers.default.acme.email=linhehuo@gmail.com"
- "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json"
```
The startup parameters above configure the `default` TLS certificate resolver and automatic ACME authentication (automatic certificate request and renewal).
- Middleware `redirect-to-https`: configures redirection from HTTP to HTTPS, forcing the use of secure connections.
```yaml
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
```
- Middleware `check-header`: configures custom header checks. External access must add a custom header matching the configured value, preventing unauthorized access. This is a very simple and effective security mechanism when providing API access.
- Middleware `tdengine-ipwhitelist`: configures an IP whitelist. Only the specified IPs may access; matching uses CIDR routing rules and can cover both internal and external IP addresses.
## Summary
Data security is a key metric of the TDengine product. These measures are designed to protect TDengine deployments from unauthorized access and data leakage while maintaining performance and functionality. However, TDengine's own security configuration is not the only safeguard in production; it is even more important to build solutions that better match customer needs in combination with the user's own business systems.

View File

@ -0,0 +1,188 @@
---
sidebar_label: Perspective
title: Integration with Perspective
toc_max_heading_level: 4
---
## Overview
Perspective is a powerful open-source data visualization library developed by [Prospective.co](https://www.perspective.co/). Using `WebAssembly` and `Web Workers` technologies, it enables interactive real-time data analysis in web applications and delivers high-performance visualization in the browser. With it, developers can build dashboards, charts, and other visualizations that update in real time, and users can easily interact with the data, filtering, sorting, and mining it as needed. It is highly flexible, adapting to a variety of data formats and business scenarios; it is fast, keeping interaction smooth even with large-scale data; and it is easy to use, allowing both beginners and professional developers to quickly build visualization interfaces.
In terms of data connectivity, Perspective fully supports TDengine as a data source via TDengine's Python connector. It can efficiently retrieve various kinds of data, such as massive time-series data, from TDengine, and provides real-time capabilities including complex charts, in-depth statistical analysis, and trend prediction, helping users gain insight into the value of their data and supporting decision-making. It is an ideal choice for building applications with high requirements for real-time data visualization and analysis.
![perspective-architecture](./perspective/prsp_architecture.webp)
## Prerequisites
Perform the following installation steps on a Linux system:
- The TDengine service is deployed and running normally (both the enterprise and community editions work).
- taosAdapter is running normally; see the [taosAdapter user manual](../../../reference/components/taosadapter) for details.
- Python 3.10 or later is installed (if not, see [Python installation](https://docs.python.org/)).
- Download or clone the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project, enter the project root directory, and run the "install.sh" script to download and install the TDengine client library and related dependencies locally.
## Data Analysis
**Step 1**: Run the "run.sh" script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project to start the Perspective service. The service fetches data from the TDengine database every 300 milliseconds and streams it to the web-based `Perspective Viewer`.
```shell
sh run.sh
```
**Step 2**: Start a static web service, then access the `prsp-viewer.html` resource in a browser to display the visualized data.
```shell
python -m http.server 8081
```
![perspective-viewer](./perspective/prsp_view.webp)
## Usage Instructions
### Writing Data
The `producer.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project uses the TDengine Python connector to periodically insert data into the TDengine database. The script generates random data and inserts it into the database, simulating real-time data writes. The specific steps are as follows:
1. Establish a connection to TDengine.
1. Create the power database and the meters table.
1. Generate random data every 300 milliseconds and write it into the TDengine database.
For detailed write instructions for the Python connector, see [Python parameter binding](../../../reference/connector/python/#参数绑定).
### Loading Data
The `perspective_server.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project starts a Perspective server that reads data from TDengine and streams it into a Perspective table via a Tornado WebSocket.
1. Start a Perspective server.
1. Establish a connection to TDengine.
1. Create a Perspective table (the table schema must match the types of the table in the TDengine database).
1. Call the `Tornado.PeriodicCallback` function to start a scheduled task that updates the data of the Perspective table; sample code is as follows:
```python
def perspective_thread(perspective_server: perspective.Server, tdengine_conn: taosws.Connection):
"""
Create a new Perspective table and update it with new data every 50ms
"""
# create a new Perspective table
client = perspective_server.new_local_client()
schema = {
"timestamp": datetime,
"location": str,
"groupid": int,
"current": float,
"voltage": int,
"phase": float,
}
# define the table schema
table = client.table(
schema,
limit=1000, # maximum number of rows in the table
name=PERSPECTIVE_TABLE_NAME, # table name. Use this with perspective-viewer on the client side
)
logger.info("Created new Perspective table")
# update with new data
def updater():
data = read_tdengine(tdengine_conn)
table.update(data)
logger.debug(f"Updated Perspective table: {len(data)} rows")
logger.info(f"Starting tornado ioloop update loop every {PERSPECTIVE_REFRESH_RATE} milliseconds")
# start the periodic callback to update the table data
callback = tornado.ioloop.PeriodicCallback(callback=updater, callback_time=PERSPECTIVE_REFRESH_RATE)
callback.start()
```
### HTML Page Configuration
The `prsp-viewer.html` file in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project embeds the `Perspective Viewer` in an HTML page. It connects to the Perspective server over WebSocket and renders real-time data according to the chart configuration.
- Configure the charts to display and the data-analysis rules.
- Establish a WebSocket connection to the Perspective server.
- Import the Perspective library, connect to the Perspective server over WebSocket, and load the meters_values table to display dynamic data.
```html
<script type="module">
// import the Perspective library
import perspective from "https://unpkg.com/@finos/perspective@3.1.3/dist/cdn/perspective.js";
document.addEventListener("DOMContentLoaded", async function () {
// an asynchronous function for loading the view
async function load_viewer(viewerId, config) {
try {
const table_name = "meters_values";
const viewer = document.getElementById(viewerId);
// connect Perspective WebSocket server
const websocket = await perspective.websocket("ws://localhost:8085/websocket");
// open server table
const server_table = await websocket.open_table(table_name);
// load the table into the view
await viewer.load(server_table);
// use view configuration
await viewer.restore(config);
} catch (error) {
console.error(`Failed to get data from ${table_name}, err: ${error}`);
}
}
// configuration of the view
const config = {
"version": "3.3.1", // Perspective library version (compatibility identifier)
"plugin": "Datagrid", // View mode: Datagrid (table) or D3FC (chart)
"plugin_config": { // Plugin-specific configuration
"columns": {
"current": {
"width": 150 // Column width in pixels
}
},
"edit_mode": "READ_ONLY", // Edit mode: READ_ONLY (immutable) or EDIT (editable)
"scroll_lock": false // Whether to lock scroll position
},
"columns_config": {}, // Custom column configurations (colors, formatting, etc.)
"settings": true, // Whether to show settings panel (true/false)
"theme": "Power Meters", // Custom theme name (must be pre-defined)
"title": "Meters list data", // View title
"group_by": ["location", "groupid"], // Row grouping fields (equivalent to `row_pivots`)
"split_by": [], // Column grouping fields (equivalent to `column_pivots`)
"columns": [ // Columns to display (in order)
"timestamp",
"location",
"current",
"voltage",
"phase"
],
"filter": [], // Filter conditions (triplet format array)
"sort": [], // Sorting rules (format: [field, direction])
"expressions": {}, // Custom expressions (e.g., calculated columns)
"aggregates": { // Aggregation function configuration
"timestamp": "last", // Aggregation: last (takes the latest value)
"voltage": "last", // Aggregation: last
"phase": "last", // Aggregation: last
"current": "last" // Aggregation: last
}
};
// load the first view
await load_viewer("prsp-viewer-1", config);
});
</script>
<!-- define the HTML Structure of the Dashboard -->
<div id="dashboard">
<div class="viewer-container">
<perspective-viewer id="prsp-viewer-1" theme="Pro Dark"></perspective-viewer>
</div>
</div>
```
## References
- [Perspective documentation](https://perspective.finos.org/)
- [TDengine Python connector](../../../reference/connector/python)
- [TDengine stream processing](../../../advanced/stream/)


View File

@ -305,15 +305,6 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API并且在
- Dynamic modification: not supported
- Supported versions: introduced in v3.1.0.0
### Stream Parameters
#### streamRunHistoryAsync
- Description: when a stream is created with the fill_history option, whether the CREATE STREAM statement is executed asynchronously
- Type: boolean; false: synchronous, true: asynchronous
- Default: false
- Dynamic modification: supported via SQL, takes effect immediately
- Supported versions: introduced in v3.3.6.0
### Log Parameters
#### logDir

View File

@ -290,6 +290,8 @@ taosBenchmark -f <json file>
For other common parameters, see [Common configuration parameters](#通用配置参数).
**Note: from v3.3.5.6 onward, configuring both `specified_table_query` and `super_table_query` in the same JSON file is no longer supported.**
#### Executing Specified Query Statements
Configuration parameters for querying specified tables (supertables, child tables, or regular tables) are set in `specified_table_query`.
@ -416,6 +418,15 @@ taosBenchmark -f <json file>
</details>
<details>
<summary>queryStb.json</summary>
```json
{{#include /TDengine/tools/taos-tools/example/queryStb.json}}
```
</details>
### Subscription JSON Example
<details>

View File

@ -44,6 +44,7 @@ CREATE DATABASE db_name PRECISION 'ns';
| 16 | VARCHAR | user-defined | Alias for the BINARY type |
| 17 | GEOMETRY | user-defined | Geometry type, supported since version 3.1.0.0 |
| 18 | VARBINARY | user-defined | Variable-length binary data, supported since version 3.1.1.0 |
| 19 | DECIMAL | 8 or 16 | High-precision numeric type; the value range depends on the precision and scale specified in the type; supported since version 3.3.6; see the description below |
:::note
@ -63,6 +64,18 @@ CREATE DATABASE db_name PRECISION 'ns';
:::
### DECIMAL Data Type
The `DECIMAL` data type is used for high-precision numeric storage and is supported since version 3.3.6. The definition syntax is DECIMAL(18, 2) or DECIMAL(38, 10), with two parameters: `precision` and `scale`. `precision` is the maximum number of significant digits supported, and `scale` is the maximum number of decimal places. For example, DECIMAL(8, 4) can represent the range [-9999.9999, 9999.9999]. When defining a DECIMAL type, `precision` ranges over [1, 38] and scale over [0, precision]; a scale of 0 means integers only. scale may also be omitted and defaults to 0; for example, DECIMAL(18) is the same as DECIMAL(18, 0).
When `precision` is no greater than 18, 8 bytes are used internally for storage (DECIMAL64); when precision is in (18, 38], 16 bytes are used (DECIMAL). When writing DECIMAL data in SQL, numeric values can be used directly. If the written value exceeds the maximum the type can represent, a DECIMAL_OVERFLOW error is reported; if it does not exceed the maximum but has more decimal places than the scale, it is rounded automatically. For example, with the type DECIMAL(10, 2), writing 10.987 stores 10.99.
The DECIMAL type supports ordinary columns only; tag columns are not yet supported. DECIMAL data can only be written via SQL; stmt writes and schemaless writes are not yet supported.
When integer types are used in operations with the DECIMAL type, the integers are converted to DECIMAL before the computation. When DECIMAL is used with types such as DOUBLE/FLOAT/VARCHAR/NCHAR, the values are converted to DOUBLE for the computation.
When querying DECIMAL expressions, a DECIMAL OVERFLOW error is reported if an intermediate result exceeds the maximum value representable by the current type.
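A brief sketch of these rules in SQL; the table and column names here are hypothetical:
```sql
-- price uses 8-byte DECIMAL64 storage (precision <= 18);
-- amount uses 16-byte DECIMAL storage (precision > 18).
CREATE TABLE trades (
    ts     TIMESTAMP,
    price  DECIMAL(10, 2),
    amount DECIMAL(38, 10)
);
-- 10.987 has more decimal places than scale 2, so it is rounded to 10.99.
INSERT INTO trades VALUES (NOW, 10.987, 1.5);
```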
## Constants
TDengine supports constants of multiple types, detailed in the table below:

View File

@ -7,7 +7,7 @@ description: "创建、删除数据库,查看、修改数据库参数"
## Create a Database
```sql
CREATE DATABASE [IF NOT EXISTS] db_name [database_options];
database_options:
database_option ...
@ -46,53 +46,80 @@ database_option: {
### Parameter Description
- VGROUPS: the initial number of vgroups in the database.
- PRECISION: the timestamp precision of the database.
  - ms: milliseconds (default).
  - us: microseconds.
  - ns: nanoseconds.
- REPLICA: the number of database replicas, 1, 2, or 3; default 1. 2 is only available in Enterprise Edition 3.3.0.0 and later. In a cluster, the number of replicas must be less than or equal to the number of dnodes, and the following restrictions apply:
  - SPLIT VGROUP and REDISTRIBUTE VGROUP operations are not yet supported on vgroups of a two-replica database;
  - A single-replica database can be changed to two replicas, but changing from two replicas to any other replica count, or from three replicas to two, is not supported.
- BUFFER: the size of a vnode's write memory pool in MB; default 256, minimum 3, maximum 16384.
- PAGES: the number of cache pages of the metadata storage engine in a vnode; default 256, minimum 64. A vnode's metadata storage occupies PAGESIZE \* PAGES, which by default is 1 MB of memory.
- PAGESIZE: the page size of the metadata storage engine in a vnode, in KB; default 4 KB. Range 1 to 16384 (1 KB to 16 MB).
- CACHEMODEL: whether to cache the most recent data of child tables in memory.
  - none: no caching (default).
  - last_row: cache the most recent row of each child table; this significantly improves the performance of the LAST_ROW function.
  - last_value: cache the most recent non-NULL value of each column of each child table; this significantly improves the performance of the LAST function under ordinary conditions (no WHERE, ORDER BY, GROUP BY, or INTERVAL).
  - both: enable caching of both the most recent row and the most recent column values.
  Note: switching the CacheModel value back and forth may cause inaccurate last/last_row query results; proceed with caution (keeping it enabled is recommended).
- CACHESIZE: the amount of memory per vnode used to cache the most recent child-table data. Default 1, range [1, 65536], in MB.
- COMP: the database file compression flag; default 2, range [0, 2].
  - 0: no compression.
  - 1: one-stage compression.
  - 2: two-stage compression.
- DURATION: the time span of data stored in one data file.
  - A unit suffix can be used, e.g. DURATION 100h or DURATION 10d; supported units are m (minutes), h (hours), and d (days).
  - Without a unit, the default unit is days; e.g. DURATION 50 means 50 days.
- MAXROWS: the maximum number of records in a file block; default 4096.
- MINROWS: the minimum number of records in a file block; default 100.
- KEEP: the number of days data files are retained; default 3650, range [1, 365000], and at least 3 times the DURATION value.
  - The database automatically deletes data whose retention time exceeds KEEP, freeing storage space;
  - KEEP can take a unit suffix, e.g. KEEP 100h or KEEP 10d; supported units are m (minutes), h (hours), and d (days);
  - it can also be written without a unit, e.g. KEEP 50, in which case the unit defaults to days;
  - only Enterprise Edition supports [tiered storage](https://docs.taosdata.com/operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8), so multiple retention times can be set (comma-separated, at most 3, satisfying keep 0 \<= keep 1 \<= keep 2), e.g. KEEP 100h,100d,3650d;
  - Community Edition does not support tiered storage (even if multiple retention times are configured, they do not take effect; KEEP uses the largest retention time);
  - to learn more, see [About the primary-key timestamp](https://docs.taosdata.com/reference/taos-sql/insert/).
- KEEP_TIME_OFFSET: the delay before deleting or migrating data whose retention time exceeds KEEP (effective since 3.2.0.0); default 0 (hours).
  - After a data file's retention time exceeds KEEP, the delete or migrate operation is not executed immediately but waits the additional interval specified by this parameter, so it can be scheduled away from business peak hours.
- STT_TRIGGER: the number of on-disk files that triggers file merging.
  - For scenarios with few tables and high-frequency writes, the default is recommended;
  - for scenarios with many tables and low-frequency writes, a larger value is recommended.
- SINGLE_STABLE: whether only one supertable may be created in this database, intended for supertables with very many columns.
  - 0: multiple supertables may be created.
  - 1: only one supertable may be created.
- TABLE_PREFIX: the length of the table-name prefix to ignore (or to use exclusively) when assigning a table to a vgroup.
  - When positive, the prefix of the specified length is ignored when deciding which vgroup a table is assigned to;
  - when negative, only the prefix of the specified length is used;
  - for example, with table name "v30001": when TSDB_PREFIX = 2, "0001" decides the vgroup assignment; when TSDB_PREFIX = -2, "v3" decides it.
- TABLE_SUFFIX: the length of the table-name suffix to ignore (or to use exclusively) when assigning a table to a vgroup.
  - When positive, the suffix of the specified length is ignored when deciding which vgroup a table is assigned to;
  - when negative, only the suffix of the specified length is used;
  - for example, with table name "v30001": when TSDB_SUFFIX = 2, "v300" decides the vgroup assignment; when TSDB_SUFFIX = -2, "01" decides it.
- TSDB_PAGESIZE: the page size of the time-series storage engine in a vnode, in KB; default 4 KB. Range 1 to 16384 (1 KB to 16 MB).
- DNODES: the list of dnodes on which the vnodes reside, e.g. '1,2,3', comma-separated with no spaces (**Enterprise Edition only**).
- WAL_LEVEL: the WAL level; default 1.
  - 1: write WAL without executing fsync.
  - 2: write WAL and execute fsync.
- WAL_FSYNC_PERIOD: when WAL_LEVEL is set to 2, sets the flush period. Default 3000, in milliseconds. Minimum 0, meaning every write is flushed immediately; maximum 180000 (three minutes).
- WAL_RETENTION_PERIOD: the maximum additional time WAL log files are retained for data subscription. WAL cleanup is not affected by the consumption state of subscribing clients. In seconds. Default 3600, meaning the WAL retains the most recent 3600 seconds of data; adjust this to a value suitable for your data subscription needs.
- WAL_RETENTION_SIZE: the maximum additional cumulative size of WAL log files retained for data subscription. In KB. Default 0, meaning no upper limit on the cumulative size.
- COMPACT_INTERVAL: the period for triggering automatic compaction (time periods counted from 1970-01-01T00:00:00Z) (**Enterprise Edition only, since 3.3.5.0**).
  - Range: 0 or [10m, keep2], with units m (minutes), h (hours), d (days);
  - without a unit the default is days; the default value is 0, i.e. automatic compaction is not triggered;
  - if the database has unfinished compact tasks, no duplicate compact task is issued.
- COMPACT_TIME_RANGE: the time range compacted by automatic compact tasks (**Enterprise Edition only, since 3.3.5.0**).
  - Range: [-keep2, -duration], with units m (minutes), h (hours), d (days);
  - without a unit the default is days; the default value is [0, 0];
  - with the default [0, 0], if COMPACT_INTERVAL is greater than 0, automatic compaction is issued over [-keep2, -duration];
  - therefore, to disable automatic compaction, set COMPACT_INTERVAL to 0.
- COMPACT_TIME_OFFSET: the offset, relative to local time, at which automatic compact tasks are triggered (**Enterprise Edition only, since 3.3.5.0**). Range [0, 23], in h (hours); default 0. Taking the UTC+0 time zone as an example:
  - if COMPACT_INTERVAL is 1d and COMPACT_TIME_OFFSET is 0, automatic compaction is issued at 00:00 every day;
  - if COMPACT_TIME_OFFSET is 2, it is issued at 02:00 every day.
### Example: Creating a Database
```sql
create database if not exists db vgroups 10 buffer 10;
```
The example above creates a database named db with 10 vgroups, where each vnode is allocated a 10 MB write cache.
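Building on the parameters described above, a slightly fuller sketch might look like this; the option values are illustrative, not recommendations:
```sql
create database if not exists power
    vgroups 10
    precision 'ms'
    replica 3
    cachemodel 'both'
    duration 10d
    keep 3650d;
```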
@ -108,7 +135,7 @@ USE db_name;
## Drop a Database
```sql
DROP DATABASE [IF EXISTS] db_name;
```
Drops the database. All data tables contained in the specified database are deleted, and all of the database's vgroups are destroyed. Use with caution!
@ -146,15 +173,18 @@ alter_database_option: {
1. How do I check cachesize?
Run select * from information_schema.ins_databases; to view the specific cachesize values (unit: MB).
2. How do I check cacheload?
Run show \<db_name>.vgroups; to view cacheload (unit: bytes).
3. Determining whether cachesize is sufficient
- If cacheload is very close to cachesize, cachesize may be too small.
- If cacheload is clearly smaller than cachesize, cachesize is sufficient.
- Use this principle to decide whether cachesize needs to be changed.
- The specific new value can be determined from the system's available memory, e.g. doubling it or increasing it severalfold.
:::note
Other parameters cannot be modified in 3.0.0.0.
@ -204,7 +234,7 @@ FLUSH DATABASE db_name;
## Adjust VNODE Distribution Within a VGROUP
```sql
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3];
```
Adjusts the distribution of vnodes in a vgroup according to the given dnode list. Because the maximum number of replicas is 3, at most 3 dnodes can be specified.
@ -212,10 +242,10 @@ REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3
## Automatically Rebalance VGROUP Leaders
```sql
BALANCE VGROUP LEADER;
```
Triggers a new leader election in all vgroups of the cluster, load-balancing the cluster nodes. (**Enterprise Edition feature**)
## View Database Status
@ -223,19 +253,22 @@ BALANCE VGROUP LEADER
SHOW db_name.ALIVE;
```
Queries the availability status of database db_name. Return values:
- 0: unavailable;
- 1: fully available;
- 2: partially available (some vnodes of the database are available while others are not).
## View Database Disk Usage
```sql
select * from INFORMATION_SCHEMA.INS_DISK_USAGE where db_name = 'db_name';
```
Shows the disk space used by each module of the database.
```sql
SHOW db_name.disk_info;
```
Shows the data compression ratio and the on-disk size of the data in database db_name.
This command is essentially equivalent to `select sum(data1 + data2 + data3)/sum(raw_data), sum(data1 + data2 + data3) from information_schema.ins_disk_usage where db_name="dbname";`

View File

@ -1137,6 +1137,7 @@ CAST(expr AS type_name)
- When converting strings to numeric types, invalid characters may occur; for example, "a" may be converted to 0 without raising an error.
- When converting to numeric types, if the value exceeds the range representable by type_name, it overflows without raising an error.
- When converting to string types, if the converted value exceeds the length specified in type_name, it is truncated without raising an error.
- The DECIMAL type cannot be cast to or from the JSON, VARBINARY, and GEOMETRY types.
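A quick illustration of the conversion rules above; the expected behavior is noted in the comments:
```sql
SELECT CAST('a' AS INT);                   -- invalid characters: returns 0, no error
SELECT CAST(10000000000 AS INT);           -- exceeds the INT range: overflows, no error
SELECT CAST('hello world' AS VARCHAR(5));  -- truncated to 'hello', no error
```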
#### TO_CHAR
@ -1618,12 +1619,14 @@ AVG(expr)
**Description**: Returns the average value of the specified field.
**Return type**: DOUBLE, DECIMAL.
**Applicable data types**: numeric types.
**Applies to**: tables and supertables.
**Note**: When the input is of DECIMAL type, the output is also DECIMAL; the precision and scale of the output follow the rules described in the data types section. The result type is obtained by dividing the SUM type by UINT64; if the SUM result overflows the DECIMAL type, a DECIMAL OVERFLOW error is reported.
### COUNT
```sql
@ -1805,12 +1808,14 @@ SUM(expr)
**Description**: Returns the sum of a column in a table or supertable.
**Return type**: DOUBLE, BIGINT, DECIMAL.
**Applicable data types**: numeric types.
**Applies to**: tables and supertables.
**Note**: When the input is of DECIMAL type, the output type is DECIMAL(38, scale), where precision is the maximum currently supported and scale is the scale of the input type; if the SUM result overflows, a DECIMAL OVERFLOW error is reported.
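As a sketch of the DECIMAL rules for SUM and AVG above, assuming a hypothetical table trades with a DECIMAL(10, 2) column named price:
```sql
SELECT SUM(price) FROM trades;  -- result type is DECIMAL(38, 2)
SELECT AVG(price) FROM trades;  -- result type follows the DECIMAL division rules
```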
### VAR_POP
```sql
@ -2174,6 +2179,7 @@ ignore_null_values: {
- INTERP is used to obtain the record value of a specified column at a specified time cross-section. It has a dedicated syntax (interp_clause); see the [reference](../select/#interp).
- When no row satisfies the condition at the specified time cross-section, INTERP interpolates according to the [FILL](../distinguished/#fill-子句) setting.
- When applied to a supertable, INTERP sorts all child-table data under that supertable by the primary key column before interpolating; it can also be used with PARTITION BY tbname to force the result onto a single timeline.
- With FILL PREV/NEXT/NEAR, INTERP behaves differently from window queries: if data exists at the cross-section, no FILL is performed, even if the current value is NULL.
- INTERP can be used with the pseudocolumn `_irowts` to return the timestamp corresponding to the interpolation point (supported since v3.0.2.0).
- INTERP can be used with the pseudocolumn `_isfilled` to indicate whether a returned row is an original record or was generated by the interpolation algorithm (supported since v3.0.3.0).
- The pseudocolumn `_irowts_origin` can only be used with the FILL PREV/NEXT/NEAR modes; it returns the timestamp column of the original row used by `interp`, or NULL if there is no value in the range. `_irowts_origin` is supported since v3.3.4.9.
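A minimal sketch of these options, assuming a child table d1001 with a current column:
```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM d1001
RANGE('2025-03-19 10:00:00', '2025-03-19 10:01:00')
EVERY(10s)
FILL(PREV);
```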

View File

@ -77,10 +77,10 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填
1. No fill: NONE (the default fill mode).
2. VALUE fill: fill with a fixed value, which must be specified, e.g. `FILL(VALUE, 1.23)`. Note that the final fill value is determined by the type of the corresponding column: with `FILL(VALUE, 1.23)` on an INT column, the fill value is 1. If multiple columns in the select list need filling, each FILL column must be given its own VALUE, e.g. `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that a VALUE only needs to be specified for ordinary columns in the SELECT expression; `_wstart`, `_wstart+1a`, `now`, `1+1`, and the `partition key` (such as tbname) used with `partition by` need no VALUE, while `timediff(last(ts), _wstart)` does.
3. PREV fill: fill with the previous value, e.g. FILL(PREV).
4. NULL fill: fill with NULL, e.g. FILL(NULL).
5. LINEAR fill: linear interpolation based on the nearest non-NULL values before and after, e.g. FILL(LINEAR).
6. NEXT fill: fill with the next value, e.g. FILL(NEXT).
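For example, a minimal fill-query sketch, assuming a meters supertable with a current column:
```sql
SELECT _wstart, AVG(current)
FROM meters
WHERE ts >= '2025-03-19 10:00:00' AND ts < '2025-03-19 11:00:00'
INTERVAL(1m)
FILL(PREV);
```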
Of these fill modes, except for NONE (which fills nothing by default), the FILL clause is ignored if there is no data in the entire query time range, i.e. no fill data is produced and the query result is empty. This behavior is reasonable for some modes (PREV, NEXT, LINEAR), because no data means no fill value can be produced. For other modes (NULL, VALUE), a fill value could in principle be produced, and whether to output it depends on the application's needs. To satisfy applications that require forced filling of data or NULL while preserving compatibility with the existing fill modes, two new fill modes were added starting from v3.0.3.0:
@ -104,7 +104,7 @@ NULL、NULL_F、VALUE、 VALUE_F 这几种填充模式针对不同场景区别
Time windows can be further divided into sliding time windows and tumbling time windows.
The INTERVAL clause produces windows of equal time periods, and SLIDING specifies the time by which each window slides forward. Each executed query is one time window, and the window slides forward along the time axis. When defining a continuous query, the time window size and the forward sliding time must be specified. As the figure shows, [t0s, t0e], [t1s, t1e], and [t2s, t2e] are the time-window ranges of three consecutive query executions, and the forward sliding time range is marked as sliding time. Filtering, aggregation, and other operations are executed independently for each time window. When SLIDING equals INTERVAL, the sliding window becomes a tumbling window. By default, windows are divided starting from Unix time 0 (1970-01-01 00:00:00 UTC); if interval_offset is set, windows are divided starting from "Unix time 0 + interval_offset".
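As an illustrative sketch (assuming the meters supertable), the following uses a daily window with a 2-hour interval_offset, so each window starts at 02:00 rather than 00:00:
```sql
SELECT _wstart, COUNT(*)
FROM meters
INTERVAL(1d, 2h)
SLIDING(1d);
```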
![TDengine Database time-window diagram](./timewindow-1.webp)

View File

@ -59,11 +59,11 @@ CREATE TOPIC [IF NOT EXISTS] topic_name [with meta] AS DATABASE db_name;
## Delete a Topic
If subscription data is no longer needed, the topic can be deleted. If the topic currently has subscribed consumers, it can be force-deleted with the FORCE syntax; after a forced deletion, the subscribed consumers will receive errors when consuming data (the FORCE syntax is supported since version 3.3.6.0).
```sql
/* delete the topic */
DROP TOPIC [IF EXISTS] [FORCE] topic_name;
```
If a consumer exists on the subscribed topic at this point, that consumer will receive an error.
@ -82,8 +82,10 @@ SHOW TOPICS;
## Delete a Consumer Group
When a consumer is created, it is assigned a consumer group. Consumers cannot be deleted explicitly, but consumer groups can. If the group currently has consumers consuming, it can be force-deleted with the FORCE syntax; after a forced deletion, the subscribed consumers will receive errors when consuming data (the FORCE syntax is supported since version 3.3.6.0).
```sql
DROP CONSUMER GROUP [IF EXISTS] [FORCE] cgroup_name ON topic_name;
```
Deletes the consumer group cgroup_name on topic topic_name.

View File

@ -10,11 +10,11 @@ description: 流式计算的相关 SQL 的详细语法
```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery [notification_definition]
stream_options: {
 TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val]]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
 FILL_HISTORY [0|1] [ASYNC]
IGNORE UPDATE [0|1]
}
@ -34,7 +34,7 @@ subquery: SELECT select_list
stb_name is the name of the supertable that stores the computation results. If the supertable does not exist, it is created automatically; if it already exists, its column schema is checked. See [Writing into an existing supertable](#写入已存在的超级表).
The TAGS clause defines the rules for creating tags in the stream computation, so a custom tag value can be generated for the child table of each partition. See [Custom TAG](#自定义-TAG).
```sql
create_definition:
col_name column_definition
@ -42,7 +42,7 @@ column_definition:
type_name [COMMENT 'string_value']
```
The subtable clause defines the naming rules for child tables created by the stream computation. See [Stream partitions](#流式计算的-partition).
```sql
window_clause: {
@ -127,6 +127,13 @@ create stream if not exists s1 fill_history 1 into st1 as select count(*) from
If the stream task has fully expired and you no longer want it to detect or process data, you can delete it manually; the data it computed is still retained.
Note:
- With fill_history enabled, creating a stream requires finding the boundary point of the historical data; if there is a lot of history, creating the stream task can take a long time. In that case the fill_history 1 async syntax (supported since v3.3.6.0) moves stream creation into the background, so the CREATE STREAM statement returns immediately without blocking subsequent operations. async only affects fill_history 1; with fill_history 0, stream creation is fast and needs no asynchronous handling.
- The progress of background stream creation can be checked with show streams: the ready status means success, init means the stream is still being created, and failed means creation failed; in the failed case, the message column shows the reason. A failed stream can be dropped and recreated.
- Also, do not create multiple streams asynchronously at the same time: transaction conflicts may cause the later creations to fail. A sketch follows below.
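A sketch of asynchronous stream creation with fill_history, following the pattern used earlier in this section (ts1 is a hypothetical source table):
```sql
create stream if not exists s1 fill_history 1 async into st1
    as select count(*) from ts1 interval(10s);
```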
## Delete a Stream
```sql
@ -158,8 +165,11 @@ SELECT * from information_schema.`ins_streams`;
2. WINDOW_CLOSE: triggered when the window closes (window closing is determined by event time; can be combined with watermark).
3. MAX_DELAY time: triggers computation when the window closes; if the window has not closed and the time it has stayed open exceeds the time specified by max delay, computation is also triggered.
4. FORCE_WINDOW_CLOSE: based on the operating system's current time, computes and pushes out only the result of the window that is closing. The window is computed exactly once at the moment it closes and is not recomputed later. This mode currently supports only INTERVAL windows (without sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL supports only PREV, NULL, NONE, and VALUE.
5. CONTINUOUS_WINDOW_CLOSE: results are output when the window closes. Modifying or deleting data does not immediately trigger recomputation; instead, recomputation is performed periodically every rec_time_val. If rec_time_val is not specified, the recomputation period is 60 minutes. If a recomputation run takes longer than rec_time_val, the next recomputation starts automatically after it finishes. This mode currently supports only INTERVAL windows. If FILL is used, the taosAdapter information must be configured: adapterFqdn, adapterPort, and adapterToken. adapterToken is the Base64-encoded form of `{username}:{password}`; for example, `root:taosdata` encodes to `cm9vdDp0YW9zZGF0YQ==`.
Because window closing is determined by event time, if the event stream is interrupted or continuously delayed, the event time cannot advance, which may prevent up-to-date results from being produced.
Stream computing therefore provides the MAX_DELAY trigger mode, which combines event time with processing time. The minimum MAX_DELAY is 5 s; specifying less than 5 s raises an error when the stream is created.

View File

@ -30,6 +30,17 @@ SELECT a.* FROM meters a LEFT ASOF JOIN meters b ON timetruncate(a.ts, 1s) < tim
### Primary Join Condition
As a time-series database, all of TDengine's join queries revolve around the primary-key timestamp column. All join queries except ASOF/Window Join are therefore required to contain an equality condition on the primary key column, and the first primary-key equality condition to appear in the join conditions is taken as the primary join condition. An ASOF Join's primary join condition may contain non-equality conditions, while a Window Join's primary join condition is specified via `WINDOW_OFFSET`.
Starting from version 3.3.6.0, TDengine allows constants in subqueries (constant functions returning timestamps, such as today() and now(), constant timestamps, and their addition and subtraction) to act as equivalent primary-key columns that can appear in the primary join condition. For example:
```sql
SELECT * from d1001 a JOIN (SELECT today() as ts1, * from d1002 WHERE ts = '2025-03-19 10:00:00.000') b ON timetruncate(a.ts, 1d) = b.ts1;
```
The example statement above joins all of today's records in table d1001 with a single record at one moment in table d1002. Note that a time string literal in SQL is not treated as a timestamp by default; `'2025-03-19 10:00:00.000'`, for example, is treated as a string, not a timestamp. When it must be handled as a constant timestamp, the type prefix timestamp can mark the string literal as a timestamp, for example:
```sql
SELECT * from d1001 a JOIN (SELECT timestamp '2025-03-19 10:00:00.000' as ts1, * from d1002 WHERE ts = '2025-03-19 10:00:00.000') b ON timetruncate(a.ts, 1d) = b.ts1;
```
Except in Window Join, TDengine supports the `timetruncate` function in the primary join condition, e.g. `ON timetruncate(a.ts, 1s) = timetruncate(b.ts, 1s)`; beyond that, other functions and scalar operations are not yet supported there, as in the sketch below.
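For instance, a sketch joining the two example tables on second-truncated timestamps:
```sql
SELECT a.*, b.*
FROM d1001 a
JOIN d1002 b
  ON timetruncate(a.ts, 1s) = timetruncate(b.ts, 1s);
```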
@ -39,7 +50,7 @@ SELECT a.* FROM meters a LEFT ASOF JOIN meters b ON timetruncate(a.ts, 1s) < tim
### Primary Key Timeline
As a time-series database, TDengine requires every table (child table) to have a primary-key timestamp column, which serves as the table's primary-key timeline for many time-related computations. The result of a subquery or a join must likewise make clear which column is to be treated as the primary-key timeline for subsequent time-related computations. In a subquery, the first ordered primary-key column (or an operation on it), or a pseudocolumn equivalent to the primary key (`_wstart`/`_wend`), appearing in the result set is treated as the primary-key timeline of the output table. In addition, starting from version 3.3.6.0, TDengine also allows a constant timestamp column in the subquery result to serve as the output table's primary-key timeline. The primary-key timeline of join output is chosen by the following rules:
- In the Left/Right Join series, the primary-key column of the driving table (subquery) is used as the primary-key timeline for subsequent queries. In addition, within a Window Join window, because both tables are ordered, either table's primary-key column can serve as the primary-key timeline, with the table's own primary-key column preferred.
- Inner Join can use either table's primary-key column as the primary-key timeline; when a grouping-like condition exists (an equality condition on a tag column ANDed with the primary join condition), no primary-key timeline can be produced.
- Full Join cannot produce any valid primary-key time series, so it has no primary-key timeline, which means timeline-dependent computations cannot be performed in a Full Join.

View File

@ -37,6 +37,7 @@ description: 可配置压缩算法
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
| decimal | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
## SQL Syntax

View File

@ -826,6 +826,12 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一
  - res: [in] the result set.
  - **Return**: non-`NULL` on success, returning a pointer to TAOS_FIELD structures in which each element describes one column's metadata; `NULL` on failure.
- `TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res)`
  - **Description**: Retrieves the attributes of each column in the query result set (column name, data type, and length). Used together with `taos_num_fields()`, it can be used to parse a tuple (one row) returned by `taos_fetch_row()`. In addition to the basic information in TAOS_FIELD, TAOS_FIELD_E also includes the type's `precision` and `scale`.
  - **Parameters**:
    - res: [in] the result set.
  - **Return**: non-`NULL` on success, returning a pointer to TAOS_FIELD_E structures in which each element describes one column's metadata; `NULL` on failure.
- `void taos_stop_query(TAOS_RES *res)`
  - **Description**: Stops the execution of the current query.
  - **Parameters**:
@ -1115,10 +1121,14 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
  - conf: [in] pointer to a valid tmq_conf_t structure, representing a TMQ configuration object.
  - key: [in] the key name of the configuration item.
  - value: [in] the value of the configuration item.
  - **Return**: a tmq_conf_res_t enum value indicating the result of setting the configuration. tmq_conf_res_t is defined as follows:
```
 typedef enum tmq_conf_res_t {
   TMQ_CONF_UNKNOWN = -2,  // invalid key name
   TMQ_CONF_INVALID = -1,  // invalid value
   TMQ_CONF_OK = 0,        // configuration item set successfully
 } tmq_conf_res_t;
```
- `void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param)`
  - **Description**: Sets the auto-commit callback function in the TMQ configuration object.

View File

@ -121,6 +121,7 @@ JDBC 连接器可能报错的错误码包括 4 种:
| 0x2378 | consumer create error | Failed to create the data subscription; check the taos log based on the error message to locate the problem. |
| 0x2379 | seek offset must not be a negative number | The seek parameter must not be negative; use a correct parameter. |
| 0x237a | vGroup not found in result set | The VGroup is not assigned to the current consumer; due to the rebalance mechanism, consumers and VGroups are not bound to each other. |
| 0x2390 | background thread write error in Efficient Writing | A background-thread write error occurred in efficient writing mode; stop writing and rebuild the connection. |
- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
<!-- - [TDengine_ERROR_CODE](../error-code) -->
@ -148,6 +149,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
| JSON | java.lang.String | supported only in tags |
| VARBINARY | byte[] ||
| GEOMETRY | byte[] ||
| DECIMAL | java.math.BigDecimal ||
**Note**: For historical reasons, the BINARY type in TDengine is not truly binary data underneath and is no longer recommended; use the VARBINARY type instead.
The GEOMETRY type is little-endian binary data conforming to the WKB specification. For details, see [Data Types](../../taos-sql/data-type/#数据类型).
@ -314,7 +316,15 @@ properties 中的配置参数如下:
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: disables SSL certificate validation. Effective only with WebSocket connections. true: enabled, false: disabled. Default: false.
- TSDBDriver.PROPERTY_KEY_APP_NAME: App name, shown in the results of `show connections`. Effective only with WebSocket connections. Default: java.
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP, shown in the results of `show connections`. Effective only with WebSocket connections. Default: empty.
- TSDBDriver.PROPERTY_KEY_ASYNC_WRITE: efficient writing mode; currently only `stmt` is supported. Effective only with WebSocket connections. Default: empty, i.e. efficient writing mode is disabled.
- TSDBDriver.PROPERTY_KEY_BACKEND_WRITE_THREAD_NUM: number of background write threads in efficient writing mode. Effective only with WebSocket connections. Default: 10.
- TSDBDriver.PROPERTY_KEY_BATCH_SIZE_BY_ROW: batch size, in rows, for writing data in efficient writing mode. Effective only with WebSocket connections. Default: 1000.
- TSDBDriver.PROPERTY_KEY_CACHE_SIZE_BY_ROW: cache size, in rows, in efficient writing mode. Effective only with WebSocket connections. Default: 10000.
- TSDBDriver.PROPERTY_KEY_COPY_DATA: whether to copy binary data passed by the application via addBatch in efficient writing mode. Effective only with WebSocket connections. Default: false.
- TSDBDriver.PROPERTY_KEY_STRICT_CHECK: whether to validate table-name length and variable-length data-type lengths in efficient writing mode. Effective only with WebSocket connections. Default: false.
- TSDBDriver.PROPERTY_KEY_RETRY_TIMES: number of retries on write failure in efficient writing mode. Effective only with WebSocket connections. Default: 3.
In addition, for JDBC native connections, other parameters such as log level and SQL length can be specified via the URL and Properties.

View File

@ -24,6 +24,7 @@ Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-conne
| Node.js connector version | Major changes | TDengine version |
| ------------------| ----------------------| ----------------|
| 3.1.5 | Support for special characters in passwords | - |
| 3.1.4 | Readme updates | - |
| 3.1.3 | Upgraded es5-ext to fix a vulnerability in the old version | - |
| 3.1.2 | Optimized data protocol and parsing; significantly improved performance | - |

View File

@ -31,7 +31,7 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"
## Platforms Supported by TDengine Clients and Connectors
TDengine's connectors support a wide range of platforms, including hardware platforms such as X64/X86/ARM64/ARM32/MIPS/LoongArch64 (Loong64), and development environments such as Linux/Win64/Win32/macOS.
The compatibility matrix is as follows:

View File

@ -174,6 +174,7 @@ description: TDengine 服务端的错误码列表和详细说明
| 0x8000038B | Index not exist | Does not exist | Confirm whether the operation is correct |
| 0x80000396 | Database in creating status | The database is being created | Retry |
| 0x8000039A | Invalid system table name | Internal error | Report an issue |
| 0x8000039F | No VGroup's leader need to be balanced | balance vgroup leader was executed, but no vgroup leader needed balancing | Confirm whether the operation is necessary |
| 0x800003A0 | Mnode already exists | Already exists | Confirm whether the operation is correct |
| 0x800003A1 | Mnode not there | Does not exist | Confirm whether the operation is correct |
| 0x800003A2 | Qnode already exists | Already exists | Confirm whether the operation is correct |
@ -577,10 +578,11 @@ description: TDengine 服务端的错误码列表和详细说明
## virtual table
| Error code | Description | Possible scenarios or causes | Suggested actions |
|------------|---------------------------------------------------------|------------------------------------------------|----------------------------|
| 0x80006200 | Internal error in the virtual table scan operator | Internal logic error in the virtual table scan operator; should not normally occur | Check the client-side error logs |
| 0x80006201 | Virtual table scan invalid downstream operator type | The generated execution plan is incorrect, so the downstream operator type of the virtual table scan operator is wrong | Keep the explain execution plan and contact the developers |
| 0x80006202 | Virtual table prim timestamp column should not has ref | The primary-key timestamp column of a virtual table must not have a data source; if it does, this error occurs on subsequent queries of the virtual table | Check the error logs and contact the developers |
| 0x80006203 | Create virtual child table must use virtual super table | Virtual child tables must be created under a virtual supertable, otherwise this error occurs | Use USING with a virtual supertable when creating virtual child tables |
| 0x80006204 | Virtual table not support decimal type | Virtual tables do not support the decimal type | Do not use decimal-type columns/tags when creating virtual tables |

View File

@ -1,291 +1,293 @@
---
title: Daylight Saving Time Usage Guide
description: Explanation of and recommendations for daylight saving time issues in TDengine
---
## Background
When using a time-series database, situations involving daylight saving time sometimes arise. This guide analyzes and explains how daylight saving time behaves in TDengine and the problems it can cause, so that your use of TDengine goes more smoothly.
## Definitions
### Time Zone
A time zone is a region of the Earth that uses the same standard time. Because of the Earth's rotation, and to keep each locale's time aligned with its sunrise and sunset, the world is divided into multiple time zones.
### IANA Time Zones
The IANA (Internet Assigned Numbers Authority) time zone database, also known as the tz database, provides the standard reference for global time zone information. It is the foundation of time zone handling in modern systems and software.
IANA uses the "Area/City" format (e.g. Europe/Berlin) to identify time zones unambiguously.
TDengine supports IANA time zones in all of its components (except the time zone setting in taos.cfg on Windows).
### Standard Time and Local Time
Standard time is determined relative to a fixed meridian on Earth. It provides a uniform reference point for the time zones.
- Greenwich Mean Time (GMT): the historical reference time, based on the 0° meridian.
- Coordinated Universal Time (UTC): the modern time standard, similar to GMT but more precise.
The relationship between standard time and time zones is as follows:
- Reference: standard time (such as UTC) is the reference point from which time zones are defined.
- Offset: each time zone is defined by its offset from standard time. For example, UTC+1 is 1 hour ahead of UTC.
- Regions: the world is divided into multiple time zones, each using one or more standard times.
Relative to standard time, each region sets its local time according to its time zone:
- Time zone offset: local time equals standard time plus the zone's offset. For example, UTC+2 is 2 hours ahead of UTC.
- Daylight saving time (DST): some regions adjust their local time during certain periods, e.g. by moving the clock forward one hour. See the next section.
### Daylight Saving Time
Daylight Saving Time (DST) is the practice of moving the clock forward one hour to make better use of daylight and save energy. It usually starts in spring and ends in autumn. The exact start and end times of DST vary by region. The discussion below uses Berlin time throughout to illustrate DST and its effects.
![DST Berlin](./02-dst/dst-berlin.png)
Following this rule, you can see that:
- Berlin local time on March 31, 2024 between 02:00:00 and 03:00:00 (excluding 03:00:00) does not exist (the jump);
- Berlin local time on October 27, 2024 between 02:00:00 and 03:00:00 (excluding 03:00:00) occurs twice.
#### DST and the IANA Time Zone Database
- Rules: the IANA time zone database records each region's DST rules in detail, including the start and end dates and times.
- Automatic adjustment: many operating systems and software packages use the IANA database to handle DST adjustments automatically.
- History: the IANA database also tracks historical DST changes to ensure accuracy.
#### DST and Timestamp Conversion
- Converting a timestamp to local time is deterministic. For example, 1729990654 is Berlin **DST** time `2024-10-27 02:57:34`, and 1729994254 is Berlin **standard (winter)** time `2024-10-27 02:57:34` (the two local times are identical apart from the offset).
- Without a specified offset, converting local time to a timestamp is not deterministic. Times skipped by DST do not exist and cannot be converted to timestamps: **Berlin time** `2024-03-31 02:34:56` does not exist, so it cannot be converted. Times repeated at the end of DST cannot be disambiguated: without an offset, `2024-10-27 02:57:34` could be either 1729990654 or 1729994254. Only with an explicit offset can the timestamp be determined: `2024-10-27 02:57:34 CEST(+02:00)` specifies the DST instant `2024-10-27 02:57:34`, i.e. timestamp 1729990654.
### RFC 3339 Time Format
RFC 3339 is an internet time format standard for representing dates and times. It is based on the ISO 8601 standard but pins down some format details more concretely.
Its format is as follows:
- Basic format: `YYYY-MM-DDTHH:MM:SSZ`
- Time zone notation:
  - Z denotes Coordinated Universal Time (UTC);
  - an offset such as +02:00 denotes the difference from UTC.
With explicit time zone offsets, RFC 3339 times can be parsed and compared accurately anywhere in the world.
The advantages of RFC 3339 include:
- Standardization: a uniform format that simplifies data exchange between systems.
- Clarity: explicit time zone information that avoids misinterpretation.
TDengine uses the RFC 3339 format for display in both the REST API and the Explorer UI. In SQL statements, timestamps can be written in RFC 3339 format:
```sql
insert into t1 values('2024-10-27T01:59:59.000Z', 0);
select * from t1 where ts >= '2024-10-27T01:59:59.000Z';
```
### Undefined Behavior
Undefined behavior means that a particular piece of code or operation has no prescribed result, and no compatibility guarantee is made about that result; TDengine may change the current behavior in a future version without notice. Therefore, users must not build logic or applications on currently undefined behavior in TDengine.
## Writing and Querying Across DST in TDengine
The table below shows the effects of DST on writes and queries.
![DST Table](./02-dst/dst-table.png)
### Table Legend
- **TIMESTAMP**: TDengine stores the raw timestamp as a 64-bit integer.
- **UTC**: the UTC representation of the timestamp.
- **Europe/Berlin**: the RFC 3339 time in the Europe/Berlin time zone.
- **Local**: the local time in the Europe/Berlin time zone (without the time zone).
### Table Analysis
- At the **start of DST** (Berlin time, March 31, 02:00), time jumps directly from 02:00 to 03:00 (one hour forward).
  - Light green: timestamps in the hour before DST starts;
  - dark green: timestamps in the hour after DST starts;
  - red: a nonexistent local time inserted into the TDengine database:
    - Using SQL `INSERT INTO t1 values('2024-03-31 02:59:59',..)` to insert data between `2024-03-31 02:00:00` and `2024-03-31 02:59:59` is automatically adjusted to -1000 (undefined behavior in TDengine; the value currently depends on the database precision: -1000 for millisecond, -1000000 for microsecond, and -1000000000 for nanosecond databases), because that instant does not exist in local time.
- At the **end of DST** (Berlin time, October 27, 03:00), time jumps from 03:00 back to 02:00 (one hour backward).
  - Light blue: timestamps in the hour before the clock jump;
  - dark blue: timestamps in the hour after the clock jump, whose zone-less local times repeat those of the previous hour;
  - purple: timestamps starting one hour after the clock jump.
- **Local time changes**: DST adjustments change local time, which may cause certain periods to repeat or be missing.
- **UTC is unchanged**: UTC stays the same, preserving consistency and ordering.
- **RFC 3339**: the RFC 3339 representation shows the offset change, +02:00 after DST starts and +01:00 after it ends.
- **Conditional queries**:
  - At the **start of DST**, the skipped time (`[03-31 02:00:00,03-31 03:00:00)`) does not exist, so queries over it are indeterminate: `SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'` (nonexistent local timestamps are converted to `-1000`):
```sql
taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59';
ts |
=================
-1000 |
Query OK, 1 row(s) in set (0.003635s)
```
When a nonexistent timestamp is combined with an existing one, the result is likewise not as expected; below, the start local time does not exist:
```sql
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 03:59:59';
ts | to_iso8601(ts,'Z') |
==================================================
-1000 | 1969-12-31T23:59:59.000Z |
1711843200000 | 2024-03-31T00:00:00.000Z |
1711846799000 | 2024-03-31T00:59:59.000Z |
1711846800000 | 2024-03-31T01:00:00.000Z |
1711846801000 | 2024-03-31T01:00:01.000Z |
Query OK, 5 row(s) in set (0.003339s)
```
In the statements below, the end time of the first SQL query does not exist while that of the second one does; the first query's result is not as expected:
```sql
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00';
Query OK, 0 row(s) in set (0.000930s)
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59';
ts | to_iso8601(ts,'Z') |
==================================================
1711843200000 | 2024-03-31T00:00:00.000Z |
1711846799000 | 2024-03-31T00:59:59.000Z |
Query OK, 2 row(s) in set (0.001227s)
```
- At the end of DST, the repeated time (`[10-27 02:00:00,10-27 03:00:00)`, excluding `10-27 03:00:00`) occurs twice; using timestamps in this interval in queries is also undefined behavior in TDengine.
  - Querying the data in `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` includes both occurrences of the repeated timestamps plus the data at `2024-10-27 03:00:00`:
```sql
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00';
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
=======================================================================================
1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
1729994400000 | 2024-10-27T02:00:00.000Z | 2024-10-27 03:00:00 |
Query OK, 5 row(s) in set (0.001370s)
```
- But the following query over [2024-10-27 02:00:00.000, 2024-10-27 02:57:34.999] finds only the data at the first 2024-10-27 02:00:00:
```sql
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999';
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
=======================================================================================
1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
Query OK, 1 row(s) in set (0.004480s)
```
- The following query over `[2024-10-27 02:00:01, 2024-10-27 02:57:35]`, however, finds 3 rows (including one row with local time 02:59:59):
```sql
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';;
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
================================================================================================
2024-10-27 02:00:00.000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
2024-10-27 02:59:59.000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
2024-10-27 02:00:00.000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
Query OK, 3 row(s) in set (0.004428s)
```
## Summary and Recommendations
### Summary
Only the effects of using local time are described here; using UNIX timestamps or RFC 3339 is unaffected.
- Writing:
  - Times that do not exist during the DST transition cannot be written.
  - Writing times that repeat during the DST transition is undefined behavior.
- Querying:
  - Query conditions that specify times skipped at the start of DST yield undefined results.
  - Query conditions that specify times repeated at the end of DST yield undefined results.
- Display:
  - Display with time zones is unaffected.
  - Displayed local times are accurate, but repeated times at the end of DST cannot be distinguished.
  - Be cautious about using zone-less times for display and in applications.
### Recommendations
To keep DST from unnecessarily affecting queries and writes, use explicit time offsets when writing and querying in TDengine.
- Use UNIX timestamps: UNIX timestamps avoid time zone problems.
| TIMESTAMP | UTC | Europe/Berlin | Local |
| ------------: | :----------------------: | :---------------------------: | :-----------------: |
| 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 |
| 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 |
```sql
taos> insert into t1 values(1711846799000, 1)(1711846800000, 2);
Insert OK, 2 row(s) affected (0.001434s)
taos> select * from t1 where ts between 1711846799000 and 1711846800000;
ts | v1 |
===============================
1711846799000 | 1 |
1711846800000 | 2 |
Query OK, 2 row(s) in set (0.003503s)
```
- Use the RFC 3339 time format: RFC 3339 with a time zone offset effectively avoids DST ambiguity.
| TIMESTAMP | UTC | Europe/Berlin | Local |
| ------------: | :----------------------: | :---------------------------: | :-----------------: |
| 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 |
| 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 |
| 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 |
| 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 |
```sql
taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1)
('2024-10-27T02:59:59.000+02:00', 2)
('2024-10-27T02:00:00.000+01:00', 3)
('2024-10-27T02:59:59.000+01:00', 4);
Insert OK, 4 row(s) affected (0.001514s)
taos> SELECT *,
to_iso8601(ts,'Z'),
to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
WHERE ts >= '2024-10-27T02:00:00.000+02:00'
AND ts <= '2024-10-27T02:59:59.000+01:00';
ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
=====================================================================================================
1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
1729990800000 | 3 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
1729994399000 | 4 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
Query OK, 4 row(s) in set (0.004275s)
taos> SELECT *,
to_iso8601(ts,'Z'),
to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
WHERE ts >= '2024-10-27T02:00:00.000+02:00'
AND ts <= '2024-10-27T02:59:59.000+02:00';
ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
=====================================================================================================
1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
Query OK, 2 row(s) in set (0.004275s)
```
- Mind the time zone setting when querying: when local time is needed for queries and display, be sure to account for DST.
- taosAdapter: when using the REST API, an IANA time zone can be set, and results are returned in RFC 3339 format.
```shell
$ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin'\
-d "select ts from tz1.t1"
{"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10}
```
- Explorer: when running SQL queries on the Explorer page, users can configure the client time zone to display times in RFC 3339 format.
![Explorer DST](./02-dst/explorer-with-tz.png)
## References
- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones)
- RFC3339: [https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339)

View File

@ -0,0 +1,297 @@
---
title: Using Special Characters in Passwords
description: Using special characters in TDengine user passwords
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
TDengine user passwords must satisfy the following rules:
1. A user name may be at most 23 bytes long.
2. A password must be 8 to 255 characters long.
3. The allowed password characters are:
   1. Uppercase letters: `A-Z`
   2. Lowercase letters: `a-z`
   3. Digits: `0-9`
   4. Special characters: `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`
4. With strong passwords enabled (EnableStrongPassword 1, on by default), a password must contain at least three of the categories uppercase letters, lowercase letters, digits, and special characters. When disabled, there is no constraint on character categories.
## Special-Character Usage by Component
The examples below use the user name `user1` with the password `Ab1!@#$%^&*()-_+=[]{}`.
```sql
CREATE USER user1 PASS 'Ab1!@#$%^&*()-_+=[]{}';
```
<Tabs defaultValue="shell" groupId="component">
<TabItem label="CLI" value="shell">
Note the following when using the [TDengine CLI](../../reference/tools/taos-cli/):
- If `-p` is given without a password, you will be prompted for one and may enter any acceptable characters.
- If `-p` is given with a password that contains special characters, the password must be single-quoted or escaped.
Log in as user `user1`:
```shell
taos -u user1 -p'Ab1!@#$%^&*()-_+=[]{}'
taos -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\}
```
</TabItem>
<TabItem label="taosdump" value="taosdump">
Note the following when using [taosdump](../../reference/tools/taosdump/):
- If `-p` is given without a password, you will be prompted for one and may enter any acceptable characters.
- If `-p` is given with a password that contains special characters, the password must be single-quoted or escaped.
Back up database `test` as user `user1`:
```shell
taosdump -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -D test
taosdump -u user1 -pAb1\!\@\#\$\%\^\&\*\(\)\-\_\+\=\[\]\{\} -D test
```
</TabItem>
<TabItem label="Benchmark" value="benchmark">
Note the following when using [taosBenchmark](../../reference/tools/taosbenchmark/):
- If `-p` is given without a password, you will be prompted for one and may enter any acceptable characters.
- If `-p` is given with a password that contains special characters, the password must be single-quoted or escaped.
An example data-write test as user `user1`:
```shell
taosBenchmark -u user1 -p'Ab1!@#$%^&*()-_+=[]{}' -d test -y
```
When using the `taosBenchmark -f <JSON>` form, there are no restrictions on the password in the JSON file.
</TabItem>
<TabItem label="taosX" value="taosx">
[taosX](../../reference/components/taosx/) uses a DSN to represent a TDengine connection, in the format `(taos|tmq)[+ws]://<user>:<pass>@<ip>:<port>`, where `<pass>` may contain special characters, e.g. `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`.
An example of exporting data as user `user1`:
```shell
taosx -f 'taos://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6030?query=select * from test.t1' \
-t 'csv:./test.csv'
```
Note that if the password can be URL-decoded, the URL-decoded result is used as the password. For example, `taos+ws://user1:Ab1%21%40%23%24%25%5E%26%2A%28%29-_%2B%3D%5B%5D%7B%7D@localhost:6041` is equivalent to `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041`.
In [Explorer](../../reference/components/explorer/), no special handling is needed; just use the password directly.
</TabItem>
<TabItem label="Java" value="java">
When using a password containing special characters in JDBC, the password must be URL-encoded, for example:
```java
package com.taosdata.example;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import com.taosdata.jdbc.TSDBDriver;
public class JdbcPassDemo {
public static void main(String[] args) throws Exception {
String password = "Ab1!@#$%^&*()-_+=[]{}";
String encodedPassword = URLEncoder.encode(password, StandardCharsets.UTF_8.toString());
String jdbcUrl = "jdbc:TAOS-WS://localhost:6041";
Properties connProps = new Properties();
connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "user1");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, encodedPassword);
connProps.setProperty(TSDBDriver.PROPERTY_KEY_ENABLE_AUTO_RECONNECT, "true");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
try (Connection conn = DriverManager.getConnection(jdbcUrl, connProps)) {
System.out.println("Connected to " + jdbcUrl + " successfully.");
// you can use the connection for execute SQL here
} catch (Exception ex) {
// please refer to the JDBC specifications for detailed exceptions info
System.out.printf("Failed to connect to %s, %sErrMessage: %s%n",
jdbcUrl,
ex instanceof SQLException ? "ErrCode: " + ((SQLException) ex).getErrorCode() + ", " : "",
ex.getMessage());
// Print stack trace for context in examples. Use logging in production.
ex.printStackTrace();
throw ex;
}
}
}
```
</TabItem>
<TabItem label="Python" value="python">
In Python, passwords containing special characters need no special handling, for example:
```python
import taos
import taosws
def create_connection():
host = "localhost"
port = 6030
return taos.connect(
user="user1",
password="Ab1!@#$%^&*()-_+=[]{}",
host=host,
port=port,
)
def create_ws_connection():
host = "localhost"
port = 6041
return taosws.connect(
user="user1",
password="Ab1!@#$%^&*()-_+=[]{}",
host=host,
port=port,
)
def show_databases(conn):
cursor = conn.cursor()
cursor.execute("show databases")
print(cursor.fetchall())
cursor.close()
if __name__ == "__main__":
print("Connect with native protocol")
conn = create_connection()
show_databases(conn)
print("Connect with websocket protocol")
conn = create_ws_connection()
show_databases(conn)
```
</TabItem>
<TabItem label="Go" value="go">
Starting from version 3.6.0, the Go connector supports special characters in passwords; the password must be URL-encoded when used (the example below uses url.QueryEscape).
```go
package main
import (
"database/sql"
"fmt"
"log"
"net/url"
_ "github.com/taosdata/driver-go/v3/taosWS"
)
func main() {
var user = "user1"
var password = "Ab1!@#$%^&*()-_+=[]{}"
var encodedPassword = url.QueryEscape(password)
var taosDSN = user + ":" + encodedPassword + "@ws(localhost:6041)/"
taos, err := sql.Open("taosWS", taosDSN)
if err != nil {
log.Fatalln("Failed to connect to " + taosDSN + "; ErrMessage: " + err.Error())
}
fmt.Println("Connected to " + taosDSN + " successfully.")
defer taos.Close()
}
```
</TabItem>
<TabItem label="Rust" value="rust">
Rust uses a DSN to represent a TDengine connection, in the format `(taos|tmq)[+ws]://<user>:<pass>@<ip>:<port>`, where `<pass>` may contain special characters, e.g. `taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@192.168.10.10:6041`.
```rust
let dsn = "taos+ws://user1:Ab1!@#$%^&*()-_+=[]{}@localhost:6041";
let connection = TaosBuilder::from_dsn(&dsn)?.build().await?;
```
</TabItem>
<TabItem label="Node.js" value="node">
Starting from version 3.1.5, the Node.js connector supports passwords containing special characters without any special handling.
```js
const taos = require("@tdengine/websocket");
let dsn = 'ws://localhost:6041';
async function createConnect() {
try {
let conf = new taos.WSConfig(dsn);
conf.setUser('user1');
conf.setPwd('Ab1!@#$%^&*()-_+=[]{}');
conf.setDb('test');
let conn = await taos.sqlConnect(conf);
console.log("Connected to " + dsn + " successfully.");
return conn;
} catch (err) {
console.log("Connection failed with code: " + err.code + ", message: " + err.message);
throw err;
}
}
createConnect()
```
</TabItem>
<TabItem label="C#" value="csharp">
When using a password in C#, note that connection strings do not support semicolons in the password (the semicolon is the field separator). In this case, build a `ConnectionStringBuilder` from a connection string without credentials, then set the username and password separately.
Example:
```csharp
var builder = new ConnectionStringBuilder("host=localhost;port=6030");
builder.Username = "user1";
builder.Password = "Ab1!@#$%^&*()-_+=[]{}";
using (var client = DbDriver.Open(builder)){}
```
</TabItem>
<TabItem label="C" value="c">
The C API places no restrictions on passwords.
```c
TAOS *taos = taos_connect("localhost", "user1", "Ab1!@#$%^&*()-_+=[]{}", NULL, 6030);
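// taos_connect returns NULL on failure; check the handle before use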
```
</TabItem>
<TabItem label="REST" value="rest">
When using a password with the REST API, note the following:
- Authentication uses Basic Auth, in the format `Authorization: Basic base64(<user>:<pass>)`.
- The password must not contain a colon `:`.
The following two forms are equivalent:
```shell
curl -u'user1:Ab1!@#$%^&*()-_+=[]{}' \
-d 'show databases' http://localhost:6041/rest/sql
curl -H 'Authorization: Basic dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9' \
-d 'show databases' http://localhost:6041/rest/sql
```
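To compute the Basic token used in the second form yourself, here is a minimal sketch using standard shell tools (assuming `printf` and `base64` are available):
```shell
# encode <user>:<pass>; printf '%s' avoids the trailing newline echo would add
printf '%s' 'user1:Ab1!@#$%^&*()-_+=[]{}' | base64
# prints: dXNlcjE6QWIxIUAjJCVeJiooKS1fKz1bXXt9
```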
</TabItem>
</Tabs>

View File

@ -241,6 +241,7 @@ typedef struct SRestoreCheckpointInfo {
int32_t transId; // transaction id of the update-consensus-checkpointId transaction
int32_t taskId;
int32_t nodeId;
int32_t term;
} SRestoreCheckpointInfo;
int32_t tEncodeRestoreCheckpointInfo(SEncoder* pEncoder, const SRestoreCheckpointInfo* pReq);

View File

@ -29,9 +29,10 @@ extern "C" {
#define ANALY_FORECAST_DEFAULT_CONF 95
#define ANALY_FORECAST_DEFAULT_WNCHECK 1
#define ANALY_FORECAST_MAX_ROWS 40000
#define ANALY_FORECAST_RES_MAX_ROWS 1024
#define ANALY_ANOMALY_WINDOW_MAX_ROWS 40000
#define ANALY_DEFAULT_TIMEOUT 60
#define ANALY_MAX_TIMEOUT 600
typedef struct {
EAnalAlgoType type;

View File

@ -297,7 +297,6 @@ extern bool tsFilterScalarMode;
extern int32_t tsMaxStreamBackendCache;
extern int32_t tsPQSortMemThreshold;
extern bool tsStreamCoverage;
extern bool tsStreamRunHistoryAsync;
extern int8_t tsS3EpNum;
extern int32_t tsStreamNotifyMessageSize;
extern int32_t tsStreamNotifyFrameSize;

View File

@ -1430,6 +1430,7 @@ typedef struct {
int64_t watermark1;
int64_t watermark2;
int32_t ttl;
int32_t keep;
SArray* pFuncs;
int32_t commentLen;
char* pComment;
@ -3358,6 +3359,7 @@ typedef struct {
int8_t igNotExists;
int32_t sqlLen;
char* sql;
int8_t force;
} SMDropTopicReq;
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
@ -3368,6 +3370,7 @@ typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
char cgroup[TSDB_CGROUP_LEN];
int8_t igNotExists;
int8_t force;
} SMDropCgroupReq;
int32_t tSerializeSMDropCgroupReq(void* buf, int32_t bufLen, SMDropCgroupReq* pReq);

View File

@ -54,6 +54,8 @@ const char* tNameGetDbNameP(const SName* name);
int32_t tNameGetFullDbName(const SName* name, char* dst);
int32_t tNameGetFullTableName(const SName* name, char* dst);
bool tNameIsEmpty(const SName* name);
void tNameAssign(SName* dst, const SName* src);

View File

@ -134,6 +134,8 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI
bool qIsDynamicExecTask(qTaskInfo_t tinfo);
void qDestroyOperatorParam(SOperatorParam* pParam);
void qUpdateOperatorParam(qTaskInfo_t tinfo, void* pParam);
/**

View File

@ -553,6 +553,7 @@ typedef struct SDropTopicStmt {
ENodeType type;
char topicName[TSDB_TOPIC_NAME_LEN];
bool ignoreNotExists;
bool force;
} SDropTopicStmt;
typedef struct SDropCGroupStmt {
@ -560,6 +561,7 @@ typedef struct SDropCGroupStmt {
char topicName[TSDB_TOPIC_NAME_LEN];
char cgroup[TSDB_CGROUP_LEN];
bool ignoreNotExists;
bool force;
} SDropCGroupStmt;
typedef struct SAlterClusterStmt {
@ -608,6 +610,7 @@ typedef struct SStreamOptions {
SNode* pDeleteMark;
SNode* pRecInterval;
int8_t fillHistory;
bool runHistoryAsync;
int8_t ignoreExpired;
int8_t ignoreUpdate;
int64_t setFlag;

View File

@ -495,6 +495,17 @@ struct SStreamTask {
typedef int32_t (*startComplete_fn_t)(struct SStreamMeta*);
typedef enum {
START_MARK_REQ_CHKPID = 0x1,
START_WAIT_FOR_CHKPTID = 0x2,
START_CHECK_DOWNSTREAM = 0x3,
} EStartStage;
typedef struct {
EStartStage stage;
int64_t ts;
} SStartTaskStageInfo;
typedef struct STaskStartInfo {
int64_t startTs;
int64_t readyTs;
@ -504,6 +515,8 @@ typedef struct STaskStartInfo {
SHashObj* pFailedTaskSet; // tasks that have finished the check-downstream process, whether successful or failed
int64_t elapsedTime;
int32_t restartCount; // restart task counter
EStartStage curStage; // task start stage
SArray* pStagesList; // history stage list with timestamps, SArray<SStartTaskStageInfo>
startComplete_fn_t completeFn; // complete callback function
} STaskStartInfo;
@ -738,7 +751,7 @@ void streamTaskStopMonitorCheckRsp(STaskCheckInfo* pInfo, const char* id);
void streamTaskCleanupCheckInfo(STaskCheckInfo* pInfo);
// fill-history task
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask);
int32_t streamLaunchFillHistoryTask(SStreamTask* pTask, bool lock);
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
void streamExecScanHistoryInFuture(SStreamTask* pTask, int32_t idleDuration);
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
@ -810,12 +823,14 @@ void streamMetaNotifyClose(SStreamMeta* pMeta);
void streamMetaStartHb(SStreamMeta* pMeta);
int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
int64_t endTs, bool ready);
int32_t streamMetaAddTaskLaunchResultNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId,
int64_t startTs, int64_t endTs, bool ready);
int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo);
void streamMetaClearStartInfo(STaskStartInfo* pStartInfo);
int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta);
int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
void streamMetaAddFailedTaskSelf(SStreamTask* pTask, int64_t failedTs);
int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, bool lock);
void streamMetaAddFailedTaskSelf(SStreamTask* pTask, int64_t failedTs, bool lock);
void streamMetaAddIntoUpdateTaskList(SStreamMeta* pMeta, SStreamTask* pTask, SStreamTask* pHTask, int32_t transId,
int64_t startTs);
void streamMetaClearSetUpdateTaskListComplete(SStreamMeta* pMeta);

View File

@ -21,6 +21,7 @@
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"
#include "ttrace.h"
#ifdef __cplusplus
extern "C" {
@ -170,7 +171,8 @@ void walClose(SWal *);
// write interfaces
// With the index assigned by the caller, wal guarantees linearizability
int32_t walAppendLog(SWal *, int64_t index, tmsg_t msgType, SWalSyncInfo syncMeta, const void *body, int32_t bodyLen);
int32_t walAppendLog(SWal *, int64_t index, tmsg_t msgType, SWalSyncInfo syncMeta, const void *body, int32_t bodyLen,
const STraceId *trace);
int32_t walFsync(SWal *, bool force);
// apis for lifecycle management

View File

@ -367,6 +367,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_MND_INVALID_WAL_LEVEL TAOS_DEF_ERROR_CODE(0, 0x039C)
#define TSDB_CODE_MND_INVALID_DNODE_LIST_FMT TAOS_DEF_ERROR_CODE(0, 0x039D)
#define TSDB_CODE_MND_DNODE_LIST_REPEAT TAOS_DEF_ERROR_CODE(0, 0x039E)
#define TSDB_CODE_MND_NO_VGROUP_ON_DB TAOS_DEF_ERROR_CODE(0, 0x039F)
// mnode-node
#define TSDB_CODE_MND_MNODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03A0)
@ -513,6 +514,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_ANA_ANODE_RETURN_ERROR TAOS_DEF_ERROR_CODE(0, 0x0445)
#define TSDB_CODE_ANA_ANODE_TOO_MANY_ROWS TAOS_DEF_ERROR_CODE(0, 0x0446)
#define TSDB_CODE_ANA_WN_DATA TAOS_DEF_ERROR_CODE(0, 0x0447)
#define TSDB_CODE_ANA_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x0448)
// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
@ -1069,6 +1071,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_VTABLE_SCAN_INVALID_DOWNSTREAM TAOS_DEF_ERROR_CODE(0, 0x6201)
#define TSDB_CODE_VTABLE_PRIMTS_HAS_REF TAOS_DEF_ERROR_CODE(0, 0x6202)
#define TSDB_CODE_VTABLE_NOT_VIRTUAL_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x6203)
#define TSDB_CODE_VTABLE_NOT_SUPPORT_DATA_TYPE TAOS_DEF_ERROR_CODE(0, 0x6204)
#ifdef __cplusplus
}
#endif

View File

@ -36,6 +36,12 @@ extern "C" {
#define FLT_GREATEREQUAL(_x, _y) (FLT_EQUAL((_x), (_y)) || ((_x) > (_y)))
#define FLT_LESSEQUAL(_x, _y) (FLT_EQUAL((_x), (_y)) || ((_x) < (_y)))
#define DBL_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (FLT_COMPAR_TOL_FACTOR * DBL_EPSILON))
#define DBL_GREATER(_x, _y) (!DBL_EQUAL((_x), (_y)) && ((_x) > (_y)))
#define DBL_LESS(_x, _y) (!DBL_EQUAL((_x), (_y)) && ((_x) < (_y)))
#define DBL_GREATEREQUAL(_x, _y) (DBL_EQUAL((_x), (_y)) || ((_x) > (_y)))
#define DBL_LESSEQUAL(_x, _y) (DBL_EQUAL((_x), (_y)) || ((_x) < (_y)))
#define PATTERN_COMPARE_INFO_INITIALIZER { '%', '_', L'%', L'_' }
typedef struct SPatternCompareInfo {

View File

@ -606,7 +606,8 @@ typedef enum ELogicConditionType {
#define TFS_MAX_LEVEL (TFS_MAX_TIERS - 1)
#define TFS_PRIMARY_LEVEL 0
#define TFS_PRIMARY_ID 0
#define TFS_MIN_DISK_FREE_SIZE 50 * 1024 * 1024
#define TFS_MIN_DISK_FREE_SIZE 50 * 1024 * 1024 // 50MB
#define TFS_MIN_DISK_FREE_SIZE_MAX (2ULL * 1024 * 1024 * 1024 * 1024) // 2TB
enum { TRANS_STAT_INIT = 0, TRANS_STAT_EXECUTING, TRANS_STAT_EXECUTED, TRANS_STAT_ROLLBACKING, TRANS_STAT_ROLLBACKED };
enum { TRANS_OPER_INIT = 0, TRANS_OPER_EXECUTE, TRANS_OPER_ROLLBACK };

View File

@ -150,6 +150,7 @@ clean_service
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
${csudo}rm -f ${bin_link_dir}/taosudf || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :

View File

@ -162,9 +162,9 @@ remove_service_of() {
remove_tools_of() {
_tool=$1
kill_service_of ${_tool}
[ -e "${bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${bin_link_dir}/${_tool} || :
[ -L "${bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${bin_link_dir}/${_tool} || :
[ -e "${installDir}/bin/${_tool}" ] && ${csudo}rm -rf ${installDir}/bin/${_tool} || :
[ -e "${local_bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${local_bin_link_dir}/${_tool} || :
[ -L "${local_bin_link_dir}/${_tool}" ] && ${csudo}rm -rf ${local_bin_link_dir}/${_tool} || :
}
remove_bin() {
@ -236,21 +236,56 @@ function remove_data_and_config() {
[ -d "${log_dir}" ] && ${csudo}rm -rf ${log_dir}
}
echo
echo "Do you want to remove all the data, log and configuration files? [y/n]"
read answer
remove_flag=false
if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then
confirmMsg="I confirm that I would like to delete all data, log and configuration files"
echo "Please enter '${confirmMsg}' to continue"
function usage() {
echo -e "\nUsage: $(basename $0) [-e <yes|no>]"
echo "-e: silent mode, specify whether to remove all the data, log and configuration files."
echo " yes: remove the data, log, and configuration files."
echo " no: don't remove the data, log, and configuration files."
}
# main
interactive_remove="yes"
remove_flag="false"
while getopts "e:h" opt; do
case $opt in
e)
interactive_remove="no"
if [ "$OPTARG" == "yes" ]; then
remove_flag="true"
echo "Remove all the data, log, and configuration files."
elif [ "$OPTARG" == "no" ]; then
remove_flag="false"
echo "Do not remove the data, log, and configuration files."
else
echo "Invalid option for -e: $OPTARG"
usage
exit 1
fi
;;
h | *)
usage
exit 1
;;
esac
done
if [ "$interactive_remove" == "yes" ]; then
echo -e "\nDo you want to remove all the data, log and configuration files? [y/n]"
read answer
if [ X"$answer" == X"${confirmMsg}" ]; then
remove_flag=true
else
echo "answer doesn't match, skip this step"
if [ X$answer == X"y" ] || [ X$answer == X"Y" ]; then
confirmMsg="I confirm that I would like to delete all data, log and configuration files"
echo "Please enter '${confirmMsg}' to continue"
read answer
if [ X"$answer" == X"${confirmMsg}" ]; then
remove_flag="true"
else
echo "answer doesn't match, skip this step"
fi
fi
echo
fi
echo
if [ -e ${install_main_dir}/uninstall_${PREFIX}x.sh ]; then
if [ X$remove_flag == X"true" ]; then
@ -260,7 +295,6 @@ if [ -e ${install_main_dir}/uninstall_${PREFIX}x.sh ]; then
fi
fi
if [ "$osType" = "Darwin" ]; then
clean_service_on_launchctl
${csudo}rm -rf /Applications/TDengine.app
@ -299,8 +333,7 @@ elif echo $osinfo | grep -qwi "centos"; then
${csudo}rpm -e --noscripts tdengine >/dev/null 2>&1 || :
fi
command -v systemctl >/dev/null 2>&1 && ${csudo}systemctl daemon-reload >/dev/null 2>&1 || true
echo
echo "${productName} is removed successfully!"
echo
echo

View File

@ -57,7 +57,7 @@ function clean_bin() {
${csudo}rm -f ${bin_link_dir}/${dumpName2} || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || :
${csudo}rm -f ${bin_link_dir}/set_core || :
[ -f ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || :
[ -L ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || :
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
${csudo}rm -f ${bin_link_dir}/${clientName2} || :
@ -65,7 +65,7 @@ function clean_bin() {
${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || :
${csudo}rm -f ${bin_link_dir}/${dumpName2} || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript2} || :
[ -f ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || :
[ -L ${bin_link_dir}/${inspect_name} ] && ${csudo}rm -f ${bin_link_dir}/${inspect_name} || :
fi
}

View File

@ -313,7 +313,7 @@ typedef struct SSyncQueryParam {
void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4);
void doSetOneRowPtr(SReqResultInfo* pResultInfo, bool isStmt);
void doSetOneRowPtr(SReqResultInfo* pResultInfo);
void setResPrecision(SReqResultInfo* pResInfo, int32_t precision);
int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4, bool isStmt);
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4, bool isStmt);

View File

@ -93,7 +93,7 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {
int32_t total = atomic_add_fetch_64((int64_t *)&pSummary->totalRequests, 1);
int32_t currentInst = atomic_add_fetch_64((int64_t *)&pSummary->currentRequests, 1);
tscDebug("req:0x%" PRIx64 ", new from connObj:0x%" PRIx64 ", current:%d, app current:%d, total:%d, QID:0x%" PRIx64,
tscDebug("req:0x%" PRIx64 ", create request from conn:0x%" PRIx64 ", current:%d, app current:%d, total:%d, QID:0x%" PRIx64,
pRequest->self, pRequest->pTscObj->id, num, currentInst, total, pRequest->requestId);
}
@ -254,7 +254,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
int32_t reqType = SLOW_LOG_TYPE_OTHERS;
int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
tscDebug("req:0x%" PRIx64 ", free from connObj:0x%" PRIx64 ", QID:0x%" PRIx64
tscDebug("req:0x%" PRIx64 ", free from conn:0x%" PRIx64 ", QID:0x%" PRIx64
", elapsed:%.2f ms, current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
@ -263,16 +263,18 @@ static void deregisterRequest(SRequestObj *pRequest) {
(0 == ((SVnodeModifyOpStmt *)pRequest->pQuery->pRoot)->sqlNodeType)) ||
QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType) {
tscDebug("req:0x%" PRIx64 ", insert duration:%" PRId64 "us, parseCost:%" PRId64 "us, ctgCost:%" PRId64
"us, analyseCost:%" PRId64 "us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
"us, analyseCost:%" PRId64 "us, planCost:%" PRId64 "us, exec:%" PRId64 "us, QID:0x%" PRIx64,
pRequest->self, duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs,
pRequest->metric.analyseCostUs, pRequest->metric.planCostUs, pRequest->metric.execCostUs);
pRequest->metric.analyseCostUs, pRequest->metric.planCostUs, pRequest->metric.execCostUs,
pRequest->requestId);
(void)atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
reqType = SLOW_LOG_TYPE_INSERT;
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
tscDebug("req:0x%" PRIx64 ", query duration:%" PRId64 "us, parseCost:%" PRId64 "us, ctgCost:%" PRId64
"us, analyseCost:%" PRId64 "us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
"us, analyseCost:%" PRId64 "us, planCost:%" PRId64 "us, exec:%" PRId64 "us, QID:0x%" PRIx64,
pRequest->self, duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs,
pRequest->metric.analyseCostUs, pRequest->metric.planCostUs, pRequest->metric.execCostUs);
pRequest->metric.analyseCostUs, pRequest->metric.planCostUs, pRequest->metric.execCostUs,
pRequest->requestId);
(void)atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
reqType = SLOW_LOG_TYPE_QUERY;
@ -460,7 +462,7 @@ void destroyTscObj(void *pObj) {
STscObj *pTscObj = pObj;
int64_t tscId = pTscObj->id;
tscTrace("connObj:%" PRIx64 ", begin destroy, p:%p", tscId, pTscObj);
tscTrace("conn:%" PRIx64 ", begin destroy, p:%p", tscId, pTscObj);
SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType};
hbDeregisterConn(pTscObj, connKey);
@ -469,7 +471,7 @@ void destroyTscObj(void *pObj) {
taosHashCleanup(pTscObj->pRequests);
schedulerStopQueryHb(pTscObj->pAppInfo->pTransporter);
tscDebug("connObj:0x%" PRIx64 ", p:%p destroyed, remain inst totalConn:%" PRId64, pTscObj->id, pTscObj,
tscDebug("conn:0x%" PRIx64 ", p:%p destroyed, remain inst totalConn:%" PRId64, pTscObj->id, pTscObj,
pTscObj->pAppInfo->numOfConns);
// In any case, we should not free the app inst here, or a race condition arises.
@ -478,7 +480,7 @@ void destroyTscObj(void *pObj) {
(void)taosThreadMutexDestroy(&pTscObj->mutex);
taosMemoryFree(pTscObj);
tscTrace("connObj:0x%" PRIx64 ", end destroy, p:%p", tscId, pTscObj);
tscTrace("conn:0x%" PRIx64 ", end destroy, p:%p", tscId, pTscObj);
}
int32_t createTscObj(const char *user, const char *auth, const char *db, int32_t connType, SAppInstInfo *pAppInfo,
@ -518,7 +520,7 @@ int32_t createTscObj(const char *user, const char *auth, const char *db, int32_t
(void)atomic_add_fetch_64(&(*pObj)->pAppInfo->numOfConns, 1);
tscInfo("connObj:0x%" PRIx64 ", created, p:%p", (*pObj)->id, *pObj);
tscInfo("conn:0x%" PRIx64 ", created, p:%p", (*pObj)->id, *pObj);
return code;
}
@ -684,7 +686,7 @@ void doDestroyRequest(void *p) {
SRequestObj *pRequest = (SRequestObj *)p;
uint64_t reqId = pRequest->requestId;
tscDebug("QID:0x%" PRIx64 ", begin destroy request, res:%p", reqId, pRequest);
tscTrace("QID:0x%" PRIx64 ", begin destroy request, res:%p", reqId, pRequest);
int64_t nextReqRefId = pRequest->relation.nextRefId;
@ -726,7 +728,7 @@ void doDestroyRequest(void *p) {
taosMemoryFreeClear(pRequest->effectiveUser);
taosMemoryFreeClear(pRequest->sqlstr);
taosMemoryFree(pRequest);
tscDebug("QID:0x%" PRIx64 ", end destroy request, res:%p", reqId, pRequest);
tscTrace("QID:0x%" PRIx64 ", end destroy request, res:%p", reqId, pRequest);
destroyNextReq(nextReqRefId);
}

View File

@ -121,7 +121,7 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
pTscObj->authVer = pRsp->version;
if (pTscObj->sysInfo != pRsp->sysInfo) {
tscDebug("update sysInfo of user %s from %" PRIi8 " to %" PRIi8 ", connObj:%" PRIi64, pRsp->user,
tscDebug("update sysInfo of user %s from %" PRIi8 " to %" PRIi8 ", conn:%" PRIi64, pRsp->user,
pTscObj->sysInfo, pRsp->sysInfo, pTscObj->id);
pTscObj->sysInfo = pRsp->sysInfo;
}
@ -134,7 +134,7 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
if (passInfo->fp) {
(*passInfo->fp)(passInfo->param, &pRsp->passVer, TAOS_NOTIFY_PASSVER);
}
tscDebug("update passVer of user %s from %d to %d, connObj:%" PRIi64, pRsp->user, oldVer,
tscDebug("update passVer of user %s from %d to %d, conn:%" PRIi64, pRsp->user, oldVer,
atomic_load_32(&passInfo->ver), pTscObj->id);
}
}
@ -147,7 +147,7 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
if (whiteListInfo->fp) {
(*whiteListInfo->fp)(whiteListInfo->param, &pRsp->whiteListVer, TAOS_NOTIFY_WHITELIST_VER);
}
tscDebug("update whitelist version of user %s from %" PRId64 " to %" PRId64 ", connObj:%" PRIi64, pRsp->user,
tscDebug("update whitelist version of user %s from %" PRId64 " to %" PRId64 ", conn:%" PRIi64, pRsp->user,
oldVer, atomic_load_64(&whiteListInfo->ver), pTscObj->id);
}
} else {
@ -156,7 +156,7 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
SWhiteListInfo *whiteListInfo = &pTscObj->whiteListInfo;
int64_t oldVer = atomic_load_64(&whiteListInfo->ver);
atomic_store_64(&whiteListInfo->ver, pRsp->whiteListVer);
tscDebug("update whitelist version of user %s from %" PRId64 " to %" PRId64 ", connObj:%" PRIi64, pRsp->user,
tscDebug("update whitelist version of user %s from %" PRId64 " to %" PRId64 ", conn:%" PRIi64, pRsp->user,
oldVer, atomic_load_64(&whiteListInfo->ver), pTscObj->id);
}
releaseTscObj(pReq->connKey.tscRid);
@ -516,7 +516,7 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
pTscObj->pAppInfo->totalDnodes);
if (pRsp->query->killRid) {
tscDebug("QID:%" PRIx64 ", need to be killed now", pRsp->query->killRid);
tscDebug("QID:0x%" PRIx64 ", need to be killed now", pRsp->query->killRid);
SRequestObj *pRequest = acquireRequest(pRsp->query->killRid);
if (NULL == pRequest) {
tscDebug("QID:0x%" PRIx64 ", not exist to kill", pRsp->query->killRid);
@ -885,7 +885,7 @@ int32_t hbGetExpiredDBInfo(SClientHbKey *connKey, struct SCatalog *pCatalog, SCl
for (int32_t i = 0; i < dbNum; ++i) {
SDbCacheInfo *db = &dbs[i];
tscDebug("the %dth expired dbFName:%s, dbId:%" PRId64
tscDebug("the %dth expired db:%s, dbId:%" PRId64
", vgVersion:%d, cfgVersion:%d, numOfTable:%d, startTs:%" PRId64,
i, db->dbFName, db->dbId, db->vgVersion, db->cfgVersion, db->numOfTable, db->stateTs);

View File

@ -247,7 +247,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
int32_t err = taosHashPut(pTscObj->pRequests, &(*pRequest)->self, sizeof((*pRequest)->self), &(*pRequest)->self,
sizeof((*pRequest)->self));
if (err) {
tscError("req:0x%" PRId64 ", failed to add to request container, QID:0x%" PRIx64 ", connObj:%" PRId64 ", %s",
tscError("req:0x%" PRId64 ", failed to add to request container, QID:0x%" PRIx64 ", conn:%" PRId64 ", %s",
(*pRequest)->self, (*pRequest)->requestId, pTscObj->id, sql);
destroyRequest(*pRequest);
*pRequest = NULL;
@ -258,7 +258,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
if (tsQueryUseNodeAllocator && !qIsInsertValuesSql((*pRequest)->sqlstr, (*pRequest)->sqlLen)) {
if (TSDB_CODE_SUCCESS !=
nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) {
tscError("req:0x%" PRId64 ", failed to create node allocator, QID:0x%" PRIx64 ", connObj:%" PRId64 ", %s", (*pRequest)->self,
tscError("req:0x%" PRId64 ", failed to create node allocator, QID:0x%" PRIx64 ", conn:%" PRId64 ", %s", (*pRequest)->self,
(*pRequest)->requestId, pTscObj->id, sql);
destroyRequest(*pRequest);
*pRequest = NULL;
@ -266,7 +266,7 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
}
}
tscDebugL("req:0x%" PRIx64 ", QID:0x%" PRIx64 ", build request", (*pRequest)->self, (*pRequest)->requestId);
tscDebugL("req:0x%" PRIx64 ", build request, QID:0x%" PRIx64, (*pRequest)->self, (*pRequest)->requestId);
return TSDB_CODE_SUCCESS;
}
@ -1658,7 +1658,7 @@ int32_t taosConnectImpl(const char* user, const char* auth, const char* db, __ta
*pTscObj = NULL;
return terrno;
} else {
tscInfo("connObj:0x%" PRIx64 ", connection is opening, connId:%u, dnodeConn:%p, QID:0x%" PRIx64, (*pTscObj)->id,
tscInfo("conn:0x%" PRIx64 ", connection is opening, connId:%u, dnodeConn:%p, QID:0x%" PRIx64, (*pTscObj)->id,
(*pTscObj)->connId, (*pTscObj)->pAppInfo->pTransporter, pRequest->requestId);
destroyRequest(pRequest);
}
@ -1941,12 +1941,12 @@ TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, cons
// return taos_connect(ipStr, userStr, passStr, dbStr, port);
// }
void doSetOneRowPtr(SReqResultInfo* pResultInfo, bool isStmt) {
void doSetOneRowPtr(SReqResultInfo* pResultInfo) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
SResultColumn* pCol = &pResultInfo->pCol[i];
int32_t type = pResultInfo->fields[i].type;
int32_t schemaBytes = calcSchemaBytesFromTypeBytes(type, pResultInfo->fields[i].bytes, isStmt);
int32_t schemaBytes = calcSchemaBytesFromTypeBytes(type, pResultInfo->userFields[i].bytes, false);
if (IS_VAR_DATA_TYPE(type)) {
if (!IS_VAR_NULL_TYPE(type, schemaBytes) && pCol->offset[pResultInfo->current] != -1) {
@ -2012,7 +2012,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
}
if (setupOneRowPtr) {
doSetOneRowPtr(pResultInfo, pRequest->isStmtBind);
doSetOneRowPtr(pResultInfo);
pResultInfo->current += 1;
}
@ -2059,7 +2059,7 @@ void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertU
return NULL;
} else {
if (setupOneRowPtr) {
doSetOneRowPtr(pResultInfo, pRequest->isStmtBind);
doSetOneRowPtr(pResultInfo);
pResultInfo->current += 1;
}
@ -2135,8 +2135,9 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength, bo
static int32_t convertDecimalType(SReqResultInfo* pResultInfo) {
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
TAOS_FIELD_E* pField = pResultInfo->fields + i;
int32_t type = pField->type;
TAOS_FIELD_E* pFieldE = pResultInfo->fields + i;
TAOS_FIELD* pField = pResultInfo->userFields + i;
int32_t type = pFieldE->type;
int32_t bufLen = 0;
char* p = NULL;
if (!IS_DECIMAL_TYPE(type) || !pResultInfo->pCol[i].pData) {
@ -2144,6 +2145,7 @@ static int32_t convertDecimalType(SReqResultInfo* pResultInfo) {
} else {
bufLen = 64;
p = taosMemoryRealloc(pResultInfo->convertBuf[i], bufLen * pResultInfo->numOfRows);
pFieldE->bytes = bufLen;
pField->bytes = bufLen;
}
if (!p) return terrno;
@ -2151,7 +2153,7 @@ static int32_t convertDecimalType(SReqResultInfo* pResultInfo) {
for (int32_t j = 0; j < pResultInfo->numOfRows; ++j) {
int32_t code = decimalToStr((DecimalWord*)(pResultInfo->pCol[i].pData + j * tDataTypes[type].bytes), type,
pField->precision, pField->scale, p, bufLen);
pFieldE->precision, pFieldE->scale, p, bufLen);
p += bufLen;
if (TSDB_CODE_SUCCESS != code) {
return code;
@ -2395,6 +2397,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo) {
}
int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4, bool isStmt) {
bool convertForDecimal = convertUcs4;
if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) {
tscError("setResultDataPtr paras error");
return TSDB_CODE_TSC_INTERNAL_ERROR;
@ -2507,7 +2510,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4, bool isS
code = doConvertUCS4(pResultInfo, colLength, isStmt);
}
#endif
if (TSDB_CODE_SUCCESS == code && convertUcs4) {
if (TSDB_CODE_SUCCESS == code && convertForDecimal) {
code = convertDecimalType(pResultInfo);
}
return code;
@ -2990,7 +2993,7 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t s
return NULL;
}
tscDebug("connObj:0x%" PRIx64 ", taos_query start with sql:%s", *(int64_t*)taos, sql);
tscDebug("conn:0x%" PRIx64 ", taos_query execute sql:%s", *(int64_t*)taos, sql);
SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
if (NULL == param) {
@ -3021,7 +3024,8 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t s
}
taosMemoryFree(param);
tscDebug("connObj:0x%" PRIx64 ", res:%p created, taos_query end", *(int64_t*)taos, pRequest);
tscDebug("QID:0x%" PRIx64 ", taos_query end, conn:0x%" PRIx64 " res:%p", pRequest ? pRequest->requestId : 0,
*(int64_t*)taos, pRequest);
return pRequest;
}

View File

@ -487,10 +487,10 @@ void taos_close_internal(void *taos) {
}
STscObj *pTscObj = (STscObj *)taos;
tscDebug("connObj:0x%" PRIx64 ", try to close connection, numOfReq:%d", pTscObj->id, pTscObj->numOfReqs);
tscDebug("conn:0x%" PRIx64 ", try to close connection, numOfReq:%d", pTscObj->id, pTscObj->numOfReqs);
if (TSDB_CODE_SUCCESS != taosRemoveRef(clientConnRefPool, pTscObj->id)) {
tscError("connObj:0x%" PRIx64 ", failed to remove ref from conn pool", pTscObj->id);
tscError("conn:0x%" PRIx64 ", failed to remove ref from conn pool", pTscObj->id);
}
}
@ -548,7 +548,7 @@ void taos_free_result(TAOS_RES *res) {
if (TD_RES_QUERY(res)) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("QID:0x%" PRIx64 ", call taos_free_result to free query", pRequest->requestId);
tscDebug("QID:0x%" PRIx64 ", call taos_free_result to free query, res:%p", pRequest->requestId, res);
destroyRequest(pRequest);
return;
}
@ -647,7 +647,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
}
if (pResultInfo->current < pResultInfo->numOfRows) {
doSetOneRowPtr(pResultInfo, false);
doSetOneRowPtr(pResultInfo);
pResultInfo->current += 1;
return pResultInfo->row;
} else {
@ -655,7 +655,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
return NULL;
}
doSetOneRowPtr(pResultInfo, false);
doSetOneRowPtr(pResultInfo);
pResultInfo->current += 1;
return pResultInfo->row;
}
@ -1360,7 +1360,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
SQuery *pQuery = pRequest->pQuery;
pRequest->metric.ctgCostUs += taosGetTimestampUs() - pRequest->metric.ctgStart;
qDebug("req:0x%" PRIx64 ", start to continue parse, QID:0x%" PRIx64 ", code:%s", pRequest->self, pRequest->requestId,
qDebug("req:0x%" PRIx64 ", continue parse query, QID:0x%" PRIx64 ", code:%s", pRequest->self, pRequest->requestId,
tstrerror(code));
if (code == TSDB_CODE_SUCCESS) {

View File

@ -361,7 +361,7 @@ void monitorCounterInc(int64_t clusterId, const char* counterName, const char**
tscError("clusterId:0x%" PRIx64 ", monitor:%p counter:%s inc failed", clusterId, pMonitor, counterName);
goto end;
}
tscDebug("clusterId:0x%" PRIx64 ", monitor:%p, counter:%s inc", pMonitor->clusterId, pMonitor, counterName);
tscTrace("clusterId:0x%" PRIx64 ", monitor:%p, counter:%s inc", pMonitor->clusterId, pMonitor, counterName);
end:
taosWUnLockLatch(&monitorLock);

View File

@ -123,7 +123,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
for (int32_t i = 0; i < connectRsp.epSet.numOfEps; ++i) {
tscDebug("QID:0x%" PRIx64 ", epSet.fqdn[%d]:%s port:%d, connObj:0x%" PRIx64, pRequest->requestId, i,
tscDebug("QID:0x%" PRIx64 ", epSet.fqdn[%d]:%s port:%d, conn:0x%" PRIx64, pRequest->requestId, i,
connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, pTscObj->id);
}
@ -956,7 +956,9 @@ int32_t processCreateStreamFirstPhaseRsp(void* param, SDataBuf* pMsg, int32_t co
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
if (code == 0 && !pRequest->streamRunHistory && tsStreamRunHistoryAsync){
if (code == 0 && !pRequest->streamRunHistory &&
((SCreateStreamStmt*)(pRequest->pQuery->pRoot))->pOptions->fillHistory &&
((SCreateStreamStmt*)(pRequest->pQuery->pRoot))->pOptions->runHistoryAsync){
processCreateStreamSecondPhase(pRequest);
}

View File

@ -1631,20 +1631,20 @@ int stmtClose(TAOS_STMT* stmt) {
STMT_DLOG_E("start to free stmt");
pStmt->queue.stopQueue = true;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
if (pStmt->bindThreadInUse) {
pStmt->queue.stopQueue = true;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
(void)taosThreadJoin(pStmt->bindThread, NULL);
pStmt->bindThreadInUse = false;
}
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
}
STMT_DLOG("stmt %p closed, stbInterlaceMode:%d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64
", parseSqlNum=>%" PRId64 ", pStmt->stat.bindDataNum=>%" PRId64

View File

@ -889,6 +889,9 @@ static int stmtSetDbName2(TAOS_STMT2* stmt, const char* dbName) {
if (pStmt->exec.pRequest->pDb == NULL) {
return terrno;
}
if (pStmt->sql.stbInterlaceMode) {
pStmt->sql.siInfo.dbname = pStmt->db;
}
return TSDB_CODE_SUCCESS;
}
@ -1928,16 +1931,19 @@ int stmtClose2(TAOS_STMT2* stmt) {
STMT_DLOG_E("start to free stmt");
pStmt->queue.stopQueue = true;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
if (pStmt->bindThreadInUse) {
pStmt->queue.stopQueue = true;
(void)taosThreadMutexLock(&pStmt->queue.mutex);
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
(void)taosThreadJoin(pStmt->bindThread, NULL);
pStmt->bindThreadInUse = false;
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
}
TSC_ERR_RET(taosThreadMutexLock(&pStmt->asyncBindParam.mutex));
@ -1946,9 +1952,6 @@ int stmtClose2(TAOS_STMT2* stmt) {
}
TSC_ERR_RET(taosThreadMutexUnlock(&pStmt->asyncBindParam.mutex));
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
(void)taosThreadCondDestroy(&pStmt->asyncBindParam.waitCond);
(void)taosThreadMutexDestroy(&pStmt->asyncBindParam.mutex);

View File

@ -1167,6 +1167,7 @@ TEST(stmt2Case, stmt2_insert_non_statndard) {
}
// TD-33419
// TD-34075
TEST(stmt2Case, stmt2_insert_db) {
TAOS* taos = taos_connect("localhost", "root", "taosdata", "", 0);
ASSERT_NE(taos, nullptr);
@ -1177,7 +1178,7 @@ TEST(stmt2Case, stmt2_insert_db) {
"INSERT INTO `stmt2_testdb_12`.`stb1` (ts,int_tag,tbname) VALUES "
"(1591060627000,1,'tb1')(1591060627000,2,'tb2')");
TAOS_STMT2_OPTION option = {0, false, false, NULL, NULL};
TAOS_STMT2_OPTION option = {0, true, true, NULL, NULL};
TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
ASSERT_NE(stmt, nullptr);

View File

@ -822,6 +822,7 @@ _exit:
return code;
}
// todo: serialize term attributes.
int32_t tDecodeRestoreCheckpointInfo(SDecoder* pDecoder, SRestoreCheckpointInfo* pReq) {
int32_t code = 0;
int32_t lino;

View File

@ -3970,6 +3970,8 @@ int32_t tSerializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp) {
}
}
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pRsp->keep));
tEndEncode(&encoder);
_exit:
@ -4070,6 +4072,13 @@ int32_t tDeserializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp)
pRsp->pColRefs = NULL;
}
}
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pRsp->keep));
} else {
pRsp->keep = 0;
}
tEndDecode(&decoder);
_exit:
@ -6676,6 +6685,8 @@ int32_t tSerializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pReq
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->name));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->igNotExists));
ENCODESQL();
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->force));
tEndEncode(&encoder);
_exit:
@ -6699,6 +6710,9 @@ int32_t tDeserializeSMDropTopicReq(void *buf, int32_t bufLen, SMDropTopicReq *pR
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->name));
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->igNotExists));
DECODESQL();
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->force));
}
tEndDecode(&decoder);
_exit:
@ -6719,6 +6733,7 @@ int32_t tSerializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *pR
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->topic));
TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->cgroup));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->igNotExists));
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->force));
tEndEncode(&encoder);
_exit:
@ -6741,6 +6756,9 @@ int32_t tDeserializeSMDropCgroupReq(void *buf, int32_t bufLen, SMDropCgroupReq *
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->topic));
TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->cgroup));
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->igNotExists));
if (!tDecodeIsEnd(&decoder)) {
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->force));
}
tEndDecode(&decoder);
_exit:
@ -9477,10 +9495,11 @@ int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam *pOpParam)
TAOS_CHECK_RETURN(tDecodeI32(pDecoder, &pOpParam->downstreamIdx));
switch (pOpParam->opType) {
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
STableScanOperatorParam *pScan = taosMemoryMalloc(sizeof(STableScanOperatorParam));
if (NULL == pScan) {
pOpParam->value = taosMemoryMalloc(sizeof(STableScanOperatorParam));
if (NULL == pOpParam->value) {
TAOS_CHECK_RETURN(terrno);
}
STableScanOperatorParam *pScan = pOpParam->value;
TAOS_CHECK_RETURN(tDecodeI8(pDecoder, (int8_t *)&pScan->tableSeq));
int32_t uidNum = 0;
int64_t uid = 0;
@ -9526,8 +9545,6 @@ int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam *pOpParam)
}
TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &pScan->window.skey));
TAOS_CHECK_RETURN(tDecodeI64(pDecoder, &pScan->window.ekey));
pOpParam->value = pScan;
break;
}
default:

Some files were not shown because too many files have changed in this diff.