Merge branch '3.3.6' into fix/mergegpt
Commit: b4e5aafac2
@ -90,6 +90,16 @@ jobs:
|
|||
which taosadapter
|
||||
which taoskeeper
|
||||
|
||||
- name: Statistics ldd
|
||||
run: |
|
||||
find ${{ github.workspace }}/debug/build/lib -type f -name "*.so" -print0 | xargs -0 ldd || true
|
||||
find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ldd || true
|
||||
|
||||
- name: Statistics size
|
||||
run: |
|
||||
find ${{ github.workspace }}/debug/build/lib -type f -print0 | xargs -0 ls -lhrS
|
||||
find ${{ github.workspace }}/debug/build/bin -type f -print0 | xargs -0 ls -lhrS
|
||||
|
||||
- name: Start taosd
|
||||
run: |
|
||||
cp /etc/taos/taos.cfg ./
@ -6,6 +6,7 @@ on:
|
|||
- 'main'
|
||||
- '3.0'
|
||||
- '3.1'
|
||||
- '3.3.6'
|
||||
paths-ignore:
|
||||
- 'packaging/**'
|
||||
- 'docs/**'
@ -1,3 +1,5 @@
|
|||
# Run unit-test and system-test cases for TDgpt when TDgpt code is changed.
|
||||
|
||||
name: TDgpt Test
|
||||
|
||||
on:
@ -1,3 +1,5 @@
|
|||
# Scheduled updates for the TDgpt service.
|
||||
|
||||
name: TDgpt Update Service
|
||||
|
||||
on:
@ -2,7 +2,7 @@
|
|||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG 3.0
|
||||
GIT_TAG 3.3.6
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
@ -191,7 +191,7 @@ INTERVAL(interval_val [, interval_offset])
|
|||
|
||||
The time window clause includes 3 sub-clauses:
|
||||
|
||||
- INTERVAL clause: used to generate windows of equal time periods, where interval_val specifies the size of each time window, and interval_offset specifies;
|
||||
- INTERVAL clause: used to generate windows of equal time periods, where interval_val specifies the size of each time window, and interval_offset specifies its starting offset. By default, windows begin at Unix time 0 (1970-01-01 00:00:00 UTC). If interval_offset is specified, the windows start from "Unix time 0 + interval_offset";
|
||||
- SLIDING clause: used to specify the time the window slides forward;
|
||||
- FILL: used to specify the filling mode of data in case of missing data in the window interval.
@ -26,7 +26,7 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name
|
|||
SUBTABLE(expression) AS subquery
|
||||
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
@ -147,6 +147,7 @@ When creating a stream, you can specify the trigger mode of stream computing thr
|
|||
```sql
|
||||
create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s)
|
||||
```
|
||||
5. CONTINUOUS_WINDOW_CLOSE: Results are output when the window is closed. Modifying or deleting data does not immediately trigger a recalculation. Instead, periodic recalculations are performed every rec_time_val duration. If rec_time_val is not specified, the recalculation period is 60 minutes. If the recalculation time exceeds rec_time_val, the next recalculation will be automatically initiated after the current one is completed. Currently, this mode only supports INTERVAL windows. If the FILL clause is used, relevant information of the adapter needs to be configured, including adapterFqdn, adapterPort, and adapterToken. The adapterToken is a string obtained by Base64-encoding `{username}:{password}`. For example, after encoding `root:taosdata`, the result is `cm9vdDp0YW9zZGF0YQ==`.
|
||||
The closing of the window is determined by the event time. If the event stream is interrupted or continuously delayed, the event time cannot be updated, which may lead to outdated computation results.
|
||||
|
||||
Therefore, stream computing provides the MAX_DELAY trigger mode that combines event time with processing time: MAX_DELAY mode triggers computation immediately when the window closes, and its unit can be specified, specific units: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks). Additionally, when data is written, if the time that triggers computation exceeds the time specified by MAX_DELAY, computation is triggered immediately.
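The adapterToken value mentioned in the CONTINUOUS_WINDOW_CLOSE description above is plain Base64; as a quick sketch (assuming only Python's standard `base64` module), the documented example can be reproduced as follows:

```python
import base64

# adapterToken is Base64("{username}:{password}"); for the default credentials:
token = base64.b64encode(b"root:taosdata").decode("ascii")
print(token)  # prints: cm9vdDp0YW9zZGF0YQ==
```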
@ -146,9 +146,19 @@ Not supported
|
|||
```
|
||||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
The example code for binding parameters with stmt2 (TDengine v3.3.5.0 or higher is required) is as follows:
|
||||
|
||||
```c
|
||||
{{#include docs/examples/c/stmt2_insert_demo.c}}
|
||||
```
|
||||
|
||||
The example code for binding parameters with stmt is as follows:
|
||||
|
||||
```c
|
||||
{{#include docs/examples/c/stmt_insert_demo.c}}
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="REST API" value="rest">
|
||||
Not supported
@ -55,7 +55,7 @@ When network I/O and other processing resources are not bottlenecks, by optimizi
|
|||
|
||||
Generally, when TDengine needs to select a mount point from the same level to create a new data file, it uses a round-robin strategy for selection. However, in reality, each disk may have different capacities, or the same capacity but different amounts of data written, leading to an imbalance in available space on each disk. In practice, this may result in selecting a disk with very little remaining space.
|
||||
|
||||
To address this issue, starting from 3.1.1.0, a new configuration minDiskFreeSize was introduced. When the available space on a disk is less than or equal to this threshold, that disk will no longer be selected for generating new data files. The unit of this configuration item is bytes, and its value should be greater than 2GB, i.e., mount points with less than 2GB of available space will be skipped.
|
||||
To address this issue, starting from 3.1.1.0, a new configuration minDiskFreeSize was introduced. When the available space on a disk is less than or equal to this threshold, that disk will no longer be selected for generating new data files. The unit of this configuration item is bytes. For example, if its value is set to 2GB, mount points with less than 2GB of available space will be skipped.
|
||||
|
||||
Starting from version 3.3.2.0, a new configuration `disable_create_new_file` has been introduced to control the prohibition of generating new files on a certain mount point. The default value is `false`, which means new files can be generated on each mount point by default.
@ -0,0 +1,278 @@
|
|||
---
|
||||
sidebar_label: Security Configuration
|
||||
title: Security Configuration
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
import Image from '@theme/IdealImage';
|
||||
import imgEcosys from '../assets/tdengine-components-01.png';
|
||||
|
||||
## Background
|
||||
|
||||
The distributed and multi-component nature of TDengine makes its security configuration a concern in production systems. This document aims to explain the security issues of various TDengine components and different deployment methods, and provide deployment and configuration suggestions to support the security of user data.
|
||||
|
||||
## Components Involved in Security Configuration
|
||||
|
||||
TDengine includes multiple components:
|
||||
|
||||
- `taosd`: Core component.
|
||||
- `taosc`: Client library.
|
||||
- `taosAdapter`: REST API and WebSocket service.
|
||||
- `taosKeeper`: Monitoring service component.
|
||||
- `taosX`: Data pipeline and backup recovery component.
|
||||
- `taosxAgent`: Auxiliary component for external data source access.
|
||||
- `taosExplorer`: Web visualization management interface.
|
||||
|
||||
In addition to TDengine deployment and applications, there are also the following components:
|
||||
|
||||
- Applications that access and use the TDengine database through various connectors.
|
||||
- External data sources: Other data sources that access TDengine, such as MQTT, OPC, Kafka, etc.
|
||||
|
||||
The relationship between the components is as follows:
|
||||
|
||||
<figure>
|
||||
<Image img={imgEcosys} alt="TDengine ecosystem"/>
|
||||
<figcaption>TDengine ecosystem</figcaption>
|
||||
</figure>
|
||||
|
||||
## TDengine Security Settings
|
||||
|
||||
### `taosd`
|
||||
|
||||
The `taosd` cluster uses TCP connections based on its own protocol for data exchange, which has low risk, but the transmission process is not encrypted, so there is still some security risk.
|
||||
|
||||
Enabling compression may help with TCP data obfuscation.
|
||||
|
||||
- **compressMsgSize**: Whether to compress RPC messages. Integer, optional: -1: Do not compress any messages; 0: Compress all messages; N (N>0): Only compress messages larger than N bytes.
|
||||
|
||||
To ensure the traceability of database operations, it is recommended to enable the audit function.
|
||||
|
||||
- **audit**: Audit function switch, 0 is off, 1 is on. Default is on.
|
||||
- **auditInterval**: Reporting interval, in milliseconds. Default is 5000.
|
||||
- **auditCreateTable**: Whether to enable the audit function for creating sub-tables. 0 is off, 1 is on. Default is on.
|
||||
|
||||
To ensure the security of data files, database encryption can be enabled.
|
||||
|
||||
- **encryptAlgorithm**: Data encryption algorithm.
|
||||
- **encryptScope**: Data encryption scope.
|
||||
|
||||
Enabling the whitelist can restrict access addresses and further enhance privacy.
|
||||
|
||||
- **enableWhiteList**: Whitelist function switch, 0 is off, 1 is on; default is off.
|
||||
|
||||
### `taosc`
|
||||
|
||||
Users and other components use the native client library (`taosc`) and its own protocol to connect to `taosd`, which has low data security risk, but the transmission process is still not encrypted, so there is some security risk.
|
||||
|
||||
### `taosAdapter`
|
||||
|
||||
`taosAdapter` uses the native client library (`taosc`) and its own protocol to connect to `taosd`, and also supports RPC message compression, so there is no data security issue.
|
||||
|
||||
Applications and other components connect to `taosAdapter` through various language connectors. By default, the connection is based on HTTP 1.1 and is not encrypted. To ensure the security of data transmission between `taosAdapter` and other components, SSL encrypted connections need to be configured. Modify the following configuration in the `/etc/taos/taosadapter.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
enable = true
|
||||
certFile = "/path/to/certificate-file"
|
||||
keyFile = "/path/to/private-key"
|
||||
```
|
||||
|
||||
Configure HTTPS/SSL access in the connector to complete encrypted access.
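Once SSL is enabled on `taosAdapter`, clients only need to switch their connection URL to `https`. The following is an illustrative sketch, assuming the Python REST connector (`taosrest` from the taospy package) and a placeholder host name:

```python
import taosrest

# Hypothetical taosAdapter address; replace with your own FQDN and certificate setup.
conn = taosrest.connect(
    url="https://tdengine.example.com:6041",
    user="root",
    password="taosdata",
)
result = conn.query("SELECT SERVER_VERSION()")
print(result.data)
```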
|
||||
|
||||
To further enhance security, the whitelist function can be enabled, and configured in `taosd`, which also applies to the `taosAdapter` component.
|
||||
|
||||
### `taosX`
|
||||
|
||||
`taosX` includes REST API and gRPC interfaces, where the gRPC interface is used for `taos-agent` connections.
|
||||
|
||||
- The REST API interface is based on HTTP 1.1 and is not encrypted, posing a security risk.
|
||||
- The gRPC interface is based on HTTP 2 and is not encrypted, posing a security risk.
|
||||
|
||||
To ensure data security, it is recommended that the `taosX` API interface is limited to internal access only. Modify the following configuration in the `/etc/taos/taosx.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
listen = "127.0.0.1:6050"
|
||||
grpc = "127.0.0.1:6055"
|
||||
```
|
||||
|
||||
Starting from TDengine 3.3.6.0, `taosX` supports HTTPS connections. Add the following configuration in the `/etc/taos/taosx.toml` file:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
ssl_cert = "/path/to/server.pem"
|
||||
ssl_key = "/path/to/server.key"
|
||||
ssl_ca = "/path/to/ca.pem"
|
||||
```
|
||||
|
||||
And modify the API addresses in Explorer to use HTTPS connections:

```toml
# Local connection to taosX API
x_api = "https://127.0.0.1:6050"

# Public IP or domain address
grpc = "https://public.domain.name:6055"
```
|
||||
|
||||
### `taosExplorer`
|
||||
|
||||
Similar to the `taosAdapter` component, the `taosExplorer` component provides HTTP services for external access. Modify the following configuration in the `/etc/taos/explorer.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
# SSL certificate file
|
||||
certificate = "/path/to/ca.file"
|
||||
|
||||
# SSL certificate private key
|
||||
certificate_key = "/path/to/key.file"
|
||||
```
|
||||
|
||||
Then, use HTTPS to access Explorer, for example [https://192.168.12.34:6060](https://192.168.12.34:6060).
|
||||
|
||||
### `taosxAgent`
|
||||
|
||||
After `taosX` enables HTTPS, the `Agent` component and `taosX` use HTTP 2 encrypted connections, using Arrow-Flight RPC for data exchange. The transmission content is in binary format, and only registered `Agent` connections are valid, ensuring data security.
|
||||
|
||||
It is recommended to always enable HTTPS connections for `Agent` services in insecure or public network environments.
|
||||
|
||||
### `taosKeeper`
|
||||
|
||||
`taosKeeper` uses WebSocket connections to communicate with `taosAdapter`, writing monitoring information reported by other components into TDengine.
|
||||
|
||||
The current version of `taosKeeper` has security risks:
|
||||
|
||||
- The listening address cannot be restricted to the local machine. By default, it listens on all addresses on port 6043, posing a risk of network attacks. This risk can be ignored when deploying with Docker or Kubernetes without exposing the `taosKeeper` port.
|
||||
- The configuration file contains plaintext passwords, so the visibility of the configuration file needs to be reduced. In `/etc/taos/taoskeeper.toml`:
|
||||
|
||||
```toml
|
||||
[tdengine]
|
||||
host = "localhost"
|
||||
port = 6041
|
||||
username = "root"
|
||||
password = "taosdata"
|
||||
usessl = false
|
||||
```
|
||||
|
||||
## Security Enhancements
|
||||
|
||||
We recommend using TDengine within a local area network.
|
||||
|
||||
If you must provide access outside the local area network, consider adding the following configurations:
|
||||
|
||||
### Load Balancing
|
||||
|
||||
Use load balancing to provide `taosAdapter` services externally.
|
||||
|
||||
Take Nginx as an example to configure multi-node load balancing:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 6041;
|
||||
|
||||
location / {
|
||||
proxy_pass http://websocket;
|
||||
# Headers for websocket compatible
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
# Forwarded headers
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header X-Forwarded-Server $hostname;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
}
|
||||
}
|
||||
|
||||
upstream websocket {
|
||||
server 192.168.11.61:6041;
|
||||
server 192.168.11.62:6041;
|
||||
server 192.168.11.63:6041;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If the `taosAdapter` component is not configured with SSL secure connections, SSL needs to be configured to ensure secure access. SSL can be configured at a higher-level API Gateway or in Nginx; if you have stronger security requirements for the connections between components, you can configure SSL in all components. The Nginx configuration is as follows:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 443 ssl;
|
||||
|
||||
ssl_certificate /path/to/your/certificate.crt;
|
||||
ssl_certificate_key /path/to/your/private.key;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Security Gateway
|
||||
|
||||
In modern internet production systems, the use of security gateways is also very common. [traefik](https://traefik.io/) is a good open-source choice. We take traefik as an example to explain the security configuration in the API gateway.
|
||||
|
||||
Traefik provides various security configurations through middleware, including:
|
||||
|
||||
1. Authentication: Traefik provides multiple authentication methods such as BasicAuth, DigestAuth, custom authentication middleware, and OAuth 2.0.
|
||||
2. IP Whitelist: Restrict the allowed client IPs.
|
||||
3. Rate Limit: Control the number of requests sent to the service.
|
||||
4. Custom Headers: Add configurations such as `allowedHosts` through custom headers to improve security.
|
||||
|
||||
A common middleware example is as follows:
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)"
|
||||
- "traefik.http.routers.tdengine.entrypoints=https"
|
||||
- "traefik.http.routers.tdengine.tls.certresolver=default"
|
||||
- "traefik.http.routers.tdengine.service=tdengine"
|
||||
- "traefik.http.services.tdengine.loadbalancer.server.port=6041"
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
- "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue"
|
||||
- "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true"
|
||||
- "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
|
||||
- "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist"
|
||||
```
|
||||
|
||||
The above example completes the following configurations:
|
||||
|
||||
- TLS authentication uses the `default` configuration, which can be configured in the configuration file or traefik startup parameters, as follows:
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: "traefik:v2.3.2"
|
||||
hostname: "traefik"
|
||||
networks:
|
||||
- traefik
|
||||
command:
|
||||
- "--log.level=INFO"
|
||||
- "--api.insecure=true"
|
||||
- "--providers.docker=true"
|
||||
- "--providers.docker.exposedbydefault=false"
|
||||
- "--providers.docker.swarmmode=true"
|
||||
- "--providers.docker.network=traefik"
|
||||
- "--providers.docker.watch=true"
|
||||
- "--entrypoints.http.address=:80"
|
||||
- "--entrypoints.https.address=:443"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge=true"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.provider=alidns"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com"
|
||||
- "--certificatesresolvers.default.acme.email=linhehuo@gmail.com"
|
||||
- "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json"
|
||||
```
|
||||
|
||||
The above startup parameters configure the `default` TLS certificate resolver and automatic ACME authentication (automatic certificate application and renewal).
|
||||
|
||||
- Middleware `redirect-to-https`: Configure redirection from HTTP to HTTPS, forcing the use of secure connections.
|
||||
|
||||
```yaml
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
```
|
||||
|
||||
- Middleware `check-header`: Configure custom header checks. External access must add custom headers and match header values to prevent unauthorized access. This is a very simple and effective security mechanism when providing API access.
|
||||
- Middleware `tdengine-ipwhitelist`: Configure IP whitelist. Only allow specified IPs to access, using CIDR routing rules for matching, and can set internal and external IP addresses.
|
||||
|
||||
## Summary
|
||||
|
||||
Data security is a key indicator of the TDengine product. These measures are designed to protect TDengine deployments from unauthorized access and data breaches while maintaining performance and functionality. However, the security configuration of TDengine itself is not the only guarantee in production. It is more important to develop solutions that better match customer needs in combination with the user's business system.
@ -0,0 +1,81 @@
|
|||
---
|
||||
sidebar_label: Perspective
|
||||
title: Integration With Perspective
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
Perspective is an open-source and powerful data visualization library developed by [Prospective.co](https://www.perspective.co/). Leveraging the technologies of WebAssembly and Web Workers, it enables interactive real-time data analysis in web applications and provides high-performance visualization capabilities on the browser side. With its help, developers can build dashboards, charts, etc. that update in real time, and users can easily interact with the data, filtering, sorting, and exploring it as needed. It boasts high flexibility, adapting to various data formats and business scenarios. It is also fast, ensuring smooth interaction even when dealing with large-scale data. Moreover, it has excellent usability, allowing both beginners and professional developers to quickly build visualization interfaces.
|
||||
|
||||
In terms of data connection, Perspective, through the Python connector of TDengine, perfectly supports TDengine data sources. It can efficiently retrieve various types of data, such as massive time-series data, from TDengine. Additionally, it offers real-time functions including the display of complex charts, in-depth statistical analysis, and trend prediction, helping users gain insights into the value of the data and providing strong support for decision-making. It is an ideal choice for building applications with high requirements for real-time data visualization and analysis.
|
||||
|
||||

|
||||
|
||||
## Prerequisites
|
||||
|
||||
Perform the following installation operations in the Linux system:
|
||||
|
||||
- TDengine is installed and running normally (both Enterprise and Community versions are available).
|
||||
- taosAdapter is running normally, refer to [taosAdapter Reference](../../../tdengine-reference/components/taosadapter/).
|
||||
- Python version 3.10 or higher has been installed (if not installed, please refer to [Python Installation](https://docs.python.org/)).
|
||||
- Download or clone the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project. After entering the root directory of the project, run the "install.sh" script to download and install the TDengine client library and related dependencies locally.
|
||||
|
||||
## Visualize data
|
||||
|
||||
**Step 1**, Run the "run.sh" script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project to start the Perspective service. This service will retrieve data from the TDengine database every 300 milliseconds and transmit the data in a streaming form to the web-based `Perspective Viewer`.
|
||||
|
||||
```shell
|
||||
sh run.sh
|
||||
```
|
||||
|
||||
**Step 2**, Start a static web service, then open the prsp-viewer.html page in the browser to display the visualized data.

```shell
python -m http.server 8081
```
|
||||
|
||||
The effect presented after accessing the web page through the browser is shown in the following figure:
|
||||
|
||||

|
||||
|
||||
## Instructions for use
|
||||
|
||||
### Write Data to TDengine
|
||||
|
||||
The `producer.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project can periodically insert data into the TDengine database with the help of the TDengine Python connector. This script will generate random data and insert it into the database, thus simulating the process of writing real-time data. The specific execution steps are as follows:
|
||||
|
||||
1. Establish a connection to TDengine.
|
||||
2. Create the `power` database and the `meters` table.
|
||||
3. Generate random data every 300 milliseconds and write it into the TDengine database.
|
||||
|
||||
For detailed instructions on writing using the Python connector, please refer to [Python Parameter Binding](../../../tdengine-reference/client-libraries/python/#parameter-binding).
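As a rough sketch of what such a producer does (assuming the `taos` Python connector and a simplified `meters` schema; the actual script in the project may differ), the write loop looks roughly like this:

```python
import random
import time

import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("CREATE DATABASE IF NOT EXISTS power")
conn.execute(
    "CREATE TABLE IF NOT EXISTS power.meters "
    "(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
    "TAGS (groupid INT, location VARCHAR(24))"
)

while True:
    # Insert one random row into a sub-table every 300 milliseconds.
    conn.execute(
        "INSERT INTO power.d0 USING power.meters TAGS (1, 'California.SanFrancisco') "
        f"VALUES (NOW, {random.uniform(5, 15):.2f}, {random.randint(210, 230)}, {random.uniform(0, 1):.2f})"
    )
    time.sleep(0.3)
```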
|
||||
|
||||
### Load Data from TDengine
|
||||
|
||||
The `perspective_server.py` script in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project will start a Perspective server. This server will read data from TDengine and stream the data to a Perspective table via the Tornado WebSocket.
|
||||
|
||||
1. Start a Perspective server.
|
||||
2. Establish a connection to TDengine.
|
||||
3. Create a Perspective table (the table structure needs to match the type of the table in the TDengine database).
|
||||
4. Call the `Tornado.PeriodicCallback` function to start a scheduled task, thereby achieving the update of the data in the Perspective table. The sample code is as follows:
|
||||
|
||||
```python
|
||||
{{#include docs/examples/perspective/perspective_server.py:perspective_server}}
|
||||
```
|
||||
|
||||
### HTML Page Configuration
|
||||
|
||||
The `prsp-viewer.html` file in the root directory of the [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) project embeds the `Perspective Viewer` into the HTML page. It connects to the Perspective server via a WebSocket and displays real-time data according to the chart configuration.
|
||||
|
||||
- Configure the displayed charts and the rules for data analysis.
|
||||
- Establish a Websocket connection with the Perspective server.
|
||||
- Import the Perspective library, connect to the Perspective server via a WebSocket, and load the `meters_values` table to display dynamic data.
|
||||
|
||||
```html
|
||||
{{#include docs/examples/perspective/prsp-viewer.html:perspective_viewer}}
|
||||
```
|
||||
|
||||
## Reference Materials
|
||||
|
||||
- [Perspective Docs](https://perspective.finos.org/)
|
||||
- [TDengine Python Connector](../../../tdengine-reference/client-libraries/python/)
|
Binary file not shown. (new image asset, 50 KiB)
Binary file not shown. (new image asset, 62 KiB)
@ -170,7 +170,7 @@ The effective value of charset is UTF-8.
|
|||
|tempDir | |Not supported |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
||||
|minimalDataDirGB | |Not supported |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
||||
|minimalTmpDirGB | |Not supported |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
||||
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
||||
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-2199023255552, default value 52428800; Enterprise parameter|
|
||||
|s3MigrateIntervalSec|After 3.3.4.3|Supported, effective immediately |Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
||||
|s3MigrateEnabled |After 3.3.4.3|Supported, effective immediately |Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
||||
|s3Accesskey |After 3.3.4.3|Supported, effective after restart|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
@ -246,6 +246,9 @@ The effective value of charset is UTF-8.
|
|||
| streamSinkDataRate | |Supported, effective after restart| Internal parameter, used to control the write speed of stream computing results |
|
||||
| streamNotifyMessageSize | After 3.3.6.0 | Not supported | Internal parameter, controls the message size for event notifications, default value is 8192 |
|
||||
| streamNotifyFrameSize | After 3.3.6.0 | Not supported | Internal parameter, controls the underlying frame size when sending event notification messages, default value is 256 |
|
||||
| adapterFqdn | After 3.3.6.0 | Not supported | Internal parameter, the address of the taosAdapter service, default value is localhost |
| adapterPort | After 3.3.6.0 | Not supported | Internal parameter, the port of the taosAdapter service, default value is 6041 |
| adapterToken | After 3.3.6.0 | Not supported | Internal parameter, the string obtained by Base64-encoding `{username}:{password}`, default value is `cm9vdDp0YW9zZGF0YQ==` |
|
||||
|
||||
### Log Related
@ -379,6 +379,7 @@ Specify the configuration parameters for tag and data columns in `super_tables`
|
|||
|
||||
`query_times` specifies the number of times to run the query, numeric type.
|
||||
|
||||
**Note: Starting from version 3.3.5.6, configuring both `specified_table_query` and `super_table_query` in the same JSON file is no longer supported.**
|
||||
|
||||
For other common parameters, see [General Configuration Parameters](#general-configuration-parameters)
@ -508,6 +509,15 @@ Note: Data types in the taosBenchmark configuration file must be in lowercase to
|
|||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>queryStb.json</summary>
|
||||
|
||||
```json
|
||||
{{#include /TDengine/tools/taos-tools/example/queryStb.json}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
#### Subscription Example
|
||||
|
||||
<details>
@ -43,6 +43,7 @@ In TDengine, the following data types can be used in the data model of basic tab
|
|||
| 16 | VARCHAR | Custom | Alias for BINARY type |
|
||||
| 17 | GEOMETRY | Custom | Geometry type, supported starting from version 3.1.0.0 |
|
||||
| 18 | VARBINARY | Custom | Variable-length binary data, supported starting from version 3.1.1.0 |
|
||||
| 19 | DECIMAL | 8 or 16 | High-precision numeric type. The range of values depends on the precision and scale specified in the type. Supported starting from version 3.3.6. See the description below. |
|
||||
|
||||
:::note
@ -61,6 +62,18 @@ In TDengine, the following data types can be used in the data model of basic tab
|
|||
- VARBINARY is a data type for storing binary data, with a maximum length of 65,517 bytes for data columns and 16,382 bytes for label columns. Binary data can be written via SQL or schemaless methods (needs to be converted to a string starting with \x), or through stmt methods (can use binary directly). Displayed as hexadecimal starting with \x.
|
||||
|
||||
:::
|
||||
### DECIMAL Data Type
|
||||
|
||||
The `DECIMAL` data type is used for high-precision numeric storage and is supported starting from version 3.3.6. The definition syntax is: `DECIMAL(18, 2)`, `DECIMAL(38, 10)`, where two parameters must be specified: `precision` and `scale`. `Precision` refers to the maximum number of significant digits supported, and `scale` refers to the maximum number of decimal places. For example, `DECIMAL(8, 4)` represents a range of `[-9999.9999, 9999.9999]`. When defining the `DECIMAL` data type, the range of `precision` is `[1, 38]`, and the range of `scale` is `[0, precision]`. If `scale` is 0, it represents integers only. You can also omit `scale`, in which case it defaults to 0. For example, `DECIMAL(18)` is equivalent to `DECIMAL(18, 0)`.
|
||||
|
||||
When the `precision` value is less than or equal to 18, 8 bytes of storage (DECIMAL64) are used internally. When the `precision` is in the range `(18, 38]`, 16 bytes of storage (DECIMAL) are used. When writing `DECIMAL` type data in SQL, numeric values can be written directly. If the value exceeds the maximum representable value for the type, a `DECIMAL_OVERFLOW` error will be reported. If the value does not exceed the maximum representable value but the number of decimal places exceeds the `scale`, it will be automatically rounded. For example, if the type is defined as `DECIMAL(10, 2)` and the value `10.987` is written, the actual stored value will be `10.99`.
|
||||
|
||||
The `DECIMAL` type only supports regular columns and does not currently support tag columns. The `DECIMAL` type supports SQL-based writes only and does not currently support `stmt` or schemaless writes.
|
||||
|
||||
When performing operations between integer types and the `DECIMAL` type, the integer type is converted to the `DECIMAL` type before the calculation. When the `DECIMAL` type is involved in calculations with `DOUBLE`, `FLOAT`, `VARCHAR`, or `NCHAR` types, it is converted to `DOUBLE` type for computation.
|
||||
|
||||
When querying `DECIMAL` type expressions, if the intermediate result of the calculation exceeds the maximum value that the current type can represent, a `DECIMAL_OVERFLOW` error is reported.
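The definition and rounding rules above can be tried directly with plain SQL; the following is a small sketch using the Python connector and hypothetical database/table names:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("CREATE DATABASE IF NOT EXISTS dec_demo")
# DECIMAL(10, 2): up to 10 significant digits, 2 of them after the decimal point.
conn.execute("CREATE TABLE dec_demo.d1 (ts TIMESTAMP, amount DECIMAL(10, 2))")
# 10.987 has more decimal places than the scale allows, so it is rounded to 10.99.
conn.execute("INSERT INTO dec_demo.d1 VALUES (NOW, 10.987)")
print(conn.query("SELECT amount FROM dec_demo.d1").fetch_all())  # expected value: 10.99
```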
|
||||
|
||||
|
||||
## Constants
@ -1186,6 +1186,7 @@ CAST(expr AS type_name)
|
|||
1) Invalid character situations when converting string types to numeric types, e.g., "a" might convert to 0, but will not throw an error.
|
||||
2) When converting to numeric types, if the value exceeds the range that `type_name` can represent, it will overflow, but will not throw an error.
|
||||
3) When converting to string types, if the converted length exceeds the length specified in `type_name`, it will be truncated, but will not throw an error.
|
||||
- The DECIMAL type does not support conversion to or from JSON, VARBINARY, or GEOMETRY types.
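The silent-conversion behavior described in the notes above can be observed directly; the snippet below is a sketch only, using the Python connector and constant expressions:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
# "a" cannot be parsed as a number, so the cast yields 0 instead of raising an error;
# casting to a short VARCHAR truncates the converted string per rule 3 above.
result = conn.query("SELECT CAST('a' AS INT), CAST(123456 AS VARCHAR(3))")
print(result.fetch_all())
```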
#### TO_ISO8601
@ -1691,12 +1692,14 @@ AVG(expr)
|
|||
|
||||
**Function Description**: Calculates the average value of the specified field.
|
||||
|
||||
**Return Data Type**: DOUBLE.
|
||||
**Return Data Type**: DOUBLE, DECIMAL.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Description**: When the input type is DECIMAL, the output type is also DECIMAL. The precision and scale of the output conform to the rules described in the data type section. The result type is obtained by dividing the SUM type by UINT64. If the SUM result causes a DECIMAL type overflow, a DECIMAL OVERFLOW error is reported.
|
||||
|
||||
### COUNT
|
||||
|
||||
```sql
@ -1847,12 +1850,14 @@ SUM(expr)
|
|||
|
||||
**Function Description**: Calculates the sum of a column in a table/supertable.
|
||||
|
||||
**Return Data Type**: DOUBLE, BIGINT.
|
||||
**Return Data Type**: DOUBLE, BIGINT, DECIMAL.
|
||||
|
||||
**Applicable Data Types**: Numeric types.
|
||||
|
||||
**Applicable to**: Tables and supertables.
|
||||
|
||||
**Description**: When the input type is DECIMAL, the output type is DECIMAL(38, scale), where precision is the maximum value currently supported, and scale is the scale of the input type. If the SUM result overflows, a DECIMAL OVERFLOW error is reported.
|
||||
|
||||
### HYPERLOGLOG
|
||||
|
||||
```sql
@ -2254,6 +2259,7 @@ ignore_null_values: {
|
|||
- INTERP is used to obtain the record value of a specified column at the specified time slice. It has a dedicated syntax (interp_clause) when used. For syntax introduction, see [reference link](../query-data/#interp).
|
||||
- When there is no row data that meets the conditions at the specified time slice, the INTERP function will interpolate according to the settings of the [FILL](../time-series-extensions/#fill-clause) parameter.
|
||||
- When INTERP is applied to a supertable, it will sort all the subtable data under that supertable by primary key column and perform interpolation calculations, and can also be used with PARTITION BY tbname to force the results to a single timeline.
|
||||
- When using INTERP with FILL PREV/NEXT/NEAR modes, its behavior differs from window queries. If data exists at the slice, no FILL operation will be performed, even if the current value is NULL.
|
||||
- INTERP can be used with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported from version 3.0.2.0).
|
||||
- INTERP can be used with the pseudocolumn _isfilled to display whether the return result is from the original record or generated by the interpolation algorithm (supported from version 3.0.3.0).
|
||||
- INTERP can only use the pseudocolumn `_irowts_origin` when using FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported from version 3.3.4.9.
@ -84,10 +84,10 @@ The FILL statement specifies the filling mode when data is missing in a window i
|
|||
|
||||
1. No filling: NONE (default filling mode).
|
||||
2. VALUE filling: Fixed value filling, where the fill value must be specified. For example: FILL(VALUE, 1.23). Note that the final fill value is determined by the type of the corresponding column; for FILL(VALUE, 1.23), if the corresponding column is of INT type, the fill value is 1. If multiple columns in the query list need FILL, each such column must specify a VALUE, for example `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that only ordinary columns in the SELECT expression need a FILL VALUE; expressions such as `_wstart`, `_wstart+1a`, `now`, `1+1` and the partition key (such as tbname) used with PARTITION BY do not need one, but expressions such as `timediff(last(ts), _wstart)` do need a VALUE.
|
||||
3. PREV filling: Fill data using the previous non-NULL value. For example: FILL(PREV).
|
||||
3. PREV filling: Fill data using the previous value. For example: FILL(PREV).
|
||||
4. NULL filling: Fill data with NULL. For example: FILL(NULL).
|
||||
5. LINEAR filling: Perform linear interpolation filling based on the nearest non-NULL values before and after. For example: FILL(LINEAR).
|
||||
6. NEXT filling: Fill data using the next non-NULL value. For example: FILL(NEXT).
|
||||
6. NEXT filling: Fill data using the next value. For example: FILL(NEXT).
|
||||
|
||||
Among these filling modes, except for the NONE mode which does not fill by default, other modes will be ignored if there is no data in the entire query time range, resulting in no fill data and an empty query result. This behavior is reasonable under some modes (PREV, NEXT, LINEAR) because no data means no fill value can be generated. For other modes (NULL, VALUE), theoretically, fill values can be generated, and whether to output fill values depends on the application's needs. To meet the needs of applications that require forced filling of data or NULL, without breaking the compatibility of existing filling modes, two new filling modes have been added starting from version 3.0.3.0:
@ -112,7 +112,7 @@ The differences between NULL, NULL_F, VALUE, VALUE_F filling modes for different
|
|||
|
||||
Time windows can be divided into sliding time windows and tumbling time windows.
|
||||
|
||||
The INTERVAL clause is used to generate windows of equal time periods, and SLIDING is used to specify the time the window slides forward. Each executed query is a time window, and the time window slides forward as time flows. When defining continuous queries, it is necessary to specify the size of the time window (time window) and the forward sliding times for each execution. As shown, [t0s, t0e], [t1s, t1e], [t2s, t2e] are the time window ranges for three continuous queries, and the sliding time range is indicated by sliding time. Query filtering, aggregation, and other operations are performed independently for each time window. When SLIDING is equal to INTERVAL, the sliding window becomes a tumbling window.
|
||||
The INTERVAL clause is used to generate windows of equal time periods, and SLIDING is used to specify the time the window slides forward. Each executed query is a time window, and the time window slides forward as time flows. When defining continuous queries, it is necessary to specify the size of the time window (time window) and the forward sliding times for each execution. As shown, [t0s, t0e], [t1s, t1e], [t2s, t2e] are the time window ranges for three continuous queries, and the sliding time range is indicated by sliding time. Query filtering, aggregation, and other operations are performed independently for each time window. When SLIDING is equal to INTERVAL, the sliding window becomes a tumbling window. By default, windows begin at Unix time 0 (1970-01-01 00:00:00 UTC). If interval_offset is specified, the windows start from "Unix time 0 + interval_offset".
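For example (a sketch only, reusing the `power.meters` table that appears in other examples), a one-day window with a two-hour offset makes each daily window start at 02:00 UTC instead of 00:00 UTC:

```python
import taos

conn = taos.connect(host="localhost", user="root", password="taosdata")
# INTERVAL(1d, 2h): 1-day windows whose start is shifted by 2 hours from Unix time 0.
result = conn.query(
    "SELECT _wstart, _wend, AVG(current) FROM power.meters INTERVAL(1d, 2h)"
)
for row in result.fetch_all():
    print(row)
```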
|
||||
|
||||
<figure>
|
||||
<Image img={imgStep01} alt=""/>
@ -11,7 +11,7 @@ import imgStream from './assets/stream-processing-01.png';
|
|||
```sql
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery [notification_definition]
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
@ -164,6 +164,7 @@ For non-window computations, the trigger of stream computing is real-time; for w
|
|||
2. WINDOW_CLOSE: Triggered when the window closes (window closure is determined by event time, can be used in conjunction with watermark)
|
||||
3. MAX_DELAY time: Trigger computation when the window closes. If the window has not closed and the elapsed time exceeds the time specified by MAX_DELAY, computation is also triggered.
|
||||
4. FORCE_WINDOW_CLOSE: Based on the current time of the operating system, only compute and push the results of the currently closed window. The window is only computed once at the moment of closure and will not be recalculated subsequently. This mode currently only supports INTERVAL windows (does not support sliding); FILL_HISTORY must be 0, IGNORE EXPIRED must be 1, IGNORE UPDATE must be 1; FILL only supports PREV, NULL, NONE, VALUE.
|
||||
5. CONTINUOUS_WINDOW_CLOSE: Results are output when the window is closed. Modifying or deleting data does not immediately trigger a recalculation. Instead, periodic recalculations are performed every rec_time_val duration. If rec_time_val is not specified, the recalculation period is 60 minutes. If the recalculation time exceeds rec_time_val, the next recalculation will be automatically initiated after the current one is completed. Currently, this mode only supports INTERVAL windows. If the FILL clause is used, relevant information of the adapter needs to be configured, including adapterFqdn, adapterPort, and adapterToken. The adapterToken is a string obtained by Base64-encoding `{username}:{password}`. For example, after encoding `root:taosdata`, the result is `cm9vdDp0YW9zZGF0YQ==`.
|
||||
|
||||
Since the closure of the window is determined by event time, if the event stream is interrupted or continuously delayed, the event time cannot be updated, which may result in not obtaining the latest computation results.
@ -531,6 +532,24 @@ These fields are present only when "windowType" is "Count".
|
|||
#### Fields for Window Invalidation
|
||||
|
||||
Due to scenarios such as data disorder, updates, or deletions during stream computing, windows that have already been generated might be removed or their results need to be recalculated. In such cases, a notification with the eventType "WINDOW_INVALIDATION" is sent to inform which windows have been invalidated.
|
||||
|
||||
For events with "eventType" as "WINDOW_INVALIDATION", the following fields are included:
|
||||
1. "windowStart": A long integer timestamp representing the start time of the window.
|
||||
1. "windowEnd": A long integer timestamp representing the end time of the window.
|
||||
|
||||
## Support for Virtual Tables in Stream Computing
|
||||
|
||||
Starting with v3.3.6.0, stream computing can use virtual tables—including virtual regular tables, virtual sub-tables, and virtual super tables—as data sources for computation. The syntax is identical to that for non‑virtual tables.
|
||||
|
||||
However, because the behavior of virtual tables differs from that of non‑virtual tables, the following restrictions apply when using stream computing:
|
||||
|
||||
1. The schema of virtual regular tables/virtual sub-tables involved in stream computing cannot be modified.
|
||||
1. During stream computing, if the data source corresponding to a column in a virtual table is changed, the stream computation will not pick up the change; it will still read from the old data source.
|
||||
1. During stream computing, if the original table corresponding to a column in a virtual table is deleted and later a new table with the same name and a column with the same name is created, the stream computation will not read data from the new table.
|
||||
1. The watermark for stream computing must be 0; otherwise, an error will occur during creation.
|
||||
1. If the data source for stream computing is a virtual super table, sub-tables that are added after the stream computing task starts will not participate in the computation.
|
||||
1. The timestamps of different underlying tables in a virtual table may not be completely consistent; merging the data might produce null values, and interpolation is currently not supported.
|
||||
1. Out-of-order data, updates, or deletions are not handled. In other words, when creating a stream, you cannot specify `ignore update 0` or `ignore expired 0`; otherwise, an error will be reported.
|
||||
1. Historical data computation is not supported. That is, when creating a stream, you cannot specify `fill_history 1`; otherwise, an error will be reported.
|
||||
1. The trigger modes MAX_DELAY, CONTINUOUS_WINDOW_CLOSE and FORCE_WINDOW_CLOSE are not supported.
|
||||
1. The COUNT_WINDOW type is not supported.
@ -36,6 +36,7 @@ In this document, it specifically refers to the internal levels of the second-le
|
|||
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| decimal | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
|
||||
## SQL Syntax
@ -830,6 +830,12 @@ This section introduces APIs that are all synchronous interfaces. After being ca
|
|||
- res: [Input] Result set.
|
||||
- **Return Value**: Non-`NULL`: successful, returns a pointer to a TAOS_FIELD structure, each element representing the metadata of a column. `NULL`: failure.
|
||||
|
||||
- `TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res)`
|
||||
- **Interface Description**: Retrieves the attributes of each column in the query result set (column name, data type, column length). Used in conjunction with `taos_num_fields()`, it can be used to parse the data of a tuple (a row) returned by `taos_fetch_row()`. In addition to the basic information provided by TAOS_FIELD, TAOS_FIELD_E also includes `precision` and `scale` information for the data type.
|
||||
- **Parameter Description**:
|
||||
- res: [Input] Result set.
|
||||
- **Return Value**: Non-`NULL`: Success, returns a pointer to a TAOS_FIELD_E structure, where each element represents the metadata of a column. `NULL`: Failure.
|
||||
|
||||
- `void taos_stop_query(TAOS_RES *res)`
|
||||
- **Interface Description**: Stops the execution of the current query.
|
||||
- **Parameter Description**:
@ -121,6 +121,7 @@ Please refer to the specific error codes:
|
|||
| 0x2378 | consumer create error | Data subscription creation failed, check the error information and taos log for troubleshooting. |
|
||||
| 0x2379 | seek offset must not be a negative number | The seek interface parameter must not be negative, use the correct parameters. |
|
||||
| 0x237a | vGroup not found in result set | VGroup not assigned to the current consumer, due to the Rebalance mechanism causing the Consumer and VGroup to be unbound. |
|
||||
| 0x2390 | background thread write error in Efficient Writing | In the event of an efficient background thread write error, you can stop writing and rebuild the connection. |
|
||||
|
||||
- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
@ -320,7 +321,15 @@ The configuration parameters in properties are as follows:
|
|||
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION: Disable SSL certificate validation. Effective only when using WebSocket connections. true: enabled, false: not enabled. Default is false.
|
||||
- TSDBDriver.PROPERTY_KEY_APP_NAME: App name, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is java.
|
||||
- TSDBDriver.PROPERTY_KEY_APP_IP: App IP, can be used for display in the `show connections` query result. Effective only when using WebSocket connections. Default value is empty.
|
||||
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_ASYNC_WRITE: Efficient Writing mode. Currently, only the `stmt` method is supported. Effective only when using WebSocket connections. Default value is empty, meaning Efficient Writing mode is not enabled.
|
||||
- TSDBDriver.PROPERTY_KEY_BACKEND_WRITE_THREAD_NUM: In Efficient Writing mode, this refers to the number of background write threads. Effective only when using WebSocket connections. Default value is 10.
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_SIZE_BY_ROW: In Efficient Writing mode, this is the batch size for writing data, measured in rows. Effective only when using WebSocket connections. Default value is 1000.
|
||||
- TSDBDriver.PROPERTY_KEY_CACHE_SIZE_BY_ROW: In Efficient Writing mode, this is the cache size, measured in rows. Effective only when using WebSocket connections. Default value is 10000.
|
||||
- TSDBDriver.PROPERTY_KEY_COPY_DATA: In Efficient Writing mode, this determines whether to copy the binary data passed by the application through the `addBatch` method. Effective only when using WebSocket connections. Default value is false.
|
||||
- TSDBDriver.PROPERTY_KEY_STRICT_CHECK: In Efficient Writing mode, this determines whether to validate the length of table names and variable-length data types. Effective only when using WebSocket connections. Default value is false.
|
||||
- TSDBDriver.PROPERTY_KEY_RETRY_TIMES: In Efficient Writing mode, this is the number of retry attempts for failed write operations. Effective only when using WebSocket connections. Default value is 3.
|
||||
|
||||
Additionally, for native JDBC connections, other parameters such as log level and SQL length can be specified by specifying the URL and Properties.
|
||||
|
||||
**Priority of Configuration Parameters**
@ -25,6 +25,7 @@ Support all platforms that can run Node.js.
|
|||
|
||||
| Node.js Connector Version | Major Changes | TDengine Version |
|
||||
| ------------------------- | ------------------------------------------------------------------------ | --------------------------- |
|
||||
| 3.1.5 | Password supports special characters. | - |
|
||||
| 3.1.4 | Modified the readme.| - |
|
||||
| 3.1.3 | Upgraded the es5-ext version to address vulnerabilities in the lower version. | - |
|
||||
| 3.1.2 | Optimized data protocol and parsing, significantly improved performance. | - |
@ -559,10 +559,13 @@ This document details the server error codes that may be encountered when using
|
|||
|
||||
## virtual table
|
||||
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|
||||
|------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------|
|
||||
| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling |
|
||||
| 0x80006201 | Virtual table scan invalid downstream operator type | The incorrect execution plan generated causes the downstream operator type of the virtual table scan operator to be incorrect. | Check error logs, contact development for handling |
|
||||
| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. | Check error logs, contact development for handling |
|
||||
| 0x80006203 | Create virtual child table must use virtual super table | Create virtual child table using non-virtual super table | create virtual child table using virtual super table |
|
||||
| 0x80006204 | Virtual table not support decimal type | Create virtual table using decimal type | create virtual table without using decimal type |
|
||||
| Error Code | Description | Possible Error Scenarios or Reasons | Recommended Actions for Users |
|
||||
|------------|---------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------|
|
||||
| 0x80006200 | Virtual table scan internal error | virtual table scan operator internal error, generally does not occur | Check error logs, contact development for handling |
|
||||
| 0x80006201 | Virtual table scan invalid downstream operator type | The incorrect execution plan generated causes the downstream operator type of the virtual table scan operator to be incorrect. | Check error logs, contact development for handling |
|
||||
| 0x80006202 | Virtual table prim timestamp column should not has ref | The timestamp primary key column of a virtual table should not have a data source. If it does, this error will occur during subsequent queries on the virtual table. | Check error logs, contact development for handling |
|
||||
| 0x80006203 | Create virtual child table must use virtual super table | Create virtual child table using non-virtual super table | create virtual child table using virtual super table |
|
||||
| 0x80006204 | Virtual table not support decimal type | Create virtual table using decimal type | create virtual table without using decimal type |
|
||||
| 0x80006205 | Virtual table not support in STMT query and STMT insert | Use virtual table in stmt query and stmt insert | do not use virtual table in stmt query and insert |
|
||||
| 0x80006206 | Virtual table not support in Topic | Use virtual table in topic | do not use virtual table in topic |
|
||||
| 0x80006206 | Virtual super table query not support origin table from different databases | The origin tables of a virtual super table's child tables come from different databases | Make sure all origin tables of a virtual super table's child tables come from the same database |
@ -9,6 +9,7 @@ TARGETS = connect_example \
|
|||
with_reqid_demo \
|
||||
sml_insert_demo \
|
||||
stmt_insert_demo \
|
||||
stmt2_insert_demo \
|
||||
tmq_demo
|
||||
|
||||
SOURCES = connect_example.c \
@ -18,6 +19,7 @@ SOURCES = connect_example.c \
|
|||
with_reqid_demo.c \
|
||||
sml_insert_demo.c \
|
||||
stmt_insert_demo.c \
|
||||
stmt2_insert_demo.c \
|
||||
tmq_demo.c
|
||||
|
||||
LIBS = -ltaos -lpthread
@ -31,4 +33,4 @@ $(TARGETS):
|
|||
$(CC) $(CFLAGS) -o $@ $(wildcard $(@F).c) $(LIBS)
|
||||
|
||||
clean:
|
||||
rm -f $(TARGETS)
|
||||
rm -f $(TARGETS)
@ -0,0 +1,204 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
// TAOS standard API example. The same syntax as MySQL, but only a subset
|
||||
// to compile: gcc -o stmt2_insert_demo stmt2_insert_demo.c -ltaos
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/time.h>
|
||||
#include "taos.h"
|
||||
|
||||
#define NUM_OF_SUB_TABLES 10
|
||||
#define NUM_OF_ROWS 10
|
||||
|
||||
/**
|
||||
* @brief Executes an SQL query and checks for errors.
|
||||
*
|
||||
* @param taos Pointer to TAOS connection.
|
||||
* @param sql SQL query string.
|
||||
*/
|
||||
void executeSQL(TAOS *taos, const char *sql) {
|
||||
TAOS_RES *res = taos_query(taos, sql);
|
||||
int code = taos_errno(res);
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "Error: %s\n", taos_errstr(res));
|
||||
taos_free_result(res);
|
||||
taos_close(taos);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
taos_free_result(res);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Checks return status and exits if an error occurs.
|
||||
*
|
||||
* @param stmt2 Pointer to TAOS_STMT2.
|
||||
* @param code Error code.
|
||||
* @param msg Error message prefix.
|
||||
*/
|
||||
void checkErrorCode(TAOS_STMT2 *stmt2, int code, const char *msg) {
|
||||
if (code != 0) {
|
||||
fprintf(stderr, "%s. Code: %d, Error: %s\n", msg, code, taos_stmt2_error(stmt2));
|
||||
taos_stmt2_close(stmt2);
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Prepares data bindings for batch insertion.
|
||||
*
|
||||
* @param table_name Pointer to store allocated table names.
|
||||
* @param tags Pointer to store allocated tag bindings.
|
||||
* @param params Pointer to store allocated parameter bindings.
|
||||
*/
|
||||
void prepareBindData(char ***table_name, TAOS_STMT2_BIND ***tags, TAOS_STMT2_BIND ***params) {
|
||||
*table_name = (char **)malloc(NUM_OF_SUB_TABLES * sizeof(char *));
|
||||
*tags = (TAOS_STMT2_BIND **)malloc(NUM_OF_SUB_TABLES * sizeof(TAOS_STMT2_BIND *));
|
||||
*params = (TAOS_STMT2_BIND **)malloc(NUM_OF_SUB_TABLES * sizeof(TAOS_STMT2_BIND *));
|
||||
|
||||
for (int i = 0; i < NUM_OF_SUB_TABLES; i++) {
|
||||
// Allocate and assign table name
|
||||
(*table_name)[i] = (char *)malloc(20 * sizeof(char));
|
||||
sprintf((*table_name)[i], "d_bind_%d", i);
|
||||
|
||||
// Allocate memory for tags data
|
||||
int *gid = (int *)malloc(sizeof(int));
|
||||
int *gid_len = (int *)malloc(sizeof(int));
|
||||
*gid = i;
|
||||
*gid_len = sizeof(int);
|
||||
|
||||
char *location = (char *)malloc(20 * sizeof(char));
|
||||
int *location_len = (int *)malloc(sizeof(int));
|
||||
*location_len = sprintf(location, "location_%d", i);
|
||||
|
||||
(*tags)[i] = (TAOS_STMT2_BIND *)malloc(2 * sizeof(TAOS_STMT2_BIND));
|
||||
(*tags)[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, gid, gid_len, NULL, 1};
|
||||
(*tags)[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_BINARY, location, location_len, NULL, 1};
|
||||
|
||||
// Allocate memory for columns data
|
||||
(*params)[i] = (TAOS_STMT2_BIND *)malloc(4 * sizeof(TAOS_STMT2_BIND));
|
||||
|
||||
int64_t *ts = (int64_t *)malloc(NUM_OF_ROWS * sizeof(int64_t));
|
||||
float *current = (float *)malloc(NUM_OF_ROWS * sizeof(float));
|
||||
int *voltage = (int *)malloc(NUM_OF_ROWS * sizeof(int));
|
||||
float *phase = (float *)malloc(NUM_OF_ROWS * sizeof(float));
|
||||
int32_t *ts_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
|
||||
int32_t *current_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
|
||||
int32_t *voltage_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
|
||||
int32_t *phase_len = (int32_t *)malloc(NUM_OF_ROWS * sizeof(int32_t));
|
||||
|
||||
(*params)[i][0] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_TIMESTAMP, ts, ts_len, NULL, NUM_OF_ROWS};
|
||||
(*params)[i][1] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_FLOAT, current, current_len, NULL, NUM_OF_ROWS};
|
||||
(*params)[i][2] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_INT, voltage, voltage_len, NULL, NUM_OF_ROWS};
|
||||
(*params)[i][3] = (TAOS_STMT2_BIND){TSDB_DATA_TYPE_FLOAT, phase, phase_len, NULL, NUM_OF_ROWS};
|
||||
|
||||
for (int j = 0; j < NUM_OF_ROWS; j++) {
|
||||
struct timeval tv;
|
||||
gettimeofday(&tv, NULL);
|
||||
ts[j] = tv.tv_sec * 1000LL + tv.tv_usec / 1000 + j;
|
||||
current[j] = (float)rand() / RAND_MAX * 30;
|
||||
voltage[j] = rand() % 300;
|
||||
phase[j] = (float)rand() / RAND_MAX;
|
||||
|
||||
ts_len[j] = sizeof(int64_t);
|
||||
current_len[j] = sizeof(float);
|
||||
voltage_len[j] = sizeof(int);
|
||||
phase_len[j] = sizeof(float);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Frees allocated memory for binding data.
|
||||
*
|
||||
* @param table_name Pointer to allocated table names.
|
||||
* @param tags Pointer to allocated tag bindings.
|
||||
* @param params Pointer to allocated parameter bindings.
|
||||
*/
|
||||
void freeBindData(char ***table_name, TAOS_STMT2_BIND ***tags, TAOS_STMT2_BIND ***params) {
|
||||
for (int i = 0; i < NUM_OF_SUB_TABLES; i++) {
|
||||
free((*table_name)[i]);
|
||||
for (int j = 0; j < 2; j++) {
|
||||
free((*tags)[i][j].buffer);
|
||||
free((*tags)[i][j].length);
|
||||
}
|
||||
free((*tags)[i]);
|
||||
|
||||
for (int j = 0; j < 4; j++) {
|
||||
free((*params)[i][j].buffer);
|
||||
free((*params)[i][j].length);
|
||||
}
|
||||
free((*params)[i]);
|
||||
}
|
||||
free(*table_name);
|
||||
free(*tags);
|
||||
free(*params);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Inserts data using the TAOS stmt2 API.
|
||||
*
|
||||
* @param taos Pointer to TAOS connection.
|
||||
*/
|
||||
void insertData(TAOS *taos) {
|
||||
TAOS_STMT2_OPTION option = {0, false, false, NULL, NULL};
|
||||
TAOS_STMT2 *stmt2 = taos_stmt2_init(taos, &option);
|
||||
if (!stmt2) {
|
||||
fprintf(stderr, "Failed to initialize TAOS statement.\n");
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
// stmt2 prepare sql
|
||||
checkErrorCode(stmt2, taos_stmt2_prepare(stmt2, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", 0),
|
||||
"Statement preparation failed");
|
||||
|
||||
char **table_name;
|
||||
TAOS_STMT2_BIND **tags, **params;
|
||||
prepareBindData(&table_name, &tags, ¶ms);
|
||||
// stmt2 bind batch
|
||||
TAOS_STMT2_BINDV bindv = {NUM_OF_SUB_TABLES, table_name, tags, params};
|
||||
checkErrorCode(stmt2, taos_stmt2_bind_param(stmt2, &bindv, -1), "Parameter binding failed");
|
||||
// stmt2 exec batch
|
||||
int affected;
|
||||
checkErrorCode(stmt2, taos_stmt2_exec(stmt2, &affected), "Execution failed");
|
||||
printf("Successfully inserted %d rows.\n", affected);
|
||||
// free and close
|
||||
freeBindData(&table_name, &tags, ¶ms);
|
||||
taos_stmt2_close(stmt2);
|
||||
}
|
||||
|
||||
int main() {
|
||||
const char *host = "localhost";
|
||||
const char *user = "root";
|
||||
const char *password = "taosdata";
|
||||
uint16_t port = 6030;
|
||||
TAOS *taos = taos_connect(host, user, password, NULL, port);
|
||||
if (taos == NULL) {
|
||||
fprintf(stderr, "Failed to connect to %s:%hu, ErrCode: 0x%x, ErrMessage: %s.\n", host, port, taos_errno(NULL),
|
||||
taos_errstr(NULL));
|
||||
taos_cleanup();
|
||||
exit(EXIT_FAILURE);
|
||||
}
|
||||
// create database and table
|
||||
executeSQL(taos, "CREATE DATABASE IF NOT EXISTS power");
|
||||
executeSQL(taos, "USE power");
|
||||
executeSQL(taos,
|
||||
"CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
|
||||
"(groupId INT, location BINARY(24))");
|
||||
insertData(taos);
|
||||
taos_close(taos);
|
||||
taos_cleanup();
|
||||
}
|
|
@ -4,7 +4,7 @@
|
|||
"main": "index.js",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@tdengine/websocket": "^3.1.2"
|
||||
"@tdengine/websocket": "^3.1.5"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
const { sleep } = require("@tdengine/websocket");
|
||||
const taos = require("@tdengine/websocket");
|
||||
|
||||
// ANCHOR: create_consumer
|
||||
|
@ -52,6 +51,12 @@ async function prepare() {
|
|||
await wsSql.close();
|
||||
}
|
||||
|
||||
const delay = function(ms) {
|
||||
return new Promise(function(resolve) {
|
||||
setTimeout(resolve, ms);
|
||||
});
|
||||
};
|
||||
|
||||
async function insert() {
|
||||
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||
conf.setUser('root');
|
||||
|
@ -60,7 +65,7 @@ async function insert() {
|
|||
let wsSql = await taos.sqlConnect(conf);
|
||||
for (let i = 0; i < 50; i++) {
|
||||
await wsSql.exec(`INSERT INTO d1001 USING ${stable} (location, groupId) TAGS ("California.SanFrancisco", 3) VALUES (NOW, ${10 + i}, ${200 + i}, ${0.32 + i})`);
|
||||
await sleep(100);
|
||||
await delay(100);
|
||||
}
|
||||
await wsSql.close();
|
||||
}
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
const { sleep } = require("@tdengine/websocket");
|
||||
const taos = require("@tdengine/websocket");
|
||||
|
||||
const db = 'power';
|
||||
|
|
|
@ -0,0 +1,207 @@
|
|||
# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
|
||||
# ┃ ██████ ██████ ██████ █ █ █ █ █ █▄ ▀███ █ ┃
|
||||
# ┃ ▄▄▄▄▄█ █▄▄▄▄▄ ▄▄▄▄▄█ ▀▀▀▀▀█▀▀▀▀▀ █ ▀▀▀▀▀█ ████████▌▐███ ███▄ ▀█ █ ▀▀▀▀▀ ┃
|
||||
# ┃ █▀▀▀▀▀ █▀▀▀▀▀ █▀██▀▀ ▄▄▄▄▄ █ ▄▄▄▄▄█ ▄▄▄▄▄█ ████████▌▐███ █████▄ █ ▄▄▄▄▄ ┃
|
||||
# ┃ █ ██████ █ ▀█▄ █ ██████ █ ███▌▐███ ███████▄ █ ┃
|
||||
# ┣━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┫
|
||||
# ┃ Copyright (c) 2017, the Perspective Authors. ┃
|
||||
# ┃ ╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌ ┃
|
||||
# ┃ This file is part of the Perspective library, distributed under the terms ┃
|
||||
# ┃ of the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). ┃
|
||||
# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
|
||||
|
||||
import logging
|
||||
import tornado.websocket
|
||||
import tornado.web
|
||||
import tornado.ioloop
|
||||
from datetime import date, datetime
|
||||
import perspective
|
||||
import perspective.handlers.tornado
|
||||
import json
|
||||
import taosws
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
||||
logger = logging.getLogger('main')
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TDengine connection parameters
|
||||
# =============================================================================
|
||||
TAOS_HOST = "localhost" # TDengine server host
|
||||
TAOS_PORT = 6041 # TDengine server port
|
||||
TAOS_USER = "root" # TDengine username
|
||||
TAOS_PASSWORD = "taosdata" # TDengine password
|
||||
|
||||
TAOS_DATABASE = "power" # TDengine database name
|
||||
TAOS_TABLENAME = "meters" # TDengine table name
|
||||
|
||||
# =============================================================================
|
||||
# Perspective server parameters
|
||||
# =============================================================================
|
||||
PERSPECTIVE_TABLE_NAME = "meters_values" # name of the Perspective table
|
||||
PERSPECTIVE_REFRESH_RATE = 250 # refresh rate in milliseconds
|
||||
|
||||
|
||||
class CustomJSONEncoder(json.JSONEncoder):
|
||||
"""
|
||||
Custom JSON encoder that serializes datetime and date objects
|
||||
"""
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime):
|
||||
return obj.isoformat()
|
||||
elif isinstance(obj, date):
|
||||
return obj.isoformat()
|
||||
return super().default(obj)
|
||||
|
||||
|
||||
json.JSONEncoder.default = CustomJSONEncoder().default
|
||||
|
||||
|
||||
def convert_ts(ts) -> datetime:
|
||||
"""
|
||||
Convert a timestamp string to a datetime object
|
||||
"""
|
||||
for fmt in ('%Y-%m-%d %H:%M:%S.%f %z', '%Y-%m-%d %H:%M:%S %z'):
|
||||
try:
|
||||
return datetime.strptime(ts, fmt)
|
||||
except ValueError:
|
||||
continue
|
||||
raise ValueError(f"Time data '{ts}' does not match any format")
|
||||
|
||||
|
||||
def create_tdengine_connection(
|
||||
host: str = TAOS_HOST,
|
||||
port: int = TAOS_PORT,
|
||||
user: str = TAOS_USER,
|
||||
password: str = TAOS_PASSWORD,
|
||||
) -> taosws.Connection:
|
||||
try:
|
||||
# connect to the tdengine server
|
||||
conn = taosws.connect(
|
||||
user=user,
|
||||
password=password,
|
||||
host=host,
|
||||
port=port,
|
||||
)
|
||||
# switch to the right database
|
||||
conn.execute(f"USE {TAOS_DATABASE}")
|
||||
# connection successful
|
||||
logger.info(f"Connected to tdengine successfully: {host}:{port}")
|
||||
return conn
|
||||
except Exception as err:
|
||||
logger.error(f"Failed to connect to tdengine: {host}:{port} -- ErrMessage: {err}")
|
||||
raise err
|
||||
|
||||
|
||||
def read_tdengine(
|
||||
conn: taosws.Connection,
|
||||
) -> list[dict]:
|
||||
try:
|
||||
# query the database
|
||||
sql = f"""
|
||||
SELECT `ts`, location, groupid, current, voltage, phase
|
||||
FROM {TAOS_TABLENAME}
|
||||
WHERE `ts` >= NOW() - 12h
|
||||
ORDER BY `ts` DESC
|
||||
LIMIT 1000
|
||||
"""
|
||||
logger.debug(f"Executing query: {sql}")
|
||||
res = conn.query(sql)
|
||||
data = [
|
||||
{
|
||||
"timestamp": convert_ts(row[0]),
|
||||
"location": row[1],
|
||||
"groupid": row[2],
|
||||
"current": row[3],
|
||||
"voltage": row[4],
|
||||
"phase": row[5],
|
||||
}
|
||||
for row in res
|
||||
]
|
||||
logger.info(f"select result: {data}")
|
||||
return data
|
||||
except Exception as err:
|
||||
logger.error(f"Failed to query tdengine: {err}")
|
||||
raise err
|
||||
|
||||
|
||||
# ANCHOR: perspective_server
|
||||
def perspective_thread(perspective_server: perspective.Server, tdengine_conn: taosws.Connection):
|
||||
"""
|
||||
Create a new Perspective table and update it with new data every 50ms
|
||||
"""
|
||||
# create a new Perspective table
|
||||
client = perspective_server.new_local_client()
|
||||
schema = {
|
||||
"timestamp": datetime,
|
||||
"location": str,
|
||||
"groupid": int,
|
||||
"current": float,
|
||||
"voltage": int,
|
||||
"phase": float,
|
||||
}
|
||||
# define the table schema
|
||||
table = client.table(
|
||||
schema,
|
||||
limit=1000, # maximum number of rows in the table
|
||||
name=PERSPECTIVE_TABLE_NAME, # table name. Use this with perspective-viewer on the client side
|
||||
)
|
||||
logger.info("Created new Perspective table")
|
||||
|
||||
# update with new data
|
||||
def updater():
|
||||
data = read_tdengine(tdengine_conn)
|
||||
table.update(data)
|
||||
logger.debug(f"Updated Perspective table: {len(data)} rows")
|
||||
|
||||
logger.info(f"Starting tornado ioloop update loop every {PERSPECTIVE_REFRESH_RATE} milliseconds")
|
||||
# start the periodic callback to update the table data
|
||||
callback = tornado.ioloop.PeriodicCallback(callback=updater, callback_time=PERSPECTIVE_REFRESH_RATE)
|
||||
callback.start()
|
||||
|
||||
# ANCHOR_END: perspective_server
|
||||
|
||||
def make_app(perspective_server):
|
||||
"""
|
||||
Create a new Tornado application with a websocket handler that
|
||||
serves a Perspective table. PerspectiveTornadoHandler handles
|
||||
the websocket connection and streams the Perspective table changes
|
||||
to the client.
|
||||
"""
|
||||
return tornado.web.Application([
|
||||
(
|
||||
r"/websocket", # websocket endpoint. Use this URL to configure the websocket client OR Prospective Server adapter
|
||||
perspective.handlers.tornado.PerspectiveTornadoHandler, # PerspectiveTornadoHandler handles perspective table updates <-> websocket client
|
||||
{"perspective_server": perspective_server}, # pass the perspective server to the handler
|
||||
),
|
||||
])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logger.info("TDEngine <-> Perspective Demo")
|
||||
|
||||
# create a new Perspective server
|
||||
logger.info("Creating new Perspective server")
|
||||
perspective_server = perspective.Server()
|
||||
# create the tdengine connection
|
||||
logger.info("Creating new TDEngine connection")
|
||||
tdengine_conn = create_tdengine_connection()
|
||||
|
||||
# setup and start the Tornado app
|
||||
logger.info("Creating Tornado server")
|
||||
app = make_app(perspective_server)
|
||||
app.listen(8085, address='0.0.0.0')
|
||||
logger.info("Listening on http://localhost:8080")
|
||||
|
||||
try:
|
||||
# start the io loop
|
||||
logger.info("Starting ioloop to update Perspective table data via tornado websocket...")
|
||||
loop = tornado.ioloop.IOLoop.current()
|
||||
loop.call_later(0, perspective_thread, perspective_server, tdengine_conn)
|
||||
loop.start()
|
||||
except KeyboardInterrupt:
|
||||
logger.warning("Keyboard interrupt detected. Shutting down tornado server...")
|
||||
loop.stop()
|
||||
loop.close()
|
||||
logging.info("Shut down")
|
|
@ -0,0 +1,135 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Perspective Viewer Dashboard</title>
|
||||
<link rel="stylesheet" crossorigin="anonymous"
|
||||
href="https://unpkg.com/@finos/perspective-viewer/dist/css/themes.css"/>
|
||||
<style>
|
||||
/* define the layout of the entire dashboard */
|
||||
#dashboard {
|
||||
display: grid;
|
||||
/* define a grid layout with two rows and two columns */
|
||||
grid-template-columns: 1fr 1fr;
|
||||
grid-template-rows: auto auto auto;
|
||||
gap: 20px;
|
||||
padding: 20px;
|
||||
/* limit the maximum height of the Dashboard to the viewport height */
|
||||
max-height: 100vh;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
/* define the style */
|
||||
.viewer-container {
|
||||
/* adjust the height of the container to ensure it can be displayed on one screen */
|
||||
height: calc((100vh - 30px) / 2);
|
||||
width: 100%;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
background-color: #333;
|
||||
border-radius: 8px;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
perspective-viewer {
|
||||
height: 100%;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
body {
|
||||
background-color: #242526;
|
||||
color: white;
|
||||
font-family: Arial, sans-serif;
|
||||
margin: 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<!-- introduce JavaScript files related to Perspective Viewer -->
|
||||
<script type="module" src="https://unpkg.com/@finos/perspective@3.1.3/dist/cdn/perspective.js"></script>
|
||||
<script type="module" src="https://unpkg.com/@finos/perspective-viewer@3.1.3/dist/cdn/perspective-viewer.js"></script>
|
||||
<script type="module"
|
||||
src="https://unpkg.com/@finos/perspective-viewer-datagrid@3.1.3/dist/cdn/perspective-viewer-datagrid.js"></script>
|
||||
<script type="module"
|
||||
src="https://unpkg.com/@finos/perspective-viewer-d3fc@3.1.3/dist/cdn/perspective-viewer-d3fc.js"></script>
|
||||
|
||||
<!-- ANCHOR: perspective_viewer -->
|
||||
<script type="module">
|
||||
// import the Perspective library
|
||||
import perspective from "https://unpkg.com/@finos/perspective@3.1.3/dist/cdn/perspective.js";
|
||||
|
||||
document.addEventListener("DOMContentLoaded", async function () {
|
||||
// an asynchronous function for loading the view
|
||||
async function load_viewer(viewerId, config) {
|
||||
try {
|
||||
const table_name = "meters_values";
|
||||
const viewer = document.getElementById(viewerId);
|
||||
// connect WebSocket server
|
||||
const websocket = await perspective.websocket("ws://localhost:8085/websocket");
|
||||
// open server table
|
||||
const server_table = await websocket.open_table(table_name);
|
||||
// load the table into the view
|
||||
await viewer.load(server_table);
|
||||
// use view configuration
|
||||
await viewer.restore(config);
|
||||
} catch (error) {
|
||||
console.error('Error occurred:', error);
|
||||
}
|
||||
}
|
||||
|
||||
// configuration of the view
|
||||
const config1 = {
|
||||
"version": "3.3.1", // Perspective library version (compatibility identifier)
|
||||
"plugin": "Datagrid", // View mode: Datagrid (table) or D3FC (chart)
|
||||
"plugin_config": { // Plugin-specific configuration
|
||||
"columns": {
|
||||
"current": {
|
||||
"width": 150 // Column width in pixels
|
||||
}
|
||||
},
|
||||
"edit_mode": "READ_ONLY", // Edit mode: READ_ONLY (immutable) or EDIT (editable)
|
||||
"scroll_lock": false // Whether to lock scroll position
|
||||
},
|
||||
"columns_config": {}, // Custom column configurations (colors, formatting, etc.)
|
||||
"settings": true, // Whether to show settings panel (true/false)
|
||||
"theme": "Power Meters", // Custom theme name (must be pre-defined)
|
||||
"title": "Meters list data", // View title
|
||||
"group_by": ["location", "groupid"], // Row grouping fields (equivalent to `row_pivots`)
|
||||
"split_by": [], // Column grouping fields (equivalent to `column_pivots`)
|
||||
"columns": [ // Columns to display (in order)
|
||||
"timestamp",
|
||||
"location",
|
||||
"current",
|
||||
"voltage",
|
||||
"phase"
|
||||
],
|
||||
"filter": [], // Filter conditions (triplet format array)
|
||||
"sort": [], // Sorting rules (format: [field, direction])
|
||||
"expressions": {}, // Custom expressions (e.g., calculated columns)
|
||||
"aggregates": { // Aggregation function configuration
|
||||
"timestamp": "last", // Aggregation: last (takes the latest value)
|
||||
"voltage": "last", // Aggregation: last
|
||||
"phase": "last", // Aggregation: last
|
||||
"current": "last" // Aggregation: last
|
||||
}
|
||||
};
|
||||
|
||||
// load the first view
|
||||
await load_viewer("prsp-viewer-1", config1);
|
||||
});
|
||||
</script>
|
||||
|
||||
<!-- define the HTML Structure of the Dashboard -->
|
||||
<div id="dashboard">
|
||||
<div class="viewer-container">
|
||||
<perspective-viewer id="prsp-viewer-1" theme="Pro Dark"></perspective-viewer>
|
||||
</div>
|
||||
</div>
|
||||
<!-- ANCHOR_END: perspective_viewer -->
|
||||
</body>
|
||||
|
||||
</html>
|
|
@ -182,7 +182,7 @@ INTERVAL(interval_val [, interval_offset])
|
|||
```
|
||||
|
||||
时间窗口子句包括 3 个子句:
|
||||
- INTERVAL 子句:用于产生相等时间周期的窗口,interval_val 指定每个时间窗口的大小,interval_offset 指定窗口偏移量;
|
||||
- INTERVAL 子句:用于产生相等时间周期的窗口,interval_val 指定每个时间窗口的大小,interval_offset 指定窗口偏移量;默认情况下,窗口是从 Unix time 0(1970-01-01 00:00:00 UTC)开始划分的;如果设置了 interval_offset,那么窗口的划分将从 “Unix time 0 + interval_offset” 开始(示例见下文);
|
||||
- SLIDING 子句:用于指定窗口向前滑动的时间;
|
||||
- FILL:用于指定窗口区间数据缺失的情况下,数据的填充模式。
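下面给出一个使用 interval_offset 的查询示意(其中的表名 meters 与时间范围仅为假设,用于说明窗口从 “Unix time 0 + interval_offset” 开始划分):

```sql
SELECT _wstart, _wend, COUNT(*)
FROM meters
WHERE ts >= '2024-01-01 00:00:00' AND ts < '2024-01-08 00:00:00'
INTERVAL(1d, 2h);
```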
|
||||
|
||||
|
@ -688,4 +688,4 @@ select a.* from meters a left asof join meters b on timetruncate(a.ts, 1s) < tim
|
|||
|
||||
查询结果顺序的限制包括如下这些。
|
||||
- 普通表、子表、subquery 且无分组条件无排序的场景下,查询结果会按照驱动表的主键列顺序输出。
|
||||
- 由于超级表查询、Full Join 或有分组条件无排序的场景下,查询结果没有固定的输出顺序,因此,在有排序需求且输出无固定顺序的场景下,需要进行排序操作。部分依赖时间线的函数可能会因为没有有效的时间线输出而无法执行。
|
||||
- 由于超级表查询、Full Join 或有分组条件无排序的场景下,查询结果没有固定的输出顺序,因此,在有排序需求且输出无固定顺序的场景下,需要进行排序操作。部分依赖时间线的函数可能会因为没有有效的时间线输出而无法执行。
|
||||
|
|
|
@ -23,7 +23,7 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name
|
|||
SUBTABLE(expression) AS subquery
|
||||
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE | CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
|
@ -136,6 +136,7 @@ create stream if not exists count_history_s fill_history 1 into count_history as
|
|||
```sql
|
||||
create stream if not exists continuous_query_s trigger force_window_close into continuous_query as select count(*) from power.meters interval(10s) sliding(1s)
|
||||
```
|
||||
5. CONTINUOUS_WINDOW_CLOSE:窗口关闭时输出结果。修改、删除数据,并不会立即触发重算,每等待 rec_time_val 时长,会进行周期性重算。如果不指定 rec_time_val,那么重算周期是60分钟。如果重算的时间长度超过 rec_time_val,在本次重算后,自动开启下一次重算。该模式当前只支持 INTERVAL 窗口。如果使用 FILL,需要配置 adapter的相关信息:adapterFqdn、adapterPort、adapterToken。adapterToken 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`。
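下面是一个使用 CONTINUOUS_WINDOW_CLOSE 触发模式的建流示意(流名、目标表名以及 recalculate 指定的 10m 重算周期均为示例值):
```sql
create stream if not exists continuous_window_close_s trigger continuous_window_close recalculate 10m into continuous_window_close_result as select count(*) from power.meters interval(1m)
```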
|
||||
|
||||
窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,此时事件时间无法更新,可能导致无法得到最新的计算结果。
|
||||
|
||||
|
|
|
@ -141,9 +141,20 @@ stmt 绑定参数的示例代码如下:
|
|||
```
|
||||
</TabItem>
|
||||
<TabItem label="C" value="c">
|
||||
|
||||
stmt2 绑定参数的示例代码如下(需要 TDengine v3.3.5.0 及以上):
|
||||
|
||||
```c
|
||||
{{#include docs/examples/c/stmt2_insert_demo.c}}
|
||||
```
|
||||
|
||||
stmt 绑定参数的示例代码如下:
|
||||
|
||||
```c
|
||||
{{#include docs/examples/c/stmt_insert_demo.c}}
|
||||
```
|
||||
|
||||
|
||||
</TabItem>
|
||||
<TabItem label="REST API" value="rest">
|
||||
不支持
|
||||
|
|
|
@ -56,7 +56,7 @@ dataDir /mnt/data6 2 0
|
|||
|
||||
一般情况下,当 TDengine 要从同级挂载点中选择一个用于生成新的数据文件时,采用 round robin 策略进行选择。但现实中有可能每个磁盘的容量不相同,或者容量相同但写入的数据量不相同,这就导致会出现每个磁盘上的可用空间不均衡,在实际进行选择时有可能会选择到一个剩余空间已经很小的磁盘。
|
||||
|
||||
为了解决这个问题,从 3.1.1.0 开始引入了一个新的配置 minDiskFreeSize,当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。该配置项的单位为字节,其值应该大于 2GB,即会跳过可用空间小于 2GB 的挂载点。
|
||||
为了解决这个问题,从 3.1.1.0 开始引入了一个新的配置 minDiskFreeSize,当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。该配置项的单位为字节,若配置值大于 2GB,则会跳过可用空间小于 2GB 的挂载点。
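若使用企业版并希望动态调整该阈值,可参考如下 SQL 示意(示例中将阈值调整为 100MB,即 104857600 字节,具体取值请结合磁盘容量规划):

```sql
ALTER ALL DNODES 'minDiskFreeSize' '104857600';
```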
|
||||
|
||||
从 3.3.2.0 版本开始,引入了一个新的配置 disable_create_new_file,用于控制在某个挂载点上禁止生成新文件,其缺省值为 false,即每个挂载点上默认都可以生成新文件。
|
||||
|
||||
|
|
|
@ -0,0 +1,274 @@
|
|||
---
|
||||
sidebar_label: 安全配置
|
||||
title: 安全配置
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
## 背景
|
||||
|
||||
TDengine 的分布式、多组件特性导致 TDengine 的安全配置是生产系统中比较关注的问题。本文档旨在对 TDengine 各组件及在不同部署方式下的安全问题进行说明,并提供部署和配置建议,为用户的数据安全提供支持。
|
||||
|
||||
## 安全配置涉及组件
|
||||
|
||||
TDengine 包含多个组件,有:
|
||||
|
||||
- `taosd`: 内核组件。
|
||||
- `taosc`: 客户端库。
|
||||
- `taosAdapter`: REST API 和 WebSocket 服务。
|
||||
- `taosKeeper`:监控服务组件。
|
||||
- `taosX`:数据管道和备份恢复组件。
|
||||
- `taosxAgent`:外部数据源数据接入辅助组件。
|
||||
- `taosExplorer`:Web 可视化管理界面。
|
||||
|
||||
与 TDengine 部署和应用相关,还会存在以下组件:
|
||||
|
||||
- 通过各种连接器接入并使用 TDengine 数据库的应用。
|
||||
- 外部数据源:指接入 TDengine 的其他数据源,如 MQTT、OPC、Kafka 等。
|
||||
|
||||
各组件关系如下:
|
||||
|
||||

|
||||
|
||||
关于各组件的详细介绍,请参考 [组件介绍](./intro)。
|
||||
|
||||
## TDengine 安全设置
|
||||
|
||||
### `taosd`
|
||||
|
||||
taosd 集群间使用 TCP 连接基于自有协议进行数据交换,风险较低,但传输过程不是加密的,仍有一定安全风险。
|
||||
|
||||
启用压缩可能对 TCP 数据混淆有帮助。
|
||||
|
||||
- **compressMsgSize**:是否对 RPC 消息进行压缩,整数,可选:-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩。
|
||||
|
||||
为了保证数据库操作可追溯,建议启用审计功能。
|
||||
|
||||
- **audit**:审计功能开关,0 为关,1 为开。默认打开。
|
||||
- **auditInterval**:上报间隔,单位为毫秒。默认 5000。
|
||||
- **auditCreateTable**:是否针对创建子表开启审计功能。0 为关,1 为开。默认打开。
|
||||
|
||||
为保证数据文件安全,可启用数据库加密。
|
||||
|
||||
- **encryptAlgorithm**:数据加密算法。
|
||||
- **encryptScope**:数据加密范围。
|
||||
|
||||
启用白名单可限制访问地址,进一步增强私密性。
|
||||
|
||||
- **enableWhiteList**:白名单功能开关,0 为关, 1 为开;默认关闭。
|
||||
|
||||
### `taosc`
|
||||
|
||||
用户和其他组件与 `taosd` 之间使用原生客户端库(taosc)和自有协议进行连接,数据安全风险较低,但传输过程仍然不是加密的,有一定安全风险。
|
||||
|
||||
### `taosAdapter`
|
||||
|
||||
taosadapter 与 taosd 之间使用原生客户端库(taosc)和自有协议进行连接,同样支持 RPC 消息压缩,不会造成数据安全问题。
|
||||
|
||||
应用和其他组件通过各语言连接器与 taosadapter 进行连接。默认情况下,连接是基于 HTTP 1.1 且不加密的。要保证 taosadapter 与其他组件之间的数据传输安全,需要配置 SSL 加密连接。在 `/etc/taos/taosadapter.toml` 配置文件中修改如下配置:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
enable = true
|
||||
certFile = "/path/to/certificate-file"
|
||||
keyFile = "/path/to/private-key"
|
||||
```
|
||||
|
||||
在连接器中配置 HTTPS/SSL 访问方式,完成加密访问。
|
||||
|
||||
为进一步增强安全性,可启用白名单功能,在 `taosd` 中配置,对 taosAdapter 组件同样生效。
|
||||
|
||||
### `taosX`
|
||||
|
||||
`taosX` 对外包括 REST API 接口和 gRPC 接口,其中 gRPC 接口用于 taos-agent 连接。
|
||||
|
||||
- REST API 接口是基于 HTTP 1.1 且不加密的,有安全风险。
|
||||
- gRPC 接口基于 HTTP 2 且不加密,有安全风险 。
|
||||
|
||||
为了保证数据安全,建议 taosX API 接口仅限内部访问。在 `/etc/taos/taosx.toml` 配置文件中修改如下配置:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
listen = "127.0.0.1:6050"
|
||||
grpc = "127.0.0.1:6055"
|
||||
```
|
||||
|
||||
从 TDengine 3.3.6.0 开始,taosX 支持 HTTPS 连接,在 `/etc/taos/taosx.toml` 文件中添加如下配置:
|
||||
|
||||
```toml
|
||||
[serve]
|
||||
ssl_cert = "/path/to/server.pem"
|
||||
ssl_key = "/path/to/server.key"
|
||||
ssl_ca = "/path/to/ca.pem"
|
||||
```
|
||||
|
||||
并在 Explorer 中修改 API 地址为 HTTPS 连接:
|
||||
|
||||
```toml
|
||||
# taosX API 本地连接
|
||||
x_api = "https://127.0.01:6050"
|
||||
# Public IP 或者域名地址
|
||||
grpc = "https://public.domain.name:6055"
|
||||
```
|
||||
|
||||
### `taosExplorer`
|
||||
|
||||
与 `taosAdapter` 组件相似,`taosExplorer` 组件提供 HTTP 服务对外访问。在 `/etc/taos/explorer.toml` 配置文件中修改如下配置:
|
||||
|
||||
```toml
|
||||
[ssl]
|
||||
# SSL certificate file
|
||||
certificate = "/path/to/ca.file"
|
||||
|
||||
# SSL certificate private key
|
||||
certificate_key = "/path/to/key.file"
|
||||
```
|
||||
|
||||
之后,使用 HTTPS 进行 Explorer 访问,如 [https://192.168.12.34:6060](https://192.168.12.34:6060)。
|
||||
|
||||
### `taosxAgent`
|
||||
|
||||
taosX 启用 HTTPS 后,Agent 组件与 taosx 之间使用 HTTP 2 加密连接,使用 Arrow-Flight RPC 进行数据交换,传输内容是二进制格式,且仅注册过的 Agent 连接有效,保障数据安全。
|
||||
|
||||
建议在不安全网络或公共网络环境下的 Agent 服务,始终开启 HTTPS 连接。
|
||||
|
||||
### `taosKeeper`
|
||||
|
||||
taosKeeper 使用 WebSocket 连接与 taosAdapter 通信,将其他组件上报的监控信息写入 TDengine。
|
||||
|
||||
`taosKeeper` 当前版本存在安全风险:
|
||||
|
||||
- 监控地址不可限制在本机,默认监控所有地址的 6043 端口,存在网络攻击风险。使用 Docker 或 Kubernetes 部署不暴露 taosKeeper 端口时,此风险可忽略。
|
||||
- 配置文件中配置明文密码,需要降低配置文件可见性。在 `/etc/taos/taoskeeper.toml` 中存在:
|
||||
|
||||
```toml
|
||||
[tdengine]
|
||||
host = "localhost"
|
||||
port = 6041
|
||||
username = "root"
|
||||
password = "taosdata"
|
||||
usessl = false
|
||||
```
|
||||
|
||||
## 安全增强
|
||||
|
||||
我们建议在局域网内部使用 TDengine。
|
||||
|
||||
如果必须在局域网外部提供访问,请考虑添加以下配置:
|
||||
|
||||
### 负载均衡
|
||||
|
||||
使用负载均衡对外提供 taosAdapter 服务。
|
||||
|
||||
以 Nginx 为例,配置多节点负载均衡:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 6041;
|
||||
|
||||
location / {
|
||||
proxy_pass http://websocket;
|
||||
# Headers for websocket compatible
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
# Forwarded headers
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header X-Forwarded-Server $hostname;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
}
|
||||
}
|
||||
|
||||
upstream websocket {
|
||||
server 192.168.11.61:6041;
|
||||
server 192.168.11.62:6041;
|
||||
server 192.168.11.63:6041;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
如果 taosAdapter 组件未配置 SSL 安全连接,还需要配置 SSL 才能保证安全访问。SSL 可以配置在更上层的 API Gateway,也可以配置在 Nginx 中;如果你对各组件之间的安全性有更强的要求,您可以在所有组件中都配置 SSL。Nginx 配置如下:
|
||||
|
||||
```nginx
|
||||
http {
|
||||
server {
|
||||
listen 443 ssl;
|
||||
|
||||
ssl_certificate /path/to/your/certificate.crt;
|
||||
ssl_certificate_key /path/to/your/private.key;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 安全网关
|
||||
|
||||
在现代互联网生产系统中,安全网关的使用也很普遍。[traefik](https://traefik.io/) 是一个很好的开源选择,我们以 traefik 为例,解释在 API 网关中的安全配置。
|
||||
|
||||
Traefik 中通过 middleware 中间件提供多种安全配置,包括:
|
||||
|
||||
1. 认证(Authentication):Traefik 提供 BasicAuth、DigestAuth、自定义认证中间件、OAuth 2.0 等多种认证方式。
|
||||
2. IP 白名单(IPWhitelist):限制允许访问的客户端 IP。
|
||||
3. 频率限制(RateLimit):控制发送到服务的请求数。
|
||||
4. 自定义 Headers:通过自定义 Headers 添加 `allowedHosts` 等配置,提高安全性。
|
||||
|
||||
一个常见的中间件示例如下:
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
- "traefik.enable=true"
|
||||
- "traefik.http.routers.tdengine.rule=Host(`api.tdengine.example.com`)"
|
||||
- "traefik.http.routers.tdengine.entrypoints=https"
|
||||
- "traefik.http.routers.tdengine.tls.certresolver=default"
|
||||
- "traefik.http.routers.tdengine.service=tdengine"
|
||||
- "traefik.http.services.tdengine.loadbalancer.server.port=6041"
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
- "traefik.http.middlewares.check-header.headers.customrequestheaders.X-Secret-Header=SecretValue"
|
||||
- "traefik.http.middlewares.check-header.headers.customresponseheaders.X-Header-Check=true"
|
||||
- "traefik.http.middlewares.tdengine-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
|
||||
- "traefik.http.routers.tdengine.middlewares=redirect-to-https,check-header,tdengine-ipwhitelist"
|
||||
```
|
||||
|
||||
上面的示例完成以下配置:
|
||||
|
||||
- TLS 认证使用 `default` 配置,这个配置可使用配置文件或 traefik 启动参数中配置,如下:
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: "traefik:v2.3.2"
|
||||
hostname: "traefik"
|
||||
networks:
|
||||
- traefik
|
||||
command:
|
||||
- "--log.level=INFO"
|
||||
- "--api.insecure=true"
|
||||
- "--providers.docker=true"
|
||||
- "--providers.docker.exposedbydefault=false"
|
||||
- "--providers.docker.swarmmode=true"
|
||||
- "--providers.docker.network=traefik"
|
||||
- "--providers.docker.watch=true"
|
||||
- "--entrypoints.http.address=:80"
|
||||
- "--entrypoints.https.address=:443"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge=true"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.provider=alidns"
|
||||
- "--certificatesresolvers.default.acme.dnschallenge.resolvers=ns1.alidns.com"
|
||||
- "--certificatesresolvers.default.acme.email=linhehuo@gmail.com"
|
||||
- "--certificatesresolvers.default.acme.storage=/letsencrypt/acme.json"
|
||||
```
|
||||
|
||||
上面的启动参数配置了 `default` TLS 证书解析器和自动 ACME 认证(自动证书申请和延期)。
|
||||
|
||||
- 中间件 `redirect-to-https`:配置从 HTTP 到 HTTPS 的转发,强制使用安全连接。
|
||||
|
||||
```yaml
|
||||
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
|
||||
```
|
||||
|
||||
- 中间件 `check-header`:配置自定义 Headers 检查。外部访问必须添加自定义 Header 并匹配 Header 值,避免非法访问。这在提供 API 访问时是一个非常简单有效的安全机制。
|
||||
- 中间件 `tdengine-ipwhitelist`:配置 IP 白名单。仅允许指定 IP 访问,使用 CIDR 路由规则进行匹配,可以设置内网及外网 IP 地址。
|
||||
|
||||
## 总结
|
||||
|
||||
数据安全是 TDengine 产品的一项关键指标,这些措施旨在保护 TDengine 部署免受未经授权的访问和数据泄露,同时保持性能和功能。但 TDengine 自身的安全配置不是生产中的唯一保障,结合用户业务系统制定更加匹配客户需求的解决方案更加重要。
|
|
@ -0,0 +1,86 @@
|
|||
---
|
||||
sidebar_label: Perspective
|
||||
title: 与 Perspective 集成
|
||||
toc_max_heading_level: 4
|
||||
---
|
||||
|
||||
## 概述
|
||||
|
||||
Perspective 是一款开源且强大的数据可视化库,由 [Prospective.co](https://www.perspective.co/) 开发,运用 `WebAssembly` 和 `Web Workers` 技术,在 Web 应用中实现交互式实时数据分析,能在浏览器端提供高性能可视化能力。借助它,开发者可构建实时更新的仪表盘、图表等,用户能轻松与数据交互,按需求筛选、排序及挖掘数据。其灵活性高,适配多种数据格式与业务场景;速度快,处理大规模数据也能保障交互流畅;易用性佳,新手和专业开发者都能快速搭建可视化界面。
|
||||
|
||||
在数据连接方面,Perspective 通过 TDengine 的 Python 连接器,完美支持 TDengine 数据源,可高效获取其中海量时序数据等各类数据,并提供展示复杂图表、深度统计分析和趋势预测等实时功能,助力用户洞察数据价值,为决策提供有力支持,是构建对实时数据可视化和分析要求高的应用的理想选择。
|
||||
|
||||
|
||||

|
||||
|
||||
## 前置条件
|
||||
|
||||
在 Linux 系统中进行如下安装操作:
|
||||
|
||||
- TDengine 服务已部署并正常运行(企业及社区版均可)。
|
||||
- taosAdapter 能够正常运行,详细参考 [taosAdapter 使用手册](../../../reference/components/taosadapter)。
|
||||
- Python 3.10 及以上版本已安装(如未安装,可参考 [Python 安装](https://docs.python.org/))。
|
||||
- 下载或克隆 [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目,进入项目根目录后运行 “install.sh” 脚本,以便在本地下载并安装 TDengine 客户端库以及相关的依赖项。
|
||||
|
||||
## 可视化数据
|
||||
|
||||
**第 1 步**,运行 [perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目根目录中的 “run.sh” 脚本,以此启动 Perspective 服务。该服务会每隔 300 毫秒从 TDengine 数据库中获取一次数据,并将数据以流的形式传输至基于 Web 的 `Perspective Viewer` 。
|
||||
|
||||
```shell
|
||||
sh run.sh
|
||||
```
|
||||
|
||||
**第 2 步**,启动一个静态 Web 服务,随后在浏览器中访问 `prsp-viewer.html` 资源,便能展示可视化数据。
|
||||
|
||||
```shell
|
||||
python -m http.server 8081
|
||||
```
|
||||
|
||||
通过浏览器访问该 Web 页面后所呈现出的效果如下图所示:
|
||||
|
||||

|
||||
|
||||
## 使用说明
|
||||
|
||||
### 写入数据
|
||||
|
||||
[perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目根目录中的 `producer.py` 脚本,借助 TDengine Python 连接器,可定期向 TDengine 数据库插入数据。此脚本会生成随机数据并将其插入数据库,以此模拟实时数据的写入过程。具体执行步骤如下:
|
||||
|
||||
1. 建立与 TDengine 的连接。
|
||||
1. 创建 power 数据库和 meters 表。
|
||||
1. 每隔 300 毫秒生成一次随机数据,并写入 TDengine 数据库中。
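其中建库建表步骤大致等价于如下 SQL(仅为示意,具体的列与标签定义以 producer.py 脚本为准):

```sql
CREATE DATABASE IF NOT EXISTS power;
CREATE STABLE IF NOT EXISTS power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location VARCHAR(24));
```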
|
||||
|
||||
Python 连接器详细写入说明可参见 [Python 参数绑定](../../../reference/connector/python/#参数绑定)。
|
||||
|
||||
### 加载数据
|
||||
|
||||
[perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目根目录中的 `perspective_server.py` 脚本会启动一个 Perspective 服务器,该服务器会从 TDengine 读取数据,并通过 Tornado WebSocket 将数据流式传输到一个 Perspective 表中。
|
||||
|
||||
1. 启动一个 Perspective 服务器
|
||||
1. 建立与 TDengine 的连接。
|
||||
1. 创建一个 Perspective 表(表结构需要与 TDengine 数据库中表的类型保持匹配)。
|
||||
1. 调用 `Tornado.PeriodicCallback` 函数来启动定时任务,进而实现对 Perspective 表数据的更新,示例代码如下:
|
||||
|
||||
```python
|
||||
{{#include docs/examples/perspective/perspective_server.py:perspective_server}}
|
||||
```
|
||||
|
||||
### HTML 页面配置
|
||||
|
||||
[perspective-connect-demo](https://github.com/taosdata/perspective-connect-demo) 项目根目录中的 `prsp-viewer.html`文件将 `Perspective Viewer` 嵌入到 HTML 页面中。它通过 WebSocket 连接到 Perspective 服务器,并根据图表配置显示实时数据。
|
||||
|
||||
- 配置展示的图表以及数据分析的规则。
|
||||
- 与 Perspective 服务器建立 Websocket 连接。
|
||||
- 引入 Perspective 库,通过 WebSocket 连接到 Perspective 服务器,加载 meters_values 表来展示动态数据。
|
||||
|
||||
```html
|
||||
{{#include docs/examples/perspective/prsp-viewer.html:perspective_viewer}}
|
||||
```
|
||||
|
||||
## 参考资料
|
||||
|
||||
- [Perspective 文档](https://perspective.finos.org/)
|
||||
- [TDengine Python 连接器](../../../reference/connector/python)
|
||||
- [TDengine 流计算](../../../advanced/stream/)
|
||||
|
||||
|
Binary file not shown.
After Width: | Height: | Size: 50 KiB |
Binary file not shown.
After Width: | Height: | Size: 62 KiB |
|
@ -582,9 +582,9 @@ charset 的有效值是 UTF-8。
|
|||
- 说明:当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件。 **`企业版参数`**
|
||||
- 类型:整数
|
||||
- 单位:byte
|
||||
- 默认值:52428800
|
||||
- 最小值:52428800
|
||||
- 最大值:1073741824
|
||||
- 默认值:52428800 (50MB)
|
||||
- 最小值:52428800 (50MB)
|
||||
- 最大值:2199023255552 (2TB)
|
||||
- 动态修改:支持通过 SQL 修改,立即生效。
|
||||
- 支持版本:从 v3.1.0.0 版本开始引入
|
||||
|
||||
|
@ -1095,6 +1095,29 @@ charset 的有效值是 UTF-8。
|
|||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
#### adapterFqdn
|
||||
- 说明:taosAdapter 服务的地址 `内部参数`
|
||||
- 类型:fqdn
|
||||
- 默认值:localhost
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
#### adapterPort
|
||||
- 说明:taosAdapter 服务的端口号 `内部参数`
|
||||
- 类型:整数
|
||||
- 默认值:6041
|
||||
- 最小值:1
|
||||
- 最大值:65056
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
#### adapterToken
|
||||
- 说明:为 `{username}:{password}` 经过 Base64 编码之后的字符串 `内部参数`
|
||||
- 类型:字符串
|
||||
- 默认值:`cm9vdDp0YW9zZGF0YQ==`
|
||||
- 动态修改:不支持
|
||||
- 支持版本:v3.3.6.0 引入
|
||||
|
||||
### 日志相关
|
||||
|
||||
#### logDir
|
||||
|
|
|
@ -290,6 +290,8 @@ taosBenchmark -f <json file>
|
|||
|
||||
其它通用参数详见 [通用配置参数](#通用配置参数)。
|
||||
|
||||
**说明:从 v3.3.5.6 及以上版本不再支持 json 文件中同时配置 `specified_table_query` 和 `super_table_query`**
|
||||
|
||||
#### 执行指定查询语句
|
||||
|
||||
查询指定表(可以指定超级表、子表或普通表)的配置参数在 `specified_table_query` 中设置。
|
||||
|
@ -416,6 +418,15 @@ taosBenchmark -f <json file>
|
|||
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary>queryStb.json</summary>
|
||||
|
||||
```json
|
||||
{{#include /TDengine/tools/taos-tools/example/queryStb.json}}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### 订阅 JSON 示例
|
||||
|
||||
<details>
|
||||
|
|
|
@ -44,6 +44,7 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
|
||||
| 17 | GEOMETRY | 自定义 | 几何类型,3.1.0.0 版本开始支持 |
|
||||
| 18 | VARBINARY | 自定义 | 可变长的二进制数据, 3.1.1.0 版本开始支持|
|
||||
| 19 | DECIMAL | 8或16 | 高精度数值类型, 取值范围取决于类型中指定的precision和scale, 自3.3.6开始支持, 见下文描述|
|
||||
|
||||
:::note
|
||||
|
||||
|
@ -63,6 +64,18 @@ CREATE DATABASE db_name PRECISION 'ns';
|
|||
|
||||
:::
|
||||
|
||||
### DECIMAL数据类型
|
||||
`DECIMAL`数据类型用于高精度数值存储, 自版本3.3.6开始支持, 定义语法: DECIMAL(18, 2), DECIMAL(38, 10), 其中需要指定两个参数, 分别为`precision`和`scale`. `precision`是指最大支持的有效数字个数, `scale`是指最大支持的小数位数. 如DECIMAL(8, 4), 可表示范围即[-9999.9999, 9999.9999]. 定义DECIMAL数据类型时, `precision`范围为: [1,38], scale的范围为: [0,precision], scale为0时, 仅表示整数. 也可以不指定scale, 默认为0, 如DECIMAL(18), 与DECIMAL(18,0)相同。
|
||||
|
||||
当`precision`值不大于18时, 内部使用8字节存储(DECIMAL64), 当precision范围为(18, 38]时, 使用16字节存储(DECIMAL). SQL中写入DECIMAL类型数据时, 可直接使用数值写入, 当写入值大于类型可表示的最大值时会报DECIMAL_OVERFLOW错误, 当未大于类型表示的最大值, 但小数位数超过SCALE时, 会自动四舍五入处理, 如定义类型DECIMAL(10, 2), 写入10.987, 则实际存储值为10.99。
|
||||
|
||||
DECIMAL类型仅支持普通列, 暂不支持tag列. DECIMAL类型只支持SQL写入, 暂不支持stmt写入和schemeless写入。
|
||||
|
||||
整数类型和DECIMAL类型操作时, 会将整数类型转换为DECIMAL类型再进行计算. DECIMAL类型与DOUBLE/FLOAT/VARCHAR/NCHAR等类型计算时, 转换为DOUBLE类型进行计算.
|
||||
|
||||
查询DECIMAL类型表达式时, 若计算的中间结果超出当前类型可表示的最大值时, 报DECIMAL OVERFLOW错误.
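下面通过一个简单示例说明 DECIMAL 的定义与四舍五入行为(表名与数值仅为示意):

```sql
CREATE TABLE test_decimal (ts TIMESTAMP, amount DECIMAL(10, 2));
INSERT INTO test_decimal VALUES (NOW, 10.987);
SELECT amount FROM test_decimal;
```

按照上文规则,写入的 10.987 小数位数超过 scale,实际存储和查询到的值为 10.99。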
|
||||
|
||||
|
||||
## 常量
|
||||
|
||||
TDengine 支持多个类型的常量,细节如下表:
|
||||
|
|
|
@ -1137,6 +1137,7 @@ CAST(expr AS type_name)
|
|||
- 字符串类型转换数值类型时可能出现的无效字符情况,例如 "a" 可能转为 0,但不会报错。
|
||||
- 转换到数值类型时,数值大于 type_name 可表示的范围时,则会溢出,但不会报错。
|
||||
- 转换到字符串类型时,如果转换后长度超过 type_name 中指定的长度,则会截断,但不会报错。
|
||||
- DECIMAL 类型不支持与 JSON、VARBINARY、GEOMETRY 类型的互转。
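下面给出一个 CAST 行为的简单示意(假设表 t1 中有 VARCHAR 列 v 与 INT 列 c1,仅用于说明):

```sql
SELECT CAST(v AS INT), CAST(c1 AS VARCHAR(3)) FROM t1;
```

按上文规则,若 v 中存在 "a" 这类无效字符,转换结果可能为 0;若 c1 转换后的字符串长度超过 3,则结果会被截断,二者均不会报错。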
|
||||
|
||||
#### TO_CHAR
|
||||
|
||||
|
@ -1618,12 +1619,14 @@ AVG(expr)
|
|||
|
||||
**功能说明**:统计指定字段的平均值。
|
||||
|
||||
**返回数据类型**:DOUBLE。
|
||||
**返回数据类型**:DOUBLE, DECIMAL。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**说明**: 当输入类型为DECIMAL类型时, 输出类型也为DECIMAL类型, 输出的precision和scale大小符合数据类型章节中的描述规则, 通过计算SUM类型和UINT64的除法得到结果类型, 若SUM的结果导致DECIMAL类型溢出, 则报DECIMAL OVERFLOW错误。
|
||||
|
||||
### COUNT
|
||||
|
||||
```sql
|
||||
|
@ -1805,12 +1808,14 @@ SUM(expr)
|
|||
|
||||
**功能说明**:统计表/超级表中某列的和。
|
||||
|
||||
**返回数据类型**:DOUBLE、BIGINT。
|
||||
**返回数据类型**:DOUBLE、BIGINT,DECIMAL。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**说明**: 输入类型为DECIMAL类型时, 输出类型为DECIMAL(38, scale), precision为当前支持的最大值, scale为输入类型的scale, 若SUM的结果溢出时, 报DECIMAL OVERFLOW错误.
|
||||
|
||||
### VAR_POP
|
||||
|
||||
```sql
|
||||
|
@ -2174,6 +2179,7 @@ ignore_null_values: {
|
|||
- INTERP 用于在指定时间断面获取指定列的记录值,使用时有专用语法(interp_clause),语法介绍见[参考链接](../select/#interp),使用示例见下文。
|
||||
- 当指定时间断面不存在符合条件的行数据时,INTERP 函数会根据 [FILL](../distinguished/#fill-子句) 参数的设定进行插值。
|
||||
- INTERP 作用于超级表时,会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
|
||||
- INTERP 在 FILL PREV/NEXT/NEAR 时,行为与窗口查询有所区别,当截面存在数据时,不会进行 FILL,即便当前值为 NULL。
|
||||
- INTERP 可以与伪列 `_irowts` 一起使用,返回插值点所对应的时间戳(v3.0.2.0 以后支持)。
|
||||
- INTERP 可以与伪列 `_isfilled` 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(v3.0.3.0 以后支持)。
|
||||
- 只有在使用 FILL PREV/NEXT/NEAR 模式时才可以使用伪列 `_irowts_origin`, 用于返回 `interp` 函数所使用的原始数据的时间戳列。若范围内无值, 则返回 NULL。`_irowts_origin` 在 v3.3.4.9 以后支持。
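下面是一个 INTERP 的使用示意(表名、列名与时间范围均为假设值):

```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM power.meters
PARTITION BY tbname
RANGE('2024-01-01 00:00:00', '2024-01-01 01:00:00') EVERY(10s) FILL(PREV);
```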
|
||||
|
|
|
@ -77,10 +77,10 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填
|
|||
|
||||
1. 不进行填充:NONE(默认填充模式)。
|
||||
2. VALUE 填充:固定值填充,此时需要指定填充的数值。例如 `FILL(VALUE, 1.23)`。这里需要注意,最终填充的值由相应列的类型决定,如 `FILL(VALUE, 1.23)`,相应列为 INT 类型,则填充值为 1,若查询列表中有多列需要 FILL,则需要给每一个 FILL 列指定 VALUE,如 `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`,注意,SELECT 表达式中只有包含普通列时才需要指定 FILL VALUE,如 `_wstart`、`_wstart+1a`、`now`、`1+1` 以及使用 `partition by` 时的 `partition key` (如 tbname)都不需要指定 VALUE,如 `timediff(last(ts), _wstart)` 则需要指定 VALUE。
|
||||
3. PREV 填充:使用前一个非 NULL 值填充数据。例如 FILL(PREV)。
|
||||
3. PREV 填充:使用前一个值填充数据。例如 FILL(PREV)。
|
||||
4. NULL 填充:使用 NULL 填充数据。例如 FILL(NULL)。
|
||||
5. LINEAR 填充:根据前后距离最近的非 NULL 值做线性插值填充。例如 FILL(LINEAR)。
|
||||
6. NEXT 填充:使用下一个非 NULL 值填充数据。例如 FILL(NEXT)。
|
||||
6. NEXT 填充:使用下一个值填充数据。例如 FILL(NEXT)。
|
||||
|
||||
以上填充模式中,除了 NONE 模式默认不填充值之外,其他模式在查询的整个时间范围内如果没有数据 FILL 子句将被忽略,即不产生填充数据,查询结果为空。这种行为在部分模式(PREV、NEXT、LINEAR)下具有合理性,因为在这些模式下没有数据意味着无法产生填充数值。而对另外一些模式(NULL、VALUE)来说,理论上是可以产生填充数值的,至于需不需要输出填充数值,取决于应用的需求。所以为了满足这类需要强制填充数据或 NULL 的应用的需求,同时不破坏现有填充模式的行为兼容性,从 v3.0.3.0 开始,增加了两种新的填充模式:
|
||||
|
||||
|
@ -104,7 +104,7 @@ NULL、NULL_F、VALUE、 VALUE_F 这几种填充模式针对不同场景区别
|
|||
|
||||
时间窗口又可分为滑动时间窗口和翻转时间窗口。
|
||||
|
||||
INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window )大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e] ,[t1s , t1e],[t2s, t2e] 是分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围 sliding time 标识 。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。
|
||||
INTERVAL 子句用于产生相等时间周期的窗口,SLIDING 用以指定窗口向前滑动的时间。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window )大小和每次前向增量时间(forward sliding times)。如图,[t0s, t0e] ,[t1s , t1e],[t2s, t2e] 是分别是执行三次连续查询的时间窗口范围,窗口的前向滑动的时间范围 sliding time 标识 。查询过滤、聚合等操作按照每个时间窗口为独立的单位执行。当 SLIDING 与 INTERVAL 相等的时候,滑动窗口即为翻转窗口。默认情况下,窗口是从 Unix time 0(1970-01-01 00:00:00 UTC)开始划分的;如果设置了 interval_offset,那么窗口的划分将从 “Unix time 0 + interval_offset” 开始。
|
||||
|
||||

|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ description: 流式计算的相关 SQL 的详细语法
|
|||
```sql
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name[(field1_name, field2_name [PRIMARY KEY], ...)] [TAGS (create_definition [, create_definition] ...)] SUBTABLE(expression) AS subquery [notification_definition]
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE]
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time | FORCE_WINDOW_CLOSE| CONTINUOUS_WINDOW_CLOSE [recalculate rec_time_val] ]
|
||||
WATERMARK time
|
||||
IGNORE EXPIRED [0|1]
|
||||
DELETE_MARK time
|
||||
|
@ -165,8 +165,11 @@ SELECT * from information_schema.`ins_streams`;
|
|||
2. WINDOW_CLOSE:窗口关闭时触发(窗口关闭由事件时间决定,可配合 watermark 使用)
|
||||
|
||||
3. MAX_DELAY time:若窗口关闭,则触发计算。若窗口未关闭,且未关闭时长超过 max delay 指定的时间,则触发计算。
|
||||
|
||||
4. FORCE_WINDOW_CLOSE:以操作系统当前时间为准,只计算当前关闭窗口的结果,并推送出去。窗口只会在被关闭的时刻计算一次,后续不会再重复计算。该模式当前只支持 INTERVAL 窗口(不支持滑动);FILL_HISTORY 必须为 0,IGNORE EXPIRED 必须为 1,IGNORE UPDATE 必须为 1;FILL 只支持 PREV、NULL、NONE、VALUE。
|
||||
|
||||
5. CONTINUOUS_WINDOW_CLOSE:窗口关闭时输出结果。修改、删除数据,并不会立即触发重算,每等待 rec_time_val 时长,会进行周期性重算。如果不指定 rec_time_val,那么重算周期是60分钟。如果重算的时间长度超过 rec_time_val,在本次重算后,自动开启下一次重算。该模式当前只支持 INTERVAL 窗口。如果使用 FILL,需要配置 adapter的相关信息:adapterFqdn、adapterPort、adapterToken。adapterToken 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`
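下面是一个 CONTINUOUS_WINDOW_CLOSE 触发模式的建流示意(未指定 recalculate 时重算周期为默认的 60 分钟;流名与目标表名仅为示例):

```sql
CREATE STREAM IF NOT EXISTS cwc_stream TRIGGER CONTINUOUS_WINDOW_CLOSE INTO cwc_result AS
  SELECT _wstart, COUNT(*) FROM power.meters INTERVAL(1m);
```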
|
||||
|
||||
由于窗口关闭是由事件时间决定的,如事件流中断、或持续延迟,则事件时间无法更新,可能导致无法得到最新的计算结果。
|
||||
|
||||
因此,流式计算提供了以事件时间结合处理时间计算的 MAX_DELAY 触发模式。MAX_DELAY 最小时间是 5s,如果低于 5s,创建流计算时会报错。
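下面是一个 MAX_DELAY 触发模式的建流示意(流名、目标表名与 5s 的延迟时长均为示例值,注意 MAX_DELAY 不能小于 5s):

```sql
CREATE STREAM IF NOT EXISTS max_delay_stream TRIGGER MAX_DELAY 5s INTO max_delay_result AS
  SELECT _wstart, COUNT(*) FROM power.meters INTERVAL(10s);
```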
|
||||
|
@ -524,6 +527,24 @@ CREATE STREAM avg_current_stream FILL_HISTORY 1
|
|||
#### 窗口失效相关字段
|
||||
|
||||
因为流计算过程中会遇到数据乱序、更新、删除等情况,可能造成已生成的窗口被删除,或者结果需要重新计算。此时会向通知地址发送一条 WINDOW_INVALIDATION 的通知,说明哪些窗口已经被删除。
|
||||
|
||||
这部分是 eventType 为 WINDOW_INVALIDATION 时,event 对象才有的字段。
|
||||
1. windowStart:长整型时间戳,表示窗口的开始时间,精度与结果表的时间精度一致。
|
||||
1. windowEnd: 长整型时间戳,表示窗口的结束时间,精度与结果表的时间精度一致。
|
||||
|
||||
## 流式计算对虚拟表的支持
|
||||
|
||||
从 v3.3.6.0 开始,流计算能够使用虚拟表(包括虚拟普通表、虚拟子表、虚拟超级表)作为数据源进行计算,语法和非虚拟表完全一致。
|
||||
|
||||
但是虚拟表的行为与非虚拟表存在差异,所以目前在使用流计算对虚拟表进行计算时存在以下限制:
|
||||
|
||||
1. 流计算中涉及的虚拟普通表/虚拟子表的 schema 不允许更改。
|
||||
1. 流计算过程中,如果修改虚拟表某一列对应的数据源,对流计算来说不生效。即:流计算仍只读取老的数据源。
|
||||
1. 流计算过程中,如果虚拟表某一列对应的原始表被删除,之后新建了同名的表和同名的列,流计算不会读取新表的数据。
|
||||
1. 流计算的 watermark 只能是 0,否则创建时就报错。
|
||||
1. 如果流计算的数据源是虚拟超级表,流计算任务启动后新增的子表不参与计算。
|
||||
1. 虚拟表的不同原始表的时间戳不完全一致,数据合并后可能会产生空值,暂不支持插值处理。
|
||||
1. 不处理数据的乱序、更新或删除。即:流创建时不能指定 `ignore update 0` 或者 `ignore expired 0`,否则报错。
|
||||
1. 不支持历史数据计算,即:流创建时不能指定 `fill_history 1`,否则报错。
|
||||
1. 不支持触发模式:MAX_DELAY, FORCE_WINDOW_CLOSE, CONTINUOUS_WINDOW_CLOSE。
|
||||
1. 不支持窗口类型:COUNT_WINDOW。
|
||||
|
|
|
@ -37,6 +37,7 @@ description: 可配置压缩算法
|
|||
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| decimal | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
|
||||
|
||||
## SQL 语法
|
||||
|
|
|
@ -826,6 +826,12 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一
|
|||
- res:[入参] 结果集。
|
||||
- **返回值**:非 `NULL`:成功,返回一个指向 TAOS_FIELD 结构体的指针,每个元素代表一列的元数据。`NULL`:失败。
|
||||
|
||||
- `TAOS_FIELD_E *taos_fetch_fields_e(TAOS_RES *res)`
|
||||
- **接口说明**:获取查询结果集每列数据的属性(列的名称、列的数据类型、列的长度),与 `taos_num_fields()` 配合使用,可用来解析 `taos_fetch_row()` 返回的一个元组(一行)的数据。TAOS_FIELD_E中除了TAOS_FIELD的基本信息外, 还包括了类型的`precision`和`scale`信息。
|
||||
- **参数说明**:
|
||||
- res:[入参] 结果集。
|
||||
- **返回值**:非 `NULL`:成功,返回一个指向 TAOS_FIELD_E 结构体的指针,每个元素代表一列的元数据。`NULL`:失败。
|
||||
|
||||
- `void taos_stop_query(TAOS_RES *res)`
|
||||
- **接口说明**:停止当前查询的执行。
|
||||
- **参数说明**:
|
||||
|
|
|
@ -121,6 +121,7 @@ JDBC 连接器可能报错的错误码包括 4 种:
|
|||
| 0x2378 | consumer create error | 创建数据订阅失败,请根据错误信息检查 taos log 进行问题定位。 |
|
||||
| 0x2379 | seek offset must not be a negative number | seek 接口参数不能为负值,请使用正确的参数 |
|
||||
| 0x237a | vGroup not found in result set | VGroup 没有分配给当前 consumer,由于 Rebalance 机制导致 Consumer 与 VGroup 不是绑定的关系 |
|
||||
| 0x2390 | background thread write error in Efficient Writing | 高效写入后台线程写入错误,可以停止写入,重建连接 |
|
||||
|
||||
- [TDengine Java Connector Error Code](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
|
||||
<!-- - [TDengine_ERROR_CODE](../error-code) -->
|
||||
|
@ -315,7 +316,15 @@ properties 中的配置参数如下:
|
|||
- TSDBDriver.PROPERTY_KEY_DISABLE_SSL_CERT_VALIDATION:关闭 SSL 证书验证 。仅在使用 WebSocket 连接时生效。true:启用,false:不启用。默认为 false。
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_APP_NAME:App 名称,可用于 `show connections` 查询结果显示。仅在使用 WebSocket 连接时生效。默认值为 java。
|
||||
- TSDBDriver.PROPERTY_KEY_APP_IP:App IP,可用于 `show connections` 查询结果显示。仅在使用 WebSocket 连接时生效。默认值为空。
|
||||
- TSDBDriver.PROPERTY_KEY_APP_IP:App IP,可用于 `show connections` 查询结果显示。仅在使用 WebSocket 连接时生效。默认值为空。
|
||||
|
||||
- TSDBDriver.PROPERTY_KEY_ASYNC_WRITE:高效写入模式,目前仅支持 `stmt` 方式。仅在使用 WebSocket 连接时生效。默认值为空,即不启用高效写入模式。
|
||||
- TSDBDriver.PROPERTY_KEY_BACKEND_WRITE_THREAD_NUM:高效写入模式下,后台写入线程数。仅在使用 WebSocket 连接时生效。默认值为 10。
|
||||
- TSDBDriver.PROPERTY_KEY_BATCH_SIZE_BY_ROW:高效写入模式下,写入数据的批大小,单位是行。仅在使用 WebSocket 连接时生效。默认值为 1000。
|
||||
- TSDBDriver.PROPERTY_KEY_CACHE_SIZE_BY_ROW:高效写入模式下,缓存的大小,单位是行。仅在使用 WebSocket 连接时生效。默认值为 10000。
|
||||
- TSDBDriver.PROPERTY_KEY_COPY_DATA:高效写入模式下,是否拷贝应用通过 addBatch 传入的二进制类型数据。仅在使用 WebSocket 连接时生效。默认值为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_STRICT_CHECK:高效写入模式下,是否校验表名长度和变长数据类型长度。仅在使用 WebSocket 连接时生效。默认值为 false。
|
||||
- TSDBDriver.PROPERTY_KEY_RETRY_TIMES:高效写入模式下,写入失败重试次数。仅在使用 WebSocket 连接时生效。默认值为 3。
|
||||
|
||||
此外对 JDBC 原生连接,通过指定 URL 和 Properties 还可以指定其他参数,比如日志级别、SQL 长度等。
|
||||
|
||||
|
|
|
@ -24,6 +24,7 @@ Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-conne
|
|||
|
||||
| Node.js 连接器 版本 | 主要变化 | TDengine 版本 |
|
||||
| ------------------| ----------------------| ----------------|
|
||||
| 3.1.5 | 密码支持特殊字符 | - |
|
||||
| 3.1.4 | 修改 readme | - |
|
||||
| 3.1.3 | 升级了 es5-ext 版本,解决低版本的漏洞 | - |
|
||||
| 3.1.2 | 对数据协议和解析进行了优化,性能得到大幅提升| - |
|
||||
|
|
|
@ -578,11 +578,14 @@ description: TDengine 服务端的错误码列表和详细说明
|
|||
|
||||
## virtual table
|
||||
|
||||
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
||||
|------------|---------------------------------------------------------|------------------------------------------------|----------------------------|
|
||||
| 0x80006200 | Virtual table scan 算子内部错误 | virtual table scan 算子内部逻辑错误,一般不会出现 | 具体查看client端的错误日志提示 |
|
||||
| 0x80006201 | Virtual table scan invalid downstream operator type | 由于生成的执行计划不对,导致 virtual table scan 算子的下游算子类型不正确 | 保留 explain 执行计划,联系开发处理 |
|
||||
| 0x80006202 | Virtual table prim timestamp column should not has ref | 虚拟表的时间戳主键列不应该有数据源,如果有,后续查询虚拟表的时候就会出现该错误 | 检查错误日志,联系开发处理 |
|
||||
| 0x80006203 | Create virtual child table must use virtual super table | 虚拟子表必须建在虚拟超级表下,否则就会出现该错误 | 创建虚拟子表的时候,USING 虚拟超级表 |
|
||||
| 错误码 | 错误描述 | 可能的出错场景或者可能的原因 | 建议用户采取的措施 |
|
||||
|------------|---------------------------------------------------------|------------------------------------------------|-------------------------|
|
||||
| 0x80006200 | Virtual table scan 算子内部错误 | virtual table scan 算子内部逻辑错误,一般不会出现 | 具体查看client端的错误日志提示 |
|
||||
| 0x80006201 | Virtual table scan invalid downstream operator type | 由于生成的执行计划不对,导致 virtual table scan 算子的下游算子类型不正确 | 保留 explain 执行计划,联系开发处理 |
|
||||
| 0x80006202 | Virtual table prim timestamp column should not has ref | 虚拟表的时间戳主键列不应该有数据源,如果有,后续查询虚拟表的时候就会出现该错误 | 检查错误日志,联系开发处理 |
|
||||
| 0x80006203 | Create virtual child table must use virtual super table | 虚拟子表必须建在虚拟超级表下,否则就会出现该错误 | 创建虚拟子表的时候,USING 虚拟超级表 |
|
||||
| 0x80006204 | Virtual table not support decimal type | 虚拟表不支持 decimal 类型 | 创建虚拟表时不使用 decimal 类型的列/tag |
|
||||
| 0x80006205 | Virtual table not support in STMT query and STMT insert | 不支持在 stmt 写入和查询中使用虚拟表 | 不在 stmt 写入和查询中使用虚拟表 |
|
||||
| 0x80006206 | Virtual table not support in Topic | 不支持在订阅中使用虚拟表 | 不在订阅中使用虚拟表 |
|
||||
| 0x80006207 | Virtual super table query not support origin table from different databases | 虚拟超级表不支持子表的数据源来自不同的数据库 | 确保虚拟超级表的子表的数据源都来自同一个数据库 |
|
||||
|
||||
|
|
|
@ -1,291 +1,293 @@
|
|||
---
|
||||
title: 夏令时使用指南
|
||||
description: TDengine 中关于夏令时使用问题的解释和建议
|
||||
---
|
||||
|
||||
## 背景
|
||||
|
||||
在时序数据库的使用中,有时会遇到使用夏令时的情况。我们将 TDengine 中使用夏令时的情况和问题进行分析说明,以便您在 TDengine 的使用中更加顺利。
|
||||
|
||||
## 定义
|
||||
|
||||
### 时区
|
||||
|
||||
时区是地球上使用相同标准时间的区域。由于地球的自转,为了保证各地的时间与当地的日出日落相协调,全球划分为多个时区。
|
||||
|
||||
### IANA 时区
|
||||
|
||||
IANA(Internet Assigned Numbers Authority)时区数据库,也称为 tz database,提供全球时区信息的标准参考。它是现代各类系统和软件处理时区相关操作的基础。
|
||||
|
||||
IANA 使用“区域/城市”格式(如 Europe/Berlin)来明确标识时区。
|
||||
|
||||
TDengine 在不同组件中均支持使用 IANA 时区(除 Windows taos.cfg 时区设置外)。
|
||||
|
||||
### 标准时间与当地时间
|
||||
|
||||
标准时间是根据地球上某个固定经线确定的时间。它为各个时区提供了一个统一的参考点。
|
||||
|
||||
- 格林尼治标准时间(GMT):历史上使用的参考时间,位于 0° 经线。
|
||||
- 协调世界时(UTC):现代的时间标准,类似于GMT,但更加精确。
|
||||
|
||||
标准时间与时区的关系如下:
|
||||
|
||||
- 基准:标准时间(如 UTC)是时区设定的基准点。
|
||||
- 偏移量:不同时区通过相对于标准时间的偏移量来定义。例如,UTC+1 表示比 UTC 快 1 小时。
|
||||
- 区域划分:全球被划分为多个时区,每个时区使用一个或多个标准时间。
|
||||
|
||||
相对于标准时间,每个地区根据其所在时区设定其当地时间:
|
||||
|
||||
- 时区偏移:当地时间等于标准时间加上该时区的偏移量。例如,UTC+2 表示比 UTC 时间快 2 小时。
|
||||
- 夏令时(DST):某些地区在特定时间段调整当地时间,例如将时钟拨快一小时。详见下节。
|
||||
|
||||
### 夏令时
|
||||
|
||||
夏令时(Daylight Saving Time,DST)是一种通过将时间提前一小时,以充分利用日光、节约能源的制度。通常在春季开始,秋季结束。夏令时的具体开始和结束时间因地区而异。以下均以柏林时间为例,对夏令时及其影响做说明。
|
||||
|
||||
按照这个规则(柏林在 3 月最后一个周日 02:00 将时钟拨快一小时到 03:00,在 10 月最后一个周日 03:00 将时钟拨回一小时到 02:00),可以看到:
|
||||
|
||||
- 柏林当地时间 2024 年 03 月 31 日 02:00:00 到 03:00:00 (不含 03:00:00)之间的时间不存在(跳变)。
|
||||
- 柏林当地时间 2024 年 10 月 27 日 02:00:00 到 03:00:00 (不含 03:00:00)之间的时间出现了两次。
|
||||
|
||||
#### 夏令时与 IANA 时区数据库
|
||||
|
||||
- 记录规则:IANA 时区数据库详细记录了各地的夏令时规则,包括开始和结束的日期与时间。
|
||||
- 自动调整:许多操作系统和软件利用 IANA 数据库来自动处理夏令时的调整。
|
||||
- 历史变更:IANA 数据库还追踪历史上的夏令时变化,以确保准确性。
|
||||
|
||||
#### 夏令时与时间戳转换
|
||||
|
||||
- 时间戳转为当地时间是确定的。例如,1729990654 为柏林时间**夏令时** `2024-10-27 02:57:34`,1729994254 为柏林时间**冬令时** `2024-10-27 02:57:34`(这两个本地时间除时间偏移量外是一样的)。
|
||||
- 不指定时间偏移量时,当地时间转为时间戳是不确定的。夏令时跳过的时间不存在会造成无法转换成时间戳,如 **柏林时间** `2024-03-31 02:34:56` 不存在,所以无法转换为时间戳。夏令时结束时重复导致无法确定是哪个时间戳,如 `2024-10-27 02:57:34` 不指定时间偏移量无法确定 是 1729990654 还是 1729994254。指定时间偏移量才能确定时间戳,如 `2024-10-27 02:57:34 CEST(+02:00) `,指定了夏令时 `2024-10-27 02:57:34` 时间戳 1729990654 。
|
||||
|
||||
### RFC3339 时间格式
|
||||
|
||||
RFC 3339 是一种互联网时间格式标准,用于表示日期和时间。它基于 ISO 8601 标准,但更具体地规定了一些格式细节。
|
||||
|
||||
其格式如下:
|
||||
|
||||
- 基本格式:`YYYY-MM-DDTHH:MM:SSZ`
|
||||
- 时区表示:
|
||||
- Z 表示协调世界时(UTC)。
|
||||
- 偏移量格式,例如 +02:00,表示与 UTC 的时差。
|
||||
|
||||
通过明确的时区偏移,RFC 3339 格式可以在全球范围内准确地解析和比较时间。
|
||||
|
||||
RFC 3339 的优势包括:
|
||||
|
||||
- 标准化:提供统一的格式,方便跨系统数据交换。
|
||||
- 清晰性:明确时区信息,避免时间误解。
|
||||
|
||||
TDengine 在 REST API 和 Explorer UI 中,均使用 RFC3339 格式进行展示。在 SQL 语句中,可使用 RFC3339 格式写入时间戳数据:
|
||||
|
||||
```sql
|
||||
insert into t1 values('2024-10-27T01:59:59.000Z', 0);
|
||||
select * from t1 where ts >= '2024-10-27T01:59:59.000Z';
|
||||
```
|
||||
|
||||
### 未定义行为
|
||||
|
||||
未定义行为(Undefined Behavior)是指特定代码或操作没有明确规定的结果,也不会对该结果作出兼容性的保证,TDengine 可能在某个版本后对当前的行为作出修改而不会通知用户。所以,在 TDengine 中,用户不可依赖当前未定义的行为进行判断或应用。
|
||||
|
||||
## 夏令时在 TDengine 中的写入与查询
|
||||
|
||||
我们使用下表来展示夏令时在写入和查询中的影响。
|
||||
|
||||

|
||||
|
||||
### 表格说明
|
||||
|
||||
- **TIMESTAMP**:TDengine 中使用 64位整数来存储原始时间戳。
|
||||
- **UTC**:时间戳对应的 UTC 时间表示。
|
||||
- **Europe/Berlin**:表示时区 Europe/Berlin 对应的 RFC3339 格式时间。
|
||||
- **Local**:表示时区 Europe/Berlin 对应的当地时间(不含时区)。
|
||||
|
||||
### 表格分析
|
||||
|
||||
- 在**夏令时开始**(柏林时间 3 月 31 日 02:00)时,时间直接从 02:00 跳到 03:00(往后跳一小时)。
|
||||
- 浅绿色是夏令时开始前一小时的时间戳;
|
||||
- 深绿色是夏令时开始后一小时的时间戳;
|
||||
- 红色为 TDengine 数据库中插入了不存在的当地时间:
|
||||
- 使用 SQL `INSERT INTO t1 values('2024-03-31 02:59:59',..)` 插入 `2024-03-31 02:00:00` 到 `2024-03-31 02:59:59` 的数据会被自动调整为 -1000(在 TDengine 中属于未定义行为,当前该值与数据库精度 precision 有关,毫秒数据库为 -1000,微秒数据库为 -1000000,纳秒数据库为 -1000000000),因为那一时刻在本地时间中不存在;
|
||||
- 在**夏令时结束**(柏林时间 10 月 27 日 03:00)时,时间从 03:00 跳到 02:00 (往前跳一小时)。
|
||||
- 浅蓝色表示时钟跳变前一小时的时间戳;
|
||||
- 深蓝色表示时钟跳变后一小时内的时间戳,其无时区的当地时间与上一小时一致。
|
||||
- 紫色表示时钟跳变一小时后的时间戳;
|
||||
- **当地时间变化**:可见,由于夏令时的调整而导致了当地时间的变化,可能导致某些时间段出现重复或缺失。
|
||||
- **UTC 时间不变**:UTC 时间保持不变,确保了时间的一致性和顺序性。
|
||||
- **RFC3339**:RFC3339 格式时间显示了时间偏移量的变化,在夏令时开始后变为 +02:00,结束后变为 +01:00 。
|
||||
- **条件查询**:
|
||||
- **夏令时开始**时,跳过的时间(`[03-31 02:00:00,03-31 03:00:00)`)不存在,所以在使用该时间进行查询时,行为不确定:`SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'`(不存在的本地时间戳被转换为 `-1000`):
|
||||
|
||||
```sql
|
||||
taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59';
|
||||
ts |
|
||||
=================
|
||||
-1000 |
|
||||
Query OK, 1 row(s) in set (0.003635s)
|
||||
```
|
||||
|
||||
当不存在的时间戳与存在的时间戳共同使用时,其结果同样不符合预期,以下为起始本地时间不存在:
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 03:59:59';
|
||||
ts | to_iso8601(ts,'Z') |
|
||||
==================================================
|
||||
-1000 | 1969-12-31T23:59:59.000Z |
|
||||
1711843200000 | 2024-03-31T00:00:00.000Z |
|
||||
1711846799000 | 2024-03-31T00:59:59.000Z |
|
||||
1711846800000 | 2024-03-31T01:00:00.000Z |
|
||||
1711846801000 | 2024-03-31T01:00:01.000Z |
|
||||
Query OK, 5 row(s) in set (0.003339s)
|
||||
```
|
||||
|
||||
以下语句中第一个 SQL 查询截止时间不存在,第二个截止时间存在,第一个 SQL 查询结果不符合预期:
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00';
|
||||
Query OK, 0 row(s) in set (0.000930s)
|
||||
|
||||
taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59';
|
||||
ts | to_iso8601(ts,'Z') |
|
||||
==================================================
|
||||
1711843200000 | 2024-03-31T00:00:00.000Z |
|
||||
1711846799000 | 2024-03-31T00:59:59.000Z |
|
||||
Query OK, 2 row(s) in set (0.001227s)
|
||||
```
|
||||
|
||||
- 夏令时结束时,跳变的时间(`[10-27 02:00:00,10-27 03:00:00)` 不包含 `10-27 03:00:00`)重复了两次,TDengine 在使用该区间内的时间戳进行查询时,也属于未定义行为。
|
||||
- 查询 `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` 之间的数据结果,包含了两次重复的时间戳和 `2024-10-27 03:00:00` 这个时间点的数据:
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00';
|
||||
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
=======================================================================================
|
||||
1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
1729994400000 | 2024-10-27T02:00:00.000Z | 2024-10-27 03:00:00 |
|
||||
Query OK, 5 row(s) in set (0.001370s)
|
||||
````
|
||||
|
||||
- 但以下查询 [2024-10-27 02:00:00.000,2024-10-27 02:57:34.999] 区间只能查询到第一个2024-10-27 02:00:00 时间点的数据:
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999';
|
||||
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
=======================================================================================
|
||||
1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
Query OK, 1 row(s) in set (0.004480s)
|
||||
```
|
||||
|
||||
- 以下查询 `[2024-10-27 02:00:01,2024-10-27 02:57:35]` 却能查到 3 条数据(包含一条 02:59:59 的当地时间数据):
|
||||
|
||||
```sql
|
||||
taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';;
|
||||
ts | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
================================================================================================
|
||||
2024-10-27 02:00:00.000 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
2024-10-27 02:59:59.000 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
2024-10-27 02:00:00.000 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
Query OK, 3 row(s) in set (0.004428s)
|
||||
```
|
||||
|
||||
## 总结与建议
|
||||
|
||||
### 总结
|
||||
|
||||
仅针对使用当地时间带来的影响作说明,使用 UNIX 时间戳或 RFC3339 无影响。
|
||||
|
||||
- 写入:
|
||||
- 无法写入夏令时跳变时不存在的时间数据。
|
||||
- 写入夏令时跳变时重复的时间是未定义行为。
|
||||
- 查询:
|
||||
- 查询条件指定夏令时开始时跳变的时间,其查询结果为未定义行为。
|
||||
- 查询条件指定夏令时结束时重复的时间,其查询结果为未定义行为。
|
||||
- 显示:
|
||||
- 带时区显示不受影响。
|
||||
- 显示当地时间是准确的,但夏令时结束时重复的时间会无法区分。
|
||||
- 用户应谨慎使用不带时区的时间进行展示和应用。
|
||||
|
||||
### 建议
|
||||
|
||||
为避免夏令时给查询和写入造成不必要的影响,在 TDengine 中,建议使用明确的时间偏移量进行写入和查询。
|
||||
|
||||
- 使用 UNIX 时间戳:使用 UNIX 时间戳可避免时区问题。
|
||||
|
||||
| TIMESTAMP | UTC | Europe/Berlin | Local |
|
||||
| ------------: | :----------------------: | :---------------------------: | :-----------------: |
|
||||
| 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 |
|
||||
| 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 |
|
||||
|
||||
```sql
|
||||
taos> insert into t1 values(1711846799000, 1)(1711846800000, 2);
|
||||
Insert OK, 2 row(s) affected (0.001434s)
|
||||
|
||||
taos> select * from t1 where ts between 1711846799000 and 1711846800000;
|
||||
ts | v1 |
|
||||
===============================
|
||||
1711846799000 | 1 |
|
||||
1711846800000 | 2 |
|
||||
Query OK, 2 row(s) in set (0.003503s)
|
||||
```
|
||||
|
||||
- 使用 RFC3339 时间格式:带时区偏移量的 RFC3339 时间格式可以有效避免夏令时的不确定性。
|
||||
|
||||
| TIMESTAMP | UTC | Europe/Berlin | Local |
|
||||
| ------------: | :----------------------: | :---------------------------: | :-----------------: |
|
||||
| 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 |
|
||||
| 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 |
|
||||
| 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 |
|
||||
| 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 |
|
||||
|
||||
```sql
|
||||
taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1)
|
||||
('2024-10-27T02:59:59.000+02:00', 2)
|
||||
('2024-10-27T02:00:00.000+01:00', 3)
|
||||
('2024-10-27T02:59:59.000+01:00', 4);
|
||||
Insert OK, 4 row(s) affected (0.001514s)
|
||||
|
||||
taos> SELECT *,
|
||||
to_iso8601(ts,'Z'),
|
||||
to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
|
||||
WHERE ts >= '2024-10-27T02:00:00.000+02:00'
|
||||
AND ts <= '2024-10-27T02:59:59.000+01:00';
|
||||
ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
=====================================================================================================
|
||||
1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
1729990800000 | 3 | 2024-10-27T01:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729994399000 | 4 | 2024-10-27T01:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
Query OK, 4 row(s) in set (0.004275s)
|
||||
|
||||
taos> SELECT *,
|
||||
to_iso8601(ts,'Z'),
|
||||
to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
|
||||
WHERE ts >= '2024-10-27T02:00:00.000+02:00'
|
||||
AND ts <= '2024-10-27T02:59:59.000+02:00';
|
||||
ts | v1 | to_iso8601(ts,'Z') | to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
|
||||
=====================================================================================================
|
||||
1729987200000 | 1 | 2024-10-27T00:00:00.000Z | 2024-10-27 02:00:00 |
|
||||
1729990799000 | 2 | 2024-10-27T00:59:59.000Z | 2024-10-27 02:59:59 |
|
||||
Query OK, 2 row(s) in set (0.004275s)
|
||||
```
|
||||
|
||||
- 查询时注意时区设定:在查询和显示时,如果需要本地时间,务必考虑夏令时的影响。
|
||||
- taosAdapter:使用 REST API 时,支持设置 IANA 时区,结果使用 RFC3339 格式返回。
|
||||
|
||||
```shell
|
||||
$ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin'\
|
||||
-d "select ts from tz1.t1"
|
||||
{"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10}
|
||||
```
|
||||
|
||||
- Explorer:使用 Explorer 页面进行 SQL 查询时,用户可配置客户端时区,以 RFC3339 格式显示。
|
||||
|
||||

|
||||
|
||||
## 参考文档
|
||||
|
||||
- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones)
|
||||
- RFC3339: [https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339)
|
||||
---
title: Daylight Saving Time Usage Guide
description: Explanation of and recommendations for daylight saving time issues in TDengine
---

## Background

When working with a time-series database, you will sometimes have to deal with daylight saving time (DST). This document analyzes how DST affects writing to and querying TDengine, and gives recommendations so that your use of TDengine goes more smoothly.

## Definitions

### Time Zone

A time zone is a region of the Earth that uses the same standard time. Because of the Earth's rotation, the world is divided into multiple time zones so that local time stays roughly aligned with local sunrise and sunset.
### IANA Time Zone

The IANA (Internet Assigned Numbers Authority) time zone database, also known as the tz database, is the standard reference for time zone information worldwide. It is the foundation that modern systems and software rely on for time zone handling.

IANA identifies time zones unambiguously with the "Area/City" format, for example Europe/Berlin.

TDengine supports IANA time zones in all of its components, except for the time zone setting in taos.cfg on Windows.
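As a quick illustration of what an IANA name gives you, the sketch below uses Python's standard-library `zoneinfo` module (Python 3.9 or later, independent of TDengine) to show that the single name Europe/Berlin resolves to different UTC offsets depending on the date:

```python
from datetime import datetime, timezone
from zoneinfo import ZoneInfo  # standard library since Python 3.9

berlin = ZoneInfo("Europe/Berlin")

# The same IANA zone yields different offsets before and after the DST switch.
winter = datetime(2024, 1, 15, 12, 0, tzinfo=timezone.utc).astimezone(berlin)
summer = datetime(2024, 7, 15, 12, 0, tzinfo=timezone.utc).astimezone(berlin)

print(winter.isoformat())  # 2024-01-15T13:00:00+01:00
print(summer.isoformat())  # 2024-07-15T14:00:00+02:00
```

This is why an IANA name is preferable to a fixed numeric offset: the DST rules travel with the name.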
### Standard Time and Local Time

Standard time is defined relative to a fixed meridian on Earth and provides a uniform reference point for every time zone.

- Greenwich Mean Time (GMT): the historical reference time, based on the 0° meridian.
- Coordinated Universal Time (UTC): the modern time standard, similar to GMT but more precise.

Standard time relates to time zones as follows:

- Reference: standard time (such as UTC) is the baseline from which time zones are defined.
- Offset: each time zone is defined by its offset from standard time; for example, UTC+1 is one hour ahead of UTC.
- Regions: the world is divided into many time zones, each using one or more standard offsets.

Relative to standard time, each region sets its local time according to its time zone:

- Time zone offset: local time equals standard time plus the zone's offset; for example, UTC+2 is two hours ahead of UTC.
- Daylight saving time (DST): some regions adjust local time during part of the year, for example by moving the clock forward one hour. See the next section.
### Daylight Saving Time

Daylight saving time (DST) moves the clock forward by one hour to make better use of daylight and save energy. It usually starts in spring and ends in autumn. The exact start and end dates differ by region. The examples below use Berlin time to illustrate DST and its effects.



Under these rules:

- In Berlin local time, the times between 02:00:00 and 03:00:00 (excluding 03:00:00) on 2024-03-31 do not exist (the clock jumps forward).
- In Berlin local time, the times between 02:00:00 and 03:00:00 (excluding 03:00:00) on 2024-10-27 occur twice.

#### DST and the IANA Time Zone Database

- Recorded rules: the IANA time zone database records each region's DST rules in detail, including start and end dates and times.
- Automatic adjustment: many operating systems and applications use the IANA database to handle DST adjustments automatically.
- Historical changes: the IANA database also tracks historical DST changes to keep conversions accurate.

#### DST and Timestamp Conversion

- Converting a timestamp to local time is deterministic. For example, 1729990654 is Berlin **summer time** `2024-10-27 02:57:34`, and 1729994254 is Berlin **winter time** `2024-10-27 02:57:34` (the two local times differ only in their UTC offset).
- Without an explicit offset, converting local time to a timestamp is not deterministic. A local time skipped by the DST jump cannot be converted at all: **Berlin time** `2024-03-31 02:34:56` does not exist, so it has no timestamp. A local time repeated when DST ends is ambiguous: without an offset, `2024-10-27 02:57:34` could mean either 1729990654 or 1729994254. Only an explicit offset makes the timestamp unique; for example, `2024-10-27 02:57:34 CEST (+02:00)` selects the summer-time instant, timestamp 1729990654 (see the sketch after this list).
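The sketch below makes both directions of the conversion concrete. It uses only Python's standard `zoneinfo` module and is independent of TDengine; the `fold` attribute (PEP 495) is what disambiguates the repeated local hour:

```python
from datetime import datetime
from zoneinfo import ZoneInfo

berlin = ZoneInfo("Europe/Berlin")

# Timestamp -> local time is deterministic.
for ts in (1729990654, 1729994254):
    print(ts, datetime.fromtimestamp(ts, tz=berlin).isoformat())
# 1729990654 2024-10-27T02:57:34+02:00   (summer time, CEST)
# 1729994254 2024-10-27T02:57:34+01:00   (winter time, CET)

# Local time -> timestamp is ambiguous during the repeated hour:
# fold=0 picks the first (CEST) occurrence, fold=1 the second (CET).
first = datetime(2024, 10, 27, 2, 57, 34, tzinfo=berlin, fold=0)
second = datetime(2024, 10, 27, 2, 57, 34, tzinfo=berlin, fold=1)
print(int(first.timestamp()), int(second.timestamp()))
# 1729990654 1729994254
```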
### RFC 3339 Time Format

RFC 3339 is an Internet standard for representing dates and times. It is based on ISO 8601 but specifies some formatting details more strictly.

Its format is:

- Basic form: `YYYY-MM-DDTHH:MM:SSZ`
- Time zone notation:
  - Z denotes Coordinated Universal Time (UTC).
  - An offset such as +02:00 denotes the difference from UTC.

Because the offset is explicit, RFC 3339 timestamps can be parsed and compared accurately anywhere in the world.

The advantages of RFC 3339 include:

- Standardization: a uniform format that simplifies data exchange between systems.
- Clarity: explicit time zone information avoids misinterpretation.

TDengine displays times in RFC 3339 format in both the REST API and the Explorer UI. In SQL statements, timestamps can also be written in RFC 3339 format:

```sql
insert into t1 values('2024-10-27T01:59:59.000Z', 0);
select * from t1 where ts >= '2024-10-27T01:59:59.000Z';
```
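Because the offset is part of the string, two RFC 3339 values can be compared without any external time zone knowledge. A minimal sketch with Python's standard library (outside TDengine, assuming Python 3.7+ for offset-aware `fromisoformat`) reuses the two Berlin instants from the DST section:

```python
from datetime import datetime

# Explicit offsets make the absolute instants unambiguous, so direct comparison works.
a = datetime.fromisoformat("2024-10-27T02:57:34+02:00")  # summer time
b = datetime.fromisoformat("2024-10-27T02:57:34+01:00")  # winter time

print(a < b)                                    # True: +02:00 is the earlier instant
print(int(a.timestamp()), int(b.timestamp()))   # 1729990654 1729994254
```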
### Undefined Behavior

Undefined behavior means that the result of a particular operation is not specified and no compatibility guarantee is made for it; TDengine may change the current behavior in a later version without notice. Therefore, do not rely on currently undefined behavior in TDengine for any decision or application logic.
## Writing and Querying Across DST in TDengine

The following table shows the effect of DST on writes and queries.



### Table Legend

- **TIMESTAMP**: TDengine stores the raw timestamp as a 64-bit integer.
- **UTC**: the UTC representation of that timestamp.
- **Europe/Berlin**: the corresponding RFC 3339 time in the Europe/Berlin time zone.
- **Local**: the corresponding local time in Europe/Berlin, without time zone information.
### Table Analysis

- When **DST starts** (Berlin time, March 31 at 02:00), the clock jumps straight from 02:00 to 03:00 (one hour forward).
  - Light green: timestamps in the hour before DST starts.
  - Dark green: timestamps in the hour after DST starts.
  - Red: a nonexistent local time inserted into TDengine. Using SQL such as `INSERT INTO t1 values('2024-03-31 02:59:59',..)` to insert local times between `2024-03-31 02:00:00` and `2024-03-31 02:59:59` stores -1000 instead, because those local times do not exist. (This is undefined behavior in TDengine; the stored value currently depends on the database precision: -1000 for millisecond, -1000000 for microsecond, and -1000000000 for nanosecond precision.)
- When **DST ends** (Berlin time, October 27 at 03:00), the clock jumps from 03:00 back to 02:00 (one hour backward).
  - Light blue: timestamps in the hour before the jump.
  - Dark blue: timestamps in the hour after the jump; their local time (without a time zone) repeats the previous hour.
  - Purple: timestamps one hour after the jump.
- **Local time changes**: the DST adjustment changes local time, so some periods are repeated and others are missing.
- **UTC unchanged**: UTC is unaffected, which preserves consistency and ordering.
- **RFC 3339**: the RFC 3339 representation shows the offset change, becoming +02:00 after DST starts and +01:00 after it ends.
- **Conditional queries**:
  - When **DST starts**, the skipped hour (`[03-31 02:00:00, 03-31 03:00:00)`) does not exist, so queries that use these local times behave unpredictably, for example `SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59'` (the nonexistent local timestamps are converted to `-1000`):

    ```sql
    taos> SELECT ts FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 02:59:59';
             ts      |
    =================
               -1000 |
    Query OK, 1 row(s) in set (0.003635s)
    ```

    When a nonexistent local timestamp is combined with existing ones, the result is equally unexpected; here the start of the range does not exist:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 02:00:00' AND '2024-03-31 03:59:59';
             ts      |      to_iso8601(ts,'Z')       |
    ==================================================
               -1000 | 1969-12-31T23:59:59.000Z      |
       1711843200000 | 2024-03-31T00:00:00.000Z      |
       1711846799000 | 2024-03-31T00:59:59.000Z      |
       1711846800000 | 2024-03-31T01:00:00.000Z      |
       1711846801000 | 2024-03-31T01:00:01.000Z      |
    Query OK, 5 row(s) in set (0.003339s)
    ```

    In the next two statements, the end time of the first query does not exist while the end time of the second does; the first query therefore returns an unexpected result:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 02:00:00';
    Query OK, 0 row(s) in set (0.000930s)

    taos> SELECT ts, to_iso8601(ts,'Z') FROM t1 WHERE ts BETWEEN '2024-03-31 01:00:00' AND '2024-03-31 01:59:59';
             ts      |      to_iso8601(ts,'Z')       |
    ==================================================
       1711843200000 | 2024-03-31T00:00:00.000Z      |
       1711846799000 | 2024-03-31T00:59:59.000Z      |
    Query OK, 2 row(s) in set (0.001227s)
    ```
  - When **DST ends**, the repeated hour (`[10-27 02:00:00, 10-27 03:00:00)`, excluding `10-27 03:00:00`) occurs twice, and querying with local times in this range is likewise undefined behavior.

    A query over `[2024-10-27 02:00:00, 2024-10-27 03:00:00]` returns both occurrences of the repeated hour plus the row at `2024-10-27 03:00:00`:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts BETWEEN '2024-10-27 02:00:00' AND '2024-10-27 03:00:00';
             ts      |      to_iso8601(ts,'Z')       |  to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
    =======================================================================================
       1729987200000 | 2024-10-27T00:00:00.000Z      | 2024-10-27 02:00:00                 |
       1729990799000 | 2024-10-27T00:59:59.000Z      | 2024-10-27 02:59:59                 |
       1729990800000 | 2024-10-27T01:00:00.000Z      | 2024-10-27 02:00:00                 |
       1729994399000 | 2024-10-27T01:59:59.000Z      | 2024-10-27 02:59:59                 |
       1729994400000 | 2024-10-27T02:00:00.000Z      | 2024-10-27 03:00:00                 |
    Query OK, 5 row(s) in set (0.001370s)
    ```

    The following query over `[2024-10-27 02:00:00.000, 2024-10-27 02:57:00.999]` returns only the first `2024-10-27 02:00:00` row:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:00.999';
             ts      |      to_iso8601(ts,'Z')       |  to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
    =======================================================================================
       1729987200000 | 2024-10-27T00:00:00.000Z      | 2024-10-27 02:00:00                 |
    Query OK, 1 row(s) in set (0.004480s)
    ```

    Yet the following query over `[2024-10-27 02:00:00, 2024-10-27 02:57:35]` returns 3 rows, including one whose local time is 02:59:59:

    ```sql
    taos> SELECT ts, to_iso8601(ts,'Z'), TO_CHAR(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1 WHERE ts >= '2024-10-27 02:00:00' AND ts <= '2024-10-27 02:57:35';
                 ts           |      to_iso8601(ts,'Z')       |  to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
    ================================================================================================
     2024-10-27 02:00:00.000  | 2024-10-27T00:00:00.000Z      | 2024-10-27 02:00:00                 |
     2024-10-27 02:59:59.000  | 2024-10-27T00:59:59.000Z      | 2024-10-27 02:59:59                 |
     2024-10-27 02:00:00.000  | 2024-10-27T01:00:00.000Z      | 2024-10-27 02:00:00                 |
    Query OK, 3 row(s) in set (0.004428s)
    ```
## Summary and Recommendations

### Summary

The effects described here apply only when local times are used; UNIX timestamps and RFC 3339 times are unaffected.

- Writes:
  - Local times that do not exist during the DST jump cannot be written.
  - Writing a local time that is repeated when DST ends is undefined behavior.
- Queries:
  - Query conditions that use local times skipped when DST starts produce undefined results.
  - Query conditions that use local times repeated when DST ends produce undefined results.
- Display:
  - Display with a time zone offset is unaffected.
  - Display of local time is accurate, but the repeated hour when DST ends cannot be distinguished.
  - Be cautious when displaying or processing times without time zone information.
### Recommendations

To avoid unnecessary DST-related problems when writing and querying, use an explicit time offset with TDengine.

- Use UNIX timestamps: UNIX timestamps avoid time zone issues entirely.

  | TIMESTAMP     | UTC                      | Europe/Berlin                 | Local               |
  | ------------: | :----------------------: | :---------------------------: | :-----------------: |
  | 1711846799000 | 2024-03-31T00:59:59.000Z | 2024-03-31T01:59:59.000+01:00 | 2024-03-31 01:59:59 |
  | 1711846800000 | 2024-03-31T01:00:00.000Z | 2024-03-31T03:00:00.000+02:00 | 2024-03-31 03:00:00 |

  ```sql
  taos> insert into t1 values(1711846799000, 1)(1711846800000, 2);
  Insert OK, 2 row(s) affected (0.001434s)

  taos> select * from t1 where ts between 1711846799000 and 1711846800000;
           ts      |     v1      |
  ===============================
     1711846799000 |           1 |
     1711846800000 |           2 |
  Query OK, 2 row(s) in set (0.003503s)
  ```
- Use the RFC 3339 time format: an RFC 3339 time with an explicit offset avoids DST ambiguity.

  | TIMESTAMP     | UTC                      | Europe/Berlin                 | Local               |
  | ------------: | :----------------------: | :---------------------------: | :-----------------: |
  | 1729987200000 | 2024-10-27T00:00:00.000Z | 2024-10-27T02:00:00.000+02:00 | 2024-10-27 02:00:00 |
  | 1729990799000 | 2024-10-27T00:59:59.000Z | 2024-10-27T02:59:59.000+02:00 | 2024-10-27 02:59:59 |
  | 1729990800000 | 2024-10-27T01:00:00.000Z | 2024-10-27T02:00:00.000+01:00 | 2024-10-27 02:00:00 |
  | 1729994399000 | 2024-10-27T01:59:59.000Z | 2024-10-27T02:59:59.000+01:00 | 2024-10-27 02:59:59 |

  ```sql
  taos> insert into t1 values ('2024-10-27T02:00:00.000+02:00', 1)
                              ('2024-10-27T02:59:59.000+02:00', 2)
                              ('2024-10-27T02:00:00.000+01:00', 3)
                              ('2024-10-27T02:59:59.000+01:00', 4);
  Insert OK, 4 row(s) affected (0.001514s)

  taos> SELECT *,
               to_iso8601(ts,'Z'),
               to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
        WHERE ts >= '2024-10-27T02:00:00.000+02:00'
          AND ts <= '2024-10-27T02:59:59.000+01:00';
           ts      |     v1      |      to_iso8601(ts,'Z')       |  to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
  =====================================================================================================
     1729987200000 |           1 | 2024-10-27T00:00:00.000Z      | 2024-10-27 02:00:00                 |
     1729990799000 |           2 | 2024-10-27T00:59:59.000Z      | 2024-10-27 02:59:59                 |
     1729990800000 |           3 | 2024-10-27T01:00:00.000Z      | 2024-10-27 02:00:00                 |
     1729994399000 |           4 | 2024-10-27T01:59:59.000Z      | 2024-10-27 02:59:59                 |
  Query OK, 4 row(s) in set (0.004275s)

  taos> SELECT *,
               to_iso8601(ts,'Z'),
               to_char(ts, 'YYYY-MM-DD HH:mi:ss') FROM t1
        WHERE ts >= '2024-10-27T02:00:00.000+02:00'
          AND ts <= '2024-10-27T02:59:59.000+02:00';
           ts      |     v1      |      to_iso8601(ts,'Z')       |  to_char(ts, 'YYYY-MM-DD HH:mi:ss') |
  =====================================================================================================
     1729987200000 |           1 | 2024-10-27T00:00:00.000Z      | 2024-10-27 02:00:00                 |
     1729990799000 |           2 | 2024-10-27T00:59:59.000Z      | 2024-10-27 02:59:59                 |
  Query OK, 2 row(s) in set (0.004275s)
  ```
- Mind the time zone setting when querying: if you need local time for queries or display, be sure to account for DST.
  - taosAdapter: the REST API supports specifying an IANA time zone, and results are returned in RFC 3339 format (a Python sketch of the same call follows this list).

    ```shell
    $ curl -uroot:taosdata 'localhost:6041/rest/sql?tz=Europe/Berlin'\
      -d "select ts from tz1.t1"
    {"code":0,"column_meta":[["ts","TIMESTAMP",8]],"data":[["1970-01-01T00:59:59.000+01:00"],["2024-03-31T01:00:00.000+01:00"],["2024-03-31T01:59:59.000+01:00"],["2024-03-31T03:00:00.000+02:00"],["2024-03-31T03:00:01.000+02:00"],["2024-10-27T02:00:00.000+02:00"],["2024-10-27T02:59:59.000+02:00"],["2024-10-27T02:00:00.000+01:00"],["2024-10-27T02:59:59.000+01:00"],["2024-10-27T03:00:00.000+01:00"]],"rows":10}
    ```

  - Explorer: when running SQL in the Explorer page, you can configure the client time zone, and results are displayed in RFC 3339 format.

    
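The sketch below issues the same REST call from Python instead of curl. It assumes the third-party `requests` package is installed and that taosAdapter is listening on localhost:6041 with the default root/taosdata credentials, exactly as in the curl example above:

```python
import requests

# Same query as the curl example: ask taosAdapter to render timestamps
# in the Europe/Berlin zone, so the RFC 3339 offsets reflect DST.
resp = requests.post(
    "http://localhost:6041/rest/sql",
    params={"tz": "Europe/Berlin"},
    data="select ts from tz1.t1",
    auth=("root", "taosdata"),
    timeout=10,
)
resp.raise_for_status()

for row in resp.json()["data"]:
    print(row[0])  # e.g. 2024-10-27T02:59:59.000+02:00, then ...+01:00 after DST ends
```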
## References

- IANA Time Zone Database: [https://www.iana.org/time-zones](https://www.iana.org/time-zones)
- RFC 3339: [https://datatracker.ietf.org/doc/html/rfc3339](https://datatracker.ietf.org/doc/html/rfc3339)
|
|
|
@ -1430,6 +1430,7 @@ typedef struct {
|
|||
int64_t watermark1;
|
||||
int64_t watermark2;
|
||||
int32_t ttl;
|
||||
int32_t keep;
|
||||
SArray* pFuncs;
|
||||
int32_t commentLen;
|
||||
char* pComment;
|
||||
|
|
|
@ -102,6 +102,8 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
|
|||
int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper,
|
||||
const char* stbFullName, bool newSubTableRule, STaskNotifyEventStat* pNotifyEventStat);
|
||||
|
||||
void qSetStreamMergeInfo(qTaskInfo_t tinfo, SArray* pVTables);
|
||||
|
||||
/**
|
||||
* Set multiple input data blocks for the stream scan.
|
||||
* @param tinfo
|
||||
|
|
|
@ -263,6 +263,7 @@ typedef struct SDynQueryCtrlStbJoin {
|
|||
|
||||
typedef struct SDynQueryCtrlVtbScan {
|
||||
bool scanAllCols;
|
||||
char dbName[TSDB_DB_NAME_LEN];
|
||||
uint64_t suid;
|
||||
SVgroupsInfo* pVgroupList;
|
||||
} SDynQueryCtrlVtbScan;
|
||||
|
@ -666,6 +667,7 @@ typedef struct SStbJoinDynCtrlBasic {
|
|||
|
||||
typedef struct SVtbScanDynCtrlBasic {
|
||||
bool scanAllCols;
|
||||
char dbName[TSDB_DB_NAME_LEN];
|
||||
uint64_t suid;
|
||||
int32_t accountId;
|
||||
SEpSet mgmtEpSet;
|
||||
|
|
|
@ -1072,6 +1072,9 @@ int32_t taosGetErrSize();
|
|||
#define TSDB_CODE_VTABLE_PRIMTS_HAS_REF TAOS_DEF_ERROR_CODE(0, 0x6202)
|
||||
#define TSDB_CODE_VTABLE_NOT_VIRTUAL_SUPER_TABLE TAOS_DEF_ERROR_CODE(0, 0x6203)
|
||||
#define TSDB_CODE_VTABLE_NOT_SUPPORT_DATA_TYPE TAOS_DEF_ERROR_CODE(0, 0x6204)
|
||||
#define TSDB_CODE_VTABLE_NOT_SUPPORT_STMT TAOS_DEF_ERROR_CODE(0, 0x6205)
|
||||
#define TSDB_CODE_VTABLE_NOT_SUPPORT_TOPIC TAOS_DEF_ERROR_CODE(0, 0x6206)
|
||||
#define TSDB_CODE_VTABLE_NOT_SUPPORT_CROSS_DB TAOS_DEF_ERROR_CODE(0, 0x6207)
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -606,7 +606,8 @@ typedef enum ELogicConditionType {
|
|||
#define TFS_MAX_LEVEL (TFS_MAX_TIERS - 1)
|
||||
#define TFS_PRIMARY_LEVEL 0
|
||||
#define TFS_PRIMARY_ID 0
|
||||
#define TFS_MIN_DISK_FREE_SIZE 50 * 1024 * 1024
|
||||
#define TFS_MIN_DISK_FREE_SIZE 50 * 1024 * 1024 // 50MB
|
||||
#define TFS_MIN_DISK_FREE_SIZE_MAX (2ULL * 1024 * 1024 * 1024 * 1024) // 2TB
|
||||
|
||||
enum { TRANS_STAT_INIT = 0, TRANS_STAT_EXECUTING, TRANS_STAT_EXECUTED, TRANS_STAT_ROLLBACKING, TRANS_STAT_ROLLBACKED };
|
||||
enum { TRANS_OPER_INIT = 0, TRANS_OPER_EXECUTE, TRANS_OPER_ROLLBACK };
|
||||
|
|
|
@ -150,6 +150,7 @@ clean_service
|
|||
# Remove all links
|
||||
${csudo}rm -f ${bin_link_dir}/taos || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosudf || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
|
|
|
@ -75,7 +75,7 @@ static int32_t smlProcessTagTelnet(SSmlHandle *info, char *data, char *sqlEnd){
|
|||
const char *sql = data;
|
||||
while (sql < sqlEnd) {
|
||||
JUMP_SPACE(sql, sqlEnd)
|
||||
if (unlikely(*sql == '\0')) break;
|
||||
if (unlikely(*sql == '\0' || *sql == '\n')) break;
|
||||
|
||||
const char *key = sql;
|
||||
size_t keyLen = 0;
|
||||
|
|
|
@ -3970,6 +3970,8 @@ int32_t tSerializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp) {
|
|||
}
|
||||
}
|
||||
|
||||
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pRsp->keep));
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
_exit:
|
||||
|
@ -4070,6 +4072,13 @@ int32_t tDeserializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp)
|
|||
pRsp->pColRefs = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (!tDecodeIsEnd(&decoder)) {
|
||||
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pRsp->keep));
|
||||
} else {
|
||||
pRsp->keep = 0;
|
||||
}
|
||||
|
||||
tEndDecode(&decoder);
|
||||
|
||||
_exit:
|
||||
|
|
|
@ -1073,12 +1073,6 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
|
|||
|
||||
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
|
||||
size_t metaSize = pBlock->info.rows * sizeof(int32_t);
|
||||
char* tmp = taosMemoryRealloc(pCol->varmeta.offset, metaSize); // preview calloc is too small
|
||||
if (tmp == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
pCol->varmeta.offset = (int32_t*)tmp;
|
||||
memcpy(pCol->varmeta.offset, pStart, metaSize);
|
||||
pStart += metaSize;
|
||||
} else {
|
||||
|
@ -2692,6 +2686,7 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf
|
|||
taskIdStr, flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId,
|
||||
pDataBlock->info.id.groupId, pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version,
|
||||
pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName);
|
||||
goto _exit;
|
||||
if (len >= size - 1) {
|
||||
goto _exit;
|
||||
}
|
||||
|
|
|
@ -1000,8 +1000,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3PageCacheSize", tsS3PageCacheSize, 4, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER_LAZY,CFG_CATEGORY_GLOBAL));
|
||||
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "s3UploadDelaySec", tsS3UploadDelaySec, 1, 60 * 60 * 24 * 30, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_GLOBAL));
|
||||
|
||||
// min free disk space used to check if the disk is full [50MB, 1GB]
|
||||
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "minDiskFreeSize", tsMinDiskFreeSize, TFS_MIN_DISK_FREE_SIZE, 1024 * 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_LOCAL));
|
||||
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "minDiskFreeSize", tsMinDiskFreeSize, TFS_MIN_DISK_FREE_SIZE, TFS_MIN_DISK_FREE_SIZE_MAX, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER,CFG_CATEGORY_LOCAL));
|
||||
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableWhiteList", tsEnableWhiteList, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
|
||||
|
||||
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "streamNotifyMessageSize", tsStreamNotifyMessageSize, 8, 1024 * 1024, CFG_SCOPE_SERVER, CFG_DYN_NONE,CFG_CATEGORY_LOCAL));
|
||||
|
|
|
@ -600,7 +600,7 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
|
|||
|
||||
SWWorkerPool *pStreamCtrlPool = &pMgmt->streamCtrlPool;
|
||||
pStreamCtrlPool->name = "vnode-stream-ctrl";
|
||||
pStreamCtrlPool->max = 1;
|
||||
pStreamCtrlPool->max = 4;
|
||||
if ((code = tWWorkerInit(pStreamCtrlPool)) != 0) return code;
|
||||
|
||||
SWWorkerPool *pStreamChkPool = &pMgmt->streamChkPool;
|
||||
|
|
|
@ -375,6 +375,7 @@ static int32_t buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, EStreamTaskT
|
|||
uint64_t uid = 0;
|
||||
SArray** pTaskList = NULL;
|
||||
if (pSourceTaskList) {
|
||||
uid = pStream->uid;
|
||||
pTaskList = &pSourceTaskList;
|
||||
} else {
|
||||
streamGetUidTaskList(pStream, type, &uid, &pTaskList);
|
||||
|
@ -454,6 +455,7 @@ static int32_t addSourceTaskVTableOutput(SStreamTask* pTask, SSHashObj* pVgTasks
|
|||
TSDB_CHECK_NULL(pTaskMap, code, lino, _end, terrno);
|
||||
|
||||
pTask->outputInfo.type = TASK_OUTPUT__VTABLE_MAP;
|
||||
pTask->msgInfo.msgType = TDMT_STREAM_TASK_DISPATCH;
|
||||
STaskDispatcherVtableMap *pDispatcher = &pTask->outputInfo.vtableMapDispatcher;
|
||||
pDispatcher->taskInfos = taosArrayInit(taskNum, sizeof(STaskDispatcherFixed));
|
||||
TSDB_CHECK_NULL(pDispatcher->taskInfos, code, lino, _end, terrno);
|
||||
|
@ -462,26 +464,32 @@ static int32_t addSourceTaskVTableOutput(SStreamTask* pTask, SSHashObj* pVgTasks
|
|||
|
||||
int32_t iter = 0, vgId = 0;
|
||||
uint64_t uid = 0;
|
||||
STaskDispatcherFixed* pAddr = NULL;
|
||||
void* p = NULL;
|
||||
while (NULL != (p = tSimpleHashIterate(pVtables, p, &iter))) {
|
||||
char* vgUid = tSimpleHashGetKey(p, NULL);
|
||||
vgId = *(int32_t*)vgUid;
|
||||
uid = *(uint64_t*)((int32_t*)vgUid + 1);
|
||||
|
||||
pAddr = tSimpleHashGet(pVgTasks, &vgId, sizeof(vgId));
|
||||
if (NULL == pAddr) {
|
||||
void *px = tSimpleHashGet(pVgTasks, &vgId, sizeof(vgId));
|
||||
if (NULL == px) {
|
||||
mError("tSimpleHashGet vgId %d not found", vgId);
|
||||
return code;
|
||||
}
|
||||
SStreamTask* pMergeTask = *(SStreamTask**)px;
|
||||
if (pMergeTask == NULL) {
|
||||
mError("tSimpleHashGet pMergeTask %d not found", vgId);
|
||||
return code;
|
||||
}
|
||||
|
||||
void* px = tSimpleHashGet(pTaskMap, &pAddr->taskId, sizeof(int32_t));
|
||||
px = tSimpleHashGet(pTaskMap, &pMergeTask->id.taskId, sizeof(pMergeTask->id.taskId));
|
||||
int32_t idx = 0;
|
||||
if (px == NULL) {
|
||||
px = taosArrayPush(pDispatcher->taskInfos, pAddr);
|
||||
STaskDispatcherFixed addr = {
|
||||
.taskId = pMergeTask->id.taskId, .nodeId = pMergeTask->info.nodeId, .epSet = pMergeTask->info.epSet};
|
||||
px = taosArrayPush(pDispatcher->taskInfos, &addr);
|
||||
TSDB_CHECK_NULL(px, code, lino, _end, terrno);
|
||||
idx = taosArrayGetSize(pDispatcher->taskInfos) - 1;
|
||||
code = tSimpleHashPut(pTaskMap, &pAddr->taskId, sizeof(int32_t), &idx, sizeof(int32_t));
|
||||
code = tSimpleHashPut(pTaskMap, &pMergeTask->id.taskId, sizeof(pMergeTask->id.taskId), &idx, sizeof(idx));
|
||||
if (code) {
|
||||
mError("tSimpleHashPut uid to task idx failed, error:%d", code);
|
||||
return code;
|
||||
|
@ -495,9 +503,15 @@ static int32_t addSourceTaskVTableOutput(SStreamTask* pTask, SSHashObj* pVgTasks
|
|||
mError("tSimpleHashPut uid to STaskDispatcherFixed failed, error:%d", code);
|
||||
return code;
|
||||
}
|
||||
|
||||
mDebug("source task[%s,vg:%d] add vtable output map, vuid %" PRIu64 " => [%d, vg:%d]",
|
||||
pTask->id.idStr, pTask->info.nodeId, uid, pAddr->taskId, pAddr->nodeId);
|
||||
|
||||
code = streamTaskSetUpstreamInfo(pMergeTask, pTask);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("failed to set upstream info of merge task, error:%d", code);
|
||||
return code;
|
||||
}
|
||||
|
||||
mDebug("source task[%s,vg:%d] add vtable output map, vuid %" PRIu64 " => [%d, vg:%d]", pTask->id.idStr,
|
||||
pTask->info.nodeId, uid, pMergeTask->id.taskId, pMergeTask->info.nodeId);
|
||||
}
|
||||
|
||||
_end:
|
||||
|
@ -662,7 +676,6 @@ static int32_t addVTableMergeTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pS
|
|||
}
|
||||
|
||||
static int32_t buildMergeTaskHash(SArray* pMergeTaskList, SSHashObj** ppVgTasks) {
|
||||
STaskDispatcherFixed addr;
|
||||
int32_t code = 0;
|
||||
int32_t taskNum = taosArrayGetSize(pMergeTaskList);
|
||||
|
||||
|
@ -676,11 +689,7 @@ static int32_t buildMergeTaskHash(SArray* pMergeTaskList, SSHashObj** ppVgTasks)
|
|||
for (int32_t i = 0; i < taskNum; ++i) {
|
||||
SStreamTask* pTask = taosArrayGetP(pMergeTaskList, i);
|
||||
|
||||
addr.taskId = pTask->id.taskId;
|
||||
addr.nodeId = pTask->info.nodeId;
|
||||
addr.epSet = pTask->info.epSet;
|
||||
|
||||
code = tSimpleHashPut(*ppVgTasks, &addr.nodeId, sizeof(addr.nodeId), &addr, sizeof(addr));
|
||||
code = tSimpleHashPut(*ppVgTasks, &pTask->info.nodeId, sizeof(pTask->info.nodeId), &pTask, POINTER_BYTES);
|
||||
if (code) {
|
||||
mError("tSimpleHashPut %d STaskDispatcherFixed failed", i);
|
||||
return code;
|
||||
|
@ -725,10 +734,9 @@ static int32_t addVTableSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* p
|
|||
}
|
||||
|
||||
plan->pVTables = *(SSHashObj**)p;
|
||||
*(SSHashObj**)p = NULL;
|
||||
|
||||
code = doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam,
|
||||
hasAggTasks, pVgTasks, pSourceTaskList);
|
||||
plan->pVTables = NULL;
|
||||
if (code != 0) {
|
||||
mError("failed to create stream task, code:%s", tstrerror(code));
|
||||
|
||||
|
@ -857,8 +865,12 @@ static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, S
|
|||
}
|
||||
|
||||
if (needHistoryTask(pStream)) {
|
||||
EStreamTaskType type = (pStream->conf.trigger == STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE) ? STREAM_RECALCUL_TASK
|
||||
: STREAM_HISTORY_TASK;
|
||||
EStreamTaskType type = 0;
|
||||
if (pStream->conf.trigger == STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE && (pStream->conf.fillHistory == 0)) {
|
||||
type = STREAM_RECALCUL_TASK; // only the recalculating task
|
||||
} else {
|
||||
type = STREAM_HISTORY_TASK; // set the fill-history option
|
||||
}
|
||||
code = doAddAggTask(pStream, pMnode, plan, pEpset, pVgroup, pSnode, type, useTriggerParam);
|
||||
if (code != 0) {
|
||||
goto END;
|
||||
|
@ -1220,7 +1232,7 @@ static int32_t addVgroupToRes(char* fDBName, int32_t vvgId, uint64_t vuid, SRefC
|
|||
char dbVgId[TSDB_DB_NAME_LEN + 32];
|
||||
SSHashObj *pTarVg = NULL, *pNewVg = NULL;
|
||||
|
||||
TSDB_CHECK_CODE(getTableVgId(pDb, 1, fDBName, &vgId, pCol->refColName), lino, _return);
|
||||
TSDB_CHECK_CODE(getTableVgId(pDb, 1, fDBName, &vgId, pCol->refTableName), lino, _return);
|
||||
|
||||
snprintf(dbVgId, sizeof(dbVgId), "%s.%d", pCol->refDbName, vgId);
|
||||
|
||||
|
|
|
@ -2326,6 +2326,7 @@ static int32_t mndBuildStbCfgImp(SDbObj *pDb, SStbObj *pStb, const char *tbName,
|
|||
pRsp->watermark1 = pStb->watermark[0];
|
||||
pRsp->watermark2 = pStb->watermark[1];
|
||||
pRsp->ttl = pStb->ttl;
|
||||
pRsp->keep = pStb->keep;
|
||||
pRsp->commentLen = pStb->commentLen;
|
||||
if (pStb->commentLen > 0) {
|
||||
pRsp->pComment = taosStrdup(pStb->comment);
|
||||
|
|
|
@ -1307,6 +1307,8 @@ int32_t setTaskAttrInResBlock(SStreamObj *pStream, SStreamTask *pTask, SSDataBlo
|
|||
STR_WITH_SIZE_TO_VARSTR(level, "agg", 3);
|
||||
} else if (pTask->info.taskLevel == TASK_LEVEL__SINK) {
|
||||
STR_WITH_SIZE_TO_VARSTR(level, "sink", 4);
|
||||
} else if (pTask->info.taskLevel == TASK_LEVEL__MERGE) {
|
||||
STR_WITH_SIZE_TO_VARSTR(level, "merge", 5);
|
||||
}
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
|
|
|
@ -48,7 +48,7 @@ int meteEncodeColRefEntry(SEncoder *pCoder, const SMetaEntry *pME) {
|
|||
const SColRefWrapper *pw = &pME->colRef;
|
||||
TAOS_CHECK_RETURN(tEncodeI32v(pCoder, pw->nCols));
|
||||
TAOS_CHECK_RETURN(tEncodeI32v(pCoder, pw->version));
|
||||
uDebug("encode cols:%d", pw->nCols);
|
||||
uTrace("encode cols:%d", pw->nCols);
|
||||
|
||||
for (int32_t i = 0; i < pw->nCols; i++) {
|
||||
SColRef *p = &pw->pColRef[i];
|
||||
|
@ -171,7 +171,7 @@ int meteEncodeColCmprEntry(SEncoder *pCoder, const SMetaEntry *pME) {
|
|||
const SColCmprWrapper *pw = &pME->colCmpr;
|
||||
TAOS_CHECK_RETURN(tEncodeI32v(pCoder, pw->nCols));
|
||||
TAOS_CHECK_RETURN(tEncodeI32v(pCoder, pw->version));
|
||||
uDebug("encode cols:%d", pw->nCols);
|
||||
uTrace("encode cols:%d", pw->nCols);
|
||||
|
||||
for (int32_t i = 0; i < pw->nCols; i++) {
|
||||
SColCmpr *p = &pw->pColCmpr[i];
|
||||
|
|
|
@ -2515,8 +2515,8 @@ int32_t metaHandleEntry2(SMeta *pMeta, const SMetaEntry *pEntry) {
|
|||
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
pMeta->changed = true;
|
||||
metaDebug("vgId:%d, %s success, version:%" PRId64 " type:%d uid:%" PRId64 " name:%s", vgId, __func__,
|
||||
pEntry->version, pEntry->type, pEntry->uid, pEntry->type > 0 ? pEntry->name : "");
|
||||
metaDebug("vgId:%d, index:%" PRId64 ", handle meta entry success, type:%d tb:%s uid:%" PRId64, vgId, pEntry->version,
|
||||
pEntry->type, pEntry->type > 0 ? pEntry->name : "", pEntry->uid);
|
||||
} else {
|
||||
metaErr(vgId, code);
|
||||
}
|
||||
|
|
|
@ -381,8 +381,8 @@ static int32_t metaCreateChildTable(SMeta *pMeta, int64_t version, SVCreateTbReq
|
|||
// handle entry
|
||||
code = metaHandleEntry2(pMeta, &entry);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
metaInfo("vgId:%d, child table:%s uid %" PRId64 " suid:%" PRId64 " is created, version:%" PRId64,
|
||||
TD_VID(pMeta->pVnode), pReq->name, pReq->uid, pReq->ctb.suid, version);
|
||||
metaInfo("vgId:%d, index:%" PRId64 ", child table is created, tb:%s uid:%" PRId64 " suid:%" PRId64,
|
||||
TD_VID(pMeta->pVnode), version, pReq->name, pReq->uid, pReq->ctb.suid);
|
||||
} else {
|
||||
metaError("vgId:%d, %s failed at %s:%d since %s, uid:%" PRId64 " name:%s suid:%" PRId64 " version:%" PRId64,
|
||||
TD_VID(pMeta->pVnode), __func__, __FILE__, __LINE__, tstrerror(code), pReq->uid, pReq->name,
|
||||
|
@ -493,8 +493,8 @@ static int32_t metaCreateNormalTable(SMeta *pMeta, int64_t version, SVCreateTbRe
|
|||
// handle entry
|
||||
code = metaHandleEntry2(pMeta, &entry);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
metaInfo("vgId:%d, normal table:%s uid %" PRId64 " is created, version:%" PRId64, TD_VID(pMeta->pVnode), pReq->name,
|
||||
pReq->uid, version);
|
||||
metaInfo("vgId:%d, index:%" PRId64 ", normal table is created, tb:%s uid:%" PRId64, TD_VID(pMeta->pVnode), version,
|
||||
pReq->name, pReq->uid);
|
||||
} else {
|
||||
metaError("vgId:%d, %s failed at %s:%d since %s, uid:%" PRId64 " name:%s version:%" PRId64, TD_VID(pMeta->pVnode),
|
||||
__func__, __FILE__, __LINE__, tstrerror(code), pReq->uid, pReq->name, version);
|
||||
|
@ -557,8 +557,8 @@ static int32_t metaCreateVirtualNormalTable(SMeta *pMeta, int64_t version, SVCre
|
|||
// handle entry
|
||||
code = metaHandleEntry2(pMeta, &entry);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
metaInfo("vgId:%d, normal table:%s uid %" PRId64 " is created, version:%" PRId64, TD_VID(pMeta->pVnode), pReq->name,
|
||||
pReq->uid, version);
|
||||
metaInfo("vgId:%d, index:%" PRId64 ", virtual normal table is created, tb:%s uid:%" PRId64, TD_VID(pMeta->pVnode),
|
||||
version, pReq->name, pReq->uid);
|
||||
} else {
|
||||
metaError("vgId:%d, %s failed at %s:%d since %s, uid:%" PRId64 " name:%s version:%" PRId64, TD_VID(pMeta->pVnode),
|
||||
__func__, __FILE__, __LINE__, tstrerror(code), pReq->uid, pReq->name, version);
|
||||
|
@ -625,8 +625,8 @@ static int32_t metaCreateVirtualChildTable(SMeta *pMeta, int64_t version, SVCrea
|
|||
// handle entry
|
||||
code = metaHandleEntry2(pMeta, &entry);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
metaInfo("vgId:%d, normal table:%s uid %" PRId64 " is created, version:%" PRId64, TD_VID(pMeta->pVnode), pReq->name,
|
||||
pReq->uid, version);
|
||||
metaInfo("vgId:%d, index:%" PRId64 ", virtual child table is created, tb:%s uid:%" PRId64, TD_VID(pMeta->pVnode),
|
||||
version, pReq->name, pReq->uid);
|
||||
} else {
|
||||
metaError("vgId:%d, %s failed at %s:%d since %s, uid:%" PRId64 " name:%s version:%" PRId64, TD_VID(pMeta->pVnode),
|
||||
__func__, __FILE__, __LINE__, tstrerror(code), pReq->uid, pReq->name, version);
|
||||
|
|
|
@ -1200,7 +1200,7 @@ int32_t tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList, const
|
|||
}
|
||||
|
||||
tqDebug("s-task:%s %d tables are set to be queried target table", id, (int32_t)taosArrayGetSize(tbUidList));
|
||||
return tqCollectPhysicalTables(pReader, id);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
void tqReaderAddTbUidList(STqReader* pReader, const SArray* pTableUidList) {
|
||||
|
@ -1498,8 +1498,7 @@ static int32_t tqCollectPhysicalTables(STqReader* pReader, const char* idstr) {
|
|||
pScanInfo->cacheHit = 0;
|
||||
|
||||
pVirtualTables = pScanInfo->pVirtualTables;
|
||||
if (taosHashGetSize(pVirtualTables) == 0 || taosHashGetSize(pReader->tbIdHash) == 0 ||
|
||||
taosArrayGetSize(pReader->pColIdList) == 0) {
|
||||
if (taosHashGetSize(pVirtualTables) == 0 || taosArrayGetSize(pReader->pColIdList) == 0) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
|
@ -1507,13 +1506,10 @@ static int32_t tqCollectPhysicalTables(STqReader* pReader, const char* idstr) {
|
|||
TSDB_CHECK_NULL(pPhysicalTables, code, lino, _end, terrno);
|
||||
taosHashSetFreeFp(pPhysicalTables, destroySourceScanTables);
|
||||
|
||||
pIter = taosHashIterate(pReader->tbIdHash, NULL);
|
||||
pIter = taosHashIterate(pVirtualTables, NULL);
|
||||
while (pIter != NULL) {
|
||||
int64_t vTbUid = *(int64_t*)taosHashGetKey(pIter, NULL);
|
||||
|
||||
px = taosHashGet(pVirtualTables, &vTbUid, sizeof(int64_t));
|
||||
TSDB_CHECK_NULL(px, code, lino, _end, terrno);
|
||||
SArray* pColInfos = *(SArray**)px;
|
||||
SArray* pColInfos = *(SArray**)pIter;
|
||||
TSDB_CHECK_NULL(pColInfos, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
|
||||
// Traverse all required columns and collect corresponding physical tables
|
||||
|
@ -1548,7 +1544,7 @@ static int32_t tqCollectPhysicalTables(STqReader* pReader, const char* idstr) {
|
|||
j++;
|
||||
}
|
||||
}
|
||||
pIter = taosHashIterate(pReader->tbIdHash, pIter);
|
||||
pIter = taosHashIterate(pVirtualTables, pIter);
|
||||
}
|
||||
|
||||
pScanInfo->pPhysicalTables = pPhysicalTables;
|
||||
|
@ -1574,9 +1570,8 @@ _end:
|
|||
|
||||
static void freeTableSchemaCache(const void* key, size_t keyLen, void* value, void* ud) {
|
||||
if (value) {
|
||||
SSchemaWrapper** ppSchemaWrapper = value;
|
||||
tDeleteSchemaWrapper(*ppSchemaWrapper);
|
||||
*ppSchemaWrapper = NULL;
|
||||
SSchemaWrapper* pSchemaWrapper = value;
|
||||
tDeleteSchemaWrapper(pSchemaWrapper);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1686,8 +1681,8 @@ int32_t tqRetrieveVTableDataBlock(STqReader* pReader, SSDataBlock** pRes, const
|
|||
SColumnInfoData* pOutCol = taosArrayGet(pBlock->pDataBlock, j);
|
||||
TSDB_CHECK_NULL(pOutCol, code, lino, _end, terrno);
|
||||
if (i >= nColInfos) {
|
||||
tqInfo("%s has %d column info, but vtable column %d is missing, id: %s", __func__, nColInfos, pOutCol->info.colId,
|
||||
idstr);
|
||||
tqTrace("%s has %d column info, but vtable column %d is missing, id: %s", __func__, nColInfos,
|
||||
pOutCol->info.colId, idstr);
|
||||
colDataSetNNULL(pOutCol, 0, numOfRows);
|
||||
j++;
|
||||
continue;
|
||||
|
@ -1699,17 +1694,26 @@ int32_t tqRetrieveVTableDataBlock(STqReader* pReader, SSDataBlock** pRes, const
|
|||
i++;
|
||||
continue;
|
||||
} else if (pCol->vColId > pOutCol->info.colId) {
|
||||
tqInfo("%s does not find column info for vtable column %d, closest vtable column is %d, id: %s", __func__,
|
||||
pOutCol->info.colId, pCol->vColId, idstr);
|
||||
tqTrace("%s does not find column info for vtable column %d, closest vtable column is %d, id: %s", __func__,
|
||||
pOutCol->info.colId, pCol->vColId, idstr);
|
||||
colDataSetNNULL(pOutCol, 0, numOfRows);
|
||||
j++;
|
||||
continue;
|
||||
}
|
||||
|
||||
// copy data from physical table to the result block of virtual table
|
||||
// skip this column if it is from another physical table
|
||||
if (pCol->pTbUid != pTbUid) {
|
||||
// skip this column since it is from another physical table
|
||||
} else if (pSubmitTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
|
||||
tqTrace("skip column %d of virtual table %" PRId64 " since it is from table %" PRId64
|
||||
", current block table %" PRId64 ", id: %s",
|
||||
pCol->vColId, vTbUid, pCol->pTbUid, pTbUid, idstr);
|
||||
colDataSetNNULL(pOutCol, 0, numOfRows);
|
||||
i++;
|
||||
j++;
|
||||
continue;
|
||||
}
|
||||
|
||||
// copy data from physical table to the result block of virtual table
|
||||
if (pSubmitTbData->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
|
||||
// try to find the corresponding column data of physical table
|
||||
SColData* pColData = NULL;
|
||||
for (int32_t k = 0; k < nInputCols; ++k) {
|
||||
|
@ -1860,7 +1864,7 @@ _end:
|
|||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at line %d since %s, id: %s", __func__, lino, tstrerror(code), idstr);
|
||||
}
|
||||
return (code == TSDB_CODE_SUCCESS);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool tqReaderIsQueriedSourceTable(STqReader* pReader, uint64_t uid) {
|
||||
|
|
|
@ -862,7 +862,7 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
|
|||
int32_t vgId = TD_VID(pVnode);
|
||||
int64_t suid = pTask->outputInfo.tbSink.stbUid;
|
||||
const char* id = pTask->id.idStr;
|
||||
int32_t timeout = 300; // 5min
|
||||
int32_t timeout = 60; // 1min
|
||||
int64_t start = taosGetTimestampSec();
|
||||
|
||||
while (pTableSinkInfo->uid == 0) {
|
||||
|
@ -985,6 +985,8 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
|
|||
if (code) {
|
||||
tqDebug("s-task:%s failed to build auto create table-name:%s, groupId:0x%" PRId64, id, dstTableName, groupId);
|
||||
return code;
|
||||
} else {
|
||||
tqDebug("s-task:%s no table name given, generated sub-table-name:%s, groupId:0x%" PRId64, id, dstTableName, groupId);
|
||||
}
|
||||
} else {
|
||||
if (pTask->subtableWithoutMd5 != 1 && !isAutoTableName(dstTableName) &&
|
||||
|
|
|
@ -77,7 +77,7 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) {
|
|||
.pOtherBackend = NULL,
|
||||
};
|
||||
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__MERGE) {
|
||||
handle.vnode = ((STQ*)pMeta->ahandle)->pVnode;
|
||||
handle.initTqReader = 1;
|
||||
} else if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
|
||||
|
@ -86,7 +86,8 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) {
|
|||
|
||||
initStorageAPI(&handle.api);
|
||||
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__AGG) {
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE || pTask->info.taskLevel == TASK_LEVEL__AGG ||
|
||||
pTask->info.taskLevel == TASK_LEVEL__MERGE) {
|
||||
if (pTask->info.fillHistory == STREAM_RECALCUL_TASK) {
|
||||
handle.pStateBackend = pTask->pRecalState;
|
||||
handle.pOtherBackend = pTask->pState;
|
||||
|
@ -113,6 +114,8 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) {
|
|||
tqError("s-task:%s failed to set stream notify info, code:%s", pTask->id.idStr, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
qSetStreamMergeInfo(pTask->exec.pExecutor, pTask->pVTables);
|
||||
}
|
||||
|
||||
streamSetupScheduleTrigger(pTask);
|
||||
|
|
|
@ -544,6 +544,12 @@ int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) {
|
|||
qWarn("vnodeGetBatchMeta failed, msgType:%d", req->msgType);
|
||||
}
|
||||
break;
|
||||
case TDMT_VND_VSUBTABLES_META:
|
||||
// error code has been set into reqMsg, no need to handle it here.
|
||||
if (TSDB_CODE_SUCCESS != vnodeGetVSubtablesMeta(pVnode, &reqMsg)) {
|
||||
qWarn("vnodeGetVSubtablesMeta failed, msgType:%d", req->msgType);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
qError("invalid req msgType %d", req->msgType);
|
||||
reqMsg.code = TSDB_CODE_INVALID_MSG;
|
||||
|
@ -730,7 +736,7 @@ int32_t vnodeGetVSubtablesMeta(SVnode *pVnode, SRpcMsg *pMsg) {
|
|||
qError("tSerializeSVSubTablesRsp failed, error:%d", rspSize);
|
||||
goto _return;
|
||||
}
|
||||
pRsp = rpcMallocCont(rspSize);
|
||||
pRsp = taosMemoryCalloc(1, rspSize);
|
||||
if (pRsp == NULL) {
|
||||
code = terrno;
|
||||
qError("rpcMallocCont %d failed, error:%d", rspSize, terrno);
|
||||
|
@ -755,9 +761,11 @@ _return:
|
|||
qError("vnd get virtual subtables failed cause of %s", tstrerror(code));
|
||||
}
|
||||
|
||||
*pMsg = rspMsg;
|
||||
|
||||
tDestroySVSubTablesRsp(&rsp);
|
||||
|
||||
tmsgSendRsp(&rspMsg);
|
||||
//tmsgSendRsp(&rspMsg);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -1354,7 +1354,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq,
|
|||
}
|
||||
}
|
||||
|
||||
vDebug("vgId:%d, add %d new created tables into query table list", TD_VID(pVnode), (int32_t)taosArrayGetSize(tbUids));
|
||||
vTrace("vgId:%d, add %d new created tables into query table list", TD_VID(pVnode), (int32_t)taosArrayGetSize(tbUids));
|
||||
if (tqUpdateTbUidList(pVnode->pTq, tbUids, true) < 0) {
|
||||
vError("vgId:%d, failed to update tbUid list since %s", TD_VID(pVnode), tstrerror(terrno));
|
||||
}
|
||||
|
|
|
@ -335,12 +335,12 @@ void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
|
|||
if (taosGetQitem(qall, (void **)&pMsg) == 0) continue;
|
||||
|
||||
if (vnodeIsMsgBlock(pMsg->msgType)) {
|
||||
vGTrace(&pMsg->info.traceId, "vgId:%d, msg:%p, get from vnode-apply queue, type:%s handle:%p index:%" PRId64
|
||||
vGDebug(&pMsg->info.traceId, "vgId:%d, msg:%p, get from vnode-apply queue, type:%s handle:%p index:%" PRId64
|
||||
", blocking msg obtained sec:%d seq:%" PRId64,
|
||||
vgId, pMsg, TMSG_INFO(pMsg->msgType), pMsg->info.handle, pMsg->info.conn.applyIndex, pVnode->blockSec,
|
||||
pVnode->blockSeq);
|
||||
} else {
|
||||
vGTrace(&pMsg->info.traceId, "vgId:%d, msg:%p, get from vnode-apply queue, type:%s handle:%p index:%" PRId64, vgId, pMsg,
|
||||
vGDebug(&pMsg->info.traceId, "vgId:%d, msg:%p, get from vnode-apply queue, type:%s handle:%p index:%" PRId64, vgId, pMsg,
|
||||
TMSG_INFO(pMsg->msgType), pMsg->info.handle, pMsg->info.conn.applyIndex);
|
||||
}
|
||||
|
||||
|
@ -437,10 +437,10 @@ static int32_t vnodeSyncApplyMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, const SFsm
|
|||
pMsg->info.conn.applyIndex = pMeta->index;
|
||||
pMsg->info.conn.applyTerm = pMeta->term;
|
||||
|
||||
vGTrace(&pMsg->info.traceId,
|
||||
"vgId:%d, commit-cb is excuted, fsm:%p, index:%" PRId64 ", term:%" PRIu64 ", msg-index:%" PRId64
|
||||
vGDebug(&pMsg->info.traceId,
|
||||
"vgId:%d, index:%" PRId64 ", execute commit cb, fsm:%p, term:%" PRIu64 ", msg-index:%" PRId64
|
||||
", weak:%d, code:%d, state:%d %s, type:%s code:0x%x",
|
||||
pVnode->config.vgId, pFsm, pMeta->index, pMeta->term, pMsg->info.conn.applyIndex, pMeta->isWeak, pMeta->code,
|
||||
pVnode->config.vgId, pMeta->index, pFsm, pMeta->term, pMsg->info.conn.applyIndex, pMeta->isWeak, pMeta->code,
|
||||
pMeta->state, syncStr(pMeta->state), TMSG_INFO(pMsg->msgType), pMsg->code);
|
||||
|
||||
int32_t code = tmsgPutToQueue(&pVnode->msgCb, APPLY_QUEUE, pMsg);
|
||||
|
@ -456,7 +456,11 @@ static int32_t vnodeSyncCommitMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMet
|
|||
SVnode *pVnode = pFsm->data;
|
||||
vnodePostBlockMsg(pVnode, pMsg);
|
||||
|
||||
SRpcMsg rsp = {.code = pMsg->code, .info = pMsg->info};
|
||||
SRpcMsg rsp = {
|
||||
.code = pMsg->code,
|
||||
.info = pMsg->info,
|
||||
};
|
||||
|
||||
if (rsp.info.handle != NULL) {
|
||||
tmsgSendRsp(&rsp);
|
||||
}
|
||||
|
@ -482,9 +486,10 @@ static SyncIndex vnodeSyncAppliedIndex(const SSyncFSM *pFSM) {
|
|||
|
||||
static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, SRpcMsg *pMsg, SFsmCbMeta *pMeta) {
|
||||
SVnode *pVnode = pFsm->data;
|
||||
vTrace("vgId:%d, rollback-cb is excuted, fsm:%p, index:%" PRId64 ", weak:%d, code:%d, state:%d %s, type:%s",
|
||||
pVnode->config.vgId, pFsm, pMeta->index, pMeta->isWeak, pMeta->code, pMeta->state, syncStr(pMeta->state),
|
||||
TMSG_INFO(pMsg->msgType));
|
||||
vGDebug(&pMsg->info.traceId,
|
||||
"vgId:%d, rollback-cb is excuted, fsm:%p, index:%" PRId64 ", weak:%d, code:%d, state:%d %s, type:%s",
|
||||
pVnode->config.vgId, pFsm, pMeta->index, pMeta->isWeak, pMeta->code, pMeta->state, syncStr(pMeta->state),
|
||||
TMSG_INFO(pMsg->msgType));
|
||||
}
|
||||
|
||||
static int32_t vnodeSnapshotStartRead(const SSyncFSM *pFsm, void *pParam, void **ppReader) {
|
||||
|
|
|
@ -317,7 +317,9 @@ typedef struct SCtgVSubTablesCtx {
|
|||
int32_t vgNum;
|
||||
bool clonedVgroups;
|
||||
SArray* pVgroups;
|
||||
|
||||
|
||||
int32_t resCode;
|
||||
int32_t resDoneNum;
|
||||
SVSubTablesRsp* pResList;
|
||||
int32_t resIdx;
|
||||
} SCtgVSubTablesCtx;
|
||||
|
|
|
@ -3151,13 +3151,20 @@ int32_t ctgHandleGetVSubTablesRsp(SCtgTaskReq* tReq, int32_t reqType, const SDat
|
|||
SCtgTask* pTask = tReq->pTask;
|
||||
int32_t newCode = TSDB_CODE_SUCCESS;
|
||||
SCtgVSubTablesCtx* pCtx = (SCtgVSubTablesCtx*)pTask->taskCtx;
|
||||
int32_t resIdx = atomic_fetch_add_32(&pCtx->resIdx, 1);
|
||||
|
||||
CTG_ERR_JRET(ctgProcessRspMsg(pCtx->pResList + atomic_fetch_add_32(&pCtx->resIdx, 1), reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
|
||||
|
||||
if (atomic_load_32(&pCtx->resIdx) < pCtx->vgNum) {
|
||||
CTG_RET(code);
|
||||
code = ctgProcessRspMsg(pCtx->pResList + resIdx, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target);
|
||||
if (code) {
|
||||
pCtx->resCode = code;
|
||||
}
|
||||
|
||||
int32_t doneNum = atomic_add_fetch_32(&pCtx->resDoneNum, 1);
|
||||
if (doneNum < pCtx->vgNum) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code = pCtx->resCode;
|
||||
|
||||
_return:
|
||||
|
||||
newCode = ctgHandleTaskEnd(pTask, code);
|
||||
|
|
|
@ -633,7 +633,8 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
|
|||
if (TDMT_VND_TABLE_CFG == msgType) {
|
||||
SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
|
||||
pName = ctx->pName;
|
||||
} else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType) {
|
||||
} else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType ||
|
||||
TDMT_VND_VSUBTABLES_META == msgType) {
|
||||
if (CTG_TASK_GET_TB_META_BATCH == pTask->type) {
|
||||
SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx;
|
||||
SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
|
||||
|
@ -714,7 +715,8 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
|
|||
if (TDMT_VND_TABLE_CFG == msgType) {
|
||||
SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
|
||||
pName = ctx->pName;
|
||||
} else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType) {
|
||||
} else if (TDMT_VND_TABLE_META == msgType || TDMT_VND_TABLE_NAME == msgType ||
|
||||
TDMT_VND_VSUBTABLES_META == msgType) {
|
||||
if (CTG_TASK_GET_TB_META_BATCH == pTask->type) {
|
||||
SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx;
|
||||
SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
|
||||
|
|
|
@ -784,6 +784,11 @@ static void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STab
|
|||
if (pCfg->ttl > 0) {
|
||||
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len),
|
||||
" TTL %d", pCfg->ttl);
|
||||
}
|
||||
|
||||
if (pCfg->keep > 0) {
|
||||
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len),
|
||||
" KEEP %dm", pCfg->keep);
|
||||
}
|
||||
|
||||
if (TSDB_SUPER_TABLE == pCfg->tableType || TSDB_NORMAL_TABLE == pCfg->tableType) {
|
||||
|
|
|
@ -76,6 +76,7 @@ typedef struct SStbJoinDynCtrlInfo {
|
|||
|
||||
typedef struct SVtbScanDynCtrlInfo {
|
||||
bool scanAllCols;
|
||||
char* dbName;
|
||||
tsem_t ready;
|
||||
SEpSet epSet;
|
||||
SUseDbRsp* pRsp;
|
||||
|
|
|
@ -510,18 +510,19 @@ typedef struct SStreamFillSupporter {
|
|||
} SStreamFillSupporter;
|
||||
|
||||
typedef struct SStreamRecParam {
|
||||
char pSql[2048];
|
||||
int32_t sqlCapcity;
|
||||
char pUrl[TSDB_EP_LEN + 17]; // "http://localhost:6041/rest/sql"
|
||||
char pAuth[512 + 22]; // Authorization: Basic token
|
||||
char pStbFullName[TSDB_TABLE_FNAME_LEN];
|
||||
char pWstartName[TSDB_COL_NAME_LEN];
|
||||
char pWendName[TSDB_COL_NAME_LEN];
|
||||
char pGroupIdName[TSDB_COL_NAME_LEN];
|
||||
char pIsWindowFilledName[TSDB_COL_NAME_LEN];
|
||||
void* pIteData;
|
||||
int32_t iter;
|
||||
TSKEY gap;
|
||||
char pSql[2048];
|
||||
int32_t sqlCapcity;
|
||||
char pUrl[TSDB_EP_LEN + 17]; // "http://localhost:6041/rest/sql"
|
||||
char pAuth[512 + 22]; // Authorization: Basic token
|
||||
char pStbFullName[TSDB_TABLE_FNAME_LEN];
|
||||
char pWstartName[TSDB_COL_NAME_LEN];
|
||||
char pWendName[TSDB_COL_NAME_LEN];
|
||||
char pGroupIdName[TSDB_COL_NAME_LEN];
|
||||
char pIsWindowFilledName[TSDB_COL_NAME_LEN];
|
||||
void* pIteData;
|
||||
int32_t iter;
|
||||
TSKEY gap;
|
||||
SSHashObj* pColIdMap;
|
||||
} SStreamRecParam;
|
||||
|
||||
typedef struct SStreamScanInfo {
|
||||
|
@ -546,7 +547,10 @@ typedef struct SStreamScanInfo {
|
|||
uint64_t numOfExec; // execution times
|
||||
STqReader* tqReader;
|
||||
|
||||
SHashObj* pVtableMergeHandles; // key: vtable uid, value: SStreamVtableMergeHandle
|
||||
SHashObj* pVtableMergeHandles; // key: vtable uid, value: SStreamVtableMergeHandle
|
||||
SDiskbasedBuf* pVtableMergeBuf; // page buffer used by vtable merge
|
||||
SArray* pVtableReadyHandles;
|
||||
STableListInfo* pTableListInfo;
|
||||
|
||||
uint64_t groupId;
|
||||
bool igCheckGroupId;
|
||||
|
|
|
@ -188,7 +188,7 @@ int32_t createEventNonblockOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p

int32_t createVirtualTableMergeOperatorInfo(SOperatorInfo** pDownstream, SReadHandle* readHandle, STableListInfo* pTableListInfo, int32_t numOfDownstream, SVirtualScanPhysiNode * pJoinNode, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo);

int32_t createStreamVtableMergeOperatorInfo(SReadHandle* pHandle, SVirtualScanPhysiNode* pVirtualScanNode, SNode* pTagCond, SExecTaskInfo* pTaskInfo, SOperatorInfo** pInfo);
int32_t createStreamVtableMergeOperatorInfo(SOperatorInfo* pDownstream, SReadHandle* pHandle, SVirtualScanPhysiNode* pVirtualScanNode, SNode* pTagCond, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo, SOperatorInfo** pInfo);
// clang-format on

SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup,
@ -77,6 +77,7 @@ typedef struct {
char* stbFullName; // used to generate dest child table name
bool newSubTableRule; // used to generate dest child table name
STaskNotifyEventStat* pNotifyEventStat; // used to store notify event statistics
SArray * pVTables; // used to store merge info for merge task, SArray<SVCTableMergeInfo>
} SStreamTaskInfo;

struct SExecTaskInfo {
@ -30,15 +30,20 @@ typedef enum {
SVM_NEXT_FOUND = 1,
} SVM_NEXT_RESULT;

int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle **ppHandle, int32_t nSrcTbls, int32_t numPageLimit,
SDiskbasedBuf *pBuf, SSDataBlock *pResBlock, const char *idstr);
int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle **ppHandle, int64_t vuid, int32_t nSrcTbls,
int32_t numPageLimit, int32_t primaryTsIndex, SDiskbasedBuf *pBuf,
SSDataBlock *pResBlock, const char *idstr);

void streamVtableMergeDestroyHandle(SStreamVtableMergeHandle **ppHandle);
void streamVtableMergeDestroyHandle(void *ppHandle);

int64_t streamVtableMergeHandleGetVuid(SStreamVtableMergeHandle *pHandle);

int32_t streamVtableMergeAddBlock(SStreamVtableMergeHandle *pHandle, SSDataBlock *pDataBlock, const char *idstr);

int32_t streamVtableMergeNextTuple(SStreamVtableMergeHandle *pHandle, SSDataBlock *pResBlock, SVM_NEXT_RESULT *pRes,
const char *idstr);
int32_t streamVtableMergeMoveNext(SStreamVtableMergeHandle *pHandle, SVM_NEXT_RESULT *pRes, const char *idstr);

int32_t streamVtableMergeCurrent(SStreamVtableMergeHandle *pHandle, SSDataBlock **ppDataBlock, int32_t *pRowIdx,
const char *idstr);

#ifdef __cplusplus
}
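The declarations above define the virtual-table merge handle API. A possible call sequence, read from these prototypes only (all numeric arguments are illustrative), might look like:

// Sketch only: illustrative lifecycle of the merge handle; the literal values are hypothetical.
static int32_t exampleMergeLoop(SDiskbasedBuf *pBuf, SSDataBlock *pResBlock, SSDataBlock *pInput, const char *idstr) {
  SStreamVtableMergeHandle *pHandle = NULL;
  int32_t code = streamVtableMergeCreateHandle(&pHandle, 1001 /* vuid */, 2 /* nSrcTbls */, 128 /* numPageLimit */,
                                               0 /* primaryTsIndex */, pBuf, pResBlock, idstr);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }
  code = streamVtableMergeAddBlock(pHandle, pInput, idstr);  // buffer one source block
  SVM_NEXT_RESULT res = SVM_NEXT_FOUND;
  while (code == TSDB_CODE_SUCCESS) {
    code = streamVtableMergeMoveNext(pHandle, &res, idstr);  // advance the merge
    if (code != TSDB_CODE_SUCCESS || res != SVM_NEXT_FOUND) {
      break;
    }
    SSDataBlock *pBlock = NULL;
    int32_t      rowIdx = 0;
    code = streamVtableMergeCurrent(pHandle, &pBlock, &rowIdx, idstr);  // read the chosen row
  }
  streamVtableMergeDestroyHandle(&pHandle);
  return code;
}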
@ -71,6 +71,7 @@ void setDeleteFillValueInfo(TSKEY start, TSKEY end, SStreamFillSupporter*
void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* pFillSup, SSDataBlock* pRes);
int32_t initFillSupRowInfo(SStreamFillSupporter* pFillSup, SSDataBlock* pRes);
void getStateKeepInfo(SNonBlockAggSupporter* pNbSup, bool isRecOp, int32_t* pNumRes, TSKEY* pTsRes);
int32_t initStreamFillOperatorColumnMapInfo(SExprSupp* pExprSup, SOperatorInfo* pOperator);

#ifdef __cplusplus
}
@ -81,6 +81,7 @@ void setEventWindowInfo(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SRowBuf
// stream client
int32_t streamClientGetResultRange(SStreamRecParam* pParam, SSHashObj* pRangeMap, SArray* pRangeRes);
int32_t streamClientGetFillRange(SStreamRecParam* pParam, SWinKey* pKey, SArray* pRangeRes, void* pEmptyRow, int32_t size, int32_t* pOffsetInfo, int32_t numOfCols);
int32_t streamClientCheckCfg(SStreamRecParam* pParam);

#ifdef __cplusplus
}
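A caller of the stream client above would typically verify the adapter configuration before requesting fill ranges. A minimal sketch (the surrounding parameter plumbing is assumed to exist elsewhere):

// Sketch only: probe the configured endpoint, then fetch fill ranges.
static int32_t exampleUseStreamClient(SStreamRecParam *pParam, SWinKey *pKey, SArray *pRangeRes, void *pEmptyRow,
                                      int32_t rowSize, int32_t *pOffsetInfo, int32_t numOfCols) {
  int32_t code = streamClientCheckCfg(pParam);  // fails if the REST endpoint is unreachable or misconfigured
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }
  return streamClientGetFillRange(pParam, pKey, pRangeRes, pEmptyRow, rowSize, pOffsetInfo, numOfCols);
}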
@ -100,6 +100,9 @@ void freeUseDbOutput(void* pOutput) {
}

static void destroyVtbScanDynCtrlInfo(SVtbScanDynCtrlInfo* pVtbScan) {
if (pVtbScan->dbName) {
taosMemoryFreeClear(pVtbScan->dbName);
}
if (pVtbScan->childTableList) {
taosArrayDestroy(pVtbScan->childTableList);
}
@ -1136,13 +1139,15 @@ int32_t dynProcessUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
pScanResInfo->vtbScan.pRsp = taosMemoryMalloc(sizeof(SUseDbRsp));
QUERY_CHECK_NULL(pScanResInfo->vtbScan.pRsp, code, lino, _return, terrno);

QUERY_CHECK_CODE(tDeserializeSUseDbRsp(pMsg->pData, (int32_t)pMsg->len, pScanResInfo->vtbScan.pRsp), lino, _return);
code = tDeserializeSUseDbRsp(pMsg->pData, (int32_t)pMsg->len, pScanResInfo->vtbScan.pRsp);
QUERY_CHECK_CODE(code, lino, _return);

taosMemoryFreeClear(pMsg->pData);

QUERY_CHECK_CODE(tsem_post(&pScanResInfo->vtbScan.ready), lino, _return);
code = tsem_post(&pScanResInfo->vtbScan.ready);
QUERY_CHECK_CODE(code, lino, _return);

return TSDB_CODE_SUCCESS;
return code;
_return:
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
return code;
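This hunk, like several that follow, rewrites QUERY_CHECK_CODE(expr, lino, _return) into an explicit assignment to code followed by QUERY_CHECK_CODE(code, lino, _return), so that the _return path logs and returns the actual failure code. A minimal sketch of the pattern (doWork is a hypothetical callee, not a function from this change):

// Sketch only: the assign-then-check pattern used in these hunks.
extern int32_t doWork(void);  // hypothetical callee

static int32_t exampleCheckPattern(void) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;

  code = doWork();                        // keep the result in `code` ...
  QUERY_CHECK_CODE(code, lino, _return);  // ... so `_return` reports the real error

_return:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}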
@ -1157,7 +1162,8 @@ static int32_t buildDbVgInfoMap(SOperatorInfo* pOperator, SReadHandle* pHandle,
|
|||
|
||||
pReq = taosMemoryMalloc(sizeof(SUseDbReq));
|
||||
QUERY_CHECK_NULL(pReq, code, lino, _return, terrno);
|
||||
QUERY_CHECK_CODE(tNameGetFullDbName(name, pReq->db), lino, _return);
|
||||
code = tNameGetFullDbName(name, pReq->db);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
int32_t contLen = tSerializeSUseDbReq(NULL, 0, pReq);
|
||||
buf1 = taosMemoryCalloc(1, contLen);
|
||||
QUERY_CHECK_NULL(buf1, code, lino, _return, terrno);
|
||||
|
@ -1177,11 +1183,14 @@ static int32_t buildDbVgInfoMap(SOperatorInfo* pOperator, SReadHandle* pHandle,
|
|||
pMsgSendInfo->fp = dynProcessUseDbRsp;
|
||||
pMsgSendInfo->requestId = pTaskInfo->id.queryId;
|
||||
|
||||
QUERY_CHECK_CODE(asyncSendMsgToServer(pHandle->pMsgCb->clientRpc, &pScanResInfo->vtbScan.epSet, NULL, pMsgSendInfo), lino, _return);
|
||||
code = asyncSendMsgToServer(pHandle->pMsgCb->clientRpc, &pScanResInfo->vtbScan.epSet, NULL, pMsgSendInfo);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
|
||||
QUERY_CHECK_CODE(tsem_wait(&pScanResInfo->vtbScan.ready), lino, _return);
|
||||
code = tsem_wait(&pScanResInfo->vtbScan.ready);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
|
||||
QUERY_CHECK_CODE(queryBuildUseDbOutput(output, pScanResInfo->vtbScan.pRsp), lino, _return);
|
||||
code = queryBuildUseDbOutput(output, pScanResInfo->vtbScan.pRsp);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
|
||||
_return:
|
||||
if (code) {
|
||||
|
@ -1250,12 +1259,13 @@ int32_t dynHashValueComp(void const* lp, void const* rp) {
|
|||
int32_t getVgId(SDBVgInfo* dbInfo, char* dbFName, int32_t* vgId, char *tbName) {
|
||||
int32_t code = 0;
|
||||
int32_t lino = 0;
|
||||
QUERY_CHECK_CODE(dynMakeVgArraySortBy(dbInfo, dynVgInfoComp), lino, _return);
|
||||
code = dynMakeVgArraySortBy(dbInfo, dynVgInfoComp);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
|
||||
int32_t vgNum = (int32_t)taosArrayGetSize(dbInfo->vgArray);
|
||||
if (vgNum <= 0) {
|
||||
qError("db vgroup cache invalid, db:%s, vgroup number:%d", dbFName, vgNum);
|
||||
QUERY_CHECK_CODE(TSDB_CODE_TSC_DB_NOT_SELECTED, lino, _return);
|
||||
QUERY_CHECK_CODE(code = TSDB_CODE_TSC_DB_NOT_SELECTED, lino, _return);
|
||||
}
|
||||
|
||||
SVgroupInfo* vgInfo = NULL;
|
||||
|
@ -1309,8 +1319,10 @@ int32_t getDbVgInfo(SOperatorInfo* pOperator, SName *name, SDBVgInfo **dbVgInfo)
|
|||
|
||||
if (find == NULL) {
|
||||
output = taosMemoryMalloc(sizeof(SUseDbOutput));
|
||||
QUERY_CHECK_CODE(buildDbVgInfoMap(pOperator, pHandle, name, pTaskInfo, output), line, _return);
|
||||
QUERY_CHECK_CODE(taosHashPut(pInfo->vtbScan.dbVgInfoMap, name->dbname, strlen(name->dbname), &output, POINTER_BYTES), line, _return);
|
||||
code = buildDbVgInfoMap(pOperator, pHandle, name, pTaskInfo, output);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
code = taosHashPut(pInfo->vtbScan.dbVgInfoMap, name->dbname, strlen(name->dbname), &output, POINTER_BYTES);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
} else {
|
||||
output = *find;
|
||||
}
|
||||
|
@ -1357,12 +1369,14 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) {
|
|||
|
||||
while (true) {
|
||||
if (pVtbScan->curTableIdx == pVtbScan->lastTableIdx) {
|
||||
QUERY_CHECK_CODE(pOperator->pDownstream[0]->fpSet.getNextFn(pOperator->pDownstream[0], pRes), line, _return);
|
||||
code = pOperator->pDownstream[0]->fpSet.getNextFn(pOperator->pDownstream[0], pRes);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
} else {
|
||||
uint64_t* id = taosArrayGet(pVtbScan->childTableList, pVtbScan->curTableIdx);
|
||||
QUERY_CHECK_NULL(id, code, line, _return, terrno);
|
||||
pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, META_READER_LOCK, &pHandle->api.metaFn);
|
||||
QUERY_CHECK_CODE(pHandle->api.metaReaderFn.getTableEntryByUid(&mr, *id), line, _return);
|
||||
code = pHandle->api.metaReaderFn.getTableEntryByUid(&mr, *id);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
|
||||
for (int32_t j = 0; j < mr.me.colRef.nCols; j++) {
|
||||
if (mr.me.colRef.pColRef[j].hasRef && colNeedScan(pOperator, mr.me.colRef.pColRef[j].id)) {
|
||||
|
@ -1370,15 +1384,22 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) {
|
|||
char dbFname[TSDB_DB_FNAME_LEN] = {0};
|
||||
char orgTbFName[TSDB_TABLE_FNAME_LEN] = {0};
|
||||
|
||||
if (strncmp(mr.me.colRef.pColRef[j].refDbName, pVtbScan->dbName, strlen(pVtbScan->dbName)) != 0) {
|
||||
QUERY_CHECK_CODE(code = TSDB_CODE_VTABLE_NOT_SUPPORT_CROSS_DB, line, _return);
|
||||
}
|
||||
toName(pInfo->vtbScan.acctId, mr.me.colRef.pColRef[j].refDbName, mr.me.colRef.pColRef[j].refTableName, &name);
|
||||
QUERY_CHECK_CODE(getDbVgInfo(pOperator, &name, &dbVgInfo), line, _return);
|
||||
QUERY_CHECK_CODE(tNameGetFullDbName(&name, dbFname), line, _return);
|
||||
QUERY_CHECK_CODE(tNameGetFullTableName(&name, orgTbFName), line, _return);
|
||||
code = getDbVgInfo(pOperator, &name, &dbVgInfo);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
tNameGetFullDbName(&name, dbFname);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
tNameGetFullTableName(&name, orgTbFName);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
|
||||
void *pVal = taosHashGet(pVtbScan->orgTbVgColMap, orgTbFName, sizeof(orgTbFName));
|
||||
if (!pVal) {
|
||||
SOrgTbInfo map = {0};
|
||||
QUERY_CHECK_CODE(getVgId(dbVgInfo, dbFname, &map.vgId, name.tname), line, _return);
|
||||
code = getVgId(dbVgInfo, dbFname, &map.vgId, name.tname);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
tstrncpy(map.tbName, orgTbFName, sizeof(map.tbName));
|
||||
map.colMap = taosArrayInit(10, sizeof(SColIdNameKV));
|
||||
QUERY_CHECK_NULL(map.colMap, code, line, _return, terrno);
|
||||
|
@ -1386,7 +1407,8 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) {
|
|||
colIdNameKV.colId = mr.me.colRef.pColRef[j].id;
|
||||
tstrncpy(colIdNameKV.colName, mr.me.colRef.pColRef[j].refColName, sizeof(colIdNameKV.colName));
|
||||
QUERY_CHECK_NULL(taosArrayPush(map.colMap, &colIdNameKV), code, line, _return, terrno);
|
||||
QUERY_CHECK_CODE(taosHashPut(pVtbScan->orgTbVgColMap, orgTbFName, sizeof(orgTbFName), &map, sizeof(map)), line, _return);
|
||||
code = taosHashPut(pVtbScan->orgTbVgColMap, orgTbFName, sizeof(orgTbFName), &map, sizeof(map));
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
} else {
|
||||
SOrgTbInfo *tbInfo = (SOrgTbInfo *)pVal;
|
||||
SColIdNameKV colIdNameKV = {0};
|
||||
|
@ -1398,13 +1420,15 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) {
|
|||
}
|
||||
|
||||
pVtbScan->vtbScanParam = NULL;
|
||||
QUERY_CHECK_CODE(buildVtbScanOperatorParam(pInfo, &pVtbScan->vtbScanParam, *id), line, _return);
|
||||
code = buildVtbScanOperatorParam(pInfo, &pVtbScan->vtbScanParam, *id);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
|
||||
void* pIter = taosHashIterate(pVtbScan->orgTbVgColMap, NULL);
|
||||
while (pIter != NULL) {
|
||||
SOrgTbInfo* pMap = (SOrgTbInfo*)pIter;
|
||||
SOperatorParam* pExchangeParam = NULL;
|
||||
QUERY_CHECK_CODE(buildExchangeOperatorParamForVScan(&pExchangeParam, 0, pMap), line, _return);
|
||||
code = buildExchangeOperatorParamForVScan(&pExchangeParam, 0, pMap);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
QUERY_CHECK_NULL(taosArrayPush(((SVTableScanOperatorParam*)pVtbScan->vtbScanParam->value)->pOpParamArray, &pExchangeParam), code, line, _return, terrno);
|
||||
pIter = taosHashIterate(pVtbScan->orgTbVgColMap, pIter);
|
||||
}
|
||||
|
@ -1412,7 +1436,8 @@ int32_t vtbScan(SOperatorInfo* pOperator, SSDataBlock** pRes) {
|
|||
|
||||
// reset downstream operator's status
|
||||
pOperator->pDownstream[0]->status = OP_NOT_OPENED;
|
||||
QUERY_CHECK_CODE(pOperator->pDownstream[0]->fpSet.getNextExtFn(pOperator->pDownstream[0], pVtbScan->vtbScanParam, pRes), line, _return);
|
||||
code = pOperator->pDownstream[0]->fpSet.getNextExtFn(pOperator->pDownstream[0], pVtbScan->vtbScanParam, pRes);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
}
|
||||
|
||||
if (*pRes) {
|
||||
|
@ -1478,7 +1503,8 @@ static int32_t initVtbScanInfo(SOperatorInfo* pOperator, SDynQueryCtrlOperatorIn
|
|||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t line = 0;
|
||||
|
||||
QUERY_CHECK_CODE(tsem_init(&pInfo->vtbScan.ready, 0, 0), line, _return);
|
||||
code = tsem_init(&pInfo->vtbScan.ready, 0, 0);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
|
||||
pInfo->vtbScan.scanAllCols = pPhyciNode->vtbScan.scanAllCols;
|
||||
pInfo->vtbScan.suid = pPhyciNode->vtbScan.suid;
|
||||
|
@ -1487,6 +1513,8 @@ static int32_t initVtbScanInfo(SOperatorInfo* pOperator, SDynQueryCtrlOperatorIn
|
|||
pInfo->vtbScan.readHandle = *pHandle;
|
||||
pInfo->vtbScan.curTableIdx = 0;
|
||||
pInfo->vtbScan.lastTableIdx = -1;
|
||||
pInfo->vtbScan.dbName = taosStrdup(pPhyciNode->vtbScan.dbName);
|
||||
QUERY_CHECK_NULL(pInfo->vtbScan.dbName, code, line, _return, terrno);
|
||||
|
||||
pInfo->vtbScan.readColList = taosArrayInit(LIST_LENGTH(pPhyciNode->vtbScan.pScanCols), sizeof(col_id_t));
|
||||
QUERY_CHECK_NULL(pInfo->vtbScan.readColList, code, line, _return, terrno);
|
||||
|
@ -1499,7 +1527,8 @@ static int32_t initVtbScanInfo(SOperatorInfo* pOperator, SDynQueryCtrlOperatorIn
|
|||
|
||||
pInfo->vtbScan.childTableList = taosArrayInit(10, sizeof(uint64_t));
|
||||
QUERY_CHECK_NULL(pInfo->vtbScan.childTableList, code, line, _return, terrno);
|
||||
QUERY_CHECK_CODE(pHandle->api.metaFn.getChildTableList(pHandle->vnode, pInfo->vtbScan.suid, pInfo->vtbScan.childTableList), line, _return);
|
||||
code = pHandle->api.metaFn.getChildTableList(pHandle->vnode, pInfo->vtbScan.suid, pInfo->vtbScan.childTableList);
|
||||
QUERY_CHECK_CODE(code, line, _return);
|
||||
|
||||
pInfo->vtbScan.dbVgInfoMap = taosHashInit(taosArrayGetSize(pInfo->vtbScan.childTableList), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
|
||||
QUERY_CHECK_NULL(pInfo->vtbScan.dbVgInfoMap, code, line, _return, terrno);
|
||||
|
@ -1518,6 +1547,7 @@ int32_t createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numO
|
|||
QRY_PARAM_CHECK(pOptrInfo);
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t line = 0;
|
||||
__optr_fn_t nextFp = NULL;
|
||||
SOperatorInfo* pOperator = NULL;
|
||||
SDynQueryCtrlOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SDynQueryCtrlOperatorInfo));
|
||||
|
@ -1554,7 +1584,8 @@ int32_t createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numO
|
|||
nextFp = seqStableJoin;
|
||||
break;
|
||||
case DYN_QTYPE_VTB_SCAN:
|
||||
QUERY_CHECK_CODE(initVtbScanInfo(pOperator, pInfo, pHandle, pPhyciNode, pTaskInfo), code, _error);
|
||||
code = initVtbScanInfo(pOperator, pInfo, pHandle, pPhyciNode, pTaskInfo);
|
||||
QUERY_CHECK_CODE(code, line, _error);
|
||||
nextFp = vtbScan;
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -1239,7 +1239,7 @@ int32_t addDynamicExchangeSource(SOperatorInfo* pOperator) {
|
|||
freeOperatorParam(pOperator->pOperatorGetParam, OP_GET_PARAM);
|
||||
pOperator->pOperatorGetParam = NULL;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
|
||||
|
|
|
@ -3049,7 +3049,7 @@ void printDataBlock(SSDataBlock* pBlock, const char* flag, const char* taskIdStr
qInfo("%s===stream===%s: Block is Empty. block type %d", taskIdStr, flag, pBlock->info.type);
return;
}
if (qDebugFlag & DEBUG_DEBUG) {
if (qDebugFlag & DEBUG_INFO) {
char* pBuf = NULL;
int32_t code = dumpBlockData(pBlock, flag, &pBuf, taskIdStr);
if (code == 0) {
@ -3069,13 +3069,13 @@ void printSpecDataBlock(SSDataBlock* pBlock, const char* flag, const char* opStr
pBlock->info.version);
return;
}
if (qDebugFlag & DEBUG_DEBUG) {
if (qDebugFlag & DEBUG_INFO) {
char* pBuf = NULL;
char flagBuf[64];
snprintf(flagBuf, sizeof(flagBuf), "%s %s", flag, opStr);
int32_t code = dumpBlockData(pBlock, flagBuf, &pBuf, taskIdStr);
if (code == 0) {
qDebug("%s", pBuf);
qInfo("%s", pBuf);
taosMemoryFree(pBuf);
}
}
@ -141,8 +141,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
const char* id) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN &&
pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
qError("failed to find stream scan operator to set the input data block, %s" PRIx64, id);
return TSDB_CODE_APP_ERROR;
@ -275,6 +274,15 @@ _end:
return code;
}

void qSetStreamMergeInfo(qTaskInfo_t tinfo, SArray* pVTables) {
if (tinfo == 0 || pVTables == NULL) {
return;
}

SStreamTaskInfo* pStreamInfo = &((SExecTaskInfo*)tinfo)->streamInfo;
pStreamInfo->pVTables = pVTables;
}

int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
if (tinfo == NULL) {
return TSDB_CODE_APP_ERROR;
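qSetStreamMergeInfo above simply stores the merge-table array on the task's stream info. A sketch of the intended call shape is below; buildMergeInfos is a hypothetical helper that produces an SArray of SVCTableMergeInfo and is not part of this change.

// Sketch only: attach merge info to a stream task before execution.
extern SArray* buildMergeInfos(void);  // hypothetical: fills SArray<SVCTableMergeInfo>

static void exampleAttachMergeInfo(qTaskInfo_t tinfo) {
  SArray* pVTables = buildMergeInfos();
  if (pVTables != NULL) {
    qSetStreamMergeInfo(tinfo, pVTables);  // the task keeps this pointer in its SStreamTaskInfo
  }
}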
@ -382,7 +382,7 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand
|
|||
return terrno;
|
||||
}
|
||||
|
||||
if (pHandle->vnode) {
|
||||
if (pHandle->vnode && (pTaskInfo->pSubplan->pVTables == NULL)) {
|
||||
code = createScanTableListInfo(&pTableScanNode->scan, pTableScanNode->pGroupTags, pTableScanNode->groupSort,
|
||||
pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo);
|
||||
if (code) {
|
||||
|
@ -515,8 +515,6 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand
|
|||
code = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo, &pOperator);
|
||||
} else if (QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN == type && model != OPTR_EXEC_MODEL_STREAM) {
|
||||
code = createVirtualTableMergeOperatorInfo(NULL, pHandle, NULL, 0, (SVirtualScanPhysiNode*)pPhyNode, pTaskInfo, &pOperator);
|
||||
} else if (QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN == type && model == OPTR_EXEC_MODEL_STREAM) {
|
||||
code = createStreamVtableMergeOperatorInfo(pHandle, (SVirtualScanPhysiNode*)pPhyNode, pTagCond, pTaskInfo, &pOperator);
|
||||
} else {
|
||||
code = TSDB_CODE_INVALID_PARA;
|
||||
pTaskInfo->code = code;
|
||||
|
@ -689,6 +687,26 @@ int32_t createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHand
|
|||
|
||||
|
||||
code = createVirtualTableMergeOperatorInfo(ops, pHandle, pTableListInfo, size, (SVirtualScanPhysiNode*)pPhyNode, pTaskInfo, &pOptr);
|
||||
} else if (QUERY_NODE_PHYSICAL_PLAN_VIRTUAL_TABLE_SCAN == type && model == OPTR_EXEC_MODEL_STREAM) {
|
||||
SVirtualScanPhysiNode* pVirtualTableScanNode = (SVirtualScanPhysiNode*)pPhyNode;
|
||||
STableListInfo* pTableListInfo = tableListCreate();
|
||||
if (!pTableListInfo) {
|
||||
pTaskInfo->code = terrno;
|
||||
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
|
||||
return terrno;
|
||||
}
|
||||
|
||||
code = createScanTableListInfo(&pVirtualTableScanNode->scan, pVirtualTableScanNode->pGroupTags,
|
||||
pVirtualTableScanNode->groupSort, pHandle, pTableListInfo, pTagCond, pTagIndexCond,
|
||||
pTaskInfo);
|
||||
if (code) {
|
||||
pTaskInfo->code = code;
|
||||
tableListDestroy(pTableListInfo);
|
||||
qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
|
||||
return code;
|
||||
}
|
||||
|
||||
code = createStreamVtableMergeOperatorInfo(ops[0], pHandle, pVirtualTableScanNode, pTagCond, pTableListInfo, pTaskInfo, &pOptr);
|
||||
} else {
|
||||
code = TSDB_CODE_INVALID_PARA;
|
||||
pTaskInfo->code = code;
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "functionMgt.h"
|
||||
#include "operator.h"
|
||||
#include "querytask.h"
|
||||
#include "streaminterval.h"
|
||||
#include "taoserror.h"
|
||||
#include "tdatablock.h"
|
||||
|
||||
|
@ -162,6 +163,9 @@ int32_t createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode*
|
|||
code = setRowTsColumnOutputInfo(pOperator->exprSupp.pCtx, numOfCols, &pInfo->pPseudoColInfo);
|
||||
TSDB_CHECK_CODE(code, lino, _error);
|
||||
|
||||
code = initStreamFillOperatorColumnMapInfo(&pOperator->exprSupp, downstream);
|
||||
TSDB_CHECK_CODE(code, lino, _error);
|
||||
|
||||
setOperatorInfo(pOperator, "ProjectOperator", QUERY_NODE_PHYSICAL_PLAN_PROJECT, false, OP_NOT_OPENED, pInfo,
|
||||
pTaskInfo);
|
||||
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doProjectOperation, NULL, destroyProjectOperatorInfo,
|
||||
|
|
|
@ -1237,11 +1237,13 @@ static int32_t createVTableScanInfoFromParam(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
pAPI->metaReaderFn.initReader(&orgTable, pInfo->base.readHandle.vnode, META_READER_LOCK, &pAPI->metaFn);
|
||||
QUERY_CHECK_CODE(pAPI->metaReaderFn.getTableEntryByName(&orgTable, strstr(pParam->pOrgTbInfo->tbName, ".") + 1), lino, _return);
|
||||
code = pAPI->metaReaderFn.getTableEntryByName(&orgTable, strstr(pParam->pOrgTbInfo->tbName, ".") + 1);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
switch (orgTable.me.type) {
|
||||
case TSDB_CHILD_TABLE:
|
||||
pAPI->metaReaderFn.initReader(&superTable, pInfo->base.readHandle.vnode, META_READER_LOCK, &pAPI->metaFn);
|
||||
QUERY_CHECK_CODE(pAPI->metaReaderFn.getTableEntryByUid(&superTable, orgTable.me.ctbEntry.suid), lino, _return);
|
||||
code = pAPI->metaReaderFn.getTableEntryByUid(&superTable, orgTable.me.ctbEntry.suid);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
schema = &superTable.me.stbEntry.schemaRow;
|
||||
break;
|
||||
case TSDB_NORMAL_TABLE:
|
||||
|
@ -1289,8 +1291,10 @@ static int32_t createVTableScanInfoFromParam(SOperatorInfo* pOperator) {
|
|||
blockDataDestroy(pInfo->pResBlock);
|
||||
pInfo->pResBlock = NULL;
|
||||
}
|
||||
QUERY_CHECK_CODE(createOneDataBlockWithColArray(pInfo->pOrgBlock, pBlockColArray, &pInfo->pResBlock), lino, _return);
|
||||
QUERY_CHECK_CODE(initQueryTableDataCondWithColArray(&pInfo->base.cond, &pInfo->base.orgCond, &pInfo->base.readHandle, pColArray), lino, _return);
|
||||
code = createOneDataBlockWithColArray(pInfo->pOrgBlock, pBlockColArray, &pInfo->pResBlock);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
code = initQueryTableDataCondWithColArray(&pInfo->base.cond, &pInfo->base.orgCond, &pInfo->base.readHandle, pColArray);
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
pInfo->base.cond.twindows.skey = pParam->window.ekey + 1;
|
||||
pInfo->base.cond.suid = orgTable.me.type == TSDB_CHILD_TABLE ? superTable.me.uid : 0;
|
||||
pInfo->currentGroupId = 0;
|
||||
|
@ -1304,7 +1308,8 @@ static int32_t createVTableScanInfoFromParam(SOperatorInfo* pOperator) {
|
|||
uint64_t pUid = orgTable.me.uid;
|
||||
STableKeyInfo info = {.groupId = 0, .uid = pUid};
|
||||
int32_t tableIdx = 0;
|
||||
QUERY_CHECK_CODE(taosHashPut(pListInfo->map, &pUid, sizeof(uint64_t), &tableIdx, sizeof(int32_t)), lino, _return);
|
||||
code = taosHashPut(pListInfo->map, &pUid, sizeof(uint64_t), &tableIdx, sizeof(int32_t));
|
||||
QUERY_CHECK_CODE(code, lino, _return);
|
||||
QUERY_CHECK_NULL(taosArrayPush(pListInfo->pTableList, &info), code, lino, _return, terrno);
|
||||
qDebug("add dynamic table scan uid:%" PRIu64 ", %s", info.uid, GET_TASKID(pTaskInfo));
|
||||
|
||||
|
@ -1470,12 +1475,14 @@ int32_t doTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
|
|||
|
||||
SSDataBlock* result = NULL;
|
||||
while (true) {
|
||||
QUERY_CHECK_CODE(startNextGroupScan(pOperator, &result), lino, _end);
|
||||
code = startNextGroupScan(pOperator, &result);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
if (result || pOperator->status == OP_EXEC_DONE) {
|
||||
SSDataBlock* res = NULL;
|
||||
if (result) {
|
||||
QUERY_CHECK_CODE(createOneDataBlockWithTwoBlock(result, pInfo->pOrgBlock, &res), lino, _end);
|
||||
code = createOneDataBlockWithTwoBlock(result, pInfo->pOrgBlock, &res);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
pInfo->pResBlock = res;
|
||||
blockDataDestroy(result);
|
||||
}
|
||||
|
@ -3157,7 +3164,7 @@ int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, STime
|
|||
SOperatorInfo* pOperator = pInfo->pStreamScanOp;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
const char* id = GET_TASKID(pTaskInfo);
|
||||
SSHashObj* pVtableInfos = pTaskInfo->pSubplan->pVTables;
|
||||
bool isVtableSourceScan = (pTaskInfo->pSubplan->pVTables != NULL);
|
||||
|
||||
code = blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
@ -3168,7 +3175,7 @@ int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, STime
|
|||
pBlockInfo->version = pBlock->info.version;
|
||||
|
||||
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
|
||||
if (pVtableInfos == NULL) {
|
||||
if (!isVtableSourceScan) {
|
||||
pBlockInfo->id.groupId = tableListGetTableGroupId(pTableScanInfo->base.pTableListInfo, pBlock->info.id.uid);
|
||||
} else {
|
||||
// use original table uid as groupId for vtable
|
||||
|
@ -3213,7 +3220,7 @@ int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, STime
|
|||
}
|
||||
|
||||
// currently only the tbname pseudo column
|
||||
if (pInfo->numOfPseudoExpr > 0) {
|
||||
if (pInfo->numOfPseudoExpr > 0 && !isVtableSourceScan) {
|
||||
code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes,
|
||||
pBlockInfo->rows, pTaskInfo, &pTableScanInfo->base.metaCache);
|
||||
// ignore the table not exists error, since this table may have been dropped during the scan procedure.
|
||||
|
@ -3762,9 +3769,20 @@ static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
|
|||
SStorageAPI* pAPI = &pTaskInfo->storageAPI;
|
||||
SStreamScanInfo* pInfo = pOperator->info;
|
||||
SStreamTaskInfo* pStreamInfo = &pTaskInfo->streamInfo;
|
||||
SSHashObj* pVtableInfos = pTaskInfo->pSubplan->pVTables;
|
||||
|
||||
qDebug("stream scan started, %s", id);
|
||||
|
||||
if (pVtableInfos != NULL && pStreamInfo->recoverStep != STREAM_RECOVER_STEP__NONE) {
|
||||
qError("stream vtable source scan should not have recovery step: %d", pStreamInfo->recoverStep);
|
||||
pStreamInfo->recoverStep = STREAM_RECOVER_STEP__NONE;
|
||||
}
|
||||
|
||||
if (pVtableInfos != NULL && !pInfo->igCheckUpdate) {
|
||||
qError("stream vtable source scan should have igCheckUpdate");
|
||||
pInfo->igCheckUpdate = false;
|
||||
}
|
||||
|
||||
if (pStreamInfo->recoverStep == STREAM_RECOVER_STEP__PREPARE1 ||
|
||||
pStreamInfo->recoverStep == STREAM_RECOVER_STEP__PREPARE2) {
|
||||
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
|
||||
|
@ -3863,6 +3881,10 @@ static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
|
|||
// TODO: refactor
|
||||
FETCH_NEXT_BLOCK:
|
||||
if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
|
||||
if (pVtableInfos != NULL) {
|
||||
qInfo("stream vtable source scan would ignore all data blocks");
|
||||
pInfo->validBlockIndex = total;
|
||||
}
|
||||
if (pInfo->validBlockIndex >= total) {
|
||||
doClearBufferedBlocks(pInfo);
|
||||
(*ppRes) = NULL;
|
||||
|
@ -4013,6 +4035,10 @@ FETCH_NEXT_BLOCK:
|
|||
return code;
|
||||
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
|
||||
qDebug("stream scan mode:%d, %s", pInfo->scanMode, id);
|
||||
if (pVtableInfos != NULL && pInfo->scanMode != STREAM_SCAN_FROM_READERHANDLE) {
|
||||
qError("stream vtable source scan should not have scan mode: %d", pInfo->scanMode);
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
|
||||
}
|
||||
switch (pInfo->scanMode) {
|
||||
case STREAM_SCAN_FROM_RES: {
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
|
||||
|
@ -4167,6 +4193,11 @@ FETCH_NEXT_BLOCK:
|
|||
continue;
|
||||
}
|
||||
|
||||
if (pVtableInfos != NULL && pInfo->pCreateTbRes->info.rows > 0) {
|
||||
qError("stream vtable source scan should not have create table res");
|
||||
blockDataCleanup(pInfo->pCreateTbRes);
|
||||
}
|
||||
|
||||
if (pInfo->pCreateTbRes->info.rows > 0) {
|
||||
pInfo->scanMode = STREAM_SCAN_FROM_RES;
|
||||
qDebug("create table res exists, rows:%" PRId64 " return from stream scan, %s",
|
||||
|
@ -4178,8 +4209,11 @@ FETCH_NEXT_BLOCK:
|
|||
code = doCheckUpdate(pInfo, pBlockInfo->window.ekey, pInfo->pRes);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
setStreamOperatorState(&pInfo->basic, pInfo->pRes->info.type);
|
||||
code = doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (pVtableInfos == NULL) {
|
||||
// filter should be applied in merge task for vtables
|
||||
code = doFilter(pInfo->pRes, pOperator->exprSupp.pFilterInfo, NULL);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
||||
code = blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
@ -4215,7 +4249,7 @@ FETCH_NEXT_BLOCK:
|
|||
|
||||
goto NEXT_SUBMIT_BLK;
|
||||
} else if (pInfo->blockType == STREAM_INPUT__CHECKPOINT) {
|
||||
if (pInfo->validBlockIndex >= total) {
|
||||
if (pInfo->validBlockIndex >= total || pVtableInfos != NULL) {
|
||||
doClearBufferedBlocks(pInfo);
|
||||
(*ppRes) = NULL;
|
||||
return code;
|
||||
|
@ -4494,6 +4528,18 @@ void destroyStreamScanOperatorInfo(void* param) {
|
|||
taosHashCleanup(pStreamScan->pVtableMergeHandles);
|
||||
pStreamScan->pVtableMergeHandles = NULL;
|
||||
}
|
||||
if (pStreamScan->pVtableMergeBuf) {
|
||||
destroyDiskbasedBuf(pStreamScan->pVtableMergeBuf);
|
||||
pStreamScan->pVtableMergeBuf = NULL;
|
||||
}
|
||||
if (pStreamScan->pVtableReadyHandles) {
|
||||
taosArrayDestroy(pStreamScan->pVtableReadyHandles);
|
||||
pStreamScan->pVtableReadyHandles = NULL;
|
||||
}
|
||||
if (pStreamScan->pTableListInfo) {
|
||||
tableListDestroy(pStreamScan->pTableListInfo);
|
||||
pStreamScan->pTableListInfo = NULL;
|
||||
}
|
||||
if (pStreamScan->matchInfo.pList) {
|
||||
taosArrayDestroy(pStreamScan->matchInfo.pList);
|
||||
}
|
||||
|
@ -4681,15 +4727,13 @@ _end:
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t createStreamVtableBlock(SColMatchInfo *pMatchInfo, SSDataBlock **ppRes, const char *idstr) {
|
||||
static SSDataBlock* createStreamVtableBlock(SColMatchInfo *pMatchInfo, const char *idstr) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SSDataBlock *pRes = NULL;
|
||||
|
||||
QUERY_CHECK_NULL(pMatchInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*ppRes = NULL;
|
||||
|
||||
code = createDataBlock(&pRes);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
int32_t numOfOutput = taosArrayGetSize(pMatchInfo->pList);
|
||||
|
@ -4703,18 +4747,16 @@ static int32_t createStreamVtableBlock(SColMatchInfo *pMatchInfo, SSDataBlock **
|
|||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
||||
*ppRes = pRes;
|
||||
pRes = NULL;
|
||||
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s, id: %s", __func__, lino, tstrerror(code), idstr);
|
||||
if (pRes != NULL) {
|
||||
blockDataDestroy(pRes);
|
||||
}
|
||||
pRes = NULL;
|
||||
terrno = code;
|
||||
}
|
||||
if (pRes != NULL) {
|
||||
blockDataDestroy(pRes);
|
||||
}
|
||||
return code;
|
||||
return pRes;
|
||||
}
|
||||
|
||||
static int32_t createStreamNormalScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode,
|
||||
|
@ -4838,8 +4880,7 @@ static int32_t createStreamNormalScanOperatorInfo(SReadHandle* pHandle, STableSc
|
|||
|
||||
if (pVtableInfos != NULL) {
|
||||
// save vtable info into tqReader for vtable source scan
|
||||
SSDataBlock* pResBlock = NULL;
|
||||
code = createStreamVtableBlock(&pInfo->matchInfo, &pResBlock, idstr);
|
||||
SSDataBlock* pResBlock = createStreamVtableBlock(&pInfo->matchInfo, idstr);
|
||||
QUERY_CHECK_CODE(code, lino, _error);
|
||||
code = pAPI->tqReaderFn.tqReaderSetVtableInfo(pInfo->tqReader, pHandle->vnode, pAPI, pVtableInfos, &pResBlock,
|
||||
idstr);
|
||||
|
@ -4962,8 +5003,8 @@ _error:
|
|||
taosArrayDestroy(pColIds);
|
||||
}
|
||||
|
||||
if (pInfo != NULL) {
|
||||
STableScanInfo* p = (STableScanInfo*) pInfo->pTableScanOp->info;
|
||||
if (pInfo != NULL && pInfo->pTableScanOp != NULL) {
|
||||
STableScanInfo* p = (STableScanInfo*)pInfo->pTableScanOp->info;
|
||||
if (p != NULL) {
|
||||
p->base.pTableListInfo = NULL;
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ typedef struct SVMBufPageInfo {
|
|||
typedef struct SStreamVtableMergeSource {
|
||||
SDiskbasedBuf* pBuf; // buffer for storing data
|
||||
int32_t* pTotalPages; // total pages of all sources in the buffer
|
||||
int32_t primaryTsIndex;
|
||||
|
||||
SSDataBlock* pInputDataBlock; // data block to be written to the buffer
|
||||
int64_t currentExpireTimeMs; // expire time of the input data block
|
||||
|
@ -44,12 +45,15 @@ typedef struct SStreamVtableMergeHandle {
|
|||
SDiskbasedBuf* pBuf;
|
||||
int32_t numOfPages;
|
||||
int32_t numPageLimit;
|
||||
int32_t primaryTsIndex;
|
||||
|
||||
int64_t vuid;
|
||||
int32_t nSrcTbls;
|
||||
SHashObj* pSources;
|
||||
SSDataBlock* datablock; // Does not store data, only used to save the schema of input/output data blocks
|
||||
|
||||
SMultiwayMergeTreeInfo* pMergeTree;
|
||||
int32_t numEmptySources;
|
||||
int64_t globalLatestTs;
|
||||
} SStreamVtableMergeHandle;
|
||||
|
||||
|
@ -90,6 +94,9 @@ static int32_t svmSourceFlushInput(SStreamVtableMergeSource* pSource, const char
|
|||
|
||||
// check data block size
|
||||
pBlock = pSource->pInputDataBlock;
|
||||
if (blockDataGetNumOfRows(pBlock) == 0) {
|
||||
goto _end;
|
||||
}
|
||||
int32_t size = blockDataGetSize(pBlock) + sizeof(int32_t) + taosArrayGetSize(pBlock->pDataBlock) * sizeof(int32_t);
|
||||
QUERY_CHECK_CONDITION(size <= getBufPageSize(pSource->pBuf), code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
|
||||
|
@ -123,37 +130,36 @@ _end:
|
|||
static int32_t svmSourceAddBlock(SStreamVtableMergeSource* pSource, SSDataBlock* pDataBlock, const char* idstr) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
int32_t pageSize = 0;
|
||||
int32_t holdSize = 0;
|
||||
SSDataBlock* pInputDataBlock = NULL;
|
||||
|
||||
QUERY_CHECK_NULL(pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
pInputDataBlock = pSource->pInputDataBlock;
|
||||
if (pInputDataBlock == NULL) {
|
||||
code = createOneDataBlock(pDataBlock, false, &pInputDataBlock);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
pSource->pInputDataBlock = pInputDataBlock;
|
||||
}
|
||||
QUERY_CHECK_CONDITION(taosArrayGetSize(pDataBlock->pDataBlock) >= taosArrayGetSize(pInputDataBlock->pDataBlock), code,
|
||||
lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
int32_t start = 0;
|
||||
int32_t nrows = blockDataGetNumOfRows(pDataBlock);
|
||||
int32_t pageSize =
|
||||
getBufPageSize(pSource->pBuf) - sizeof(int32_t) - taosArrayGetSize(pInputDataBlock->pDataBlock) * sizeof(int32_t);
|
||||
while (start < nrows) {
|
||||
int32_t holdSize = blockDataGetSize(pInputDataBlock);
|
||||
QUERY_CHECK_CONDITION(holdSize < pageSize, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
int32_t stop = 0;
|
||||
int32_t stop = start;
|
||||
code = blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pageSize - holdSize);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (stop == start - 1) {
|
||||
// If pInputDataBlock cannot hold new rows, ignore the error and write pInputDataBlock to the buffer
|
||||
} else {
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
// append new rows to pInputDataBlock
|
||||
if (blockDataGetNumOfRows(pInputDataBlock) == 0) {
|
||||
// set expires time for the first block
|
||||
pSource->currentExpireTimeMs = taosGetTimestampMs() + tsStreamVirtualMergeMaxDelayMs;
|
||||
}
|
||||
int32_t numOfRows = stop - start + 1;
|
||||
code = blockDataEnsureCapacity(pInputDataBlock, pInputDataBlock->info.rows + numOfRows);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
code = blockDataMergeNRows(pInputDataBlock, pDataBlock, start, numOfRows);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
@ -176,6 +182,17 @@ _end:
|
|||
|
||||
static bool svmSourceIsEmpty(SStreamVtableMergeSource* pSource) { return listNEles(pSource->pageInfoList) == 0; }
|
||||
|
||||
static int64_t svmSourceGetExpireTime(SStreamVtableMergeSource* pSource) {
|
||||
SListNode* pn = tdListGetHead(pSource->pageInfoList);
|
||||
if (pn != NULL) {
|
||||
SVMBufPageInfo* pageInfo = (SVMBufPageInfo*)pn->data;
|
||||
if (pageInfo != NULL) {
|
||||
return pageInfo->expireTimeMs;
|
||||
}
|
||||
}
|
||||
return INT64_MAX;
|
||||
}
|
||||
|
||||
static int32_t svmSourceReadBuf(SStreamVtableMergeSource* pSource, const char* idstr) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
|
@ -188,6 +205,11 @@ static int32_t svmSourceReadBuf(SStreamVtableMergeSource* pSource, const char* i
|
|||
QUERY_CHECK_NULL(pSource->pOutputDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
blockDataCleanup(pSource->pOutputDataBlock);
|
||||
int32_t numOfCols = taosArrayGetSize(pSource->pOutputDataBlock->pDataBlock);
|
||||
for (int32_t i = 0; i < numOfCols; i++) {
|
||||
SColumnInfoData* pCol = taosArrayGet(pSource->pOutputDataBlock->pDataBlock, i);
|
||||
pCol->hasNull = true;
|
||||
}
|
||||
|
||||
pn = tdListGetHead(pSource->pageInfoList);
|
||||
QUERY_CHECK_NULL(pn, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
|
@ -215,21 +237,15 @@ static int32_t svmSourceCurrentTs(SStreamVtableMergeSource* pSource, const char*
|
|||
SColumnInfoData* tsCol = NULL;
|
||||
|
||||
QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_CONDITION(!svmSourceIsEmpty(pSource), code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pSource->pOutputDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_CONDITION(pSource->rowIndex >= 0 && pSource->rowIndex < blockDataGetNumOfRows(pSource->pOutputDataBlock),
|
||||
code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
if (blockDataGetNumOfRows(pSource->pOutputDataBlock) == 0) {
|
||||
code = svmSourceReadBuf(pSource, idstr);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
QUERY_CHECK_CONDITION(pSource->rowIndex < blockDataGetNumOfRows(pSource->pOutputDataBlock), code, lino, _end,
|
||||
TSDB_CODE_INVALID_PARA);
|
||||
|
||||
tsCol = taosArrayGet(pSource->pOutputDataBlock->pDataBlock, 0);
|
||||
tsCol = taosArrayGet(pSource->pOutputDataBlock->pDataBlock, pSource->primaryTsIndex);
|
||||
QUERY_CHECK_NULL(tsCol, code, lino, _end, terrno);
|
||||
QUERY_CHECK_CONDITION(tsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*pTs = ((int64_t*)tsCol->pData)[pSource->rowIndex];
|
||||
pSource->latestTs = TMAX(*pTs, pSource->latestTs);
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -238,55 +254,54 @@ _end:
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t svmSourceMoveNext(SStreamVtableMergeSource* pSource, const char* idstr, SVM_NEXT_RESULT* pRes) {
|
||||
static int32_t svmSourceMoveNext(SStreamVtableMergeSource* pSource, const char* idstr) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SListNode* pn = NULL;
|
||||
void* page = NULL;
|
||||
int64_t latestTs = 0;
|
||||
|
||||
QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pSource->pOutputDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*pRes = SVM_NEXT_NOT_READY;
|
||||
latestTs = pSource->latestTs;
|
||||
|
||||
while (true) {
|
||||
if (svmSourceIsEmpty(pSource)) {
|
||||
pSource->rowIndex = 0;
|
||||
break;
|
||||
if (pSource->rowIndex >= 0) {
|
||||
QUERY_CHECK_CONDITION(pSource->rowIndex < blockDataGetNumOfRows(pSource->pOutputDataBlock), code, lino, _end,
|
||||
TSDB_CODE_INVALID_PARA);
|
||||
pSource->rowIndex++;
|
||||
if (pSource->rowIndex >= blockDataGetNumOfRows(pSource->pOutputDataBlock)) {
|
||||
// Pop the page from the list and recycle it
|
||||
pn = tdListPopHead(pSource->pageInfoList);
|
||||
QUERY_CHECK_NULL(pn, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
QUERY_CHECK_NULL(pn->data, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
SVMBufPageInfo* pageInfo = (SVMBufPageInfo*)pn->data;
|
||||
page = getBufPage(pSource->pBuf, pageInfo->pageId);
|
||||
QUERY_CHECK_NULL(page, code, lino, _end, terrno);
|
||||
code = dBufSetBufPageRecycled(pSource->pBuf, page);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
(*pSource->pTotalPages)--;
|
||||
taosMemoryFreeClear(pn);
|
||||
pSource->rowIndex = -1;
|
||||
}
|
||||
}
|
||||
|
||||
QUERY_CHECK_CONDITION(pSource->rowIndex < blockDataGetNumOfRows(pSource->pOutputDataBlock), code, lino, _end,
|
||||
TSDB_CODE_INVALID_PARA);
|
||||
|
||||
pSource->rowIndex++;
|
||||
if (pSource->rowIndex >= blockDataGetNumOfRows(pSource->pOutputDataBlock)) {
|
||||
// Pop the page from the list and recycle it
|
||||
pn = tdListPopHead(pSource->pageInfoList);
|
||||
QUERY_CHECK_NULL(pn, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
QUERY_CHECK_NULL(pn->data, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
SVMBufPageInfo* pageInfo = (SVMBufPageInfo*)pn->data;
|
||||
page = getBufPage(pSource->pBuf, pageInfo->pageId);
|
||||
QUERY_CHECK_NULL(page, code, lino, _end, terrno);
|
||||
code = dBufSetBufPageRecycled(pSource->pBuf, page);
|
||||
if (pSource->rowIndex == -1) {
|
||||
if (svmSourceIsEmpty(pSource)) {
|
||||
break;
|
||||
}
|
||||
// Read the first page from the list
|
||||
code = svmSourceReadBuf(pSource, idstr);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
(*pSource->pTotalPages)--;
|
||||
taosMemoryFreeClear(pn);
|
||||
pSource->rowIndex = 0;
|
||||
}
|
||||
|
||||
if (svmSourceIsEmpty(pSource)) {
|
||||
pSource->rowIndex = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
int64_t ts = 0;
|
||||
code = svmSourceCurrentTs(pSource, idstr, &ts);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (ts > latestTs && ts >= *pSource->pGlobalLatestTs) {
|
||||
*pRes = SVM_NEXT_FOUND;
|
||||
break;
|
||||
// Check the timestamp of the current row
|
||||
int64_t currentTs = INT64_MIN;
|
||||
code = svmSourceCurrentTs(pSource, idstr, ¤tTs);
|
||||
if (currentTs > pSource->latestTs) {
|
||||
pSource->latestTs = currentTs;
|
||||
if (currentTs >= *pSource->pGlobalLatestTs) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -306,6 +321,12 @@ static int32_t svmSourceCompare(const void* pLeft, const void* pRight, void* par
|
|||
SStreamVtableMergeSource* pLeftSource = *(SStreamVtableMergeSource**)taosArrayGet(pValidSources, left);
|
||||
SStreamVtableMergeSource* pRightSource = *(SStreamVtableMergeSource**)taosArrayGet(pValidSources, right);
|
||||
|
||||
if (svmSourceIsEmpty(pLeftSource)) {
|
||||
return 1;
|
||||
} else if (svmSourceIsEmpty(pRightSource)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int64_t leftTs = 0;
|
||||
code = svmSourceCurrentTs(pLeftSource, "", &leftTs);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -335,10 +356,14 @@ static SStreamVtableMergeSource* svmAddSource(SStreamVtableMergeHandle* pHandle,
|
|||
QUERY_CHECK_NULL(pSource, code, lino, _end, terrno);
|
||||
pSource->pBuf = pHandle->pBuf;
|
||||
pSource->pTotalPages = &pHandle->numOfPages;
|
||||
pSource->primaryTsIndex = pHandle->primaryTsIndex;
|
||||
code = createOneDataBlock(pHandle->datablock, false, &pSource->pInputDataBlock);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
pSource->pageInfoList = tdListNew(sizeof(SVMBufPageInfo));
|
||||
QUERY_CHECK_NULL(pSource->pageInfoList, code, lino, _end, terrno);
|
||||
code = createOneDataBlock(pHandle->datablock, false, &pSource->pOutputDataBlock);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
pSource->rowIndex = -1;
|
||||
pSource->latestTs = INT64_MIN;
|
||||
pSource->pGlobalLatestTs = &pHandle->globalLatestTs;
|
||||
code = taosHashPut(pHandle->pSources, &uid, sizeof(uid), &pSource, POINTER_BYTES);
|
||||
|
@ -387,14 +412,16 @@ static int32_t svmBuildTree(SStreamVtableMergeHandle* pHandle, SVM_NEXT_RESULT*
|
|||
pIter = taosHashIterate(pHandle->pSources, NULL);
|
||||
while (pIter != NULL) {
|
||||
SStreamVtableMergeSource* pSource = *(SStreamVtableMergeSource**)pIter;
|
||||
if (svmSourceIsEmpty(pSource)) {
|
||||
code = svmSourceFlushInput(pSource, idstr);
|
||||
code = svmSourceFlushInput(pSource, idstr);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (pSource->rowIndex == -1) {
|
||||
code = svmSourceMoveNext(pSource, idstr);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
if (!svmSourceIsEmpty(pSource)) {
|
||||
px = taosArrayPush(pReadySources, &pSource);
|
||||
QUERY_CHECK_NULL(px, code, lino, _end, terrno);
|
||||
globalExpireTimeMs = TMIN(globalExpireTimeMs, pSource->currentExpireTimeMs);
|
||||
globalExpireTimeMs = TMIN(globalExpireTimeMs, svmSourceGetExpireTime(pSource));
|
||||
}
|
||||
pIter = taosHashIterate(pHandle->pSources, pIter);
|
||||
}
|
||||
|
@ -427,6 +454,7 @@ static int32_t svmBuildTree(SStreamVtableMergeHandle* pHandle, SVM_NEXT_RESULT*
|
|||
void* param = NULL;
|
||||
code = tMergeTreeCreate(&pHandle->pMergeTree, taosArrayGetSize(pReadySources), pReadySources, svmSourceCompare);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
pHandle->numEmptySources = 0;
|
||||
pReadySources = NULL;
|
||||
*pRes = SVM_NEXT_FOUND;
|
||||
|
||||
|
@ -453,7 +481,7 @@ int32_t streamVtableMergeAddBlock(SStreamVtableMergeHandle* pHandle, SSDataBlock
|
|||
QUERY_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
pTbUid = pDataBlock->info.id.uid;
|
||||
pTbUid = pDataBlock->info.id.groupId;
|
||||
px = taosHashGet(pHandle->pSources, &pTbUid, sizeof(int64_t));
|
||||
|
||||
if (px == NULL) {
|
||||
|
@ -480,8 +508,31 @@ _end:
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t streamVtableMergeNextTuple(SStreamVtableMergeHandle* pHandle, SSDataBlock* pResBlock, SVM_NEXT_RESULT* pRes,
|
||||
const char* idstr) {
|
||||
int32_t streamVtableMergeCurrent(SStreamVtableMergeHandle* pHandle, SSDataBlock** ppDataBlock, int32_t* pRowIdx,
|
||||
const char* idstr) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
|
||||
QUERY_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pHandle->pMergeTree, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
int32_t idx = tMergeTreeGetChosenIndex(pHandle->pMergeTree);
|
||||
SArray* pReadySources = pHandle->pMergeTree->param;
|
||||
void* px = taosArrayGet(pReadySources, idx);
|
||||
QUERY_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
SStreamVtableMergeSource* pSource = *(SStreamVtableMergeSource**)px;
|
||||
QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
*ppDataBlock = pSource->pOutputDataBlock;
|
||||
*pRowIdx = pSource->rowIndex;
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s, id: %s", __func__, lino, tstrerror(code), idstr);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t streamVtableMergeMoveNext(SStreamVtableMergeHandle* pHandle, SVM_NEXT_RESULT* pRes, const char* idstr) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
void* px = NULL;
|
||||
|
@ -489,61 +540,74 @@ int32_t streamVtableMergeNextTuple(SStreamVtableMergeHandle* pHandle, SSDataBloc
|
|||
SStreamVtableMergeSource* pSource = NULL;
|
||||
|
||||
QUERY_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pResBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pRes, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*pRes = SVM_NEXT_NOT_READY;
|
||||
if (pHandle->pMergeTree == NULL) {
|
||||
SVM_NEXT_RESULT buildRes = SVM_NEXT_NOT_READY;
|
||||
code = svmBuildTree(pHandle, &buildRes, idstr);
|
||||
code = svmBuildTree(pHandle, pRes, idstr);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (buildRes == SVM_NEXT_NOT_READY) {
|
||||
goto _end;
|
||||
}
|
||||
goto _end;
|
||||
}
|
||||
|
||||
QUERY_CHECK_NULL(pHandle->pMergeTree, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
int32_t idx = tMergeTreeGetChosenIndex(pHandle->pMergeTree);
|
||||
pReadySources = pHandle->pMergeTree->param;
|
||||
px = taosArrayGet(pReadySources, idx);
|
||||
QUERY_CHECK_NULL(px, code, lino, _end, terrno);
|
||||
pSource = *(SStreamVtableMergeSource**)px;
|
||||
code = blockCopyOneRow(pSource->pOutputDataBlock, pSource->rowIndex, &pResBlock);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
*pRes = SVM_NEXT_FOUND;
|
||||
pHandle->globalLatestTs = TMAX(pSource->latestTs, pHandle->globalLatestTs);
|
||||
|
||||
SVM_NEXT_RESULT nextRes = SVM_NEXT_NOT_READY;
|
||||
int32_t origNumOfPages = pHandle->numOfPages;
|
||||
code = svmSourceMoveNext(pSource, idstr, &nextRes);
|
||||
int32_t origNumOfPages = pHandle->numOfPages;
|
||||
code = svmSourceMoveNext(pSource, idstr);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
bool needDestroy = false;
|
||||
if (nextRes == SVM_NEXT_NOT_READY) {
|
||||
needDestroy = true;
|
||||
} else if (taosArrayGetSize((SArray*)pHandle->pMergeTree->param) != pHandle->nSrcTbls &&
|
||||
pHandle->numOfPages != origNumOfPages) {
|
||||
// The original data for this portion is incomplete. Its merge was forcibly triggered by certain conditions, so we
|
||||
// must recheck if those conditions are still met.
|
||||
if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_MAX_DELAY) {
|
||||
int64_t globalExpireTimeMs = INT64_MAX;
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pReadySources); ++i) {
|
||||
px = taosArrayGet(pReadySources, i);
|
||||
QUERY_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
pSource = *(SStreamVtableMergeSource**)px;
|
||||
globalExpireTimeMs = TMIN(globalExpireTimeMs, pSource->currentExpireTimeMs);
|
||||
}
|
||||
needDestroy = taosGetTimestampMs() < globalExpireTimeMs;
|
||||
} else if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_MAX_MEMORY) {
|
||||
needDestroy = pHandle->numOfPages < pHandle->numPageLimit;
|
||||
} else {
|
||||
code = TSDB_CODE_INTERNAL_ERROR;
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
if (svmSourceIsEmpty(pSource)) {
|
||||
++pHandle->numEmptySources;
|
||||
}
|
||||
|
||||
bool needDestroy = false;
|
||||
if (pHandle->numEmptySources == taosArrayGetSize(pReadySources)) {
|
||||
// all sources are empty
|
||||
needDestroy = true;
|
||||
} else {
|
||||
code = tMergeTreeAdjust(pHandle->pMergeTree, tMergeTreeGetAdjustIndex(pHandle->pMergeTree));
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (pHandle->numEmptySources > 0) {
|
||||
if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_WAIT_FOREVER) {
|
||||
idx = tMergeTreeGetChosenIndex(pHandle->pMergeTree);
|
||||
px = taosArrayGet(pReadySources, idx);
|
||||
QUERY_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
pSource = *(SStreamVtableMergeSource**)px;
|
||||
QUERY_CHECK_NULL(pSource, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
int64_t currentTs = INT64_MIN;
|
||||
code = svmSourceCurrentTs(pSource, idstr, ¤tTs);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
needDestroy = currentTs > pHandle->globalLatestTs;
|
||||
} else if (pHandle->numOfPages != origNumOfPages) {
|
||||
// The original data for this portion is incomplete. Its merge was forcibly triggered by certain conditions, so
|
||||
// we must recheck if those conditions are still met.
|
||||
if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_MAX_DELAY) {
|
||||
int64_t globalExpireTimeMs = INT64_MAX;
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pReadySources); ++i) {
|
||||
px = taosArrayGet(pReadySources, i);
|
||||
QUERY_CHECK_NULL(px, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
pSource = *(SStreamVtableMergeSource**)px;
|
||||
globalExpireTimeMs = TMIN(globalExpireTimeMs, svmSourceGetExpireTime(pSource));
|
||||
}
|
||||
needDestroy = taosGetTimestampMs() < globalExpireTimeMs;
|
||||
} else if (tsStreamVirtualMergeWaitMode == STREAM_VIRTUAL_MERGE_MAX_MEMORY) {
|
||||
needDestroy = pHandle->numOfPages < pHandle->numPageLimit;
|
||||
} else {
|
||||
code = TSDB_CODE_INTERNAL_ERROR;
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (needDestroy) {
|
||||
svmDestroyTree(&pHandle->pMergeTree);
|
||||
} else {
|
||||
*pRes = SVM_NEXT_FOUND;
|
||||
}
|
||||
|
||||
_end:
|
||||
|
@ -553,8 +617,9 @@ _end:
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle** ppHandle, int32_t nSrcTbls, int32_t numPageLimit,
|
||||
SDiskbasedBuf* pBuf, SSDataBlock* pResBlock, const char* idstr) {
|
||||
int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle** ppHandle, int64_t vuid, int32_t nSrcTbls,
|
||||
int32_t numPageLimit, int32_t primaryTsIndex, SDiskbasedBuf* pBuf,
|
||||
SSDataBlock* pResBlock, const char* idstr) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SStreamVtableMergeHandle* pHandle = NULL;
|
||||
|
@ -569,6 +634,8 @@ int32_t streamVtableMergeCreateHandle(SStreamVtableMergeHandle** ppHandle, int32
|
|||
|
||||
pHandle->pBuf = pBuf;
|
||||
pHandle->numPageLimit = numPageLimit;
|
||||
pHandle->primaryTsIndex = primaryTsIndex;
|
||||
pHandle->vuid = vuid;
|
||||
pHandle->nSrcTbls = nSrcTbls;
|
||||
pHandle->pSources = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||
QUERY_CHECK_NULL(pHandle->pSources, code, lino, _end, terrno);
|
||||
|
@ -590,7 +657,8 @@ _end:
|
|||
return code;
|
||||
}
|
||||
|
||||
void streamVtableMergeDestroyHandle(SStreamVtableMergeHandle** ppHandle) {
|
||||
void streamVtableMergeDestroyHandle(void* ptr) {
|
||||
SStreamVtableMergeHandle** ppHandle = ptr;
|
||||
if (ppHandle == NULL || *ppHandle == NULL) {
|
||||
return;
|
||||
}
|
||||
|
@ -600,8 +668,16 @@ void streamVtableMergeDestroyHandle(SStreamVtableMergeHandle** ppHandle) {
|
|||
taosHashCleanup(pHandle->pSources);
|
||||
pHandle->pSources = NULL;
|
||||
}
|
||||
|
||||
blockDataDestroy(pHandle->datablock);
|
||||
svmDestroyTree(&pHandle->pMergeTree);
|
||||
|
||||
taosMemoryFreeClear(*ppHandle);
|
||||
}
|
||||
|
||||
int64_t streamVtableMergeHandleGetVuid(SStreamVtableMergeHandle* pHandle) {
|
||||
if (pHandle != NULL) {
|
||||
return pHandle->vuid;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -98,7 +98,7 @@ static int32_t doProcessSql(SStreamRecParam* pParam, SJson** ppJsonResult) {
  curlRes = curl_easy_setopt(pCurl, CURLOPT_POSTFIELDS, pParam->pSql);
  QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);

  qTrace("===stream=== sql:%s", pParam->pSql);
  qDebug("===stream=== sql:%s", pParam->pSql);

  curlRes = curl_easy_setopt(pCurl, CURLOPT_FOLLOWLOCATION, 1L);
  QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);

@ -110,7 +110,11 @@ static int32_t doProcessSql(SStreamRecParam* pParam, SJson** ppJsonResult) {
  QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);

  curlRes = curl_easy_perform(pCurl);
  QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
  if (curlRes != CURLE_OK) {
    qError("error: unable to request data from %s.since %s. res code:%d", pParam->pUrl, curl_easy_strerror(curlRes),
           (int32_t)curlRes);
    QUERY_CHECK_CONDITION(curlRes == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
  }

_end:
  if (pHeaders != NULL) {

@ -222,8 +226,16 @@ static int32_t jsonToDataCell(const SJson* pJson, SResultCellData* pCell) {
  return code;
}

static int32_t getColumnIndex(SSHashObj* pMap, int32_t colId) {
  void* pVal = tSimpleHashGet(pMap, &colId, sizeof(int32_t));
  if (pVal == NULL) {
    return -1;
  }
  return *(int32_t*)pVal;
}

static int32_t doTransformFillResult(const SJson* pJsonResult, SArray* pRangeRes, void* pEmptyRow, int32_t size,
                                     int32_t* pOffsetInfo, int32_t numOfCols) {
                                     int32_t* pOffsetInfo, int32_t numOfCols, SSHashObj* pMap) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;

@ -239,13 +251,21 @@ static int32_t doTransformFillResult(const SJson* pJsonResult, SArray* pRangeRes
    SSliceRowData* pRowData = taosMemoryCalloc(1, sizeof(TSKEY) + size);
    pRowData->key = INT64_MIN;
    memcpy(pRowData->pRowVal, pEmptyRow, size);
    for (int32_t j = 0; j < cols && j < numOfCols; ++j) {
      SJson* pJsonCell = tjsonGetArrayItem(pRow, j);
      QUERY_CHECK_NULL(pJsonCell, code, lino, _end, TSDB_CODE_FAILED);

    int32_t colOffset = 0;
    for (int32_t j = 0; j < numOfCols; ++j) {
      SResultCellData* pDataCell = getSliceResultCell((SResultCellData*)pRowData->pRowVal, j, pOffsetInfo);
      QUERY_CHECK_NULL(pDataCell, code, lino, _end, TSDB_CODE_FAILED);

      int32_t colIndex = getColumnIndex(pMap, j);
      if (colIndex == -1 || colIndex >= cols) {
        qDebug("invalid result columm index:%d", colIndex);
        pDataCell->isNull = true;
        continue;
      }

      SJson* pJsonCell = tjsonGetArrayItem(pRow, colIndex);
      QUERY_CHECK_NULL(pJsonCell, code, lino, _end, TSDB_CODE_FAILED);

      code = jsonToDataCell(pJsonCell, pDataCell);
      QUERY_CHECK_CODE(code, lino, _end);
    }

@ -278,7 +298,7 @@ int32_t streamClientGetFillRange(SStreamRecParam* pParam, SWinKey* pKey, SArray*
  SJson* pJsRes = NULL;
  code = doProcessSql(pParam, &pJsRes);
  QUERY_CHECK_CODE(code, lino, _end);
  code = doTransformFillResult(pJsRes, pRangeRes, pEmptyRow, size, pOffsetInfo, numOfCols);
  code = doTransformFillResult(pJsRes, pRangeRes, pEmptyRow, size, pOffsetInfo, numOfCols, pParam->pColIdMap);
  QUERY_CHECK_CODE(code, lino, _end);

_end:

@ -288,6 +308,33 @@ _end:
  return code;
}

int32_t streamClientCheckCfg(SStreamRecParam* pParam) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;

  const char* pTestSql = "select name, ntables, status from information_schema.ins_databases;";
  (void)memset(pParam->pSql, 0, pParam->sqlCapcity);
  tstrncpy(pParam->pSql, pTestSql, pParam->sqlCapcity);

  SJson* pJsRes = NULL;
  code = doProcessSql(pParam, &pJsRes);
  QUERY_CHECK_CODE(code, lino, _end);
  SJson* jArray = tjsonGetObjectItem(pJsRes, "data");
  QUERY_CHECK_NULL(jArray, code, lino, _end, TSDB_CODE_FAILED);

  int32_t rows = tjsonGetArraySize(jArray);
  if (rows < 2) {
    code = TSDB_CODE_INVALID_CFG_VALUE;
    qError("invalid taos adapter config value");
  }

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
  }
  return code;
}

#else

int32_t streamClientGetResultRange(SStreamRecParam* pParam, SSHashObj* pRangeMap, SArray* pRangeRes) {

@ -297,4 +344,8 @@ int32_t streamClientGetFillRange(SStreamRecParam* pParam, SWinKey* pKey, SArray*
  return TSDB_CODE_FAILED;
}

int32_t streamClientCheckCfg(SStreamRecParam* pParam) {
  return TSDB_CODE_FAILED;
}

#endif

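The stream client changes above send their requests through taosAdapter's /rest/sql endpoint with an `Authorization: Basic ...` header. As a rough illustration only (not part of this commit), a standalone libcurl program issuing the same kind of request might look like the sketch below; the host, port, and token are placeholder assumptions, and only minimal error handling is shown.

```c
/* Hypothetical standalone sketch: POST one SQL statement to a taosAdapter
 * /rest/sql endpoint with HTTP Basic auth, the same request shape the
 * stream client builds above. Host, port and token are placeholders. */
#include <stdio.h>
#include <curl/curl.h>

int main(void) {
  CURL* pCurl = curl_easy_init();
  if (pCurl == NULL) return 1;

  struct curl_slist* pHeaders = NULL;
  /* replace the token with base64("user:password") for the target server */
  pHeaders = curl_slist_append(pHeaders, "Authorization: Basic REPLACE_WITH_TOKEN");

  curl_easy_setopt(pCurl, CURLOPT_URL, "http://localhost:6041/rest/sql");
  curl_easy_setopt(pCurl, CURLOPT_HTTPHEADER, pHeaders);
  curl_easy_setopt(pCurl, CURLOPT_FOLLOWLOCATION, 1L);
  curl_easy_setopt(pCurl, CURLOPT_POSTFIELDS,
                   "select name, ntables, status from information_schema.ins_databases;");

  CURLcode curlRes = curl_easy_perform(pCurl);
  if (curlRes != CURLE_OK) {
    fprintf(stderr, "request failed: %s (code %d)\n", curl_easy_strerror(curlRes), (int)curlRes);
  }

  curl_slist_free_all(pHeaders);
  curl_easy_cleanup(pCurl);
  return (curlRes == CURLE_OK) ? 0 : 1;
}
```

Built with `gcc demo.c -lcurl`, the response body goes to stdout by default, which is enough to see whether the adapter answers the test query.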
@ -182,6 +182,20 @@ void doBuildNonblockFillResult(SOperatorInfo* pOperator, SStreamFillSupporter* p
    }
  }

  if (pBlock->info.rows > 0) {
    SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
    void* tbname = NULL;
    int32_t winCode = TSDB_CODE_SUCCESS;
    code = pInfo->stateStore.streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname,
                                                   false, &winCode);
    QUERY_CHECK_CODE(code, lino, _end);
    if (winCode != TSDB_CODE_SUCCESS) {
      pBlock->info.parTbName[0] = 0;
    } else {
      memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
    }
  }

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));

@ -402,8 +416,6 @@ int32_t doStreamNonblockFillNext(SOperatorInfo* pOperator, SSDataBlock** ppRes)
      case STREAM_INVALID: {
        code = doApplyStreamScalarCalculation(pOperator, pBlock, pInfo->pSrcBlock);
        QUERY_CHECK_CODE(code, lino, _end);

        memcpy(pInfo->pSrcBlock->info.parTbName, pBlock->info.parTbName, TSDB_TABLE_NAME_LEN);
        pInfo->srcRowIndex = -1;
      } break;
      case STREAM_CHECKPOINT: {

@ -476,4 +488,39 @@ void destroyStreamNonblockFillOperatorInfo(void* param) {
  SStreamFillOperatorInfo* pInfo = (SStreamFillOperatorInfo*)param;
  resetTimeSlicePrevAndNextWindow(pInfo->pFillSup);
  destroyStreamFillOperatorInfo(param);
}
}

static int32_t doInitStreamColumnMapInfo(SExprSupp* pExprSup, SSHashObj* pColMap) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;

  for (int32_t i = 0; i < pExprSup->numOfExprs; ++i) {
    SExprInfo* pOneExpr = &pExprSup->pExprInfo[i];
    int32_t destSlotId = pOneExpr->base.resSchema.slotId;
    for (int32_t j = 0; j < pOneExpr->base.numOfParams; ++j) {
      SFunctParam* pFuncParam = &pOneExpr->base.pParam[j];
      if (pFuncParam->type == FUNC_PARAM_TYPE_COLUMN) {
        int32_t sourceSlotId = pFuncParam->pCol->slotId;
        code = tSimpleHashPut(pColMap, &sourceSlotId, sizeof(int32_t), &destSlotId, sizeof(int32_t));
        QUERY_CHECK_CODE(code, lino, _end);
      }
    }
  }

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s.", __func__, lino, tstrerror(code));
  }
  return code;
}

int32_t initStreamFillOperatorColumnMapInfo(SExprSupp* pExprSup, SOperatorInfo* pOperator) {
  if (pOperator != NULL && pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL) {
    SStreamFillOperatorInfo* pInfo = (SStreamFillOperatorInfo*)pOperator->info;
    if (pInfo->nbSup.recParam.pColIdMap == NULL) {
      return TSDB_CODE_SUCCESS;
    }
    return doInitStreamColumnMapInfo(pExprSup, pInfo->nbSup.recParam.pColIdMap);
  }
  return TSDB_CODE_SUCCESS;
}

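The helpers added above, together with getColumnIndex earlier in this diff, record a slot-id map keyed by each column parameter's slot id with the expression's result slot id as the value, and consult it when laying out adapter fill results. The toy program below is only an illustration of that build-then-lookup idea; it uses a plain array instead of TDengine's tSimpleHash utilities, and all slot ids in it are made up.

```c
/* Toy illustration of the slot-id map: build a mapping from one slot-id
 * space to another, then look entries up; -1 plays the role of "not found". */
#include <stdio.h>

#define MAX_SLOTS 16

static void buildColMap(const int* srcSlots, const int* dstSlots, int n, int* colMap) {
  for (int i = 0; i < MAX_SLOTS; ++i) colMap[i] = -1; /* no mapping recorded */
  for (int i = 0; i < n; ++i) {
    if (srcSlots[i] >= 0 && srcSlots[i] < MAX_SLOTS) colMap[srcSlots[i]] = dstSlots[i];
  }
}

static int getMappedSlot(const int* colMap, int slotId) {
  if (slotId < 0 || slotId >= MAX_SLOTS) return -1;
  return colMap[slotId];
}

int main(void) {
  int srcSlots[] = {0, 2, 3}; /* hypothetical parameter slot ids */
  int dstSlots[] = {0, 1, 2}; /* hypothetical result slot ids */
  int colMap[MAX_SLOTS];

  buildColMap(srcSlots, dstSlots, 3, colMap);
  for (int slot = 0; slot < 4; ++slot) {
    printf("slot %d -> %d\n", slot, getMappedSlot(colMap, slot));
  }
  return 0;
}
```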
@ -22,6 +22,7 @@

#include "executorInt.h"
#include "streamexecutorInt.h"
#include "streamsession.h"
#include "streaminterval.h"
#include "tcommon.h"
#include "thash.h"

@ -1746,7 +1747,8 @@ static void setValueForFillInfo(SStreamFillSupporter* pFillSup, SStreamFillInfo*
  }
}

int32_t getDownStreamInfo(SOperatorInfo* downstream, int8_t* triggerType, SInterval* pInterval, int16_t* pOperatorFlag) {
int32_t getDownStreamInfo(SOperatorInfo* downstream, int8_t* triggerType, SInterval* pInterval,
                          int16_t* pOperatorFlag) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  if (IS_NORMAL_INTERVAL_OP(downstream)) {

@ -1754,17 +1756,16 @@ int32_t getDownStreamInfo(SOperatorInfo* downstream, int8_t* triggerType, SInter
    *triggerType = pInfo->twAggSup.calTrigger;
    *pInterval = pInfo->interval;
    *pOperatorFlag = pInfo->basic.operatorFlag;
  } else if (IS_CONTINUE_INTERVAL_OP(downstream)) {
  } else {
    SStreamIntervalSliceOperatorInfo* pInfo = downstream->info;
    *triggerType = pInfo->twAggSup.calTrigger;
    *pInterval = pInfo->interval;
    pInfo->hasFill = true;
    *pOperatorFlag = pInfo->basic.operatorFlag;
  } else {
    code = TSDB_CODE_STREAM_INTERNAL_ERROR;
  }

  QUERY_CHECK_CODE(code, lino, _end);

_end:
  if (code != TSDB_CODE_SUCCESS) {
    qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));

@ -1891,6 +1892,10 @@ int32_t createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysi
  initNonBlockAggSupptor(&pInfo->nbSup, &pInfo->pFillSup->interval, downstream);
  code = initStreamBasicInfo(&pInfo->basic, pOperator);
  QUERY_CHECK_CODE(code, lino, _error);

  code = streamClientCheckCfg(&pInfo->nbSup.recParam);
  QUERY_CHECK_CODE(code, lino, _error);

  pInfo->basic.operatorFlag = opFlag;
  if (isFinalOperator(&pInfo->basic)) {
    pInfo->nbSup.numOfKeep++;

@ -223,8 +223,10 @@ int32_t doStreamIntervalNonblockAggImpl(SOperatorInfo* pOperator, SSDataBlock* p
      code = pInfo->streamAggSup.stateStore.streamStateGetAllPrev(pInfo->streamAggSup.pState, &curKey,
                                                                  pInfo->pUpdated, pInfo->nbSup.numOfKeep);
      QUERY_CHECK_CODE(code, lino, _end);
      code = checkAndSaveWinStateToDisc(startIndex, pInfo->pUpdated, 0, pInfo->basic.pTsDataState, &pInfo->streamAggSup, &pInfo->interval);
      QUERY_CHECK_CODE(code, lino, _end);
      if (!isRecalculateOperator(&pInfo->basic)) {
        code = checkAndSaveWinStateToDisc(startIndex, pInfo->pUpdated, 0, pInfo->basic.pTsDataState, &pInfo->streamAggSup, &pInfo->interval);
        QUERY_CHECK_CODE(code, lino, _end);
      }
    }
  }

@ -704,11 +706,14 @@ int32_t doStreamIntervalNonblockAggNext(SOperatorInfo* pOperator, SSDataBlock**
      if (pBlock == NULL) {
        qDebug("===stream===%s return data:%s. rev rows:%d", GET_TASKID(pTaskInfo),
               getStreamOpName(pOperator->operatorType), pInfo->basic.numOfRecv);
        if (isFinalOperator(&pInfo->basic) && isRecalculateOperator(&pInfo->basic)) {
          code = pAggSup->stateStore.streamStateFlushReaminInfoToDisk(pInfo->basic.pTsDataState);
          QUERY_CHECK_CODE(code, lino, _end);
          code = buildRetriveRequest(pTaskInfo, pAggSup, pInfo->basic.pTsDataState, &pInfo->nbSup);
          QUERY_CHECK_CODE(code, lino, _end);
        if (isFinalOperator(&pInfo->basic)) {
          if (isRecalculateOperator(&pInfo->basic)) {
            code = buildRetriveRequest(pTaskInfo, pAggSup, pInfo->basic.pTsDataState, &pInfo->nbSup);
            QUERY_CHECK_CODE(code, lino, _end);
          } else {
            code = pAggSup->stateStore.streamStateFlushReaminInfoToDisk(pInfo->basic.pTsDataState);
            QUERY_CHECK_CODE(code, lino, _end);
          }
        }
        pOperator->status = OP_RES_TO_RETURN;
        break;

@ -820,7 +825,7 @@ int32_t doStreamIntervalNonblockAggNext(SOperatorInfo* pOperator, SSDataBlock**
    code = closeNonblockIntervalWindow(pAggSup->pResultRows, &pInfo->twAggSup, &pInfo->interval, pInfo->pUpdated,
                                       pTaskInfo);
    QUERY_CHECK_CODE(code, lino, _end);
    if (!isHistoryOperator(&pInfo->basic)) {
    if (!isHistoryOperator(&pInfo->basic) && !isRecalculateOperator(&pInfo->basic)) {
      code = checkAndSaveWinStateToDisc(0, pInfo->pUpdated, 0, pInfo->basic.pTsDataState, &pInfo->streamAggSup, &pInfo->interval);
      QUERY_CHECK_CODE(code, lino, _end);
    }

@ -1052,7 +1057,7 @@ static int32_t doStreamFinalntervalNonblockAggImpl(SOperatorInfo* pOperator, SSD
    QUERY_CHECK_CODE(code, lino, _end);
  }

  if (!isHistoryOperator(&pInfo->basic)) {
  if (!isHistoryOperator(&pInfo->basic) && !isRecalculateOperator(&pInfo->basic)) {
    code = checkAndSaveWinStateToDisc(0, pInfo->pUpdated, 0, pInfo->basic.pTsDataState, &pInfo->streamAggSup, &pInfo->interval);
    QUERY_CHECK_CODE(code, lino, _end);
  }

@ -753,18 +753,20 @@ int32_t createStreamIntervalSliceOperatorInfo(SOperatorInfo* downstream, SPhysiN
  pInfo->hasInterpoFunc = windowinterpNeeded(pExpSup->pCtx, numOfExprs);
  initNonBlockAggSupptor(&pInfo->nbSup, &pInfo->interval, NULL);

  setOperatorInfo(pOperator, "StreamIntervalSliceOperator", pPhyNode->type, true, OP_NOT_OPENED, pInfo, pTaskInfo);
  setOperatorInfo(pOperator, "StreamIntervalSliceOperator", nodeType(pPhyNode), true, OP_NOT_OPENED, pInfo, pTaskInfo);
  code = initStreamBasicInfo(&pInfo->basic, pOperator);
  QUERY_CHECK_CODE(code, lino, _error);

  if (pIntervalPhyNode->window.triggerType == STREAM_TRIGGER_CONTINUOUS_WINDOW_CLOSE) {
    qDebug("create continuous interval operator. op type:%d, task type:%d, task id:%s", nodeType(pPhyNode),
           pHandle->fillHistory, GET_TASKID(pTaskInfo));
    if (pHandle->fillHistory == STREAM_HISTORY_OPERATOR) {
      setFillHistoryOperatorFlag(&pInfo->basic);
    } else if (pHandle->fillHistory == STREAM_RECALCUL_OPERATOR) {
      setRecalculateOperatorFlag(&pInfo->basic);
    }
    pInfo->nbSup.pWindowAggFn = doStreamIntervalNonblockAggImpl;
    if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL) {
    if (nodeType(pPhyNode) == QUERY_NODE_PHYSICAL_PLAN_STREAM_CONTINUE_INTERVAL) {
      setSingleOperatorFlag(&pInfo->basic);
    }
    pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamIntervalNonblockAggNext, NULL,

@ -83,7 +83,8 @@ int32_t copyRecDataToBuff(TSKEY calStart, TSKEY calEnd, uint64_t uid, uint64_t v
  return pkLen + sizeof(SRecDataInfo);
}

int32_t saveRecalculateData(SStateStore* pStateStore, STableTsDataState* pTsDataState, SSDataBlock* pSrcBlock, EStreamType mode) {
int32_t saveRecalculateData(SStateStore* pStateStore, STableTsDataState* pTsDataState, SSDataBlock* pSrcBlock,
                            EStreamType mode) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;

@ -92,8 +93,10 @@ int32_t saveRecalculateData(SStateStore* pStateStore, STableTsDataState* pTsData
  }
  SColumnInfoData* pSrcStartTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
  SColumnInfoData* pSrcEndTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
  SColumnInfoData* pSrcCalStartTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
  SColumnInfoData* pSrcCalEndTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
  SColumnInfoData* pSrcCalStartTsCol =
      (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
  SColumnInfoData* pSrcCalEndTsCol =
      (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
  SColumnInfoData* pSrcUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
  SColumnInfoData* pSrcGpCol = taosArrayGet(pSrcBlock->pDataBlock, GROUPID_COLUMN_INDEX);
  TSKEY* srcStartTsCol = (TSKEY*)pSrcStartTsCol->pData;

@ -113,9 +116,10 @@ int32_t saveRecalculateData(SStateStore* pStateStore, STableTsDataState* pTsData
      calStart = srcStartTsCol[i];
      calEnd = srcEndTsCol[i];
    }
    int32_t len = copyRecDataToBuff(calStart, calEnd, srcUidData[i], pSrcBlock->info.version, mode, NULL, 0,
                                    pTsDataState->pRecValueBuff, pTsDataState->recValueLen);
    code = pStateStore->streamStateSessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len);
    int32_t len = copyRecDataToBuff(calStart, calEnd, srcUidData[i], pSrcBlock->info.version, mode, NULL, 0,
                                    pTsDataState->pRecValueBuff, pTsDataState->recValueLen);
    code = pStateStore->streamStateMergeAndSaveScanRange(pTsDataState, &key.win, key.groupId,
                                                         pTsDataState->pRecValueBuff, len);
    QUERY_CHECK_CODE(code, lino, _end);
  }

@ -332,11 +336,6 @@ static int32_t doStreamBlockScan(SOperatorInfo* pOperator, SSDataBlock** ppRes)
      case STREAM_CHECKPOINT: {
        qError("stream check point error. msg type: STREAM_INPUT__DATA_BLOCK");
      } break;
      case STREAM_RETRIEVE: {
        code = saveRecalculateData(&pInfo->stateStore, pInfo->basic.pTsDataState, pBlock, STREAM_RETRIEVE);
        QUERY_CHECK_CODE(code, lino, _end);
        continue;
      } break;
      case STREAM_RECALCULATE_START: {
        if (!isSemiOperator(&pInfo->basic)) {
          code = pInfo->stateStore.streamStateFlushReaminInfoToDisk(pInfo->basic.pTsDataState);

@ -388,7 +387,7 @@ static int32_t buildAndSaveRecalculateData(SSDataBlock* pSrcBlock, TSKEY* pTsCol
    len = copyRecDataToBuff(pTsCol[rowId], pTsCol[rowId], pSrcBlock->info.id.uid, pSrcBlock->info.version, STREAM_CLEAR,
                            NULL, 0, pTsDataState->pRecValueBuff, pTsDataState->recValueLen);
    SSessionKey key = {.win.skey = pTsCol[rowId], .win.ekey = pTsCol[rowId], .groupId = 0};
    code = pStateStore->streamStateSessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len);
    code = pStateStore->streamState1SessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len);
    QUERY_CHECK_CODE(code, lino, _end);
    uint64_t gpId = 0;
    code = appendPkToSpecialBlock(pDestBlock, pTsCol, pPkColDataInfo, rowId, &pSrcBlock->info.id.uid, &gpId, NULL);

@ -399,7 +398,7 @@ static int32_t buildAndSaveRecalculateData(SSDataBlock* pSrcBlock, TSKEY* pTsCol
    len = copyRecDataToBuff(pTsCol[rowId], pTsCol[rowId], pSrcBlock->info.id.uid, pSrcBlock->info.version,
                            STREAM_DELETE_DATA, NULL, 0, pTsDataState->pRecValueBuff,
                            pTsDataState->recValueLen);
    code = pStateStore->streamStateSessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len);
    code = pStateStore->streamState1SessionSaveToDisk(pTsDataState, &key, pTsDataState->pRecValueBuff, len);
    QUERY_CHECK_CODE(code, lino, _end);

    code = appendPkToSpecialBlock(pDestBlock, pTsCol, pPkColDataInfo, rowId, &pSrcBlock->info.id.uid, &gpId, NULL);

@ -1252,6 +1251,7 @@ static int32_t doDataRangeScan(SStreamScanInfo* pInfo, SExecTaskInfo* pTaskInfo,
      if (pInfo->pCreateTbRes->info.rows > 0) {
        (*ppRes) = pInfo->pCreateTbRes;
        pInfo->scanMode = STREAM_SCAN_FROM_RES;
        break;
      }
      (*ppRes) = pTsdbBlock;
      break;

@ -1341,6 +1341,7 @@ static int32_t doStreamRecalculateDataScan(SOperatorInfo* pOperator, SSDataBlock
      (*ppRes) = pInfo->pRangeScanRes;
      pInfo->pRangeScanRes = NULL;
      pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
      printDataBlock((*ppRes), "stream tsdb scan", GET_TASKID(pTaskInfo));
      goto _end;
    } break;
    case STREAM_SCAN_FROM_CREATE_TABLERES: {

@ -1513,6 +1514,11 @@ _end:
  return code;
}

static void destroyStreamRecalculateParam(SStreamRecParam* pParam) {
  tSimpleHashCleanup(pParam->pColIdMap);
  pParam->pColIdMap = NULL;
}

static void destroyStreamDataScanOperatorInfo(void* param) {
  if (param == NULL) {
    return;

@ -1562,6 +1568,8 @@ static void destroyStreamDataScanOperatorInfo(void* param) {
  taosArrayDestroy(pStreamScan->pRecRangeRes);
  pStreamScan->pRecRangeRes = NULL;

  destroyStreamRecalculateParam(&pStreamScan->recParam);

  taosMemoryFree(pStreamScan);
}

@ -1633,6 +1641,9 @@ static void initStreamRecalculateParam(STableScanPhysiNode* pTableScanNode, SStr
  pParam->sqlCapcity = tListLen(pParam->pSql);
  (void)tsnprintf(pParam->pUrl, tListLen(pParam->pUrl), "http://%s:%d/rest/sql", tsAdapterFqdn, tsAdapterPort);
  (void)tsnprintf(pParam->pAuth, tListLen(pParam->pAuth), "Authorization: Basic %s", tsAdapterToken);

  _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
  pParam->pColIdMap = tSimpleHashInit(32, hashFn);
}

int32_t createStreamDataScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond,

@ -456,7 +456,10 @@ static int32_t fillPointResult(SStreamFillSupporter* pFillSup, SResultRowData* p
      qError("%s failed at line %d since fill errror", __func__, __LINE__);
    }
  } else {
    int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId;
    int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId;
    if (pFillSup->normalFill) {
      srcSlot = dstSlotId;
    }
    SResultCellData* pCell = NULL;
    if (IS_FILL_CONST_VALUE(pFillSup->type) &&
        (isGroupKeyFunc(pFillCol->pExpr) || isSelectGroupConstValueFunc(pFillCol->pExpr))) {

@ -532,7 +535,10 @@ static void fillLinearRange(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFi
      qError("%s failed at line %d since fill errror", __func__, lino);
    }
  } else if (isInterpFunc(pFillCol->pExpr) || pFillSup->normalFill) {
    int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId;
    int32_t srcSlot = pFillCol->pExpr->base.pParam[0].pCol->slotId;
    if (pFillSup->normalFill) {
      srcSlot = dstSlotId;
    }
    SResultCellData* pCell = getSliceResultCell(pFillInfo->pResRow->pRowVal, srcSlot, pFillSup->pOffsetInfo);
    if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pCell->isNull) {
      colDataSetNULL(pDstCol, index);