Merge remote-tracking branch 'origin/3.0' into enh/3.0/TD-32686
This commit is contained in:
commit
febd938ea8
|
@ -156,6 +156,9 @@ pcre2_grep_test.sh
|
|||
pcre2_chartables.c
|
||||
geos-config
|
||||
config.h
|
||||
!contrib/xml2-cmake
|
||||
!contrib/xml2-cmake/linux_x86_64/include/config.h
|
||||
!contrib/xml2-cmake/CMakeLists.txt
|
||||
pcre2.h
|
||||
zconf.h
|
||||
version.h
|
||||
|
|
394
README.md
394
README.md
|
@ -26,24 +26,33 @@ English | [简体中文](README-CN.md) | [TDengine Cloud](https://cloud.tdengine
|
|||
|
||||
# Table of Contents
|
||||
|
||||
1. [What is TDengine?](#1-what-is-tdengine)
|
||||
2. [Documentation](#2-documentation)
|
||||
3. [Building](#3-building)
|
||||
1. [Install build tools](#31-install-build-tools)
|
||||
1. [Get the source codes](#32-get-the-source-codes)
|
||||
1. [Special Note](#33-special-note)
|
||||
1. [Build TDengine](#34-build-tdengine)
|
||||
4. [Installing](#4-installing)
|
||||
1. [On Linux platform](#41-on-linux-platform)
|
||||
1. [On Windows platform](#42-on-windows-platform)
|
||||
1. [On macOS platform](#43-on-macos-platform)
|
||||
1. [Quick Run](#44-quick-run)
|
||||
5. [Try TDengine](#5-try-tdengine)
|
||||
6. [Developing with TDengine](#6-developing-with-tdengine)
|
||||
7. [Contribute to TDengine](#7-contribute-to-tdengine)
|
||||
8. [Join the TDengine Community](#8-join-the-tdengine-community)
|
||||
1. [Introduction](#1-introduction)
|
||||
1. [Documentation](#2-documentation)
|
||||
1. [Prerequisites](#3-prerequisites)
|
||||
- [3.1 Prerequisites On Linux](#31-on-linux)
|
||||
- [3.2 Prerequisites On macOS](#32-on-macos)
|
||||
- [3.3 Prerequisites On Windows](#33-on-windows)
|
||||
- [3.4 Clone the repo](#34-clone-the-repo)
|
||||
1. [Building](#4-building)
|
||||
- [4.1 Build on Linux](#41-build-on-linux)
|
||||
- [4.2 Build on macOS](#42-build-on-macos)
|
||||
- [4.3 Build On Windows](#43-build-on-windows)
|
||||
1. [Packaging](#5-packaging)
|
||||
1. [Installation](#6-installation)
|
||||
- [6.1 Install on Linux](#61-install-on-linux)
|
||||
- [6.2 Install on macOS](#62-install-on-macos)
|
||||
- [6.3 Install on Windows](#63-install-on-windows)
|
||||
1. [Running](#7-running)
|
||||
- [7.1 Run TDengine on Linux](#71-run-tdengine-on-linux)
|
||||
- [7.2 Run TDengine on macOS](#72-run-tdengine-on-macos)
|
||||
- [7.3 Run TDengine on Windows](#73-run-tdengine-on-windows)
|
||||
1. [Testing](#8-testing)
|
||||
1. [Releasing](#9-releasing)
|
||||
1. [Workflow](#10-workflow)
|
||||
1. [Coverage](#11-coverage)
|
||||
1. [Contributing](#12-contributing)
|
||||
|
||||
# 1. What is TDengine?
|
||||
# 1. Introduction
|
||||
|
||||
TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:
|
||||
|
||||
|
@ -65,132 +74,91 @@ For a full list of TDengine competitive advantages, please [check here](https://
|
|||
|
||||
For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))
|
||||
|
||||
# 3. Building
|
||||
# 3. Prerequisites
|
||||
|
||||
At the moment, TDengine server supports running on Linux/Windows/macOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment.
|
||||
## 3.1 On Linux
|
||||
|
||||
You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.
|
||||
<details>
|
||||
|
||||
TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine.
|
||||
<summary>Install required tools on Linux</summary>
|
||||
|
||||
To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory.
|
||||
|
||||
## 3.1 Install build tools
|
||||
|
||||
### Ubuntu 18.04 and above or Debian
|
||||
### For Ubuntu 18.04、20.04、22.04
|
||||
|
||||
```bash
|
||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
|
||||
sudo apt-get udpate
|
||||
sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
|
||||
libsnappy-dev liblzma-dev zlib1g-dev pkg-config
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools
|
||||
|
||||
To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.
|
||||
### For CentOS 8
|
||||
|
||||
```bash
|
||||
sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
|
||||
```
|
||||
|
||||
### CentOS 7.9
|
||||
|
||||
```bash
|
||||
sudo yum install epel-release
|
||||
sudo yum update
|
||||
sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
|
||||
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
||||
yum install -y epel-release gcc gcc-c++ make cmake git perl dnf-plugins-core
|
||||
yum config-manager --set-enabled powertools
|
||||
yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatomic-static libstdc++-static
|
||||
```
|
||||
|
||||
### CentOS 8/Fedora/Rocky Linux
|
||||
</details>
|
||||
|
||||
## 3.2 On macOS
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Install required tools on macOS</summary>
|
||||
|
||||
Please intall the dependencies with [brew](https://brew.sh/).
|
||||
|
||||
```bash
|
||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release gflags git openssl-devel
|
||||
```
|
||||
|
||||
#### Install build dependencies for taosTools on CentOS
|
||||
|
||||
#### CentOS 7.9
|
||||
|
||||
```
|
||||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||
```
|
||||
|
||||
#### CentOS 8/Fedora/Rocky Linux
|
||||
|
||||
```
|
||||
sudo yum install -y epel-release
|
||||
sudo yum install -y dnf-plugins-core
|
||||
sudo yum config-manager --set-enabled powertools
|
||||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||
```
|
||||
|
||||
Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), it leads a cmake prompt libsnappy not found. But snappy still works well.
|
||||
|
||||
If the PowerTools installation fails, you can try to use:
|
||||
|
||||
```
|
||||
sudo yum config-manager --set-enabled powertools
|
||||
```
|
||||
|
||||
#### For CentOS + devtoolset
|
||||
|
||||
Besides above dependencies, please run following commands:
|
||||
|
||||
```
|
||||
sudo yum install centos-release-scl
|
||||
sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
|
||||
scl enable devtoolset-9 -- bash
|
||||
```
|
||||
|
||||
### macOS
|
||||
|
||||
```
|
||||
brew install argp-standalone gflags pkgconfig
|
||||
```
|
||||
|
||||
### Setup golang environment
|
||||
</details>
|
||||
|
||||
TDengine includes a few components like taosAdapter developed by Go language. Please refer to golang.org official documentation for golang environment setup.
|
||||
## 3.3 On Windows
|
||||
|
||||
Please use version 1.20+. For the user in China, we recommend using a proxy to accelerate package downloading.
|
||||
<details>
|
||||
|
||||
```
|
||||
go env -w GO111MODULE=on
|
||||
go env -w GOPROXY=https://goproxy.cn,direct
|
||||
```
|
||||
<summary>Install required tools on Windows</summary>
|
||||
|
||||
The default will not build taosAdapter, but you can use the following command to build taosAdapter as the service for RESTful interface.
|
||||
Work in Progress.
|
||||
|
||||
```
|
||||
cmake .. -DBUILD_HTTP=false
|
||||
```
|
||||
</details>
|
||||
|
||||
### Setup rust environment
|
||||
## 3.4 Clone the repo
|
||||
|
||||
TDengine includes a few components developed by Rust language. Please refer to rust-lang.org official documentation for rust environment setup.
|
||||
<details>
|
||||
|
||||
## 3.2 Get the source codes
|
||||
<summary>Clone the repo</summary>
|
||||
|
||||
First of all, you may clone the source codes from github:
|
||||
Clone the repository to the target machine:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/taosdata/TDengine.git
|
||||
cd TDengine
|
||||
```
|
||||
|
||||
You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You will need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail.
|
||||
|
||||
```
|
||||
[url "git@github.com:"]
|
||||
insteadOf = https://github.com/
|
||||
```
|
||||
> **NOTE:**
|
||||
> TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust).
|
||||
|
||||
## 3.3 Special Note
|
||||
</details>
|
||||
|
||||
[JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go),[Python Connector](https://github.com/taosdata/taos-connector-python),[Node.js Connector](https://github.com/taosdata/taos-connector-node),[C# Connector](https://github.com/taosdata/taos-connector-dotnet) ,[Rust Connector](https://github.com/taosdata/taos-connector-rust) and [Grafana plugin](https://github.com/taosdata/grafanaplugin) has been moved to standalone repository.
|
||||
# 4. Building
|
||||
|
||||
## 3.4 Build TDengine
|
||||
At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment.
|
||||
|
||||
### On Linux platform
|
||||
You can choose to install through source code, [container](https://docs.tdengine.com/get-started/deploy-in-docker/), [installation package](https://docs.tdengine.com/get-started/deploy-from-package/) or [Kubernetes](https://docs.tdengine.com/operations-and-maintenance/deploy-your-cluster/#kubernetes-deployment). This quick guide only applies to install from source.
|
||||
|
||||
TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine.
|
||||
|
||||
To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory.
|
||||
|
||||
## 4.1 Build on Linux
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to build on Linux</summary>
|
||||
|
||||
You can run the bash script `build.sh` to build both TDengine and taosTools including taosBenchmark and taosdump as below:
|
||||
|
||||
|
@ -201,29 +169,46 @@ You can run the bash script `build.sh` to build both TDengine and taosTools incl
|
|||
It equals to execute following commands:
|
||||
|
||||
```bash
|
||||
mkdir debug
|
||||
cd debug
|
||||
mkdir debug && cd debug
|
||||
cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
|
||||
make
|
||||
```
|
||||
|
||||
You can use Jemalloc as memory allocator instead of glibc:
|
||||
|
||||
```
|
||||
apt install autoconf
|
||||
```bash
|
||||
cmake .. -DJEMALLOC_ENABLED=true
|
||||
```
|
||||
|
||||
TDengine build script can detect the host machine's architecture on X86-64, X86, arm64 platform.
|
||||
You can also specify CPUTYPE option like aarch64 too if the detection result is not correct:
|
||||
|
||||
aarch64:
|
||||
TDengine build script can auto-detect the host machine's architecture on x86, x86-64, arm64 platform.
|
||||
You can also specify architecture manually by CPUTYPE option:
|
||||
|
||||
```bash
|
||||
cmake .. -DCPUTYPE=aarch64 && cmake --build .
|
||||
```
|
||||
|
||||
### On Windows platform
|
||||
</details>
|
||||
|
||||
## 4.2 Build on macOS
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to build on macOS</summary>
|
||||
|
||||
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
|
||||
|
||||
```shell
|
||||
mkdir debug && cd debug
|
||||
cmake .. && cmake --build .
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## 4.3 Build on Windows
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to build on Windows</summary>
|
||||
|
||||
If you use the Visual Studio 2013, please open a command window by executing "cmd.exe".
|
||||
Please specify "amd64" for 64 bits Windows or specify "x86" for 32 bits Windows when you execute vcvarsall.bat.
|
||||
|
@ -254,31 +239,67 @@ mkdir debug && cd debug
|
|||
cmake .. -G "NMake Makefiles"
|
||||
nmake
|
||||
```
|
||||
</details>
|
||||
|
||||
### On macOS platform
|
||||
# 5. Packaging
|
||||
|
||||
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
|
||||
The TDengine community installer can NOT be created by this repository only, due to some component dependencies. We are still working on this improvement.
|
||||
|
||||
```shell
|
||||
mkdir debug && cd debug
|
||||
cmake .. && cmake --build .
|
||||
```
|
||||
# 6. Installation
|
||||
|
||||
# 4. Installing
|
||||
## 6.1 Install on Linux
|
||||
|
||||
## 4.1 On Linux platform
|
||||
<details>
|
||||
|
||||
After building successfully, TDengine can be installed by
|
||||
<summary>Detailed steps to install on Linux</summary>
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
|
||||
```bash
|
||||
sudo make install
|
||||
```
|
||||
|
||||
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
|
||||
Installing from source code will also configure service management for TDengine. Users can also choose to [install from packages](https://docs.tdengine.com/get-started/deploy-from-package/) for it.
|
||||
|
||||
Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
|
||||
</details>
|
||||
|
||||
To start the service after installation, in a terminal, use:
|
||||
## 6.2 Install on macOS
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to install on macOS</summary>
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
|
||||
```bash
|
||||
sudo make install
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## 6.3 Install on Windows
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to install on windows</summary>
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
|
||||
```cmd
|
||||
nmake install
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
# 7. Running
|
||||
|
||||
## 7.1 Run TDengine on Linux
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to run on Linux</summary>
|
||||
|
||||
To start the service after installation on linux, in a terminal, use:
|
||||
|
||||
```bash
|
||||
sudo systemctl start taosd
|
||||
|
@ -292,27 +313,29 @@ taos
|
|||
|
||||
If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
|
||||
|
||||
## 4.2 On Windows platform
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
|
||||
```cmd
|
||||
nmake install
|
||||
```
|
||||
|
||||
## 4.3 On macOS platform
|
||||
|
||||
After building successfully, TDengine can be installed by:
|
||||
If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`)
|
||||
|
||||
```bash
|
||||
sudo make install
|
||||
./build/bin/taosd -c test/cfg
|
||||
```
|
||||
|
||||
Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
|
||||
In another terminal, use the TDengine CLI to connect the server:
|
||||
|
||||
Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
|
||||
```bash
|
||||
./build/bin/taos -c test/cfg
|
||||
```
|
||||
|
||||
To start the service after installation, double-click the /applications/TDengine to start the program, or in a terminal, use:
|
||||
Option `-c test/cfg` specifies the system configuration file directory.
|
||||
|
||||
</details>
|
||||
|
||||
## 7.2 Run TDengine on macOS
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to run on macOS</summary>
|
||||
|
||||
To start the service after installation on macOS, double-click the /applications/TDengine to start the program, or in a terminal, use:
|
||||
|
||||
```bash
|
||||
sudo launchctl start com.tdengine.taosd
|
||||
|
@ -326,64 +349,63 @@ taos
|
|||
|
||||
If TDengine CLI connects the server successfully, welcome messages and version info are printed. Otherwise, an error message is shown.
|
||||
|
||||
## 4.4 Quick Run
|
||||
</details>
|
||||
|
||||
If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`)
|
||||
|
||||
```bash
|
||||
./build/bin/taosd -c test/cfg
|
||||
## 7.3 Run TDengine on Windows
|
||||
|
||||
<details>
|
||||
|
||||
<summary>Detailed steps to run on windows</summary>
|
||||
|
||||
You can start TDengine server on Windows platform with below commands:
|
||||
|
||||
```cmd
|
||||
.\build\bin\taosd.exe -c test\cfg
|
||||
```
|
||||
|
||||
In another terminal, use the TDengine CLI to connect the server:
|
||||
|
||||
```bash
|
||||
./build/bin/taos -c test/cfg
|
||||
```cmd
|
||||
.\build\bin\taos.exe -c test\cfg
|
||||
```
|
||||
|
||||
option "-c test/cfg" specifies the system configuration file directory.
|
||||
|
||||
# 5. Try TDengine
|
||||
</details>
|
||||
|
||||
It is easy to run SQL commands from TDengine CLI which is the same as other SQL databases.
|
||||
# 8. Testing
|
||||
|
||||
```sql
|
||||
CREATE DATABASE demo;
|
||||
USE demo;
|
||||
CREATE TABLE t (ts TIMESTAMP, speed INT);
|
||||
INSERT INTO t VALUES('2019-07-15 00:00:00', 10);
|
||||
INSERT INTO t VALUES('2019-07-15 01:00:00', 20);
|
||||
SELECT * FROM t;
|
||||
ts | speed |
|
||||
===================================
|
||||
19-07-15 00:00:00.000| 10|
|
||||
19-07-15 01:00:00.000| 20|
|
||||
Query OK, 2 row(s) in set (0.001700s)
|
||||
For how to run different types of tests on TDengine, please see [Testing TDengine](./tests/README.md).
|
||||
|
||||
# 9. Releasing
|
||||
|
||||
For the complete list of TDengine Releases, please see [Releases](https://github.com/taosdata/TDengine/releases).
|
||||
|
||||
# 10. Workflow
|
||||
|
||||
TDengine build check workflow can be found in this [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml). More workflows will be available soon.
|
||||
|
||||
# 11. Coverage
|
||||
|
||||
Latest TDengine test coverage report can be found on [coveralls.io](https://coveralls.io/github/taosdata/TDengine)
|
||||
|
||||
<details>
|
||||
|
||||
<summary>How to run the coverage report locally?</summary>
|
||||
To create the test coverage report (in HTML format) locally, please run following commands:
|
||||
|
||||
```bash
|
||||
cd tests
|
||||
bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task
|
||||
# on main branch and run cases in longtimeruning_cases.task
|
||||
# for more infomation about options please refer to ./run_local_coverage.sh -h
|
||||
```
|
||||
> **NOTE:**
|
||||
> Please note that the -b and -i options will recompile TDengine with the -DCOVER=true option, which may take a amount of time.
|
||||
|
||||
# 6. Developing with TDengine
|
||||
</details>
|
||||
|
||||
## Official Connectors
|
||||
# 12. Contributing
|
||||
|
||||
TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation.
|
||||
|
||||
- [Java](https://docs.tdengine.com/reference/connectors/java/)
|
||||
- [C/C++](https://docs.tdengine.com/reference/connectors/cpp/)
|
||||
- [Python](https://docs.tdengine.com/reference/connectors/python/)
|
||||
- [Go](https://docs.tdengine.com/reference/connectors/go/)
|
||||
- [Node.js](https://docs.tdengine.com/reference/connectors/node/)
|
||||
- [Rust](https://docs.tdengine.com/reference/connectors/rust/)
|
||||
- [C#](https://docs.tdengine.com/reference/connectors/csharp/)
|
||||
- [RESTful API](https://docs.tdengine.com/reference/connectors/rest-api/)
|
||||
|
||||
# 7. Contribute to TDengine
|
||||
|
||||
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
|
||||
|
||||
# 8. Join the TDengine Community
|
||||
|
||||
For more information about TDengine, you can follow us on social media and join our Discord server:
|
||||
|
||||
- [Discord](https://discord.com/invite/VZdSuUg4pS)
|
||||
- [Twitter](https://twitter.com/TDengineDB)
|
||||
- [LinkedIn](https://www.linkedin.com/company/tdengine/)
|
||||
- [YouTube](https://www.youtube.com/@tdengine)
|
||||
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to TDengine.
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# addr2line
|
||||
ExternalProject_Add(addr2line
|
||||
GIT_REPOSITORY https://github.com/davea42/libdwarf-addr2line.git
|
||||
GIT_TAG master
|
||||
GIT_TAG main
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/addr2line"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/addr2line"
|
||||
CONFIGURE_COMMAND ""
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
ExternalProject_Add(azure
|
||||
URL https://github.com/Azure/azure-sdk-for-cpp/archive/refs/tags/azure-storage-blobs_12.13.0-beta.1.tar.gz
|
||||
URL_HASH SHA256=3eca486fd60e3522d0a633025ecd652a71515b1e944799b2e8ee31fd590305a9
|
||||
DEPENDS xml2
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/azure-sdk-for-cpp-azure-storage-blobs_12.13.0-beta.1"
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
IF (DEFINED VERNUMBER)
|
||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||
ELSE ()
|
||||
SET(TD_VER_NUMBER "3.3.5.0.alpha")
|
||||
SET(TD_VER_NUMBER "3.3.5.2.alpha")
|
||||
ENDIF ()
|
||||
|
||||
IF (DEFINED VERCOMPATIBLE)
|
||||
|
|
|
@ -12,7 +12,7 @@ ExternalProject_Add(curl2
|
|||
BUILD_IN_SOURCE TRUE
|
||||
BUILD_ALWAYS 1
|
||||
UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
|
||||
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
|
||||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
|
|
|
@ -6,9 +6,9 @@ ExternalProject_Add(openssl
|
|||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/openssl"
|
||||
BUILD_IN_SOURCE TRUE
|
||||
#BUILD_ALWAYS 1
|
||||
#UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
|
||||
BUILD_ALWAYS 1
|
||||
UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./Configure --prefix=$ENV{HOME}/.cos-local.2 no-shared
|
||||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install_sw -j
|
||||
TEST_COMMAND ""
|
||||
|
|
|
@ -1,19 +1,16 @@
|
|||
|
||||
# xml2
|
||||
ExternalProject_Add(xml2
|
||||
URL https://download.gnome.org/sources/libxml2/2.11/libxml2-2.11.5.tar.xz
|
||||
URL_HASH SHA256=3727b078c360ec69fa869de14bd6f75d7ee8d36987b071e6928d4720a28df3a6
|
||||
#https://github.com/GNOME/libxml2/archive/refs/tags/v2.11.5.tar.gz
|
||||
#GIT_REPOSITORY https://github.com/GNOME/libxml2
|
||||
#GIT_TAG v2.11.5
|
||||
URL https://github.com/GNOME/libxml2/archive/refs/tags/v2.10.4.tar.gz
|
||||
URL_HASH SHA256=6f6fb27f91bb65f9d7196e3c616901b3e18a7dea31ccc2ae857940b125faa780
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/xml2"
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2"
|
||||
#BINARY_DIR ""
|
||||
BUILD_IN_SOURCE TRUE
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.2 --enable-shared=no --enable-static=yes --without-python --without-lzma
|
||||
BUILD_COMMAND make -j
|
||||
INSTALL_COMMAND make install && ln -sf $ENV{HOME}/.cos-local.2/include/libxml2/libxml $ENV{HOME}/.cos-local.2/include/libxml
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
GIT_SHALLOW true
|
||||
)
|
||||
|
|
|
@ -17,7 +17,6 @@ elseif(${BUILD_WITH_COS})
|
|||
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
|
||||
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
|
@ -146,11 +145,16 @@ if(${BUILD_WITH_SQLITE})
|
|||
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(${BUILD_WITH_SQLITE})
|
||||
|
||||
# libcurl
|
||||
if(NOT ${TD_WINDOWS})
|
||||
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.2/)
|
||||
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(NOT ${TD_WINDOWS})
|
||||
|
||||
# s3
|
||||
if(${BUILD_WITH_S3})
|
||||
cat("${TD_SUPPORT_DIR}/ssl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/xml2_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/libs3_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/azure_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_S3)
|
||||
|
@ -160,7 +164,6 @@ elseif(${BUILD_WITH_COS})
|
|||
# cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
# cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
# cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
# cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_COS)
|
||||
endif()
|
||||
|
@ -199,6 +202,11 @@ endif()
|
|||
# lemon
|
||||
cat("${TD_SUPPORT_DIR}/lemon_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
||||
# Force specify CC=cc on MacOS. Because the default CC setting in the generated Makefile has issues finding standard library headers
|
||||
IF(${TD_DARWIN})
|
||||
SET(CONTRIB_CONFIG_ENV "CC=cc")
|
||||
ENDIF()
|
||||
|
||||
# download dependencies
|
||||
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||
|
@ -652,7 +660,12 @@ if(${BUILD_PCRE2})
|
|||
endif(${BUILD_PCRE2})
|
||||
|
||||
if(${TD_LINUX} AND ${BUILD_WITH_S3})
|
||||
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
|
||||
set(ORIG_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
|
||||
string(REPLACE " -Werror " " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
|
||||
add_subdirectory(xml2-cmake)
|
||||
set(CMAKE_C_FLAGS ${ORIG_CMAKE_C_FLAGS})
|
||||
|
||||
add_subdirectory(azure-cmake)
|
||||
endif()
|
||||
|
||||
IF(TD_LINUX)
|
||||
|
|
|
@ -36,10 +36,6 @@ target_include_directories(
|
|||
)
|
||||
|
||||
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(XML2_LIBRARY xml2 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
|
||||
# find_library(CURL_LIBRARY curl)
|
||||
# find_library(XML2_LIBRARY xml2)
|
||||
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
|
||||
|
@ -50,9 +46,8 @@ target_link_libraries(
|
|||
PRIVATE ${CURL_LIBRARY}
|
||||
PRIVATE ${SSL_LIBRARY}
|
||||
PRIVATE ${CRYPTO_LIBRARY}
|
||||
PRIVATE ${XML2_LIBRARY}
|
||||
|
||||
# PRIVATE xml2
|
||||
PRIVATE _libxml2
|
||||
PRIVATE zlib
|
||||
|
||||
# PRIVATE ${CoreFoundation_Library}
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
set(LIBXML2_SOURCE_DIR "${TD_CONTRIB_DIR}/libxml2")
|
||||
|
||||
set(SRCS
|
||||
"${LIBXML2_SOURCE_DIR}/SAX.c"
|
||||
"${LIBXML2_SOURCE_DIR}/entities.c"
|
||||
"${LIBXML2_SOURCE_DIR}/encoding.c"
|
||||
"${LIBXML2_SOURCE_DIR}/error.c"
|
||||
"${LIBXML2_SOURCE_DIR}/parserInternals.c"
|
||||
"${LIBXML2_SOURCE_DIR}/parser.c"
|
||||
"${LIBXML2_SOURCE_DIR}/tree.c"
|
||||
"${LIBXML2_SOURCE_DIR}/hash.c"
|
||||
"${LIBXML2_SOURCE_DIR}/list.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlIO.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlmemory.c"
|
||||
"${LIBXML2_SOURCE_DIR}/uri.c"
|
||||
"${LIBXML2_SOURCE_DIR}/valid.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xlink.c"
|
||||
"${LIBXML2_SOURCE_DIR}/HTMLparser.c"
|
||||
"${LIBXML2_SOURCE_DIR}/HTMLtree.c"
|
||||
"${LIBXML2_SOURCE_DIR}/debugXML.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xpath.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xpointer.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xinclude.c"
|
||||
"${LIBXML2_SOURCE_DIR}/nanohttp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/nanoftp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/catalog.c"
|
||||
"${LIBXML2_SOURCE_DIR}/globals.c"
|
||||
"${LIBXML2_SOURCE_DIR}/threads.c"
|
||||
"${LIBXML2_SOURCE_DIR}/c14n.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlstring.c"
|
||||
"${LIBXML2_SOURCE_DIR}/buf.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlregexp.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlschemas.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlschemastypes.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlunicode.c"
|
||||
"${LIBXML2_SOURCE_DIR}/triostr.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlreader.c"
|
||||
"${LIBXML2_SOURCE_DIR}/relaxng.c"
|
||||
"${LIBXML2_SOURCE_DIR}/dict.c"
|
||||
"${LIBXML2_SOURCE_DIR}/SAX2.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlwriter.c"
|
||||
"${LIBXML2_SOURCE_DIR}/legacy.c"
|
||||
"${LIBXML2_SOURCE_DIR}/chvalid.c"
|
||||
"${LIBXML2_SOURCE_DIR}/pattern.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlsave.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xmlmodule.c"
|
||||
"${LIBXML2_SOURCE_DIR}/schematron.c"
|
||||
"${LIBXML2_SOURCE_DIR}/xzlib.c"
|
||||
)
|
||||
add_library(_libxml2 ${SRCS})
|
||||
|
||||
#target_link_libraries(_libxml2 PRIVATE td_contrib::zlib)
|
||||
target_link_libraries(_libxml2 PRIVATE zlib)
|
||||
|
||||
target_include_directories(_libxml2 BEFORE PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include")
|
||||
target_include_directories(_libxml2 BEFORE PUBLIC "${LIBXML2_SOURCE_DIR}/include")
|
||||
|
||||
add_library(td_contrib::libxml2 ALIAS _libxml2)
|
|
@ -0,0 +1,285 @@
|
|||
/* config.h. Generated from config.h.in by configure. */
|
||||
/* config.h.in. Generated from configure.ac by autoheader. */
|
||||
|
||||
/* Type cast for the gethostbyname() argument */
|
||||
#define GETHOSTBYNAME_ARG_CAST /**/
|
||||
|
||||
/* Define to 1 if you have the <arpa/inet.h> header file. */
|
||||
#define HAVE_ARPA_INET_H 1
|
||||
|
||||
/* Define to 1 if you have the <arpa/nameser.h> header file. */
|
||||
#define HAVE_ARPA_NAMESER_H 1
|
||||
|
||||
/* Whether struct sockaddr::__ss_family exists */
|
||||
/* #undef HAVE_BROKEN_SS_FAMILY */
|
||||
|
||||
/* Define to 1 if you have the <ctype.h> header file. */
|
||||
#define HAVE_CTYPE_H 1
|
||||
|
||||
/* Define to 1 if you have the <dirent.h> header file. */
|
||||
#define HAVE_DIRENT_H 1
|
||||
|
||||
/* Define to 1 if you have the <dlfcn.h> header file. */
|
||||
#define HAVE_DLFCN_H 1
|
||||
|
||||
/* Have dlopen based dso */
|
||||
#define HAVE_DLOPEN /**/
|
||||
|
||||
/* Define to 1 if you have the <dl.h> header file. */
|
||||
/* #undef HAVE_DL_H */
|
||||
|
||||
/* Define to 1 if you have the <errno.h> header file. */
|
||||
#define HAVE_ERRNO_H 1
|
||||
|
||||
/* Define to 1 if you have the <fcntl.h> header file. */
|
||||
#define HAVE_FCNTL_H 1
|
||||
|
||||
/* Define to 1 if you have the <float.h> header file. */
|
||||
#define HAVE_FLOAT_H 1
|
||||
|
||||
/* Define to 1 if you have the `fprintf' function. */
|
||||
#define HAVE_FPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `ftime' function. */
|
||||
#define HAVE_FTIME 1
|
||||
|
||||
/* Define if getaddrinfo is there */
|
||||
#define HAVE_GETADDRINFO /**/
|
||||
|
||||
/* Define to 1 if you have the `gettimeofday' function. */
|
||||
#define HAVE_GETTIMEOFDAY 1
|
||||
|
||||
/* Define to 1 if you have the <inttypes.h> header file. */
|
||||
#define HAVE_INTTYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the `isascii' function. */
|
||||
#define HAVE_ISASCII 1
|
||||
|
||||
/* Define if isinf is there */
|
||||
#define HAVE_ISINF /**/
|
||||
|
||||
/* Define if isnan is there */
|
||||
#define HAVE_ISNAN /**/
|
||||
|
||||
/* Define if history library is there (-lhistory) */
|
||||
/* #undef HAVE_LIBHISTORY */
|
||||
|
||||
/* Define if pthread library is there (-lpthread) */
|
||||
#define HAVE_LIBPTHREAD /**/
|
||||
|
||||
/* Define if readline library is there (-lreadline) */
|
||||
/* #undef HAVE_LIBREADLINE */
|
||||
|
||||
/* Define to 1 if you have the <limits.h> header file. */
|
||||
#define HAVE_LIMITS_H 1
|
||||
|
||||
/* Define to 1 if you have the `localtime' function. */
|
||||
#define HAVE_LOCALTIME 1
|
||||
|
||||
/* Define to 1 if you have the <lzma.h> header file. */
|
||||
/* #undef HAVE_LZMA_H */
|
||||
|
||||
/* Define to 1 if you have the <malloc.h> header file. */
|
||||
#define HAVE_MALLOC_H 1
|
||||
|
||||
/* Define to 1 if you have the <math.h> header file. */
|
||||
#define HAVE_MATH_H 1
|
||||
|
||||
/* Define to 1 if you have the <memory.h> header file. */
|
||||
#define HAVE_MEMORY_H 1
|
||||
|
||||
/* Define to 1 if you have the `mmap' function. */
|
||||
#define HAVE_MMAP 1
|
||||
|
||||
/* Define to 1 if you have the `munmap' function. */
|
||||
#define HAVE_MUNMAP 1
|
||||
|
||||
/* mmap() is no good without munmap() */
|
||||
#if defined(HAVE_MMAP) && !defined(HAVE_MUNMAP)
|
||||
# undef /**/ HAVE_MMAP
|
||||
#endif
|
||||
|
||||
/* Define to 1 if you have the <ndir.h> header file, and it defines `DIR'. */
|
||||
/* #undef HAVE_NDIR_H */
|
||||
|
||||
/* Define to 1 if you have the <netdb.h> header file. */
|
||||
#define HAVE_NETDB_H 1
|
||||
|
||||
/* Define to 1 if you have the <netinet/in.h> header file. */
|
||||
#define HAVE_NETINET_IN_H 1
|
||||
|
||||
/* Define to 1 if you have the <poll.h> header file. */
|
||||
#define HAVE_POLL_H 1
|
||||
|
||||
/* Define to 1 if you have the `printf' function. */
|
||||
#define HAVE_PRINTF 1
|
||||
|
||||
/* Define if <pthread.h> is there */
|
||||
#define HAVE_PTHREAD_H /**/
|
||||
|
||||
/* Define to 1 if you have the `putenv' function. */
|
||||
#define HAVE_PUTENV 1
|
||||
|
||||
/* Define to 1 if you have the `rand' function. */
|
||||
#define HAVE_RAND 1
|
||||
|
||||
/* Define to 1 if you have the `rand_r' function. */
|
||||
#define HAVE_RAND_R 1
|
||||
|
||||
/* Define to 1 if you have the <resolv.h> header file. */
|
||||
#define HAVE_RESOLV_H 1
|
||||
|
||||
/* Have shl_load based dso */
|
||||
/* #undef HAVE_SHLLOAD */
|
||||
|
||||
/* Define to 1 if you have the `signal' function. */
|
||||
#define HAVE_SIGNAL 1
|
||||
|
||||
/* Define to 1 if you have the <signal.h> header file. */
|
||||
#define HAVE_SIGNAL_H 1
|
||||
|
||||
/* Define to 1 if you have the `snprintf' function. */
|
||||
#define HAVE_SNPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `sprintf' function. */
|
||||
#define HAVE_SPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `srand' function. */
|
||||
#define HAVE_SRAND 1
|
||||
|
||||
/* Define to 1 if you have the `sscanf' function. */
|
||||
#define HAVE_SSCANF 1
|
||||
|
||||
/* Define to 1 if you have the `stat' function. */
|
||||
#define HAVE_STAT 1
|
||||
|
||||
/* Define to 1 if you have the <stdarg.h> header file. */
|
||||
#define HAVE_STDARG_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdint.h> header file. */
|
||||
#define HAVE_STDINT_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdlib.h> header file. */
|
||||
#define HAVE_STDLIB_H 1
|
||||
|
||||
/* Define to 1 if you have the `strftime' function. */
|
||||
#define HAVE_STRFTIME 1
|
||||
|
||||
/* Define to 1 if you have the <strings.h> header file. */
|
||||
#define HAVE_STRINGS_H 1
|
||||
|
||||
/* Define to 1 if you have the <string.h> header file. */
|
||||
#define HAVE_STRING_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/dir.h> header file, and it defines `DIR'.
|
||||
*/
|
||||
/* #undef HAVE_SYS_DIR_H */
|
||||
|
||||
/* Define to 1 if you have the <sys/mman.h> header file. */
|
||||
#define HAVE_SYS_MMAN_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/ndir.h> header file, and it defines `DIR'.
|
||||
*/
|
||||
/* #undef HAVE_SYS_NDIR_H */
|
||||
|
||||
/* Define to 1 if you have the <sys/select.h> header file. */
|
||||
#define HAVE_SYS_SELECT_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/socket.h> header file. */
|
||||
#define HAVE_SYS_SOCKET_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/stat.h> header file. */
|
||||
#define HAVE_SYS_STAT_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/timeb.h> header file. */
|
||||
#define HAVE_SYS_TIMEB_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/time.h> header file. */
|
||||
#define HAVE_SYS_TIME_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/types.h> header file. */
|
||||
#define HAVE_SYS_TYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the `time' function. */
|
||||
#define HAVE_TIME 1
|
||||
|
||||
/* Define to 1 if you have the <time.h> header file. */
|
||||
#define HAVE_TIME_H 1
|
||||
|
||||
/* Define to 1 if you have the <unistd.h> header file. */
|
||||
#define HAVE_UNISTD_H 1
|
||||
|
||||
/* Whether va_copy() is available */
|
||||
#define HAVE_VA_COPY 1
|
||||
|
||||
/* Define to 1 if you have the `vfprintf' function. */
|
||||
#define HAVE_VFPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `vsnprintf' function. */
|
||||
#define HAVE_VSNPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the `vsprintf' function. */
|
||||
#define HAVE_VSPRINTF 1
|
||||
|
||||
/* Define to 1 if you have the <zlib.h> header file. */
|
||||
/* #undef HAVE_ZLIB_H */
|
||||
|
||||
/* Whether __va_copy() is available */
|
||||
/* #undef HAVE___VA_COPY */
|
||||
|
||||
/* Define as const if the declaration of iconv() needs const. */
|
||||
#define ICONV_CONST
|
||||
|
||||
/* Define to the sub-directory where libtool stores uninstalled libraries. */
|
||||
#define LT_OBJDIR ".libs/"
|
||||
|
||||
/* Name of package */
|
||||
#define PACKAGE "libxml2"
|
||||
|
||||
/* Define to the address where bug reports for this package should be sent. */
|
||||
#define PACKAGE_BUGREPORT ""
|
||||
|
||||
/* Define to the full name of this package. */
|
||||
#define PACKAGE_NAME ""
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#define PACKAGE_STRING ""
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#define PACKAGE_TARNAME ""
|
||||
|
||||
/* Define to the home page for this package. */
|
||||
#define PACKAGE_URL ""
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#define PACKAGE_VERSION ""
|
||||
|
||||
/* Type cast for the send() function 2nd arg */
|
||||
#define SEND_ARG2_CAST /**/
|
||||
|
||||
/* Define to 1 if you have the ANSI C header files. */
|
||||
#define STDC_HEADERS 1
|
||||
|
||||
/* Support for IPv6 */
|
||||
#define SUPPORT_IP6 /**/
|
||||
|
||||
/* Define if va_list is an array type */
|
||||
#define VA_LIST_IS_ARRAY 1
|
||||
|
||||
/* Version number of package */
|
||||
#define VERSION "2.9.8"
|
||||
|
||||
/* Determine what socket length (socklen_t) data type is */
|
||||
#define XML_SOCKLEN_T socklen_t
|
||||
|
||||
/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
|
||||
<pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
|
||||
#define below would cause a syntax error. */
|
||||
/* #undef _UINT32_T */
|
||||
|
||||
/* ss_family is not defined here, use __ss_family instead */
|
||||
/* #undef ss_family */
|
||||
|
||||
/* Define to the type of an unsigned integer type of width exactly 32 bits if
|
||||
such a type exists and the standard includes do not define it. */
|
||||
/* #undef uint32_t */
|
|
@ -0,0 +1,501 @@
|
|||
/*
|
||||
* Summary: compile-time version information
|
||||
* Description: compile-time version information for the XML library
|
||||
*
|
||||
* Copy: See Copyright for the status of this software.
|
||||
*
|
||||
* Author: Daniel Veillard
|
||||
*/
|
||||
|
||||
#ifndef __XML_VERSION_H__
|
||||
#define __XML_VERSION_H__
|
||||
|
||||
#include <libxml/xmlexports.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* use those to be sure nothing nasty will happen if
|
||||
* your library and includes mismatch
|
||||
*/
|
||||
#ifndef LIBXML2_COMPILING_MSCCDEF
|
||||
XMLPUBFUN void XMLCALL xmlCheckVersion(int version);
|
||||
#endif /* LIBXML2_COMPILING_MSCCDEF */
|
||||
|
||||
/**
|
||||
* LIBXML_DOTTED_VERSION:
|
||||
*
|
||||
* the version string like "1.2.3"
|
||||
*/
|
||||
#define LIBXML_DOTTED_VERSION "2.10.3"
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION:
|
||||
*
|
||||
* the version number: 1.2.3 value is 10203
|
||||
*/
|
||||
#define LIBXML_VERSION 21003
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION_STRING:
|
||||
*
|
||||
* the version number string, 1.2.3 value is "10203"
|
||||
*/
|
||||
#define LIBXML_VERSION_STRING "21003"
|
||||
|
||||
/**
|
||||
* LIBXML_VERSION_EXTRA:
|
||||
*
|
||||
* extra version information, used to show a git commit description
|
||||
*/
|
||||
#define LIBXML_VERSION_EXTRA ""
|
||||
|
||||
/**
|
||||
* LIBXML_TEST_VERSION:
|
||||
*
|
||||
* Macro to check that the libxml version in use is compatible with
|
||||
* the version the software has been compiled against
|
||||
*/
|
||||
#define LIBXML_TEST_VERSION xmlCheckVersion(21003);
|
||||
|
||||
#ifndef VMS
|
||||
#if 0
|
||||
/**
|
||||
* WITH_TRIO:
|
||||
*
|
||||
* defined if the trio support need to be configured in
|
||||
*/
|
||||
#define WITH_TRIO
|
||||
#else
|
||||
/**
|
||||
* WITHOUT_TRIO:
|
||||
*
|
||||
* defined if the trio support should not be configured in
|
||||
*/
|
||||
#define WITHOUT_TRIO
|
||||
#endif
|
||||
#else /* VMS */
|
||||
/**
|
||||
* WITH_TRIO:
|
||||
*
|
||||
* defined if the trio support need to be configured in
|
||||
*/
|
||||
#define WITH_TRIO 1
|
||||
#endif /* VMS */
|
||||
|
||||
/**
|
||||
* LIBXML_THREAD_ENABLED:
|
||||
*
|
||||
* Whether the thread support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_THREAD_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_THREAD_ALLOC_ENABLED:
|
||||
*
|
||||
* Whether the allocation hooks are per-thread
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_THREAD_ALLOC_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_TREE_ENABLED:
|
||||
*
|
||||
* Whether the DOM like tree manipulation API support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_TREE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_OUTPUT_ENABLED:
|
||||
*
|
||||
* Whether the serialization/saving support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_OUTPUT_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_PUSH_ENABLED:
|
||||
*
|
||||
* Whether the push parsing interfaces are configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_PUSH_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_READER_ENABLED:
|
||||
*
|
||||
* Whether the xmlReader parsing interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_READER_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_PATTERN_ENABLED:
|
||||
*
|
||||
* Whether the xmlPattern node selection interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_PATTERN_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_WRITER_ENABLED:
|
||||
*
|
||||
* Whether the xmlWriter saving interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_WRITER_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SAX1_ENABLED:
|
||||
*
|
||||
* Whether the older SAX1 interface is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SAX1_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_FTP_ENABLED:
|
||||
*
|
||||
* Whether the FTP support is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_FTP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_HTTP_ENABLED:
|
||||
*
|
||||
* Whether the HTTP support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_HTTP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_VALID_ENABLED:
|
||||
*
|
||||
* Whether the DTD validation support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_VALID_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_HTML_ENABLED:
|
||||
*
|
||||
* Whether the HTML support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_HTML_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_LEGACY_ENABLED:
|
||||
*
|
||||
* Whether the deprecated APIs are compiled in for compatibility
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_LEGACY_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_C14N_ENABLED:
|
||||
*
|
||||
* Whether the Canonicalization support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_C14N_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_CATALOG_ENABLED:
|
||||
*
|
||||
* Whether the Catalog support is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_CATALOG_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPATH_ENABLED:
|
||||
*
|
||||
* Whether XPath is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XPATH_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPTR_ENABLED:
|
||||
*
|
||||
* Whether XPointer is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XPTR_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XPTR_LOCS_ENABLED:
|
||||
*
|
||||
* Whether support for XPointer locations is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_XPTR_LOCS_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_XINCLUDE_ENABLED:
|
||||
*
|
||||
* Whether XInclude is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_XINCLUDE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ICONV_ENABLED:
|
||||
*
|
||||
* Whether iconv support is available
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_ICONV_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ICU_ENABLED:
|
||||
*
|
||||
* Whether icu support is available
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_ICU_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ISO8859X_ENABLED:
|
||||
*
|
||||
* Whether ISO-8859-* support is made available in case iconv is not
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_ISO8859X_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_DEBUG_ENABLED:
|
||||
*
|
||||
* Whether Debugging module is configured in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_DEBUG_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* DEBUG_MEMORY_LOCATION:
|
||||
*
|
||||
* Whether the memory debugging is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define DEBUG_MEMORY_LOCATION
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_DEBUG_RUNTIME:
|
||||
*
|
||||
* Whether the runtime debugging is configured in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_DEBUG_RUNTIME
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_UNICODE_ENABLED:
|
||||
*
|
||||
* Whether the Unicode related interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_UNICODE_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_REGEXP_ENABLED:
|
||||
*
|
||||
* Whether the regular expressions interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_REGEXP_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_AUTOMATA_ENABLED:
|
||||
*
|
||||
* Whether the automata interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_AUTOMATA_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_EXPR_ENABLED:
|
||||
*
|
||||
* Whether the formal expressions interfaces are compiled in
|
||||
*
|
||||
* This code is unused and disabled unconditionally for now.
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_EXPR_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SCHEMAS_ENABLED:
|
||||
*
|
||||
* Whether the Schemas validation interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SCHEMAS_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_SCHEMATRON_ENABLED:
|
||||
*
|
||||
* Whether the Schematron validation interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_SCHEMATRON_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_MODULES_ENABLED:
|
||||
*
|
||||
* Whether the module interfaces are compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_MODULES_ENABLED
|
||||
/**
|
||||
* LIBXML_MODULE_EXTENSION:
|
||||
*
|
||||
* the string suffix used by dynamic modules (usually shared libraries)
|
||||
*/
|
||||
#define LIBXML_MODULE_EXTENSION ".so"
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ZLIB_ENABLED:
|
||||
*
|
||||
* Whether the Zlib support is compiled in
|
||||
*/
|
||||
#if 1
|
||||
#define LIBXML_ZLIB_ENABLED
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_LZMA_ENABLED:
|
||||
*
|
||||
* Whether the Lzma support is compiled in
|
||||
*/
|
||||
#if 0
|
||||
#define LIBXML_LZMA_ENABLED
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
|
||||
/**
|
||||
* ATTRIBUTE_UNUSED:
|
||||
*
|
||||
* Macro used to signal to GCC unused function parameters
|
||||
*/
|
||||
|
||||
#ifndef ATTRIBUTE_UNUSED
|
||||
# if ((__GNUC__ > 2) || ((__GNUC__ == 2) && (__GNUC_MINOR__ >= 7)))
|
||||
# define ATTRIBUTE_UNUSED __attribute__((unused))
|
||||
# else
|
||||
# define ATTRIBUTE_UNUSED
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ATTR_ALLOC_SIZE:
|
||||
*
|
||||
* Macro used to indicate to GCC this is an allocator function
|
||||
*/
|
||||
|
||||
#ifndef LIBXML_ATTR_ALLOC_SIZE
|
||||
# if (!defined(__clang__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))))
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x) __attribute__((alloc_size(x)))
|
||||
# else
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
# endif
|
||||
#else
|
||||
# define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
#endif
|
||||
|
||||
/**
|
||||
* LIBXML_ATTR_FORMAT:
|
||||
*
|
||||
* Macro used to indicate to GCC the parameter are printf like
|
||||
*/
|
||||
|
||||
#ifndef LIBXML_ATTR_FORMAT
|
||||
# if ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)))
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args) __attribute__((__format__(__printf__,fmt,args)))
|
||||
# else
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
# endif
|
||||
#else
|
||||
# define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
#endif
|
||||
|
||||
#ifndef XML_DEPRECATED
|
||||
# ifdef IN_LIBXML
|
||||
# define XML_DEPRECATED
|
||||
# else
|
||||
/* Available since at least GCC 3.1 */
|
||||
# define XML_DEPRECATED __attribute__((deprecated))
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#else /* ! __GNUC__ */
|
||||
/**
|
||||
* ATTRIBUTE_UNUSED:
|
||||
*
|
||||
* Macro used to signal to GCC unused function parameters
|
||||
*/
|
||||
#define ATTRIBUTE_UNUSED
|
||||
/**
|
||||
* LIBXML_ATTR_ALLOC_SIZE:
|
||||
*
|
||||
* Macro used to indicate to GCC this is an allocator function
|
||||
*/
|
||||
#define LIBXML_ATTR_ALLOC_SIZE(x)
|
||||
/**
|
||||
* LIBXML_ATTR_FORMAT:
|
||||
*
|
||||
* Macro used to indicate to GCC the parameter are printf like
|
||||
*/
|
||||
#define LIBXML_ATTR_FORMAT(fmt,args)
|
||||
/**
|
||||
* XML_DEPRECATED:
|
||||
*
|
||||
* Macro used to indicate that a function, variable, type or struct member
|
||||
* is deprecated.
|
||||
*/
|
||||
#ifndef XML_DEPRECATED
|
||||
#define XML_DEPRECATED
|
||||
#endif
|
||||
#endif /* __GNUC__ */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif /* __cplusplus */
|
||||
#endif
|
|
@ -109,7 +109,7 @@ If you are using Maven to manage your project, simply add the following dependen
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -15,6 +15,19 @@ When inserting data using parameter binding, it can avoid the resource consumpti
|
|||
|
||||
**Tips: It is recommended to use parameter binding for data insertion**
|
||||
|
||||
:::note
|
||||
We only recommend using the following two forms of SQL for parameter binding data insertion:
|
||||
|
||||
```sql
|
||||
a. Subtables already exists:
|
||||
1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?)
|
||||
b. Automatic table creation on insert:
|
||||
1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?)
|
||||
2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?)
|
||||
```
|
||||
|
||||
:::
|
||||
|
||||
Next, we continue to use smart meters as an example to demonstrate the efficient writing functionality of parameter binding with various language connectors:
|
||||
|
||||
1. Prepare a parameterized SQL insert statement for inserting data into the supertable `meters`. This statement allows dynamically specifying subtable names, tags, and column values.
|
||||
|
|
|
@ -26,7 +26,8 @@ Flink Connector supports all platforms that can run Flink 1.19 and above version
|
|||
|
||||
| Flink Connector Version | Major Changes | TDengine Version|
|
||||
|-------------------------| ------------------------------------ | ---------------- |
|
||||
| 2.0.0 | 1.Support SQL queries on data in TDengine database. <br/> 2. Support CDC subscription to data in TDengine database.<br/> 3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.0 and higher|
|
||||
| 2.0.1 | Sink supports writing types from Rowdata implementations.| - |
|
||||
| 2.0.0 | 1.Support SQL queries on data in TDengine database. <br/> 2. Support CDC subscription to data in TDengine database.<br/> 3. Supports reading and writing to TDengine database using Table SQL. | 3.3.5.1 and higher|
|
||||
| 1.0.0 | Support Sink function to write data from other sources to TDengine in the future.| 3.3.2.0 and higher|
|
||||
|
||||
## Exception and error codes
|
||||
|
@ -114,7 +115,7 @@ If using Maven to manage a project, simply add the following dependencies in pom
|
|||
<dependency>
|
||||
<groupId>com.taosdata.flink</groupId>
|
||||
<artifactId>flink-connector-tdengine</artifactId>
|
||||
<version>2.0.0</version>
|
||||
<version>2.0.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -77,12 +77,7 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|
|||
|minReservedMemorySize | |Not supported |The minimum reserved system available memory size, all memory except reserved can be used for queries, unit: MB, default reserved size is 20% of system physical memory, value range 1024-1000000000|
|
||||
|singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode), exceeding this limit will return an error, unit: MB, default value: 0 (no limit), value range 0-1000000000|
|
||||
|filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0|
|
||||
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan|
|
||||
|queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
||||
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||
|pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting|
|
||||
|
||||
### Region Related
|
||||
|
|
|
@ -403,7 +403,7 @@ Specify the configuration parameters for tag and data columns in `super_tables`
|
|||
|
||||
- **min**: The minimum value for the data type of the column/tag. Generated values will be greater than or equal to the minimum value.
|
||||
|
||||
- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the minimum value.
|
||||
- **max**: The maximum value for the data type of the column/tag. Generated values will be less than the maximum value.
|
||||
|
||||
- **scalingFactor**: Floating-point precision enhancement factor, only effective when the data type is float/double, valid values range from 1 to 1000000 positive integers. Used to enhance the precision of generated floating points, especially when min or max values are small. This attribute enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 means enhancing the precision by 1 decimal place, 100 means 2 places, and so on.
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ database_option: {
|
|||
- MINROWS: The minimum number of records in a file block, default is 100.
|
||||
- KEEP: Indicates the number of days data files are kept, default value is 3650, range [1, 365000], and must be greater than or equal to 3 times the DURATION parameter value. The database will automatically delete data that has been saved for longer than the KEEP value to free up storage space. KEEP can use unit-specified formats, such as KEEP 100h, KEEP 10d, etc., supports m (minutes), h (hours), and d (days) three units. It can also be written without a unit, like KEEP 50, where the default unit is days. The enterprise version supports multi-tier storage feature, thus, multiple retention times can be set (multiple separated by commas, up to 3, satisfying keep 0 \<= keep 1 \<= keep 2, such as KEEP 100h,100d,3650d); the community version does not support multi-tier storage feature (even if multiple retention times are configured, it will not take effect, KEEP will take the longest retention time).
|
||||
- KEEP_TIME_OFFSET: Effective from version 3.2.0.0. The delay execution time for deleting or migrating data that has been saved for longer than the KEEP value, default value is 0 (hours). After the data file's save time exceeds KEEP, the deletion or migration operation will not be executed immediately, but will wait an additional interval specified by this parameter, to avoid peak business periods.
|
||||
- STT_TRIGGER: Indicates the number of file merges triggered by disk files. The open-source version is fixed at 1, the enterprise version can be set from 1 to 16. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value.
|
||||
- STT_TRIGGER: Indicates the number of file merges triggered by disk files. For scenarios with few tables and high-frequency writing, this parameter is recommended to use the default configuration; for scenarios with many tables and low-frequency writing, this parameter is recommended to be set to a larger value.
|
||||
- SINGLE_STABLE: Indicates whether only one supertable can be created in this database, used in cases where the supertable has a very large number of columns.
|
||||
- 0: Indicates that multiple supertables can be created.
|
||||
- 1: Indicates that only one supertable can be created.
|
||||
|
@ -144,10 +144,6 @@ You can view cacheload through show \<db_name>.vgroups;
|
|||
|
||||
If cacheload is very close to cachesize, then cachesize may be too small. If cacheload is significantly less than cachesize, then cachesize is sufficient. You can decide whether to modify cachesize based on this principle. The specific modification value can be determined based on the available system memory, whether to double it or increase it several times.
|
||||
|
||||
4. stt_trigger
|
||||
|
||||
Please stop database writing before modifying the stt_trigger parameter.
|
||||
|
||||
:::note
|
||||
Other parameters are not supported for modification in version 3.0.0.0
|
||||
|
||||
|
|
|
@ -2171,7 +2171,7 @@ ignore_negative: {
|
|||
|
||||
**Usage Instructions**:
|
||||
|
||||
- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE() from.
|
||||
- Can be used with the columns associated with the selection. For example: select _rowts, DERIVATIVE(col1, 1s, 1) from tb1.
|
||||
|
||||
### DIFF
|
||||
|
||||
|
|
|
@ -33,6 +33,7 @@ The JDBC driver implementation for TDengine strives to be consistent with relati
|
|||
|
||||
| taos-jdbcdriver Version | Major Changes | TDengine Version |
|
||||
| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ |
|
||||
| 3.5.2 | Fixed WebSocket result set free bug. | - |
|
||||
| 3.5.1 | Fixed the getObject issue in data subscription. | - |
|
||||
| 3.5.0 | 1. Optimized the performance of WebSocket connection parameter binding, supporting parameter binding queries using binary data. <br/> 2. Optimized the performance of small queries in WebSocket connection. <br/> 3. Added support for setting time zone and app info on WebSocket connection. | 3.3.5.0 and higher |
|
||||
| 3.4.0 | 1. Replaced fastjson library with jackson. <br/> 2. WebSocket uses a separate protocol identifier. <br/> 3. Optimized background thread usage to avoid user misuse leading to timeouts. | - |
|
||||
|
|
|
@ -25,6 +25,10 @@ Download links for TDengine 3.x version installation packages are as follows:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.3.5.2
|
||||
|
||||
<Release type="tdengine" version="3.3.5.2" />
|
||||
|
||||
## 3.3.5.0
|
||||
|
||||
<Release type="tdengine" version="3.3.5.0" />
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
---
|
||||
title: TDengine 3.3.5.2 Release Notes
|
||||
sidebar_label: 3.3.5.2
|
||||
description: Version 3.3.5.2 Notes
|
||||
slug: /release-history/release-notes/3.3.5.2
|
||||
---
|
||||
|
||||
## Features
|
||||
1. feat: taosX now supports multiple stables with template for MQTT
|
||||
|
||||
## Enhancements
|
||||
1. enh: improve taosX error message if database is invalid
|
||||
2. enh: use poetry group dependencies and reduce dep when install [#251](https://github.com/taosdata/taos-connector-python/issues/251)
|
||||
3. enh: improve backup restore using taosX
|
||||
4. enh: during the multi-level storage data migration, if the migration time is too long, it may cause the Vnode to switch leader
|
||||
5. enh: adjust the systemctl strategy for managing the taosd process, if three consecutive restarts fail within 60 seconds, the next restart will be delayed until 900 seconds later
|
||||
|
||||
## Fixes
|
||||
1. fix: the maxRetryWaitTime parameter is used to control the maximum reconnection timeout time for the client when the cluster is unable to provide services, but it does not take effect when encountering a Sync timeout error
|
||||
2. fix: supports immediate subscription to the new tag value after modifying the tag value of the sub-table
|
||||
3. fix: the tmq_consumer_poll function for data subscription does not return an error code when the call fails
|
||||
4. fix: taosd may crash when more than 100 views are created and the show views command is executed
|
||||
5. fix: when using stmt2 to insert data, if not all data columns are bound, the insertion operation will fail
|
||||
6. fix: when using stmt2 to insert data, if the database name or table name is enclosed in backticks, the insertion operation will fail
|
||||
7. fix: when closing a vnode, if there are ongoing file merge tasks, taosd may crash
|
||||
8. fix: frequent execution of the “drop table with tb_uid” statement may lead to a deadlock in taosd
|
||||
9. fix: the potential deadlock during the switching of log files
|
||||
10. fix: prohibit the creation of databases with the same names as system databases (information_schema, performance_schema)
|
||||
11. fix: when the inner query of a nested query comes from a super table, the sorting information cannot be pushed up
|
||||
12. fix: incorrect error reporting when attempting to write Geometry data types that do not conform to topological specifications through the STMT interface
|
||||
13. fix: when using the percentile function and session window in a query statement, if an error occurs, taosd may crash
|
||||
14. fix: the issue of being unable to dynamically modify system parameters
|
||||
15. fix: random error of tranlict transaction in replication
|
||||
16. fix: when the same consumer executes the unsubscribe operation and immediately attempts to subscribe to other different topics, the subscription API will return an error
|
||||
17. fix: fix CVE-2022-28948 security issue in go connector
|
||||
18. fix: when a subquery in a view contains an ORDER BY clause with an alias, and the query function itself also has an alias, querying the view will result in an error
|
||||
19. fix: when changing the database from a single replica to a multi replica, if there are some metadata generated by earlier versions that are no longer used in the new version, the modification operation will fail
|
||||
20. fix: column names were not correctly copied when using SELECT * FROM subqueries
|
||||
21. fix: when performing max/min function on string type data, the results are inaccurate and taosd will crash
|
||||
22. fix: stream computing does not support the use of the HAVING clause, but no error is reported during creation
|
||||
23. fix: the version information displayed by taos shell for the server is inaccurate, such as being unable to correctly distinguish between the community edition and the enterprise edition
|
||||
24. fix: in certain specific query scenarios, when JOIN and CAST are used together, taosd may crash
|
||||
|
|
@ -5,6 +5,7 @@ slug: /release-history/release-notes
|
|||
|
||||
[3.3.5.0](./3-3-5-0/)
|
||||
|
||||
[3.3.5.2](./3.3.5.2)
|
||||
[3.3.4.8](./3-3-4-8/)
|
||||
|
||||
[3.3.4.3](./3-3-4-3/)
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.locationtech.jts</groupId>
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
package com.taosdata.example;
|
||||
|
||||
import com.alibaba.fastjson.JSON;
|
||||
import com.taosdata.jdbc.AbstractStatement;
|
||||
|
||||
import java.sql.*;
|
||||
|
|
|
@ -104,8 +104,9 @@ public class JdbcDemo {
|
|||
|
||||
private void executeQuery(String sql) {
|
||||
long start = System.currentTimeMillis();
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
ResultSet resultSet = statement.executeQuery(sql);
|
||||
try (Statement statement = connection.createStatement();
|
||||
ResultSet resultSet = statement.executeQuery(sql)) {
|
||||
|
||||
long end = System.currentTimeMillis();
|
||||
printSql(sql, true, (end - start));
|
||||
Util.printResult(resultSet);
|
||||
|
|
|
@ -47,7 +47,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
|
||||
</dependencies>
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
<!-- druid -->
|
||||
<dependency>
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
|
|
|
@ -47,7 +47,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
|
|
|
@ -70,7 +70,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
|
|
|
@ -67,7 +67,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
<!-- <scope>system</scope>-->
|
||||
<!-- <systemPath>${project.basedir}/src/main/resources/lib/taos-jdbcdriver-2.0.15-dist.jar</systemPath>-->
|
||||
</dependency>
|
||||
|
|
|
@ -263,7 +263,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
Class<SourceRecords<RowData>> typeClass = (Class<SourceRecords<RowData>>) (Class<?>) SourceRecords.class;
|
||||
SourceSplitSql sql = new SourceSplitSql("select ts, `current`, voltage, phase, tbname from meters");
|
||||
TDengineSource<SourceRecords<RowData>> source = new TDengineSource<>(connProps, sql, typeClass);
|
||||
DataStreamSource<SourceRecords<RowData>> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source");
|
||||
DataStreamSource<SourceRecords<RowData>> input = env.fromSource(source, WatermarkStrategy.noWatermarks(), "tdengine-source");
|
||||
DataStream<String> resultStream = input.map((MapFunction<SourceRecords<RowData>, String>) records -> {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
Iterator<RowData> iterator = records.iterator();
|
||||
|
@ -304,7 +304,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "RowData");
|
||||
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
|
||||
TDengineCdcSource<RowData> tdengineSource = new TDengineCdcSource<>("topic_meters", config, RowData.class);
|
||||
DataStreamSource<RowData> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source");
|
||||
DataStreamSource<RowData> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
|
||||
DataStream<String> resultStream = input.map((MapFunction<RowData, String>) rowData -> {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("tsxx: " + rowData.getTimestamp(0, 0) +
|
||||
|
@ -343,7 +343,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
|
||||
Class<ConsumerRecords<RowData>> typeClass = (Class<ConsumerRecords<RowData>>) (Class<?>) ConsumerRecords.class;
|
||||
TDengineCdcSource<ConsumerRecords<RowData>> tdengineSource = new TDengineCdcSource<>("topic_meters", config, typeClass);
|
||||
DataStreamSource<ConsumerRecords<RowData>> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source");
|
||||
DataStreamSource<ConsumerRecords<RowData>> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
|
||||
DataStream<String> resultStream = input.map((MapFunction<ConsumerRecords<RowData>, String>) records -> {
|
||||
Iterator<ConsumerRecord<RowData>> iterator = records.iterator();
|
||||
StringBuilder sb = new StringBuilder();
|
||||
|
@ -388,7 +388,7 @@ splitSql.setSelect("ts, current, voltage, phase, groupid, location")
|
|||
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER, "com.taosdata.flink.entity.ResultDeserializer");
|
||||
config.setProperty(TDengineCdcParams.VALUE_DESERIALIZER_ENCODING, "UTF-8");
|
||||
TDengineCdcSource<ResultBean> tdengineSource = new TDengineCdcSource<>("topic_meters", config, ResultBean.class);
|
||||
DataStreamSource<ResultBean> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "kafka-source");
|
||||
DataStreamSource<ResultBean> input = env.fromSource(tdengineSource, WatermarkStrategy.noWatermarks(), "tdengine-source");
|
||||
DataStream<String> resultStream = input.map((MapFunction<ResultBean, String>) rowData -> {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append("ts: " + rowData.getTs() +
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
<!-- ANCHOR_END: dep-->
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
|||
<dependency>
|
||||
<groupId>com.taosdata.jdbc</groupId>
|
||||
<artifactId>taos-jdbcdriver</artifactId>
|
||||
<version>3.5.1</version>
|
||||
<version>3.5.2</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -15,6 +15,19 @@ import TabItem from "@theme/TabItem";
|
|||
|
||||
**Tips: 数据写入推荐使用参数绑定方式**
|
||||
|
||||
:::note
|
||||
我们只推荐使用下面两种形式的 SQL 进行参数绑定写入:
|
||||
|
||||
```sql
|
||||
一、确定子表存在:
|
||||
1. INSERT INTO meters (tbname, ts, current, voltage, phase) VALUES(?, ?, ?, ?, ?)
|
||||
二、自动建表:
|
||||
1. INSERT INTO meters (tbname, ts, current, voltage, phase, location, group_id) VALUES(?, ?, ?, ?, ?, ?, ?)
|
||||
2. INSERT INTO ? USING meters TAGS (?, ?) VALUES (?, ?, ?, ?)
|
||||
```
|
||||
|
||||
:::
|
||||
|
||||
下面我们继续以智能电表为例,展示各语言连接器使用参数绑定高效写入的功能:
|
||||
1. 准备一个参数化的 SQL 插入语句,用于向超级表 `meters` 中插入数据。这个语句允许动态地指定子表名、标签和列值。
|
||||
2. 循环生成多个子表及其对应的数据行。对于每个子表:
|
||||
|
|
|
@ -24,7 +24,8 @@ Flink Connector 支持所有能运行 Flink 1.19 及以上版本的平台。
|
|||
## 版本历史
|
||||
| Flink Connector 版本 | 主要变化 | TDengine 版本 |
|
||||
| ------------------| ------------------------------------ | ---------------- |
|
||||
| 2.0.0 | 1. 支持 SQL 查询 TDengine 数据库中的数据<br/> 2. 支持 CDC 订阅 TDengine 数据库中的数据<br/> 3. 支持 Table SQL 方式读取和写入 TDengine 数据库| 3.3.5.0 及以上版本 |
|
||||
| 2.0.1 | Sink 支持对所有继承自 RowData 并已实现的类型进行数据写入| - |
|
||||
| 2.0.0 | 1. 支持 SQL 查询 TDengine 数据库中的数据<br/> 2. 支持 CDC 订阅 TDengine 数据库中的数据<br/> 3. 支持 Table SQL 方式读取和写入 TDengine 数据库| 3.3.5.1 及以上版本 |
|
||||
| 1.0.0 | 支持 Sink 功能,将来自其他数据源的数据写入到 TDengine| 3.3.2.0 及以上版本|
|
||||
|
||||
## 异常和错误码
|
||||
|
@ -111,7 +112,7 @@ env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE);
|
|||
<dependency>
|
||||
<groupId>com.taosdata.flink</groupId>
|
||||
<artifactId>flink-connector-tdengine</artifactId>
|
||||
<version>2.0.0</version>
|
||||
<version>2.0.1</version>
|
||||
</dependency>
|
||||
```
|
||||
|
||||
|
|
|
@ -73,12 +73,7 @@ taosd 命令行参数如下
|
|||
|minReservedMemorySize | |不支持动态修改 |最小预留的系统可用内存数量,除预留外的内存都可以被用于查询,单位:MB,默认预留大小为系统物理内存的 20%,取值范围 1024 - 1000000000|
|
||||
|singleQueryMaxMemorySize| |不支持动态修改 |单个查询在单个节点(dnode)上可以使用的内存上限,超过该上限将返回错误,单位:MB,默认值:0(无上限),取值范围 0 - 1000000000|
|
||||
|filterScalarMode | |不支持动态修改 |强制使用标量过滤模式,0:关闭;1:开启,默认值 0|
|
||||
|queryPlannerTrace | |支持动态修改 立即生效 |内部参数,查询计划是否输出详细日志|
|
||||
|queryNodeChunkSize | |支持动态修改 立即生效 |内部参数,查询计划的块大小|
|
||||
|queryUseNodeAllocator | |支持动态修改 立即生效 |内部参数,查询计划的分配方法|
|
||||
|queryMaxConcurrentTables| |不支持动态修改 |内部参数,查询计划的并发数目|
|
||||
|queryRsmaTolerance | |不支持动态修改 |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒|
|
||||
|enableQueryHb | |支持动态修改 立即生效 |内部参数,是否发送查询心跳消息|
|
||||
|pqSortMemThreshold | |不支持动态修改 |内部参数,排序使用的内存阈值|
|
||||
|
||||
### 区域相关
|
||||
|
@ -194,7 +189,7 @@ charset 的有效值是 UTF-8。
|
|||
|numOfQnodeQueryThreads | |支持动态修改 重启生效 |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
||||
|numOfSnodeSharedThreads | |支持动态修改 重启生效 |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
||||
|numOfSnodeUniqueThreads | |支持动态修改 重启生效 |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
||||
|ratioOfVnodeStreamThreads | |支持动态修改 重启生效 |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4|
|
||||
|ratioOfVnodeStreamThreads | |支持动态修改 重启生效 |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 0.5|
|
||||
|ttlUnit | |不支持动态修改 |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400|
|
||||
|ttlPushInterval | |支持动态修改 立即生效 |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10|
|
||||
|ttlChangeOnWrite | |支持动态修改 立即生效 |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0|
|
||||
|
|
|
@ -319,7 +319,7 @@ INFO: Consumed total msgs: 3000, total rows: 30000000
|
|||
|
||||
- **min** : 数据类型的 列/标签 的最小值。生成的值将大于或等于最小值。
|
||||
|
||||
- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。
|
||||
- **max** : 数据类型的 列/标签 的最大值。生成的值将小于最大值。
|
||||
|
||||
- **scalingFactor** : 浮点数精度增强因子,仅当数据类型是 float/double 时生效,有效值范围为 1 至 1000000 的正整数。用于增强生成浮点数的精度,特别是在 min 或 max 值较小的情况下。此属性按 10 的幂次增强小数点后的精度:scalingFactor 为 10 表示增强 1 位小数精度,100 表示增强 2 位,依此类推。
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ database_option: {
|
|||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](../../operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/)
|
||||
|
||||
- KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。
|
||||
- STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。
|
||||
- STT_TRIGGER:表示落盘文件触发文件合并的个数。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。
|
||||
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
|
||||
- 0:表示可以创建多张超级表。
|
||||
- 1:表示只可以创建一张超级表。
|
||||
|
@ -146,10 +146,6 @@ alter_database_option: {
|
|||
|
||||
如果 cacheload 非常接近 cachesize,则 cachesize 可能过小。 如果 cacheload 明显小于 cachesize 则 cachesize 是够用的。可以根据这个原则判断是否需要修改 cachesize 。具体修改值可以根据系统可用内存情况来决定是加倍或者是提高几倍。
|
||||
|
||||
4. stt_trigger
|
||||
|
||||
在修改 stt_trigger 参数之前请先停止数据库写入。
|
||||
|
||||
:::note
|
||||
其它参数在 3.0.0.0 中暂不支持修改
|
||||
|
||||
|
@ -209,7 +205,7 @@ REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3
|
|||
BALANCE VGROUP LEADER
|
||||
```
|
||||
|
||||
触发集群所有 vgroup 中的 leader 重新选主,对集群各节点进行负载再均衡操作。
|
||||
触发集群所有 vgroup 中的 leader 重新选主,对集群各节点进行负载再均衡操作。(企业版功能)
|
||||
|
||||
## 查看数据库工作状态
|
||||
|
||||
|
|
|
@ -2099,7 +2099,7 @@ ignore_negative: {
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE(col1, 1s, 1) from tb1。
|
||||
|
||||
### DIFF
|
||||
|
||||
|
|
|
@ -33,6 +33,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致
|
|||
|
||||
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
|
||||
| ------------------| ---------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| 3.5.2 | 解决了 WebSocket 查询结果集释放 bug | - |
|
||||
| 3.5.1 | 解决了数据订阅获取时间戳对象类型问题 | - |
|
||||
| 3.5.0 | 1. 优化了 WebSocket 连接参数绑定性能,支持参数绑定查询使用二进制数据 <br/> 2. 优化了 WebSocket 连接在小查询上的性能 <br/> 3. WebSocket 连接上支持设置时区和应用信息 | 3.3.5.0 及更高版本 |
|
||||
| 3.4.0 | 1. 使用 jackson 库替换 fastjson 库 <br/> 2. WebSocket 采用独立协议标识 <br/> 3. 优化后台拉取线程使用,避免用户误用导致超时 | - |
|
||||
|
|
|
@ -24,6 +24,10 @@ TDengine 3.x 各版本安装包下载链接如下:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.3.5.2
|
||||
|
||||
<Release type="tdengine" version="3.3.5.2" />
|
||||
|
||||
## 3.3.5.0
|
||||
|
||||
<Release type="tdengine" version="3.3.5.0" />
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
---
|
||||
title: 3.3.5.2 版本说明
|
||||
sidebar_label: 3.3.5.2
|
||||
description: 3.3.5.2 版本说明
|
||||
---
|
||||
|
||||
## 特性
|
||||
1. 特性:taosX MQTT 数据源支持根据模板创建多个超级表
|
||||
|
||||
## 优化
|
||||
1. 优化:改进 taosX 数据库不可用时的错误信息
|
||||
2. 优化:使用 Poetry 标准管理依赖项并减少 Python 连接器安装依赖项 [#251](https://github.com/taosdata/taos-connector-python/issues/251)
|
||||
3. 优化:taosX 增量备份和恢复优化
|
||||
4. 优化:在多级存储数据迁移过程中,如果迁移时间过长,可能会导致 Vnode 切主
|
||||
5. 优化:调整 systemctl 守护 taosd 进程的策略,如果 60 秒内连续三次重启失败,下次重启将推迟至 900 秒后
|
||||
|
||||
## 修复
|
||||
1. 修复:maxRetryWaitTime 参数用于控制当集群无法提供服务时客户端的最大重连超时时间,但在遇到 Sync timeout 错误时,该参数不生效
|
||||
2. 修复:支持在修改子表的 tag 值后,即时订阅到更新后的 tag 值
|
||||
3. 修复:数据订阅的 tmq_consumer_poll 函数调用失败时没有返回错误码
|
||||
4. 修复:当创建超过 100 个视图并执行 show views 命令时,taosd 可能会发生崩溃
|
||||
5. 修复:当使用 stmt2 写入数据时,如果未绑定所有的数据列,写入操作将会失败
|
||||
6. 修复:当使用 stmt2 写入数据时,如果数据库名或表名使用了反引号,写入操作将会失败
|
||||
7. 修复:关闭 vnode 时如果有正在进行的文件合并任务,taosd 可能会崩溃
|
||||
8. 修复:频繁执行 drop table with `tb_uid` 语句可能导致 taosd 死锁
|
||||
9. 修复:日志文件切换过程中可能出现的死锁问题
|
||||
10. 修复:禁止创建与系统库(information_schema, performance_schema)同名的数据库
|
||||
11. 修复:当嵌套查询的内层查询来源于超级表时,排序信息无法被上推
|
||||
12. 修复:通过 STMT 接口尝试写入不符合拓扑规范的 Geometry 数据类型时误报错误
|
||||
13. 修复:在查询语句中使用 percentile 函数和会话窗口时,如果出现错误,taosd 可能会崩溃
|
||||
14. 修复:无法动态修改系统参数的问题
|
||||
15. 修复:订阅同步偶发 Translict transaction 错误
|
||||
16. 修复:同一消费者在执行取消订阅操作后,立即尝试订阅其他不同的主题时,会返回错误
|
||||
17. 修复:Go 连接器安全修复 CVE-2022-28948
|
||||
18. 修复:当视图中的子查询包含带别名的 ORDER BY 子句,并且查询函数自身也带有别名时,查询该视图会引发错误
|
||||
19. 修复:在将数据库从单副本修改为多副本时,如果存在一些由较早版本生成且在新版本中已不再使用的元数据,会导致修改操作失败
|
||||
20. 修复:在使用 SELECT * FROM 子查询时,列名未能正确复制到外层查询
|
||||
21. 修复:对字符串类型数据执行 max/min 函数时,结果不准确且 taosd 可能会崩溃
|
||||
22. 修复:流式计算不支持使用 HAVING 语句,但在创建时未报告错误
|
||||
23. 修复:taos shell 显示的服务端版本信息不准确,例如无法正确区分社区版和企业版
|
||||
24. 修复:在某些特定的查询场景下,当 JOIN 和 CAST 联合使用时,taosd 可能会崩溃
|
||||
|
|
@ -4,6 +4,7 @@ sidebar_label: 版本说明
|
|||
description: 各版本版本说明
|
||||
---
|
||||
|
||||
[3.3.5.2](./3.3.5.2)
|
||||
[3.3.5.0](./3.3.5.0)
|
||||
[3.3.4.8](./3.3.4.8)
|
||||
[3.3.4.3](./3.3.4.3)
|
||||
|
|
|
@ -160,6 +160,7 @@ typedef enum EStreamType {
|
|||
STREAM_PARTITION_DELETE_DATA,
|
||||
STREAM_GET_RESULT,
|
||||
STREAM_DROP_CHILD_TABLE,
|
||||
STREAM_NOTIFY_EVENT,
|
||||
} EStreamType;
|
||||
|
||||
#pragma pack(push, 1)
|
||||
|
@ -408,6 +409,9 @@ typedef struct STUidTagInfo {
|
|||
#define UD_GROUPID_COLUMN_INDEX 1
|
||||
#define UD_TAG_COLUMN_INDEX 2
|
||||
|
||||
// stream notify event block column
|
||||
#define NOTIFY_EVENT_STR_COLUMN_INDEX 0
|
||||
|
||||
int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime);
|
||||
int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol);
|
||||
|
||||
|
|
|
@ -285,6 +285,8 @@ bool isAutoTableName(char* ctbName);
|
|||
int32_t buildCtbNameAddGroupId(const char* stbName, char* ctbName, uint64_t groupId, size_t cap);
|
||||
int32_t buildCtbNameByGroupId(const char* stbName, uint64_t groupId, char** pName);
|
||||
int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
|
||||
int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule,
|
||||
char** dstTableName);
|
||||
|
||||
int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList);
|
||||
|
||||
|
|
|
@ -34,6 +34,9 @@ extern "C" {
|
|||
#define GLOBAL_CONFIG_FILE_VERSION 1
|
||||
#define LOCAL_CONFIG_FILE_VERSION 1
|
||||
|
||||
#define RPC_MEMORY_USAGE_RATIO 0.1
|
||||
#define QUEUE_MEMORY_USAGE_RATIO 0.6
|
||||
|
||||
typedef enum {
|
||||
DND_CA_SM4 = 1,
|
||||
} EEncryptAlgor;
|
||||
|
@ -110,6 +113,7 @@ extern int32_t tsNumOfQnodeFetchThreads;
|
|||
extern int32_t tsNumOfSnodeStreamThreads;
|
||||
extern int32_t tsNumOfSnodeWriteThreads;
|
||||
extern int64_t tsQueueMemoryAllowed;
|
||||
extern int64_t tsApplyMemoryAllowed;
|
||||
extern int32_t tsRetentionSpeedLimitMB;
|
||||
|
||||
extern int32_t tsNumOfCompactThreads;
|
||||
|
|
|
@ -269,6 +269,7 @@ typedef enum ENodeType {
|
|||
QUERY_NODE_TSMA_OPTIONS,
|
||||
QUERY_NODE_ANOMALY_WINDOW,
|
||||
QUERY_NODE_RANGE_AROUND,
|
||||
QUERY_NODE_STREAM_NOTIFY_OPTIONS,
|
||||
|
||||
// Statement nodes are used in parser and planner module.
|
||||
QUERY_NODE_SET_OPERATOR = 100,
|
||||
|
@ -2956,6 +2957,11 @@ typedef struct {
|
|||
// 3.3.0.0
|
||||
SArray* pCols; // array of SField
|
||||
int64_t smaId;
|
||||
// 3.3.6.0
|
||||
SArray* pNotifyAddrUrls;
|
||||
int32_t notifyEventTypes;
|
||||
int32_t notifyErrorHandle;
|
||||
int8_t notifyHistory;
|
||||
} SCMCreateStreamReq;
|
||||
|
||||
typedef struct {
|
||||
|
|
|
@ -98,6 +98,9 @@ int32_t qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
|
|||
|
||||
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
|
||||
|
||||
int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper,
|
||||
const char* stbFullName, bool newSubTableRule);
|
||||
|
||||
/**
|
||||
* Set multiple input data blocks for the stream scan.
|
||||
* @param tinfo
|
||||
|
|
|
@ -566,19 +566,44 @@ typedef struct SStreamOptions {
|
|||
int64_t setFlag;
|
||||
} SStreamOptions;
|
||||
|
||||
typedef enum EStreamNotifyOptionSetFlag {
|
||||
SNOTIFY_OPT_ERROR_HANDLE_SET = BIT_FLAG_MASK(0),
|
||||
SNOTIFY_OPT_NOTIFY_HISTORY_SET = BIT_FLAG_MASK(1),
|
||||
} EStreamNotifyOptionSetFlag;
|
||||
|
||||
typedef enum EStreamNotifyEventType {
|
||||
SNOTIFY_EVENT_WINDOW_OPEN = BIT_FLAG_MASK(0),
|
||||
SNOTIFY_EVENT_WINDOW_CLOSE = BIT_FLAG_MASK(1),
|
||||
} EStreamNotifyEventType;
|
||||
|
||||
typedef enum EStreamNotifyErrorHandleType {
|
||||
SNOTIFY_ERROR_HANDLE_PAUSE,
|
||||
SNOTIFY_ERROR_HANDLE_DROP,
|
||||
} EStreamNotifyErrorHandleType;
|
||||
|
||||
typedef struct SStreamNotifyOptions {
|
||||
ENodeType type;
|
||||
SNodeList* pAddrUrls;
|
||||
EStreamNotifyEventType eventTypes;
|
||||
EStreamNotifyErrorHandleType errorHandle;
|
||||
bool notifyHistory;
|
||||
EStreamNotifyOptionSetFlag setFlag;
|
||||
} SStreamNotifyOptions;
|
||||
|
||||
typedef struct SCreateStreamStmt {
|
||||
ENodeType type;
|
||||
char streamName[TSDB_TABLE_NAME_LEN];
|
||||
char targetDbName[TSDB_DB_NAME_LEN];
|
||||
char targetTabName[TSDB_TABLE_NAME_LEN];
|
||||
bool ignoreExists;
|
||||
SStreamOptions* pOptions;
|
||||
SNode* pQuery;
|
||||
SNode* pPrevQuery;
|
||||
SNodeList* pTags;
|
||||
SNode* pSubtable;
|
||||
SNodeList* pCols;
|
||||
SCMCreateStreamReq* pReq;
|
||||
ENodeType type;
|
||||
char streamName[TSDB_TABLE_NAME_LEN];
|
||||
char targetDbName[TSDB_DB_NAME_LEN];
|
||||
char targetTabName[TSDB_TABLE_NAME_LEN];
|
||||
bool ignoreExists;
|
||||
SStreamOptions* pOptions;
|
||||
SNode* pQuery;
|
||||
SNode* pPrevQuery;
|
||||
SNodeList* pTags;
|
||||
SNode* pSubtable;
|
||||
SNodeList* pCols;
|
||||
SStreamNotifyOptions* pNotifyOptions;
|
||||
SCMCreateStreamReq* pReq;
|
||||
} SCreateStreamStmt;
|
||||
|
||||
typedef struct SDropStreamStmt {
|
||||
|
|
|
@ -65,10 +65,14 @@ typedef struct SStreamTaskSM SStreamTaskSM;
|
|||
typedef struct SStreamQueueItem SStreamQueueItem;
|
||||
typedef struct SActiveCheckpointInfo SActiveCheckpointInfo;
|
||||
|
||||
#define SSTREAM_TASK_VER 4
|
||||
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
|
||||
#define SSTREAM_TASK_NEED_CONVERT_VER 2
|
||||
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3
|
||||
#define SSTREAM_TASK_VER 5
|
||||
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
|
||||
#define SSTREAM_TASK_NEED_CONVERT_VER 2
|
||||
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 // Append subtable name with groupId
|
||||
#define SSTREAM_TASK_APPEND_STABLE_NAME_VER 4 // Append subtable name with stableName and groupId
|
||||
#define SSTREAM_TASK_ADD_NOTIFY_VER 5 // Support event notification at window open/close
|
||||
|
||||
#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1))
|
||||
|
||||
extern int32_t streamMetaRefPool;
|
||||
extern int32_t streamTaskRefPool;
|
||||
|
@ -427,6 +431,15 @@ typedef struct STaskCheckInfo {
|
|||
TdThreadMutex checkInfoLock;
|
||||
} STaskCheckInfo;
|
||||
|
||||
typedef struct SNotifyInfo {
|
||||
SArray* pNotifyAddrUrls;
|
||||
int32_t notifyEventTypes;
|
||||
int32_t notifyErrorHandle;
|
||||
char* streamName;
|
||||
char* stbFullName;
|
||||
SSchemaWrapper* pSchemaWrapper;
|
||||
} SNotifyInfo;
|
||||
|
||||
struct SStreamTask {
|
||||
int64_t ver;
|
||||
SStreamTaskId id;
|
||||
|
@ -449,6 +462,7 @@ struct SStreamTask {
|
|||
SStreamState* pState; // state backend
|
||||
SUpstreamInfo upstreamInfo;
|
||||
STaskCheckInfo taskCheckInfo;
|
||||
SNotifyInfo notifyInfo;
|
||||
|
||||
// the followings attributes don't be serialized
|
||||
SScanhistorySchedInfo schedHistoryInfo;
|
||||
|
|
|
@ -245,6 +245,7 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_OFFSET_LEN 64 // it is a null-terminated string
|
||||
#define TSDB_USER_CGROUP_LEN (TSDB_USER_LEN + TSDB_CGROUP_LEN) // it is a null-terminated string
|
||||
#define TSDB_STREAM_NAME_LEN 193 // it is a null-terminated string
|
||||
#define TSDB_STREAM_NOTIFY_URL_LEN 128 // it includes the terminating '\0'
|
||||
#define TSDB_DB_NAME_LEN 65
|
||||
#define TSDB_DB_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_NAME_DELIMITER_LEN)
|
||||
#define TSDB_PRIVILEDGE_CONDITION_LEN 48 * 1024
|
||||
|
@ -460,13 +461,13 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_DB_SCHEMALESS_OFF 0
|
||||
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
|
||||
#define TSDB_MIN_STT_TRIGGER 1
|
||||
#ifdef TD_ENTERPRISE
|
||||
// #ifdef TD_ENTERPRISE
|
||||
#define TSDB_MAX_STT_TRIGGER 16
|
||||
#define TSDB_DEFAULT_SST_TRIGGER 2
|
||||
#else
|
||||
#define TSDB_MAX_STT_TRIGGER 1
|
||||
#define TSDB_DEFAULT_SST_TRIGGER 1
|
||||
#endif
|
||||
// #else
|
||||
// #define TSDB_MAX_STT_TRIGGER 1
|
||||
// #define TSDB_DEFAULT_SST_TRIGGER 1
|
||||
// #endif
|
||||
#define TSDB_STT_TRIGGER_ARRAY_SIZE 16 // maximum of TSDB_MAX_STT_TRIGGER of TD_ENTERPRISE and TD_COMMUNITY
|
||||
#define TSDB_MIN_HASH_PREFIX (2 - TSDB_TABLE_NAME_LEN)
|
||||
#define TSDB_MAX_HASH_PREFIX (TSDB_TABLE_NAME_LEN - 2)
|
||||
|
|
|
@ -79,6 +79,9 @@ void taosResetLog();
|
|||
void taosDumpData(uint8_t *msg, int32_t len);
|
||||
void taosSetNoNewFile();
|
||||
|
||||
// Fast uint64_t to string conversion, equivalent to sprintf(buf, "%lu", val) but with 10x better performance.
|
||||
char *u64toaFastLut(uint64_t val, char *buf);
|
||||
|
||||
void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...)
|
||||
#ifdef __GNUC__
|
||||
__attribute__((format(printf, 4, 5)))
|
||||
|
|
|
@ -55,6 +55,7 @@ typedef struct {
|
|||
typedef enum {
|
||||
DEF_QITEM = 0,
|
||||
RPC_QITEM = 1,
|
||||
APPLY_QITEM = 2,
|
||||
} EQItype;
|
||||
|
||||
typedef void (*FItem)(SQueueInfo *pInfo, void *pItem);
|
||||
|
|
|
@ -174,6 +174,7 @@ help() {
|
|||
echo " config_qemu_guest_agent - Configure QEMU guest agent"
|
||||
echo " deploy_docker - Deploy Docker"
|
||||
echo " deploy_docker_compose - Deploy Docker Compose"
|
||||
echo " install_trivy - Install Trivy"
|
||||
echo " clone_enterprise - Clone the enterprise repository"
|
||||
echo " clone_community - Clone the community repository"
|
||||
echo " clone_taosx - Clone TaosX repository"
|
||||
|
@ -316,6 +317,17 @@ add_config_if_not_exist() {
|
|||
grep -qF -- "$config" "$file" || echo "$config" >> "$file"
|
||||
}
|
||||
|
||||
# Function to check if a tool is installed
|
||||
check_installed() {
|
||||
local command_name="$1"
|
||||
if command -v "$command_name" >/dev/null 2>&1; then
|
||||
echo "$command_name is already installed. Skipping installation."
|
||||
return 0
|
||||
else
|
||||
echo "$command_name is not installed."
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
# General error handling function
|
||||
check_status() {
|
||||
local message_on_failure="$1"
|
||||
|
@ -584,9 +596,12 @@ centos_skip_check() {
|
|||
# Deploy cmake
|
||||
deploy_cmake() {
|
||||
# Check if cmake is installed
|
||||
if command -v cmake >/dev/null 2>&1; then
|
||||
echo "Cmake is already installed. Skipping installation."
|
||||
cmake --version
|
||||
# if command -v cmake >/dev/null 2>&1; then
|
||||
# echo "Cmake is already installed. Skipping installation."
|
||||
# cmake --version
|
||||
# return
|
||||
# fi
|
||||
if check_installed "cmake"; then
|
||||
return
|
||||
fi
|
||||
install_package "cmake3"
|
||||
|
@ -1058,11 +1073,13 @@ deploy_go() {
|
|||
GOPATH_DIR="/root/go"
|
||||
|
||||
# Check if Go is installed
|
||||
if command -v go >/dev/null 2>&1; then
|
||||
echo "Go is already installed. Skipping installation."
|
||||
# if command -v go >/dev/null 2>&1; then
|
||||
# echo "Go is already installed. Skipping installation."
|
||||
# return
|
||||
# fi
|
||||
if check_installed "go"; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Fetch the latest version number of Go
|
||||
GO_LATEST_DATA=$(curl --retry 10 --retry-delay 5 --retry-max-time 120 -s https://golang.google.cn/VERSION?m=text)
|
||||
GO_LATEST_VERSION=$(echo "$GO_LATEST_DATA" | grep -oP 'go[0-9]+\.[0-9]+\.[0-9]+')
|
||||
|
@ -1731,6 +1748,42 @@ deploy_docker_compose() {
|
|||
fi
|
||||
}
|
||||
|
||||
# Install trivy
|
||||
install_trivy() {
|
||||
echo -e "${YELLOW}Installing Trivy...${NO_COLOR}"
|
||||
# Check if Trivy is already installed
|
||||
# if command -v trivy >/dev/null 2>&1; then
|
||||
# echo "Trivy is already installed. Skipping installation."
|
||||
# trivy --version
|
||||
# return
|
||||
# fi
|
||||
if check_installed "trivy"; then
|
||||
return
|
||||
fi
|
||||
# Install jq
|
||||
install_package jq
|
||||
# Get latest version
|
||||
LATEST_VERSION=$(curl -s https://api.github.com/repos/aquasecurity/trivy/releases/latest | jq -r .tag_name)
|
||||
# Download
|
||||
if [ -f /etc/debian_version ]; then
|
||||
wget https://github.com/aquasecurity/trivy/releases/download/"${LATEST_VERSION}"/trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb
|
||||
# Install
|
||||
dpkg -i trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb
|
||||
|
||||
elif [ -f /etc/redhat-release ]; then
|
||||
wget https://github.com/aquasecurity/trivy/releases/download/"${LATEST_VERSION}"/trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm
|
||||
# Install
|
||||
rpm -ivh trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm
|
||||
else
|
||||
echo "Unsupported Linux distribution."
|
||||
exit 1
|
||||
fi
|
||||
# Check
|
||||
trivy --version
|
||||
check_status "Failed to install Trivy" "Trivy installed successfully." $?
|
||||
rm -rf trivy_"${LATEST_VERSION#v}"_Linux-64bit.deb trivy_"${LATEST_VERSION#v}"_Linux-64bit.rpm
|
||||
}
|
||||
|
||||
# Reconfigure cloud-init
|
||||
reconfig_cloud_init() {
|
||||
echo "Reconfiguring cloud-init..."
|
||||
|
@ -2004,6 +2057,7 @@ deploy_dev() {
|
|||
install_nginx
|
||||
deploy_docker
|
||||
deploy_docker_compose
|
||||
install_trivy
|
||||
check_status "Failed to deploy some tools" "Deploy all tools successfully" $?
|
||||
}
|
||||
|
||||
|
@ -2159,6 +2213,9 @@ main() {
|
|||
deploy_docker_compose)
|
||||
deploy_docker_compose
|
||||
;;
|
||||
install_trivy)
|
||||
install_trivy
|
||||
;;
|
||||
clone_enterprise)
|
||||
clone_enterprise
|
||||
;;
|
||||
|
|
|
@ -6,12 +6,6 @@ SUCCESS_FILE="success.txt"
|
|||
FAILED_FILE="failed.txt"
|
||||
REPORT_FILE="report.txt"
|
||||
|
||||
# Initialize/clear result files
|
||||
> "$SUCCESS_FILE"
|
||||
> "$FAILED_FILE"
|
||||
> "$LOG_FILE"
|
||||
> "$REPORT_FILE"
|
||||
|
||||
# Switch to the target directory
|
||||
TARGET_DIR="../../tests/system-test/"
|
||||
|
||||
|
@ -24,6 +18,12 @@ else
|
|||
exit 1
|
||||
fi
|
||||
|
||||
# Initialize/clear result files
|
||||
> "$SUCCESS_FILE"
|
||||
> "$FAILED_FILE"
|
||||
> "$LOG_FILE"
|
||||
> "$REPORT_FILE"
|
||||
|
||||
# Define the Python commands to execute
|
||||
commands=(
|
||||
"python3 ./test.py -f 2-query/join.py"
|
||||
|
@ -102,4 +102,4 @@ fi
|
|||
echo "Detailed logs can be found in: $(realpath "$LOG_FILE")"
|
||||
echo "Successful commands can be found in: $(realpath "$SUCCESS_FILE")"
|
||||
echo "Failed commands can be found in: $(realpath "$FAILED_FILE")"
|
||||
echo "Test report can be found in: $(realpath "$REPORT_FILE")"
|
||||
echo "Test report can be found in: $(realpath "$REPORT_FILE")"
|
||||
|
|
|
@ -90,7 +90,7 @@ fi
|
|||
|
||||
kill_service_of() {
|
||||
_service=$1
|
||||
pid=$(ps -C $_service | grep -v $uninstallScript | awk '{print $2}')
|
||||
pid=$(ps -C $_service | grep -w $_service | grep -v $uninstallScript | awk '{print $1}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
|
@ -140,9 +140,8 @@ clean_service_of() {
|
|||
clean_service_on_systemd_of $_service
|
||||
elif ((${service_mod} == 1)); then
|
||||
clean_service_on_sysvinit_of $_service
|
||||
else
|
||||
kill_service_of $_service
|
||||
fi
|
||||
kill_service_of $_service
|
||||
}
|
||||
|
||||
remove_service_of() {
|
||||
|
|
|
@ -40,7 +40,7 @@ if command -v sudo > /dev/null; then
|
|||
fi
|
||||
|
||||
function kill_client() {
|
||||
pid=$(ps -C ${clientName2} | grep -v $uninstallScript2 | awk '{print $2}')
|
||||
pid=$(ps -C ${clientName2} | grep -w ${clientName2} | grep -v $uninstallScript2 | awk '{print $1}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
|
|
|
@ -131,6 +131,8 @@ typedef struct SStmtQueue {
|
|||
SStmtQNode* head;
|
||||
SStmtQNode* tail;
|
||||
uint64_t qRemainNum;
|
||||
TdThreadMutex mutex;
|
||||
TdThreadCond waitCond;
|
||||
} SStmtQueue;
|
||||
|
||||
typedef struct STscStmt {
|
||||
|
|
|
@ -253,7 +253,7 @@ void taos_cleanup(void) {
|
|||
taosCloseRef(id);
|
||||
|
||||
nodesDestroyAllocatorSet();
|
||||
// cleanupAppInfo();
|
||||
cleanupAppInfo();
|
||||
rpcCleanup();
|
||||
tscDebug("rpc cleanup");
|
||||
|
||||
|
|
|
@ -39,31 +39,39 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
|
|||
}
|
||||
|
||||
bool stmtDequeue(STscStmt* pStmt, SStmtQNode** param) {
|
||||
while (0 == atomic_load_64(&pStmt->queue.qRemainNum)) {
|
||||
taosUsleep(1);
|
||||
return false;
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
|
||||
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
|
||||
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
SStmtQNode* orig = pStmt->queue.head;
|
||||
|
||||
SStmtQNode* node = pStmt->queue.head->next;
|
||||
pStmt->queue.head = pStmt->queue.head->next;
|
||||
|
||||
// taosMemoryFreeClear(orig);
|
||||
|
||||
*param = node;
|
||||
|
||||
(void)atomic_sub_fetch_64(&pStmt->queue.qRemainNum, 1);
|
||||
(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
|
||||
|
||||
*param = node;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void stmtEnqueue(STscStmt* pStmt, SStmtQNode* param) {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
|
||||
pStmt->queue.tail->next = param;
|
||||
pStmt->queue.tail = param;
|
||||
|
||||
pStmt->stat.bindDataNum++;
|
||||
(void)atomic_add_fetch_64(&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
}
|
||||
|
||||
static int32_t stmtCreateRequest(STscStmt* pStmt) {
|
||||
|
@ -415,9 +423,11 @@ void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
|
|||
pTblBuf->buffIdx = 1;
|
||||
pTblBuf->buffOffset = sizeof(*pQueue->head);
|
||||
|
||||
(void)taosThreadMutexLock(&pQueue->mutex);
|
||||
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
|
||||
pQueue->qRemainNum = 0;
|
||||
pQueue->head->next = NULL;
|
||||
(void)taosThreadMutexUnlock(&pQueue->mutex);
|
||||
}
|
||||
|
||||
int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
|
||||
|
@ -809,6 +819,8 @@ int32_t stmtStartBindThread(STscStmt* pStmt) {
|
|||
}
|
||||
|
||||
int32_t stmtInitQueue(STscStmt* pStmt) {
|
||||
(void)taosThreadCondInit(&pStmt->queue.waitCond, NULL);
|
||||
(void)taosThreadMutexInit(&pStmt->queue.mutex, NULL);
|
||||
STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head));
|
||||
pStmt->queue.tail = pStmt->queue.head;
|
||||
|
||||
|
@ -1619,11 +1631,18 @@ int stmtClose(TAOS_STMT* stmt) {
|
|||
|
||||
pStmt->queue.stopQueue = true;
|
||||
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
|
||||
if (pStmt->bindThreadInUse) {
|
||||
(void)taosThreadJoin(pStmt->bindThread, NULL);
|
||||
pStmt->bindThreadInUse = false;
|
||||
}
|
||||
|
||||
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
|
||||
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
|
||||
|
||||
STMT_DLOG("stmt %p closed, stbInterlaceMode: %d, statInfo: ctgGetTbMetaNum=>%" PRId64 ", getCacheTbInfo=>%" PRId64
|
||||
", parseSqlNum=>%" PRId64 ", pStmt->stat.bindDataNum=>%" PRId64
|
||||
", settbnameAPI:%u, bindAPI:%u, addbatchAPI:%u, execAPI:%u"
|
||||
|
@ -1757,7 +1776,9 @@ _return:
|
|||
}
|
||||
|
||||
int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
|
||||
int code = 0;
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
int32_t preCode = pStmt->errCode;
|
||||
|
||||
STMT_DLOG_E("start to get param num");
|
||||
|
||||
|
@ -1765,7 +1786,7 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
|
|||
return pStmt->errCode;
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
|
||||
STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
|
||||
|
||||
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
|
||||
STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
|
||||
|
@ -1777,23 +1798,29 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
|
|||
pStmt->exec.pRequest = NULL;
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtCreateRequest(pStmt));
|
||||
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
|
||||
|
||||
if (pStmt->bInfo.needParse) {
|
||||
STMT_ERR_RET(stmtParseSql(pStmt));
|
||||
STMT_ERRI_JRET(stmtParseSql(pStmt));
|
||||
}
|
||||
|
||||
if (STMT_TYPE_QUERY == pStmt->sql.type) {
|
||||
*nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues);
|
||||
} else {
|
||||
STMT_ERR_RET(stmtFetchColFields(stmt, nums, NULL));
|
||||
STMT_ERRI_JRET(stmtFetchColFields(stmt, nums, NULL));
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_return:
|
||||
|
||||
pStmt->errCode = preCode;
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
|
||||
int code = 0;
|
||||
STscStmt* pStmt = (STscStmt*)stmt;
|
||||
int32_t preCode = pStmt->errCode;
|
||||
|
||||
STMT_DLOG_E("start to get param");
|
||||
|
||||
|
@ -1802,10 +1829,10 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
|
|||
}
|
||||
|
||||
if (STMT_TYPE_QUERY == pStmt->sql.type) {
|
||||
STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
|
||||
STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR);
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
|
||||
STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
|
||||
|
||||
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
|
||||
STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
|
||||
|
@ -1817,27 +1844,29 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
|
|||
pStmt->exec.pRequest = NULL;
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtCreateRequest(pStmt));
|
||||
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
|
||||
|
||||
if (pStmt->bInfo.needParse) {
|
||||
STMT_ERR_RET(stmtParseSql(pStmt));
|
||||
STMT_ERRI_JRET(stmtParseSql(pStmt));
|
||||
}
|
||||
|
||||
int32_t nums = 0;
|
||||
TAOS_FIELD_E* pField = NULL;
|
||||
STMT_ERR_RET(stmtFetchColFields(stmt, &nums, &pField));
|
||||
STMT_ERRI_JRET(stmtFetchColFields(stmt, &nums, &pField));
|
||||
if (idx >= nums) {
|
||||
tscError("idx %d is too big", idx);
|
||||
taosMemoryFree(pField);
|
||||
STMT_ERR_RET(TSDB_CODE_INVALID_PARA);
|
||||
STMT_ERRI_JRET(TSDB_CODE_INVALID_PARA);
|
||||
}
|
||||
|
||||
*type = pField[idx].type;
|
||||
*bytes = pField[idx].bytes;
|
||||
|
||||
taosMemoryFree(pField);
|
||||
_return:
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
taosMemoryFree(pField);
|
||||
pStmt->errCode = preCode;
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
TAOS_RES* stmtUseResult(TAOS_STMT* stmt) {
|
||||
|
|
|
@ -39,31 +39,35 @@ static FORCE_INLINE int32_t stmtAllocQNodeFromBuf(STableBufInfo* pTblBuf, void**
|
|||
}
|
||||
|
||||
static bool stmtDequeue(STscStmt2* pStmt, SStmtQNode** param) {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
while (0 == atomic_load_64((int64_t*)&pStmt->queue.qRemainNum)) {
|
||||
taosUsleep(1);
|
||||
return false;
|
||||
(void)taosThreadCondWait(&pStmt->queue.waitCond, &pStmt->queue.mutex);
|
||||
if (atomic_load_8((int8_t*)&pStmt->queue.stopQueue)) {
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
SStmtQNode* orig = pStmt->queue.head;
|
||||
|
||||
SStmtQNode* node = pStmt->queue.head->next;
|
||||
pStmt->queue.head = pStmt->queue.head->next;
|
||||
|
||||
// taosMemoryFreeClear(orig);
|
||||
|
||||
*param = node;
|
||||
|
||||
(void)atomic_sub_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void stmtEnqueue(STscStmt2* pStmt, SStmtQNode* param) {
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
|
||||
pStmt->queue.tail->next = param;
|
||||
pStmt->queue.tail = param;
|
||||
|
||||
pStmt->stat.bindDataNum++;
|
||||
(void)atomic_add_fetch_64((int64_t*)&pStmt->queue.qRemainNum, 1);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
}
|
||||
|
||||
static int32_t stmtCreateRequest(STscStmt2* pStmt) {
|
||||
|
@ -339,9 +343,11 @@ static void stmtResetQueueTableBuf(STableBufInfo* pTblBuf, SStmtQueue* pQueue) {
|
|||
pTblBuf->buffIdx = 1;
|
||||
pTblBuf->buffOffset = sizeof(*pQueue->head);
|
||||
|
||||
(void)taosThreadMutexLock(&pQueue->mutex);
|
||||
pQueue->head = pQueue->tail = pTblBuf->pCurBuff;
|
||||
pQueue->qRemainNum = 0;
|
||||
pQueue->head->next = NULL;
|
||||
(void)taosThreadMutexUnlock(&pQueue->mutex);
|
||||
}
|
||||
|
||||
static int32_t stmtCleanExecInfo(STscStmt2* pStmt, bool keepTable, bool deepClean) {
|
||||
|
@ -735,6 +741,8 @@ static int32_t stmtStartBindThread(STscStmt2* pStmt) {
|
|||
}
|
||||
|
||||
static int32_t stmtInitQueue(STscStmt2* pStmt) {
|
||||
(void)taosThreadCondInit(&pStmt->queue.waitCond, NULL);
|
||||
(void)taosThreadMutexInit(&pStmt->queue.mutex, NULL);
|
||||
STMT_ERR_RET(stmtAllocQNodeFromBuf(&pStmt->sql.siInfo.tbBuf, (void**)&pStmt->queue.head));
|
||||
pStmt->queue.tail = pStmt->queue.head;
|
||||
|
||||
|
@ -1066,13 +1074,16 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E
|
|||
}
|
||||
|
||||
static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_ALL** fields) {
|
||||
int32_t code = 0;
|
||||
int32_t preCode = pStmt->errCode;
|
||||
|
||||
if (pStmt->errCode != TSDB_CODE_SUCCESS) {
|
||||
return pStmt->errCode;
|
||||
}
|
||||
|
||||
if (STMT_TYPE_QUERY == pStmt->sql.type) {
|
||||
tscError("invalid operation to get query column fileds");
|
||||
STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR);
|
||||
STMT_ERRI_JRET(TSDB_CODE_TSC_STMT_API_ERROR);
|
||||
}
|
||||
|
||||
STableDataCxt** pDataBlock = NULL;
|
||||
|
@ -1084,21 +1095,25 @@ static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIEL
|
|||
(STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName));
|
||||
if (NULL == pDataBlock) {
|
||||
tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName);
|
||||
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
|
||||
STMT_ERRI_JRET(TSDB_CODE_APP_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields));
|
||||
STMT_ERRI_JRET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields));
|
||||
if (pStmt->bInfo.tbType == TSDB_SUPER_TABLE) {
|
||||
pStmt->bInfo.needParse = true;
|
||||
qDestroyStmtDataBlock(*pDataBlock);
|
||||
if (taosHashRemove(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)) != 0) {
|
||||
tscError("get fileds %s remove exec blockHash fail", pStmt->bInfo.tbFName);
|
||||
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
|
||||
STMT_ERRI_JRET(TSDB_CODE_APP_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_return:
|
||||
|
||||
pStmt->errCode = preCode;
|
||||
|
||||
return code;
|
||||
}
|
||||
/*
|
||||
SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) {
|
||||
|
@ -1752,11 +1767,18 @@ int stmtClose2(TAOS_STMT2* stmt) {
|
|||
|
||||
pStmt->queue.stopQueue = true;
|
||||
|
||||
(void)taosThreadMutexLock(&pStmt->queue.mutex);
|
||||
(void)taosThreadCondSignal(&(pStmt->queue.waitCond));
|
||||
(void)taosThreadMutexUnlock(&pStmt->queue.mutex);
|
||||
|
||||
if (pStmt->bindThreadInUse) {
|
||||
(void)taosThreadJoin(pStmt->bindThread, NULL);
|
||||
pStmt->bindThreadInUse = false;
|
||||
}
|
||||
|
||||
(void)taosThreadCondDestroy(&pStmt->queue.waitCond);
|
||||
(void)taosThreadMutexDestroy(&pStmt->queue.mutex);
|
||||
|
||||
if (pStmt->options.asyncExecFn && !pStmt->semWaited) {
|
||||
if (tsem_wait(&pStmt->asyncQuerySem) != 0) {
|
||||
tscError("failed to wait asyncQuerySem");
|
||||
|
@ -1828,7 +1850,7 @@ int stmtParseColFields2(TAOS_STMT2* stmt) {
|
|||
if (pStmt->exec.pRequest && STMT_TYPE_QUERY == pStmt->sql.type && pStmt->sql.runTimes) {
|
||||
taos_free_result(pStmt->exec.pRequest);
|
||||
pStmt->exec.pRequest = NULL;
|
||||
STMT_ERR_RET(stmtCreateRequest(pStmt));
|
||||
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
|
||||
}
|
||||
|
||||
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
|
||||
|
@ -1854,7 +1876,9 @@ int stmtGetStbColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_ALL** fields) {
|
|||
}
|
||||
|
||||
int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) {
|
||||
int32_t code = 0;
|
||||
STscStmt2* pStmt = (STscStmt2*)stmt;
|
||||
int32_t preCode = pStmt->errCode;
|
||||
|
||||
STMT_DLOG_E("start to get param num");
|
||||
|
||||
|
@ -1862,7 +1886,7 @@ int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) {
|
|||
return pStmt->errCode;
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
|
||||
STMT_ERRI_JRET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
|
||||
|
||||
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
|
||||
STMT_TYPE_MULTI_INSERT != pStmt->sql.type) {
|
||||
|
@ -1874,19 +1898,23 @@ int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) {
|
|||
pStmt->exec.pRequest = NULL;
|
||||
}
|
||||
|
||||
STMT_ERR_RET(stmtCreateRequest(pStmt));
|
||||
STMT_ERRI_JRET(stmtCreateRequest(pStmt));
|
||||
|
||||
if (pStmt->bInfo.needParse) {
|
||||
STMT_ERR_RET(stmtParseSql(pStmt));
|
||||
STMT_ERRI_JRET(stmtParseSql(pStmt));
|
||||
}
|
||||
|
||||
if (STMT_TYPE_QUERY == pStmt->sql.type) {
|
||||
*nums = taosArrayGetSize(pStmt->sql.pQuery->pPlaceholderValues);
|
||||
} else {
|
||||
STMT_ERR_RET(stmtFetchColFields2(stmt, nums, NULL));
|
||||
STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, NULL));
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_return:
|
||||
|
||||
pStmt->errCode = preCode;
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
TAOS_RES* stmtUseResult2(TAOS_STMT2* stmt) {
|
||||
|
|
|
@ -74,8 +74,9 @@ enum {
|
|||
};
|
||||
|
||||
typedef struct {
|
||||
tmr_h timer;
|
||||
int32_t rsetId;
|
||||
tmr_h timer;
|
||||
int32_t rsetId;
|
||||
TdThreadMutex lock;
|
||||
} SMqMgmt;
|
||||
|
||||
struct tmq_list_t {
|
||||
|
@ -1603,13 +1604,21 @@ static void tmqMgmtInit(void) {
|
|||
tmqMgmt.timer = taosTmrInit(1000, 100, 360000, "TMQ");
|
||||
|
||||
if (tmqMgmt.timer == NULL) {
|
||||
tmqInitRes = terrno;
|
||||
goto END;
|
||||
}
|
||||
|
||||
tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl);
|
||||
if (tmqMgmt.rsetId < 0) {
|
||||
tmqInitRes = terrno;
|
||||
goto END;
|
||||
}
|
||||
|
||||
if (taosThreadMutexInit(&tmqMgmt.lock, NULL) != 0){
|
||||
goto END;
|
||||
}
|
||||
return;
|
||||
|
||||
END:
|
||||
tmqInitRes = terrno;
|
||||
}
|
||||
|
||||
void tmqMgmtClose(void) {
|
||||
|
@ -1618,10 +1627,28 @@ void tmqMgmtClose(void) {
|
|||
tmqMgmt.timer = NULL;
|
||||
}
|
||||
|
||||
(void) taosThreadMutexLock(&tmqMgmt.lock);
|
||||
if (tmqMgmt.rsetId >= 0) {
|
||||
tmq_t *tmq = taosIterateRef(tmqMgmt.rsetId, 0);
|
||||
int64_t refId = 0;
|
||||
|
||||
while (tmq) {
|
||||
refId = tmq->refId;
|
||||
if (refId == 0) {
|
||||
break;
|
||||
}
|
||||
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED);
|
||||
|
||||
if (taosRemoveRef(tmqMgmt.rsetId, tmq->refId) != 0) {
|
||||
qWarn("taosRemoveRef tmq refId:%" PRId64 " failed, error:%s", refId, tstrerror(terrno));
|
||||
}
|
||||
|
||||
tmq = taosIterateRef(tmqMgmt.rsetId, refId);
|
||||
}
|
||||
taosCloseRef(tmqMgmt.rsetId);
|
||||
tmqMgmt.rsetId = -1;
|
||||
}
|
||||
(void)taosThreadMutexUnlock(&tmqMgmt.lock);
|
||||
}
|
||||
|
||||
tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
|
||||
|
@ -2617,8 +2644,13 @@ int32_t tmq_unsubscribe(tmq_t* tmq) {
|
|||
|
||||
int32_t tmq_consumer_close(tmq_t* tmq) {
|
||||
if (tmq == NULL) return TSDB_CODE_INVALID_PARA;
|
||||
int32_t code = 0;
|
||||
(void) taosThreadMutexLock(&tmqMgmt.lock);
|
||||
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__CLOSED){
|
||||
goto end;
|
||||
}
|
||||
tqInfoC("consumer:0x%" PRIx64 " start to close consumer, status:%d", tmq->consumerId, tmq->status);
|
||||
int32_t code = tmq_unsubscribe(tmq);
|
||||
code = tmq_unsubscribe(tmq);
|
||||
if (code == 0) {
|
||||
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__CLOSED);
|
||||
code = taosRemoveRef(tmqMgmt.rsetId, tmq->refId);
|
||||
|
@ -2626,6 +2658,9 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
|
|||
tqErrorC("tmq close failed to remove ref:%" PRId64 ", code:%d", tmq->refId, code);
|
||||
}
|
||||
}
|
||||
|
||||
end:
|
||||
(void)taosThreadMutexUnlock(&tmqMgmt.lock);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -735,7 +735,7 @@ TEST(stmt2Case, insert_ntb_get_fields_Test) {
|
|||
{
|
||||
const char* sql = "insert into stmt2_testdb_4.? values(?,?)";
|
||||
printf("case 2 : %s\n", sql);
|
||||
getFieldsError(taos, sql, TSDB_CODE_PAR_TABLE_NOT_EXIST);
|
||||
getFieldsError(taos, sql, TSDB_CODE_TSC_STMT_TBNAME_ERROR);
|
||||
}
|
||||
|
||||
// case 3 : wrong para nums
|
||||
|
@ -1496,8 +1496,51 @@ TEST(stmt2Case, geometry) {
|
|||
checkError(stmt, code);
|
||||
ASSERT_EQ(affected_rows, 3);
|
||||
|
||||
// test wrong wkb input
|
||||
unsigned char wkb2[3][61] = {
|
||||
{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
|
||||
},
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f},
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40}};
|
||||
params[1].buffer = wkb2;
|
||||
code = taos_stmt2_bind_param(stmt, &bindv, -1);
|
||||
ASSERT_EQ(code, TSDB_CODE_FUNC_FUNTION_PARA_VALUE);
|
||||
|
||||
taos_stmt2_close(stmt);
|
||||
do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_13");
|
||||
taos_close(taos);
|
||||
}
|
||||
|
||||
// TD-33582
|
||||
TEST(stmt2Case, errcode) {
|
||||
TAOS* taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(taos, nullptr);
|
||||
do_query(taos, "DROP DATABASE IF EXISTS stmt2_testdb_14");
|
||||
do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt2_testdb_14");
|
||||
do_query(taos, "use stmt2_testdb_14");
|
||||
|
||||
TAOS_STMT2_OPTION option = {0};
|
||||
TAOS_STMT2* stmt = taos_stmt2_init(taos, &option);
|
||||
ASSERT_NE(stmt, nullptr);
|
||||
char* sql = "select * from t where ts > ? and name = ? foo = ?";
|
||||
int code = taos_stmt2_prepare(stmt, sql, 0);
|
||||
checkError(stmt, code);
|
||||
|
||||
int fieldNum = 0;
|
||||
TAOS_FIELD_ALL* pFields = NULL;
|
||||
code = taos_stmt2_get_fields(stmt, &fieldNum, &pFields);
|
||||
ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR);
|
||||
|
||||
// get fail dont influence the next stmt prepare
|
||||
sql = "nsert into ? (ts, name) values (?, ?)";
|
||||
code = taos_stmt_prepare(stmt, sql, 0);
|
||||
checkError(stmt, code);
|
||||
}
|
||||
#pragma GCC diagnostic pop
|
||||
|
|
|
@ -212,15 +212,6 @@ void insertData(TAOS *taos, TAOS_STMT_OPTIONS *option, const char *sql, int CTB_
|
|||
|
||||
void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_E *expectedTagFields,
|
||||
int expectedTagFieldNum, TAOS_FIELD_E *expectedColFields, int expectedColFieldNum) {
|
||||
// create database and table
|
||||
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3");
|
||||
do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3");
|
||||
do_query(taos, "USE stmt_testdb_3");
|
||||
do_query(
|
||||
taos,
|
||||
"CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
|
||||
"(groupId INT, location BINARY(24))");
|
||||
|
||||
TAOS_STMT *stmt = taos_stmt_init(taos);
|
||||
ASSERT_NE(stmt, nullptr);
|
||||
int code = taos_stmt_prepare(stmt, sql, 0);
|
||||
|
@ -267,6 +258,24 @@ void getFields(TAOS *taos, const char *sql, int expectedALLFieldNum, TAOS_FIELD_
|
|||
taos_stmt_close(stmt);
|
||||
}
|
||||
|
||||
void getFieldsError(TAOS *taos, const char *sql, int expectedErrocode) {
|
||||
TAOS_STMT *stmt = taos_stmt_init(taos);
|
||||
ASSERT_NE(stmt, nullptr);
|
||||
STscStmt *pStmt = (STscStmt *)stmt;
|
||||
|
||||
int code = taos_stmt_prepare(stmt, sql, 0);
|
||||
|
||||
int fieldNum = 0;
|
||||
TAOS_FIELD_E *pFields = NULL;
|
||||
code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields);
|
||||
ASSERT_EQ(code, expectedErrocode);
|
||||
ASSERT_EQ(pStmt->errCode, TSDB_CODE_SUCCESS);
|
||||
|
||||
taosMemoryFree(pFields);
|
||||
|
||||
taos_stmt_close(stmt);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
|
@ -298,6 +307,15 @@ TEST(stmtCase, get_fields) {
|
|||
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(taos, nullptr);
|
||||
|
||||
// create database and table
|
||||
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3");
|
||||
do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_3");
|
||||
do_query(taos, "USE stmt_testdb_3");
|
||||
do_query(
|
||||
taos,
|
||||
"CREATE STABLE IF NOT EXISTS stmt_testdb_3.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
|
||||
"(groupId INT, location BINARY(24))");
|
||||
// nomarl test
|
||||
{
|
||||
TAOS_FIELD_E tagFields[2] = {{"groupid", TSDB_DATA_TYPE_INT, 0, 0, sizeof(int)},
|
||||
{"location", TSDB_DATA_TYPE_BINARY, 0, 0, 24}};
|
||||
|
@ -307,6 +325,12 @@ TEST(stmtCase, get_fields) {
|
|||
{"phase", TSDB_DATA_TYPE_FLOAT, 0, 0, sizeof(float)}};
|
||||
getFields(taos, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", 7, &tagFields[0], 2, &colFields[0], 4);
|
||||
}
|
||||
// error case [TD-33570]
|
||||
{ getFieldsError(taos, "INSERT INTO ? VALUES (?,?,?,?)", TSDB_CODE_TSC_STMT_TBNAME_ERROR); }
|
||||
|
||||
{ getFieldsError(taos, "INSERT INTO ? USING meters TAGS(?,?) VALUES (?,?,?,?)", TSDB_CODE_TSC_STMT_TBNAME_ERROR); }
|
||||
|
||||
|
||||
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_3");
|
||||
taos_close(taos);
|
||||
}
|
||||
|
@ -520,9 +544,6 @@ TEST(stmtCase, geometry) {
|
|||
int code = taos_stmt_prepare(stmt, stmt_sql, 0);
|
||||
checkError(stmt, code);
|
||||
|
||||
// code = taos_stmt_set_tbname(stmt, "tb1");
|
||||
// checkError(stmt, code);
|
||||
|
||||
code = taos_stmt_bind_param_batch(stmt, params);
|
||||
checkError(stmt, code);
|
||||
|
||||
|
@ -532,11 +553,58 @@ TEST(stmtCase, geometry) {
|
|||
code = taos_stmt_execute(stmt);
|
||||
checkError(stmt, code);
|
||||
|
||||
//test wrong wkb input
|
||||
unsigned char wkb2[3][61] = {
|
||||
{
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
|
||||
},
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f},
|
||||
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40}};
|
||||
params[1].buffer = wkb2;
|
||||
code = taos_stmt_bind_param_batch(stmt, params);
|
||||
ASSERT_EQ(code, TSDB_CODE_FUNC_FUNTION_PARA_VALUE);
|
||||
|
||||
taosMemoryFree(t64_len);
|
||||
taosMemoryFree(wkb_len);
|
||||
taos_stmt_close(stmt);
|
||||
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_5");
|
||||
taos_close(taos);
|
||||
}
|
||||
//TD-33582
|
||||
TEST(stmtCase, errcode) {
|
||||
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
ASSERT_NE(taos, nullptr);
|
||||
|
||||
do_query(taos, "DROP DATABASE IF EXISTS stmt_testdb_4");
|
||||
do_query(taos, "CREATE DATABASE IF NOT EXISTS stmt_testdb_4");
|
||||
do_query(taos, "USE stmt_testdb_4");
|
||||
do_query(
|
||||
taos,
|
||||
"CREATE STABLE IF NOT EXISTS stmt_testdb_4.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS "
|
||||
"(groupId INT, location BINARY(24))");
|
||||
|
||||
TAOS_STMT *stmt = taos_stmt_init(taos);
|
||||
ASSERT_NE(stmt, nullptr);
|
||||
char *sql = "select * from t where ts > ? and name = ? foo = ?";
|
||||
int code = taos_stmt_prepare(stmt, sql, 0);
|
||||
checkError(stmt, code);
|
||||
|
||||
int fieldNum = 0;
|
||||
TAOS_FIELD_E *pFields = NULL;
|
||||
code = stmtGetParamNum(stmt, &fieldNum);
|
||||
ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR);
|
||||
|
||||
code = taos_stmt_get_tag_fields(stmt, &fieldNum, &pFields);
|
||||
ASSERT_EQ(code, TSDB_CODE_PAR_SYNTAX_ERROR);
|
||||
// get fail dont influence the next stmt prepare
|
||||
sql = "nsert into ? (ts, name) values (?, ?)";
|
||||
code = taos_stmt_prepare(stmt, sql, 0);
|
||||
checkError(stmt, code);
|
||||
}
|
||||
#pragma GCC diagnostic pop
|
|
@ -54,6 +54,23 @@ target_link_libraries(
|
|||
INTERFACE api
|
||||
)
|
||||
|
||||
if(NOT ${TD_WINDOWS})
|
||||
target_include_directories(
|
||||
common
|
||||
PUBLIC "$ENV{HOME}/.cos-local.2/include"
|
||||
)
|
||||
|
||||
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
target_link_libraries(
|
||||
common
|
||||
PUBLIC ${CURL_LIBRARY}
|
||||
PUBLIC ${SSL_LIBRARY}
|
||||
PUBLIC ${CRYPTO_LIBRARY}
|
||||
)
|
||||
endif()
|
||||
|
||||
if(${BUILD_S3})
|
||||
if(${BUILD_WITH_S3})
|
||||
target_include_directories(
|
||||
|
@ -65,10 +82,6 @@ if(${BUILD_S3})
|
|||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.2)
|
||||
find_library(S3_LIBRARY s3)
|
||||
find_library(CURL_LIBRARY curl $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(XML2_LIBRARY xml2)
|
||||
find_library(SSL_LIBRARY ssl $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
find_library(CRYPTO_LIBRARY crypto $ENV{HOME}/.cos-local.2/lib64 $ENV{HOME}/.cos-local.2/lib NO_DEFAULT_PATH)
|
||||
target_link_libraries(
|
||||
common
|
||||
|
||||
|
@ -77,7 +90,7 @@ if(${BUILD_S3})
|
|||
PUBLIC ${CURL_LIBRARY}
|
||||
PUBLIC ${SSL_LIBRARY}
|
||||
PUBLIC ${CRYPTO_LIBRARY}
|
||||
PUBLIC ${XML2_LIBRARY}
|
||||
PUBLIC _libxml2
|
||||
)
|
||||
|
||||
add_definitions(-DUSE_S3)
|
||||
|
@ -88,7 +101,6 @@ if(${BUILD_S3})
|
|||
find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
|
||||
find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
|
||||
find_library(MINIXML_LIBRARY mxml)
|
||||
find_library(CURL_LIBRARY curl)
|
||||
target_link_libraries(
|
||||
common
|
||||
|
||||
|
|
|
@ -9959,6 +9959,16 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
|
|||
}
|
||||
|
||||
TAOS_CHECK_EXIT(tEncodeI64(&encoder, pReq->smaId));
|
||||
|
||||
int32_t addrSize = taosArrayGetSize(pReq->pNotifyAddrUrls);
|
||||
TAOS_CHECK_EXIT(tEncodeI32(&encoder, addrSize));
|
||||
for (int32_t i = 0; i < addrSize; ++i) {
|
||||
const char *url = taosArrayGetP(pReq->pNotifyAddrUrls, i);
|
||||
TAOS_CHECK_EXIT((tEncodeCStr(&encoder, url)));
|
||||
}
|
||||
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyEventTypes));
|
||||
TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->notifyErrorHandle));
|
||||
TAOS_CHECK_EXIT(tEncodeI8(&encoder, pReq->notifyHistory));
|
||||
tEndEncode(&encoder);
|
||||
|
||||
_exit:
|
||||
|
@ -10093,6 +10103,30 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
|
|||
TAOS_CHECK_EXIT(tDecodeI64(&decoder, &pReq->smaId));
|
||||
}
|
||||
|
||||
if (!tDecodeIsEnd(&decoder)) {
|
||||
int32_t addrSize = 0;
|
||||
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &addrSize));
|
||||
pReq->pNotifyAddrUrls = taosArrayInit(addrSize, POINTER_BYTES);
|
||||
if (pReq->pNotifyAddrUrls == NULL) {
|
||||
TAOS_CHECK_EXIT(terrno);
|
||||
}
|
||||
for (int32_t i = 0; i < addrSize; ++i) {
|
||||
char *url = NULL;
|
||||
TAOS_CHECK_EXIT(tDecodeCStr(&decoder, &url));
|
||||
url = taosStrndup(url, TSDB_STREAM_NOTIFY_URL_LEN);
|
||||
if (url == NULL) {
|
||||
TAOS_CHECK_EXIT(terrno);
|
||||
}
|
||||
if (taosArrayPush(pReq->pNotifyAddrUrls, &url) == NULL) {
|
||||
taosMemoryFree(url);
|
||||
TAOS_CHECK_EXIT(terrno);
|
||||
}
|
||||
}
|
||||
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyEventTypes));
|
||||
TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->notifyErrorHandle));
|
||||
TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pReq->notifyHistory));
|
||||
}
|
||||
|
||||
tEndDecode(&decoder);
|
||||
_exit:
|
||||
tDecoderClear(&decoder);
|
||||
|
@ -10155,6 +10189,7 @@ void tFreeSCMCreateStreamReq(SCMCreateStreamReq *pReq) {
|
|||
taosArrayDestroy(pReq->fillNullCols);
|
||||
taosArrayDestroy(pReq->pVgroupVerList);
|
||||
taosArrayDestroy(pReq->pCols);
|
||||
taosArrayDestroyP(pReq->pNotifyAddrUrls, NULL);
|
||||
}
|
||||
|
||||
int32_t tEncodeSRSmaParam(SEncoder *pCoder, const SRSmaParam *pRSmaParam) {
|
||||
|
|
|
@ -3061,6 +3061,33 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t buildSinkDestTableName(char* parTbName, const char* stbFullName, uint64_t gid, bool newSubTableRule,
|
||||
char** dstTableName) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
|
||||
if (parTbName[0]) {
|
||||
if (newSubTableRule && !isAutoTableName(parTbName) && !alreadyAddGroupId(parTbName, gid) && gid != 0 &&
|
||||
stbFullName) {
|
||||
*dstTableName = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN);
|
||||
TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno);
|
||||
|
||||
tstrncpy(*dstTableName, parTbName, TSDB_TABLE_NAME_LEN);
|
||||
code = buildCtbNameAddGroupId(stbFullName, *dstTableName, gid, TSDB_TABLE_NAME_LEN);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
} else {
|
||||
*dstTableName = taosStrdup(parTbName);
|
||||
TSDB_CHECK_NULL(*dstTableName, code, lino, _end, terrno);
|
||||
}
|
||||
} else {
|
||||
code = buildCtbNameByGroupId(stbFullName, gid, dstTableName);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
||||
_end:
|
||||
return code;
|
||||
}
|
||||
|
||||
// return length of encoded data, return -1 if failed
|
||||
int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) {
|
||||
int32_t code = blockDataCheck(pBlock);
|
||||
|
|
|
@ -500,7 +500,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
|
|||
TAOS_RETURN(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
struct SConfig *taosGetCfg() { return tsCfg; }
|
||||
struct SConfig *taosGetCfg() {
|
||||
return tsCfg;
|
||||
}
|
||||
|
||||
static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
|
||||
char *apolloUrl) {
|
||||
|
@ -818,8 +820,13 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
tsNumOfSnodeWriteThreads = tsNumOfCores / 4;
|
||||
tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4);
|
||||
|
||||
tsQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
|
||||
tsQueueMemoryAllowed = TRANGE(tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
|
||||
tsQueueMemoryAllowed = tsTotalMemoryKB * 1024 * RPC_MEMORY_USAGE_RATIO * QUEUE_MEMORY_USAGE_RATIO;
|
||||
tsQueueMemoryAllowed = TRANGE(tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * QUEUE_MEMORY_USAGE_RATIO * 10LL,
|
||||
TSDB_MAX_MSG_SIZE * QUEUE_MEMORY_USAGE_RATIO * 10000LL);
|
||||
|
||||
tsApplyMemoryAllowed = tsTotalMemoryKB * 1024 * RPC_MEMORY_USAGE_RATIO * (1 - QUEUE_MEMORY_USAGE_RATIO);
|
||||
tsApplyMemoryAllowed = TRANGE(tsApplyMemoryAllowed, TSDB_MAX_MSG_SIZE * (1 - QUEUE_MEMORY_USAGE_RATIO) * 10LL,
|
||||
TSDB_MAX_MSG_SIZE * (1 - QUEUE_MEMORY_USAGE_RATIO) * 10000LL);
|
||||
|
||||
tsLogBufferMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
|
||||
tsLogBufferMemoryAllowed = TRANGE(tsLogBufferMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
|
||||
|
@ -857,7 +864,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
|
||||
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL));
|
||||
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeWriteThreads, 2, 1024, CFG_SCOPE_SERVER, CFG_DYN_SERVER_LAZY,CFG_CATEGORY_LOCAL));
|
||||
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
|
||||
TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * RPC_MEMORY_USAGE_RATIO * 10L, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
|
||||
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
|
||||
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
|
||||
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER, CFG_DYN_SERVER,CFG_CATEGORY_GLOBAL));
|
||||
|
@ -1569,7 +1576,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
tsNumOfSnodeWriteThreads = pItem->i32;
|
||||
|
||||
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "rpcQueueMemoryAllowed");
|
||||
tsQueueMemoryAllowed = pItem->i64;
|
||||
tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO;
|
||||
tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO);
|
||||
|
||||
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "simdEnable");
|
||||
tsSIMDEnable = (bool)pItem->bval;
|
||||
|
@ -2392,6 +2400,12 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
|
|||
code = TSDB_CODE_SUCCESS;
|
||||
goto _exit;
|
||||
}
|
||||
if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) {
|
||||
tsQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * QUEUE_MEMORY_USAGE_RATIO;
|
||||
tsApplyMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64 * (1 - QUEUE_MEMORY_USAGE_RATIO);
|
||||
code = TSDB_CODE_SUCCESS;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
if (strcasecmp(name, "numOfCompactThreads") == 0) {
|
||||
#ifdef TD_ENTERPRISE
|
||||
|
@ -2497,7 +2511,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
|
|||
{"experimental", &tsExperimental},
|
||||
|
||||
{"numOfRpcSessions", &tsNumOfRpcSessions},
|
||||
{"rpcQueueMemoryAllowed", &tsQueueMemoryAllowed},
|
||||
{"shellActivityTimer", &tsShellActivityTimer},
|
||||
{"readTimeout", &tsReadTimeout},
|
||||
{"safetyCheckLevel", &tsSafetyCheckLevel},
|
||||
|
|
|
@ -181,7 +181,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
|
|||
req.numOfSupportVnodes = tsNumOfSupportVnodes;
|
||||
req.numOfDiskCfg = tsDiskCfgNum;
|
||||
req.memTotal = tsTotalMemoryKB * 1024;
|
||||
req.memAvail = req.memTotal - tsQueueMemoryAllowed - 16 * 1024 * 1024;
|
||||
req.memAvail = req.memTotal - tsQueueMemoryAllowed - tsApplyMemoryAllowed - 16 * 1024 * 1024;
|
||||
tstrncpy(req.dnodeEp, tsLocalEp, TSDB_EP_LEN);
|
||||
tstrncpy(req.machineId, pMgmt->pData->machineId, TSDB_MACHINE_ID_LEN + 1);
|
||||
|
||||
|
|
|
@ -323,7 +323,7 @@ int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
|
|||
return TSDB_CODE_INVALID_MSG;
|
||||
}
|
||||
|
||||
EQItype itype = APPLY_QUEUE == qtype ? DEF_QITEM : RPC_QITEM;
|
||||
EQItype itype = APPLY_QUEUE == qtype ? APPLY_QITEM : RPC_QITEM;
|
||||
SRpcMsg *pMsg;
|
||||
code = taosAllocateQitem(sizeof(SRpcMsg), itype, pRpc->contLen, (void **)&pMsg);
|
||||
if (code) {
|
||||
|
|
|
@ -36,7 +36,8 @@ void Testbase::InitLog(const char* path) {
|
|||
tstrncpy(tsLogDir, path, PATH_MAX);
|
||||
|
||||
taosGetSystemInfo();
|
||||
tsQueueMemoryAllowed = tsTotalMemoryKB * 0.1;
|
||||
tsQueueMemoryAllowed = tsTotalMemoryKB * 0.06;
|
||||
tsApplyMemoryAllowed = tsTotalMemoryKB * 0.04;
|
||||
if (taosInitLog("taosdlog", 1, false) != 0) {
|
||||
printf("failed to init log file\n");
|
||||
}
|
||||
|
|
|
@ -753,6 +753,77 @@ static int32_t doStreamCheck(SMnode *pMnode, SStreamObj *pStreamObj) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void *notifyAddrDup(void *p) { return taosStrdup((char *)p); }
|
||||
|
||||
static int32_t addStreamTaskNotifyInfo(const SCMCreateStreamReq *createReq, const SStreamObj *pStream,
|
||||
SStreamTask *pTask) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
|
||||
TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
pTask->notifyInfo.pNotifyAddrUrls = taosArrayDup(createReq->pNotifyAddrUrls, notifyAddrDup);
|
||||
TSDB_CHECK_NULL(pTask->notifyInfo.pNotifyAddrUrls, code, lino, _end, terrno);
|
||||
pTask->notifyInfo.notifyEventTypes = createReq->notifyEventTypes;
|
||||
pTask->notifyInfo.notifyErrorHandle = createReq->notifyErrorHandle;
|
||||
pTask->notifyInfo.streamName = taosStrdup(createReq->name);
|
||||
TSDB_CHECK_NULL(pTask->notifyInfo.streamName, code, lino, _end, terrno);
|
||||
pTask->notifyInfo.stbFullName = taosStrdup(createReq->targetStbFullName);
|
||||
TSDB_CHECK_NULL(pTask->notifyInfo.stbFullName, code, lino, _end, terrno);
|
||||
pTask->notifyInfo.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
|
||||
TSDB_CHECK_NULL(pTask->notifyInfo.pSchemaWrapper, code, lino, _end, terrno);
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t addStreamNotifyInfo(SCMCreateStreamReq *createReq, SStreamObj *pStream) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
int32_t level = 0;
|
||||
int32_t nTasks = 0;
|
||||
SArray *pLevel = NULL;
|
||||
|
||||
TSDB_CHECK_NULL(createReq, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pStream, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
if (taosArrayGetSize(createReq->pNotifyAddrUrls) == 0) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
level = taosArrayGetSize(pStream->tasks);
|
||||
for (int32_t i = 0; i < level; ++i) {
|
||||
pLevel = taosArrayGetP(pStream->tasks, i);
|
||||
nTasks = taosArrayGetSize(pLevel);
|
||||
for (int32_t j = 0; j < nTasks; ++j) {
|
||||
code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j));
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
}
|
||||
|
||||
if (pStream->conf.fillHistory && createReq->notifyHistory) {
|
||||
level = taosArrayGetSize(pStream->pHTasksList);
|
||||
for (int32_t i = 0; i < level; ++i) {
|
||||
pLevel = taosArrayGetP(pStream->pHTasksList, i);
|
||||
nTasks = taosArrayGetSize(pLevel);
|
||||
for (int32_t j = 0; j < nTasks; ++j) {
|
||||
code = addStreamTaskNotifyInfo(createReq, pStream, taosArrayGetP(pLevel, j));
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("%s for stream %s failed at line %d since %s", __func__, pStream->name, lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
||||
SMnode *pMnode = pReq->info.node;
|
||||
SStreamObj *pStream = NULL;
|
||||
|
@ -850,6 +921,14 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
// add notify info into all stream tasks
|
||||
code = addStreamNotifyInfo(&createReq, &streamObj);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
mError("stream:%s failed to add stream notify info since %s", createReq.name, tstrerror(code));
|
||||
mndTransDrop(pTrans);
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
// add stream to trans
|
||||
code = mndPersistStream(pTrans, &streamObj);
|
||||
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
|
|
|
@ -75,6 +75,7 @@ set(
|
|||
"src/tq/tqSnapshot.c"
|
||||
"src/tq/tqStreamStateSnap.c"
|
||||
"src/tq/tqStreamTaskSnap.c"
|
||||
"src/tq/tqStreamNotify.c"
|
||||
)
|
||||
|
||||
aux_source_directory("src/tsdb/" TSDB_SOURCE_FILES)
|
||||
|
|
|
@ -159,6 +159,11 @@ int32_t buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t n
|
|||
SArray* pTagArray, bool newSubTableRule, SVCreateTbReq** pReq);
|
||||
int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type);
|
||||
|
||||
// tq send notifications
|
||||
int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap);
|
||||
void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap);
|
||||
int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode);
|
||||
|
||||
#define TQ_ERR_GO_TO_END(c) \
|
||||
do { \
|
||||
code = c; \
|
||||
|
|
|
@ -81,6 +81,8 @@ typedef struct SCommitInfo SCommitInfo;
|
|||
typedef struct SCompactInfo SCompactInfo;
|
||||
typedef struct SQueryNode SQueryNode;
|
||||
|
||||
typedef struct SStreamNotifyHandleMap SStreamNotifyHandleMap;
|
||||
|
||||
#define VNODE_META_TMP_DIR "meta.tmp"
|
||||
#define VNODE_META_BACKUP_DIR "meta.backup"
|
||||
|
||||
|
@ -255,6 +257,9 @@ int32_t tqProcessTaskCheckpointReadyRsp(STQ* pTq, SRpcMsg* pMsg);
|
|||
int32_t tqBuildStreamTask(void* pTq, SStreamTask* pTask, int64_t ver);
|
||||
int32_t tqScanWal(STQ* pTq);
|
||||
|
||||
// injection error
|
||||
void streamMetaFreeTQDuringScanWalError(STQ* pTq);
|
||||
|
||||
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
|
||||
int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId);
|
||||
// tq-mq
|
||||
|
@ -496,6 +501,9 @@ struct SVnode {
|
|||
int64_t blockSeq;
|
||||
SQHandle* pQuery;
|
||||
SVMonitorObj monitor;
|
||||
|
||||
// Notification Handles
|
||||
SStreamNotifyHandleMap* pNotifyHandleMap;
|
||||
};
|
||||
|
||||
#define TD_VID(PVNODE) ((PVNODE)->config.vgId)
|
||||
|
|
|
@ -75,12 +75,14 @@ int32_t tqOpen(const char* path, SVnode* pVnode) {
|
|||
if (pTq == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
pVnode->pTq = pTq;
|
||||
pTq->pVnode = pVnode;
|
||||
|
||||
pTq->path = taosStrdup(path);
|
||||
if (pTq->path == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
pTq->pVnode = pVnode;
|
||||
|
||||
pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
|
||||
if (pTq->pHandle == NULL) {
|
||||
|
@ -131,11 +133,19 @@ void tqClose(STQ* pTq) {
|
|||
return;
|
||||
}
|
||||
|
||||
int32_t vgId = 0;
|
||||
if (pTq->pVnode != NULL) {
|
||||
vgId = TD_VID(pTq->pVnode);
|
||||
} else if (pTq->pStreamMeta != NULL) {
|
||||
vgId = pTq->pStreamMeta->vgId;
|
||||
}
|
||||
|
||||
// close the stream meta firstly
|
||||
streamMetaClose(pTq->pStreamMeta);
|
||||
|
||||
void* pIter = taosHashIterate(pTq->pPushMgr, NULL);
|
||||
while (pIter) {
|
||||
STqHandle* pHandle = *(STqHandle**)pIter;
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
|
||||
if (pHandle->msg != NULL) {
|
||||
tqPushEmptyDataRsp(pHandle, vgId);
|
||||
rpcFreeCont(pHandle->msg->pCont);
|
||||
|
@ -151,8 +161,12 @@ void tqClose(STQ* pTq) {
|
|||
taosHashCleanup(pTq->pOffset);
|
||||
taosMemoryFree(pTq->path);
|
||||
tqMetaClose(pTq);
|
||||
qDebug("vgId:%d end to close tq", pTq->pStreamMeta != NULL ? pTq->pStreamMeta->vgId : -1);
|
||||
streamMetaClose(pTq->pStreamMeta);
|
||||
qDebug("vgId:%d end to close tq", vgId);
|
||||
|
||||
#if 0
|
||||
streamMetaFreeTQDuringScanWalError(pTq);
|
||||
#endif
|
||||
|
||||
taosMemoryFree(pTq);
|
||||
}
|
||||
|
||||
|
|
|
@ -16,8 +16,6 @@
|
|||
#include "tcommon.h"
|
||||
#include "tq.h"
|
||||
|
||||
#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1))
|
||||
|
||||
typedef struct STableSinkInfo {
|
||||
uint64_t uid;
|
||||
tstr name;
|
||||
|
@ -983,7 +981,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
|
|||
tqDebug("s-task:%s append groupId:%" PRId64 " for generated dstTable:%s", id, groupId, dstTableName);
|
||||
if (pTask->ver == SSTREAM_TASK_SUBTABLE_CHANGED_VER) {
|
||||
code = buildCtbNameAddGroupId(NULL, dstTableName, groupId, sizeof(pDataBlock->info.parTbName));
|
||||
} else if (pTask->ver > SSTREAM_TASK_SUBTABLE_CHANGED_VER && stbFullName) {
|
||||
} else if (pTask->ver >= SSTREAM_TASK_APPEND_STABLE_NAME_VER && stbFullName) {
|
||||
code = buildCtbNameAddGroupId(stbFullName, dstTableName, groupId, sizeof(pDataBlock->info.parTbName));
|
||||
}
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1150,6 +1148,12 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
|||
return;
|
||||
}
|
||||
|
||||
code = tqSendAllNotifyEvents(pBlocks, pTask, pVnode);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("vgId: %d, s-task:%s failed to send all event notifications", vgId, id);
|
||||
// continue processing even if notification fails
|
||||
}
|
||||
|
||||
bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks);
|
||||
if (!onlySubmitData || pTask->subtableWithoutMd5 == 1) {
|
||||
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
|
||||
|
@ -1173,6 +1177,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
|
|||
continue;
|
||||
} else if (pDataBlock->info.type == STREAM_DROP_CHILD_TABLE && pTask->subtableWithoutMd5) {
|
||||
code = doBuildAndSendDropTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
|
||||
} else if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) {
|
||||
continue;
|
||||
} else {
|
||||
code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs);
|
||||
}
|
||||
|
@ -1317,6 +1323,10 @@ void rebuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVno
|
|||
continue;
|
||||
}
|
||||
|
||||
if (pDataBlock->info.type == STREAM_NOTIFY_EVENT) {
|
||||
continue;
|
||||
}
|
||||
|
||||
hasSubmit = true;
|
||||
pTask->execInfo.sink.numOfBlocks += 1;
|
||||
uint64_t groupId = pDataBlock->info.id.groupId;
|
||||
|
|
|
@ -0,0 +1,445 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "cmdnodes.h"
|
||||
#include "tq.h"
|
||||
|
||||
#ifndef WINDOWS
|
||||
#include "curl/curl.h"
|
||||
#endif
|
||||
|
||||
#define STREAM_EVENT_NOTIFY_RETRY_MS 50 // 50ms
|
||||
|
||||
typedef struct SStreamNotifyHandle {
|
||||
TdThreadMutex mutex;
|
||||
#ifndef WINDOWS
|
||||
CURL* curl;
|
||||
#endif
|
||||
char* url;
|
||||
} SStreamNotifyHandle;
|
||||
|
||||
struct SStreamNotifyHandleMap {
|
||||
TdThreadMutex gMutex;
|
||||
SHashObj* handleMap;
|
||||
};
|
||||
|
||||
static void stopStreamNotifyConn(SStreamNotifyHandle* pHandle) {
|
||||
#ifndef WINDOWS
|
||||
if (pHandle == NULL || pHandle->curl == NULL) {
|
||||
return;
|
||||
}
|
||||
// status code 1000 means normal closure
|
||||
size_t len = 0;
|
||||
uint16_t status = htons(1000);
|
||||
CURLcode res = curl_ws_send(pHandle->curl, &status, sizeof(status), &len, 0, CURLWS_CLOSE);
|
||||
if (res != CURLE_OK) {
|
||||
tqWarn("failed to send ws-close msg to %s for %d", pHandle->url ? pHandle->url : "", res);
|
||||
}
|
||||
// TODO: add wait mechanism for peer connection close response
|
||||
curl_easy_cleanup(pHandle->curl);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void destroyStreamNotifyHandle(void* ptr) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SStreamNotifyHandle** ppHandle = ptr;
|
||||
|
||||
if (ppHandle == NULL || *ppHandle == NULL) {
|
||||
return;
|
||||
}
|
||||
code = taosThreadMutexDestroy(&(*ppHandle)->mutex);
|
||||
stopStreamNotifyConn(*ppHandle);
|
||||
taosMemoryFreeClear((*ppHandle)->url);
|
||||
taosMemoryFreeClear(*ppHandle);
|
||||
}
|
||||
|
||||
static void releaseStreamNotifyHandle(SStreamNotifyHandle** ppHandle) {
|
||||
if (ppHandle == NULL || *ppHandle == NULL) {
|
||||
return;
|
||||
}
|
||||
(void)taosThreadMutexUnlock(&(*ppHandle)->mutex);
|
||||
*ppHandle = NULL;
|
||||
}
|
||||
|
||||
static int32_t acquireStreamNotifyHandle(SStreamNotifyHandleMap* pMap, const char* url,
|
||||
SStreamNotifyHandle** ppHandle) {
|
||||
#ifndef WINDOWS
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
bool gLocked = false;
|
||||
SStreamNotifyHandle** ppFindHandle = NULL;
|
||||
SStreamNotifyHandle* pNewHandle = NULL;
|
||||
CURL* newCurl = NULL;
|
||||
CURLcode res = CURLE_OK;
|
||||
|
||||
TSDB_CHECK_NULL(pMap, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(url, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(ppHandle, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*ppHandle = NULL;
|
||||
|
||||
code = taosThreadMutexLock(&pMap->gMutex);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
gLocked = true;
|
||||
|
||||
ppFindHandle = taosHashGet(pMap->handleMap, url, strlen(url));
|
||||
if (ppFindHandle == NULL) {
|
||||
pNewHandle = taosMemoryCalloc(1, sizeof(SStreamNotifyHandle));
|
||||
TSDB_CHECK_NULL(pNewHandle, code, lino, _end, terrno);
|
||||
code = taosThreadMutexInit(&pNewHandle->mutex, NULL);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
code = taosHashPut(pMap->handleMap, url, strlen(url), &pNewHandle, POINTER_BYTES);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
*ppHandle = pNewHandle;
|
||||
pNewHandle = NULL;
|
||||
} else {
|
||||
*ppHandle = *ppFindHandle;
|
||||
}
|
||||
|
||||
code = taosThreadMutexLock(&(*ppHandle)->mutex);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
|
||||
(void)taosThreadMutexUnlock(&pMap->gMutex);
|
||||
gLocked = false;
|
||||
|
||||
if ((*ppHandle)->curl == NULL) {
|
||||
newCurl = curl_easy_init();
|
||||
TSDB_CHECK_NULL(newCurl, code, lino, _end, TSDB_CODE_FAILED);
|
||||
res = curl_easy_setopt(newCurl, CURLOPT_URL, url);
|
||||
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
|
||||
res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYPEER, 0L);
|
||||
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
|
||||
res = curl_easy_setopt(newCurl, CURLOPT_SSL_VERIFYHOST, 0L);
|
||||
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
|
||||
res = curl_easy_setopt(newCurl, CURLOPT_TIMEOUT, 3L);
|
||||
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
|
||||
res = curl_easy_setopt(newCurl, CURLOPT_CONNECT_ONLY, 2L);
|
||||
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
|
||||
res = curl_easy_perform(newCurl);
|
||||
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
|
||||
(*ppHandle)->curl = newCurl;
|
||||
newCurl = NULL;
|
||||
}
|
||||
|
||||
if ((*ppHandle)->url == NULL) {
|
||||
(*ppHandle)->url = taosStrdup(url);
|
||||
TSDB_CHECK_NULL((*ppHandle)->url, code, lino, _end, terrno);
|
||||
}
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code));
|
||||
if (*ppHandle) {
|
||||
releaseStreamNotifyHandle(ppHandle);
|
||||
}
|
||||
*ppHandle = NULL;
|
||||
}
|
||||
if (newCurl) {
|
||||
curl_easy_cleanup(newCurl);
|
||||
}
|
||||
if (pNewHandle) {
|
||||
destroyStreamNotifyHandle(&pNewHandle);
|
||||
}
|
||||
if (gLocked) {
|
||||
(void)taosThreadMutexUnlock(&pMap->gMutex);
|
||||
}
|
||||
return code;
|
||||
#else
|
||||
tqError("stream notify events is not supported on windows");
|
||||
return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS;
|
||||
#endif
|
||||
}
|
||||
|
||||
int32_t tqInitNotifyHandleMap(SStreamNotifyHandleMap** ppMap) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SStreamNotifyHandleMap* pMap = NULL;
|
||||
|
||||
TSDB_CHECK_NULL(ppMap, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*ppMap = NULL;
|
||||
pMap = taosMemoryCalloc(1, sizeof(SStreamNotifyHandleMap));
|
||||
TSDB_CHECK_NULL(pMap, code, lino, _end, terrno);
|
||||
code = taosThreadMutexInit(&pMap->gMutex, NULL);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
pMap->handleMap = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
|
||||
TSDB_CHECK_NULL(pMap->handleMap, code, lino, _end, terrno);
|
||||
taosHashSetFreeFp(pMap->handleMap, destroyStreamNotifyHandle);
|
||||
*ppMap = pMap;
|
||||
pMap = NULL;
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
if (pMap != NULL) {
|
||||
tqDestroyNotifyHandleMap(&pMap);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
void tqDestroyNotifyHandleMap(SStreamNotifyHandleMap** ppMap) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
|
||||
if (*ppMap == NULL) {
|
||||
return;
|
||||
}
|
||||
taosHashCleanup((*ppMap)->handleMap);
|
||||
code = taosThreadMutexDestroy(&(*ppMap)->gMutex);
|
||||
taosMemoryFreeClear((*ppMap));
|
||||
}
|
||||
|
||||
#define JSON_CHECK_ADD_ITEM(obj, str, item) \
|
||||
TSDB_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY)
|
||||
|
||||
static int32_t getStreamNotifyEventHeader(const char* streamName, char** pHeader) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
cJSON* obj = NULL;
|
||||
cJSON* streams = NULL;
|
||||
cJSON* stream = NULL;
|
||||
char msgId[37];
|
||||
|
||||
TSDB_CHECK_NULL(streamName, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pHeader, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*pHeader = NULL;
|
||||
|
||||
code = taosGetSystemUUIDLimit36(msgId, sizeof(msgId));
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
|
||||
stream = cJSON_CreateObject();
|
||||
TSDB_CHECK_NULL(stream, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
JSON_CHECK_ADD_ITEM(stream, "streamName", cJSON_CreateStringReference(streamName));
|
||||
JSON_CHECK_ADD_ITEM(stream, "events", cJSON_CreateArray());
|
||||
|
||||
streams = cJSON_CreateArray();
|
||||
TSDB_CHECK_CONDITION(cJSON_AddItemToArray(streams, stream), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY)
|
||||
stream = NULL;
|
||||
|
||||
obj = cJSON_CreateObject();
|
||||
TSDB_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
JSON_CHECK_ADD_ITEM(obj, "messageId", cJSON_CreateStringReference(msgId));
|
||||
JSON_CHECK_ADD_ITEM(obj, "timestamp", cJSON_CreateNumber(taosGetTimestampMs()));
|
||||
JSON_CHECK_ADD_ITEM(obj, "streams", streams);
|
||||
streams = NULL;
|
||||
|
||||
*pHeader = cJSON_PrintUnformatted(obj);
|
||||
TSDB_CHECK_NULL(*pHeader, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
if (stream != NULL) {
|
||||
cJSON_Delete(stream);
|
||||
}
|
||||
if (streams != NULL) {
|
||||
cJSON_Delete(streams);
|
||||
}
|
||||
if (obj != NULL) {
|
||||
cJSON_Delete(obj);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t packupStreamNotifyEvent(const char* streamName, const SArray* pBlocks, char** pMsg,
|
||||
int32_t* nNotifyEvents) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
int32_t numOfBlocks = 0;
|
||||
int32_t msgHeaderLen = 0;
|
||||
int32_t msgTailLen = 0;
|
||||
int32_t msgLen = 0;
|
||||
char* msgHeader = NULL;
|
||||
const char* msgTail = "]}]}";
|
||||
char* msg = NULL;
|
||||
|
||||
TSDB_CHECK_NULL(pMsg, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*pMsg = NULL;
|
||||
numOfBlocks = taosArrayGetSize(pBlocks);
|
||||
*nNotifyEvents = 0;
|
||||
|
||||
for (int32_t i = 0; i < numOfBlocks; ++i) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) {
|
||||
continue;
|
||||
}
|
||||
|
||||
SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX);
|
||||
for (int32_t j = 0; j < pDataBlock->info.rows; ++j) {
|
||||
char* val = colDataGetVarData(pEventStrCol, j);
|
||||
msgLen += varDataLen(val) + 1;
|
||||
}
|
||||
*nNotifyEvents += pDataBlock->info.rows;
|
||||
}
|
||||
|
||||
if (msgLen == 0) {
|
||||
// skip since no notification events found
|
||||
goto _end;
|
||||
}
|
||||
|
||||
code = getStreamNotifyEventHeader(streamName, &msgHeader);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
msgHeaderLen = strlen(msgHeader);
|
||||
msgTailLen = strlen(msgTail);
|
||||
msgLen += msgHeaderLen;
|
||||
|
||||
msg = taosMemoryMalloc(msgLen);
|
||||
TSDB_CHECK_NULL(msg, code, lino, _end, terrno);
|
||||
char* p = msg;
|
||||
TAOS_STRNCPY(p, msgHeader, msgHeaderLen);
|
||||
p += msgHeaderLen - msgTailLen;
|
||||
|
||||
for (int32_t i = 0; i < numOfBlocks; ++i) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
if (pDataBlock == NULL || pDataBlock->info.type != STREAM_NOTIFY_EVENT) {
|
||||
continue;
|
||||
}
|
||||
|
||||
SColumnInfoData* pEventStrCol = taosArrayGet(pDataBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX);
|
||||
for (int32_t j = 0; j < pDataBlock->info.rows; ++j) {
|
||||
char* val = colDataGetVarData(pEventStrCol, j);
|
||||
TAOS_STRNCPY(p, varDataVal(val), varDataLen(val));
|
||||
p += varDataLen(val);
|
||||
*(p++) = ',';
|
||||
}
|
||||
}
|
||||
|
||||
p -= 1;
|
||||
TAOS_STRNCPY(p, msgTail, msgTailLen);
|
||||
*(p + msgTailLen) = '\0';
|
||||
|
||||
*pMsg = msg;
|
||||
msg = NULL;
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
if (msgHeader != NULL) {
|
||||
cJSON_free(msgHeader);
|
||||
}
|
||||
if (msg != NULL) {
|
||||
taosMemoryFreeClear(msg);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t sendSingleStreamNotify(SStreamNotifyHandle* pHandle, char* msg) {
|
||||
#ifndef WINDOWS
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
CURLcode res = CURLE_OK;
|
||||
uint64_t sentLen = 0;
|
||||
uint64_t totalLen = 0;
|
||||
size_t nbytes = 0;
|
||||
|
||||
TSDB_CHECK_NULL(pHandle, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pHandle->curl, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
totalLen = strlen(msg);
|
||||
while (sentLen < totalLen) {
|
||||
res = curl_ws_send(pHandle->curl, msg + sentLen, totalLen - sentLen, &nbytes, 0, CURLWS_TEXT);
|
||||
TSDB_CHECK_CONDITION(res == CURLE_OK, code, lino, _end, TSDB_CODE_FAILED);
|
||||
sentLen += nbytes;
|
||||
}
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at line %d since %d, %s", __func__, lino, res, tstrerror(code));
|
||||
stopStreamNotifyConn(pHandle);
|
||||
}
|
||||
return code;
|
||||
#else
|
||||
tqError("stream notify events is not supported on windows");
|
||||
return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS;
|
||||
#endif
|
||||
}
|
||||
|
||||
int32_t tqSendAllNotifyEvents(const SArray* pBlocks, SStreamTask* pTask, SVnode* pVnode) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
char* msg = NULL;
|
||||
int32_t nNotifyAddr = 0;
|
||||
int32_t nNotifyEvents = 0;
|
||||
SStreamNotifyHandle* pHandle = NULL;
|
||||
|
||||
TSDB_CHECK_NULL(pTask, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
TSDB_CHECK_NULL(pVnode, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
nNotifyAddr = taosArrayGetSize(pTask->notifyInfo.pNotifyAddrUrls);
|
||||
if (nNotifyAddr == 0) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
code = packupStreamNotifyEvent(pTask->notifyInfo.streamName, pBlocks, &msg, &nNotifyEvents);
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
if (msg == NULL) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
tqDebug("stream task %s prepare to send %d notify events, total msg length: %" PRIu64, pTask->notifyInfo.streamName,
|
||||
nNotifyEvents, (uint64_t)strlen(msg));
|
||||
|
||||
for (int32_t i = 0; i < nNotifyAddr; ++i) {
|
||||
if (streamTaskShouldStop(pTask)) {
|
||||
break;
|
||||
}
|
||||
const char* url = taosArrayGetP(pTask->notifyInfo.pNotifyAddrUrls, i);
|
||||
code = acquireStreamNotifyHandle(pVnode->pNotifyHandleMap, url, &pHandle);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("failed to get stream notify handle of %s", url);
|
||||
if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) {
|
||||
// retry for event message sending in PAUSE error handling mode
|
||||
taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS);
|
||||
--i;
|
||||
continue;
|
||||
} else {
|
||||
// simply ignore the failure in DROP error handling mode
|
||||
code = TSDB_CODE_SUCCESS;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
code = sendSingleStreamNotify(pHandle, msg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("failed to send stream notify handle to %s since %s", url, tstrerror(code));
|
||||
if (pTask->notifyInfo.notifyErrorHandle == SNOTIFY_ERROR_HANDLE_PAUSE) {
|
||||
// retry for event message sending in PAUSE error handling mode
|
||||
taosMsleep(STREAM_EVENT_NOTIFY_RETRY_MS);
|
||||
--i;
|
||||
} else {
|
||||
// simply ignore the failure in DROP error handling mode
|
||||
code = TSDB_CODE_SUCCESS;
|
||||
}
|
||||
} else {
|
||||
tqDebug("stream task %s send %d notify events to %s successfully", pTask->notifyInfo.streamName, nNotifyEvents,
|
||||
url);
|
||||
}
|
||||
releaseStreamNotifyHandle(&pHandle);
|
||||
}
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
tqError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
if (msg) {
|
||||
taosMemoryFreeClear(msg);
|
||||
}
|
||||
return code;
|
||||
}
|
|
@ -22,6 +22,8 @@
|
|||
typedef struct SBuildScanWalMsgParam {
|
||||
int64_t metaId;
|
||||
int32_t numOfTasks;
|
||||
int8_t restored;
|
||||
SMsgCb msgCb;
|
||||
} SBuildScanWalMsgParam;
|
||||
|
||||
static int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta);
|
||||
|
@ -74,7 +76,6 @@ int32_t tqScanWal(STQ* pTq) {
|
|||
|
||||
static void doStartScanWal(void* param, void* tmrId) {
|
||||
int32_t vgId = 0;
|
||||
STQ* pTq = NULL;
|
||||
int32_t code = 0;
|
||||
|
||||
SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param;
|
||||
|
@ -86,13 +87,29 @@ static void doStartScanWal(void* param, void* tmrId) {
|
|||
return;
|
||||
}
|
||||
|
||||
if (pMeta->closeFlag) {
|
||||
code = taosReleaseRef(streamMetaRefPool, pParam->metaId);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
tqDebug("vgId:%d jump out of scan wal timer since closed", vgId);
|
||||
} else {
|
||||
tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId,
|
||||
tstrerror(code));
|
||||
}
|
||||
|
||||
taosMemoryFree(pParam);
|
||||
return;
|
||||
}
|
||||
|
||||
vgId = pMeta->vgId;
|
||||
pTq = pMeta->ahandle;
|
||||
|
||||
tqDebug("vgId:%d create msg to start wal scan, numOfTasks:%d, vnd restored:%d", vgId, pParam->numOfTasks,
|
||||
pTq->pVnode->restored);
|
||||
pParam->restored);
|
||||
#if 0
|
||||
// wait for the vnode is freed, and invalid read may occur.
|
||||
taosMsleep(10000);
|
||||
#endif
|
||||
|
||||
code = streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA);
|
||||
code = streamTaskSchedTask(&pParam->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA);
|
||||
if (code) {
|
||||
tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code));
|
||||
}
|
||||
|
@ -120,6 +137,8 @@ int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) {
|
|||
|
||||
pParam->metaId = pMeta->rid;
|
||||
pParam->numOfTasks = numOfTasks;
|
||||
pParam->restored = pTq->pVnode->restored;
|
||||
pParam->msgCb = pTq->pVnode->msgCb;
|
||||
|
||||
code = streamTimerGetInstance(&pTimer);
|
||||
if (code) {
|
||||
|
@ -330,13 +349,13 @@ int32_t doPutDataIntoInputQ(SStreamTask* pTask, int64_t maxVer, int32_t* numOfIt
|
|||
|
||||
int32_t doScanWalForAllTasks(SStreamMeta* pStreamMeta) {
|
||||
int32_t vgId = pStreamMeta->vgId;
|
||||
SArray* pTaskList = NULL;
|
||||
int32_t numOfTasks = taosArrayGetSize(pStreamMeta->pTaskList);
|
||||
if (numOfTasks == 0) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
// clone the task list, to avoid the task update during scan wal files
|
||||
SArray* pTaskList = NULL;
|
||||
streamMetaWLock(pStreamMeta);
|
||||
pTaskList = taosArrayDup(pStreamMeta->pTaskList, NULL);
|
||||
streamMetaWUnLock(pStreamMeta);
|
||||
|
@ -447,3 +466,11 @@ int32_t doScanWalAsync(STQ* pTq, bool ckPause) {
|
|||
|
||||
return streamTaskSchedTask(&pTq->pVnode->msgCb, vgId, 0, 0, STREAM_EXEC_T_EXTRACT_WAL_DATA);
|
||||
}
|
||||
|
||||
void streamMetaFreeTQDuringScanWalError(STQ* pTq) {
|
||||
SBuildScanWalMsgParam* p = taosMemoryCalloc(1, sizeof(SBuildScanWalMsgParam));
|
||||
p->metaId = pTq->pStreamMeta->rid;
|
||||
p->numOfTasks = 0;
|
||||
|
||||
doStartScanWal(p, 0);
|
||||
}
|
|
@ -86,6 +86,14 @@ int32_t tqExpandStreamTask(SStreamTask* pTask) {
|
|||
if (code) {
|
||||
return code;
|
||||
}
|
||||
|
||||
code =
|
||||
qSetStreamNotifyInfo(pTask->exec.pExecutor, pTask->notifyInfo.notifyEventTypes,
|
||||
pTask->notifyInfo.pSchemaWrapper, pTask->notifyInfo.stbFullName, IS_NEW_SUBTB_RULE(pTask));
|
||||
if (code) {
|
||||
tqError("s-task:%s failed to set stream notify info, code:%s", pTask->id.idStr, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
streamSetupScheduleTrigger(pTask);
|
||||
|
@ -1357,4 +1365,4 @@ int32_t tqStreamTaskProcessConsenChkptIdReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
|
|||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
|
||||
#include "sync.h"
|
||||
#include "tcs.h"
|
||||
#include "tq.h"
|
||||
#include "tsdb.h"
|
||||
#include "vnd.h"
|
||||
|
||||
|
@ -483,6 +484,14 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
|
|||
ret = taosRealPath(tdir, NULL, sizeof(tdir));
|
||||
TAOS_UNUSED(ret);
|
||||
|
||||
// init handle map for stream event notification
|
||||
ret = tqInitNotifyHandleMap(&pVnode->pNotifyHandleMap);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
vError("vgId:%d, failed to init StreamNotifyHandleMap", TD_VID(pVnode));
|
||||
terrno = ret;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
// open query
|
||||
vInfo("vgId:%d, start to open vnode query", TD_VID(pVnode));
|
||||
if (vnodeQueryOpen(pVnode)) {
|
||||
|
@ -555,6 +564,7 @@ void vnodeClose(SVnode *pVnode) {
|
|||
vnodeAWait(&pVnode->commitTask);
|
||||
vnodeSyncClose(pVnode);
|
||||
vnodeQueryClose(pVnode);
|
||||
tqDestroyNotifyHandleMap(&pVnode->pNotifyHandleMap);
|
||||
tqClose(pVnode->pTq);
|
||||
walClose(pVnode->pWal);
|
||||
if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
|
||||
|
|
|
@ -449,9 +449,17 @@ typedef struct STimeWindowAggSupp {
|
|||
SColumnInfoData timeWindowData; // query time window info for scalar function execution.
|
||||
} STimeWindowAggSupp;
|
||||
|
||||
typedef struct SStreamNotifyEventSupp {
|
||||
SArray* pWindowEvents; // Array of SStreamNotifyEvent, storing window events and trigger values.
|
||||
SHashObj* pTableNameHashMap; // Hash map from groupid to the dest child table name.
|
||||
SHashObj* pResultHashMap; // Hash map from groupid+skey to the window agg result.
|
||||
SSDataBlock* pEventBlock; // The datablock contains all window events and results.
|
||||
} SStreamNotifyEventSupp;
|
||||
|
||||
typedef struct SSteamOpBasicInfo {
|
||||
int32_t primaryPkIndex;
|
||||
bool updateOperatorInfo;
|
||||
int32_t primaryPkIndex;
|
||||
bool updateOperatorInfo;
|
||||
SStreamNotifyEventSupp windowEventSup;
|
||||
} SSteamOpBasicInfo;
|
||||
|
||||
typedef struct SStreamFillSupporter {
|
||||
|
@ -767,6 +775,8 @@ typedef struct SStreamEventAggOperatorInfo {
|
|||
SSHashObj* pPkDeleted;
|
||||
bool destHasPrimaryKey;
|
||||
struct SOperatorInfo* pOperator;
|
||||
SNodeList* pStartCondCols;
|
||||
SNodeList* pEndCondCols;
|
||||
} SStreamEventAggOperatorInfo;
|
||||
|
||||
typedef struct SStreamCountAggOperatorInfo {
|
||||
|
|
|
@ -71,6 +71,10 @@ typedef struct {
|
|||
SVersionRange fillHistoryVer;
|
||||
STimeWindow fillHistoryWindow;
|
||||
SStreamState* pState;
|
||||
int32_t eventTypes; // event types to notify
|
||||
SSchemaWrapper* notifyResultSchema; // agg result to notify
|
||||
char* stbFullName; // used to generate dest child table name
|
||||
bool newSubTableRule; // used to generate dest child table name
|
||||
} SStreamTaskInfo;
|
||||
|
||||
struct SExecTaskInfo {
|
||||
|
|
|
@ -19,7 +19,10 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "cJSON.h"
|
||||
#include "cmdnodes.h"
|
||||
#include "executorInt.h"
|
||||
#include "querytask.h"
|
||||
#include "tutil.h"
|
||||
|
||||
#define FILL_POS_INVALID 0
|
||||
|
@ -57,7 +60,8 @@ typedef struct SSlicePoint {
|
|||
void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type);
|
||||
bool needSaveStreamOperatorInfo(SSteamOpBasicInfo* pBasicInfo);
|
||||
void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo);
|
||||
void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo);
|
||||
int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo);
|
||||
void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo);
|
||||
|
||||
int64_t getDeleteMarkFromOption(SStreamNodeOption* pOption);
|
||||
void removeDeleteResults(SSHashObj* pUpdatedMap, SArray* pDelWins);
|
||||
|
@ -106,6 +110,13 @@ int32_t buildAllResultKey(SStateStore* pStateStore, SStreamState* pState, TSKEY
|
|||
int32_t initOffsetInfo(int32_t** ppOffset, SSDataBlock* pRes);
|
||||
TSKEY compareTs(void* pKey);
|
||||
|
||||
int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey,
|
||||
const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri,
|
||||
SStreamNotifyEventSupp* sup);
|
||||
int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper,
|
||||
SStreamNotifyEventSupp* sup);
|
||||
int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -250,6 +250,28 @@ int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) {
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t qSetStreamNotifyInfo(qTaskInfo_t tinfo, int32_t eventTypes, const SSchemaWrapper* pSchemaWrapper,
|
||||
const char* stbFullName, bool newSubTableRule) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SStreamTaskInfo *pStreamInfo = NULL;
|
||||
|
||||
if (tinfo == 0 || eventTypes == 0 || pSchemaWrapper == NULL || stbFullName == NULL) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
pStreamInfo = &((SExecTaskInfo*)tinfo)->streamInfo;
|
||||
pStreamInfo->eventTypes = eventTypes;
|
||||
pStreamInfo->notifyResultSchema = tCloneSSchemaWrapper(pSchemaWrapper);
|
||||
if (pStreamInfo->notifyResultSchema == NULL) {
|
||||
code = terrno;
|
||||
}
|
||||
pStreamInfo->stbFullName = taosStrdup(stbFullName);
|
||||
pStreamInfo->newSubTableRule = newSubTableRule;
|
||||
|
||||
_end:
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
|
||||
if (tinfo == NULL) {
|
||||
return TSDB_CODE_APP_ERROR;
|
||||
|
@ -469,6 +491,13 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI
|
|||
}
|
||||
|
||||
SStreamScanInfo* pScanInfo = pInfo->info;
|
||||
if (pInfo->pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) { // clear meta cache for subscription if tag is changed
|
||||
for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) {
|
||||
int64_t* uid = (int64_t*)taosArrayGet(tableIdList, i);
|
||||
STableScanInfo* pTableScanInfo = pScanInfo->pTableScanOp->info;
|
||||
taosLRUCacheErase(pTableScanInfo->base.metaCache.pTableMetaEntryCache, uid, LONG_BYTES);
|
||||
}
|
||||
}
|
||||
|
||||
if (isAdd) { // add new table id
|
||||
SArray* qa = NULL;
|
||||
|
|
|
@ -262,6 +262,8 @@ SSchemaWrapper* extractQueriedColumnSchema(SScanPhysiNode* pScanNode) {
|
|||
static void cleanupStreamInfo(SStreamTaskInfo* pStreamInfo) {
|
||||
tDeleteSchemaWrapper(pStreamInfo->schema);
|
||||
tOffsetDestroy(&pStreamInfo->currentOffset);
|
||||
tDeleteSchemaWrapper(pStreamInfo->notifyResultSchema);
|
||||
taosMemoryFree(pStreamInfo->stbFullName);
|
||||
}
|
||||
|
||||
static void freeBlock(void* pParam) {
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "cmdnodes.h"
|
||||
#include "executorInt.h"
|
||||
#include "filter.h"
|
||||
#include "function.h"
|
||||
|
@ -53,6 +55,8 @@ void destroyStreamEventOperatorInfo(void* param) {
|
|||
&pInfo->groupResInfo);
|
||||
pInfo->pOperator = NULL;
|
||||
}
|
||||
|
||||
destroyStreamBasicInfo(&pInfo->basic);
|
||||
destroyStreamAggSupporter(&pInfo->streamAggSup);
|
||||
clearGroupResInfo(&pInfo->groupResInfo);
|
||||
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
|
||||
|
@ -89,6 +93,16 @@ void destroyStreamEventOperatorInfo(void* param) {
|
|||
pInfo->pEndCondInfo = NULL;
|
||||
}
|
||||
|
||||
if (pInfo->pStartCondCols != NULL) {
|
||||
nodesDestroyList(pInfo->pStartCondCols);
|
||||
pInfo->pStartCondCols = NULL;
|
||||
}
|
||||
|
||||
if (pInfo->pEndCondCols != NULL) {
|
||||
nodesDestroyList(pInfo->pEndCondCols);
|
||||
pInfo->pEndCondCols = NULL;
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
|
@ -121,7 +135,7 @@ void reuseOutputBuf(void* pState, SRowBuffPos* pPos, SStateStore* pAPI) {
|
|||
}
|
||||
|
||||
int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t groupId, bool* pStart, bool* pEnd,
|
||||
int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey) {
|
||||
int32_t index, int32_t rows, SEventWindowInfo* pCurWin, SSessionKey* pNextWinKey, int32_t* pWinCode) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
int32_t winCode = TSDB_CODE_SUCCESS;
|
||||
|
@ -143,6 +157,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro
|
|||
setEventWindowInfo(pAggSup, &leftWinKey, pVal, pCurWin);
|
||||
if (inWin || (pCurWin->pWinFlag->startFlag && !pCurWin->pWinFlag->endFlag)) {
|
||||
pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin);
|
||||
(*pWinCode) = TSDB_CODE_SUCCESS;
|
||||
goto _end;
|
||||
}
|
||||
}
|
||||
|
@ -156,6 +171,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro
|
|||
if (endi < 0 || pTs[endi] >= rightWinKey.win.skey) {
|
||||
setEventWindowInfo(pAggSup, &rightWinKey, pVal, pCurWin);
|
||||
pCurWin->winInfo.isOutput = !isWindowIncomplete(pCurWin);
|
||||
(*pWinCode) = TSDB_CODE_SUCCESS;
|
||||
goto _end;
|
||||
}
|
||||
}
|
||||
|
@ -163,6 +179,7 @@ int32_t setEventOutputBuf(SStreamAggSupporter* pAggSup, TSKEY* pTs, uint64_t gro
|
|||
SSessionKey winKey = {.win.skey = ts, .win.ekey = ts, .groupId = groupId};
|
||||
code = pAggSup->stateStore.streamStateSessionAllocWinBuffByNextPosition(pAggSup->pState, pCur, &winKey, &pVal, &len);
|
||||
QUERY_CHECK_CODE(code, lino, _error);
|
||||
(*pWinCode) = TSDB_CODE_FAILED;
|
||||
|
||||
setEventWindowInfo(pAggSup, &winKey, pVal, pCurWin);
|
||||
pCurWin->pWinFlag->startFlag = start;
|
||||
|
@ -373,10 +390,18 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
|
|||
bool allEqual = true;
|
||||
SEventWindowInfo curWin = {0};
|
||||
SSessionKey nextWinKey = {0};
|
||||
int32_t winCode = TSDB_CODE_SUCCESS;
|
||||
code = setEventOutputBuf(pAggSup, tsCols, groupId, (bool*)pColStart->pData, (bool*)pColEnd->pData, i, rows, &curWin,
|
||||
&nextWinKey);
|
||||
&nextWinKey, &winCode);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_OPEN) &&
|
||||
*(bool*)colDataGetNumData(pColStart, i) && winCode != TSDB_CODE_SUCCESS) {
|
||||
code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_OPEN, &curWin.winInfo.sessionWin, pSDataBlock,
|
||||
pInfo->pStartCondCols, i, &pInfo->basic.windowEventSup);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
||||
setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo);
|
||||
bool rebuild = false;
|
||||
code = updateEventWindowInfo(pAggSup, &curWin, &nextWinKey, tsCols, (bool*)pColStart->pData, (bool*)pColEnd->pData,
|
||||
|
@ -443,6 +468,12 @@ static void doStreamEventAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
|
|||
tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curWin.winInfo, sizeof(SResultWindowInfo));
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
||||
if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) {
|
||||
code = addEventAggNotifyEvent(SNOTIFY_EVENT_WINDOW_CLOSE, &curWin.winInfo.sessionWin, pSDataBlock,
|
||||
pInfo->pEndCondCols, i + winRows - 1, &pInfo->basic.windowEventSup);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
}
|
||||
|
||||
_end:
|
||||
|
@ -563,6 +594,7 @@ void doStreamEventSaveCheckpoint(SOperatorInfo* pOperator) {
|
|||
|
||||
static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SStreamEventAggOperatorInfo* pInfo = pOperator->info;
|
||||
SOptrBasicInfo* pBInfo = &pInfo->binfo;
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
@ -577,10 +609,27 @@ static int32_t buildEventResult(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
|
|||
doBuildSessionResult(pOperator, pInfo->streamAggSup.pState, &pInfo->groupResInfo, pBInfo->pRes);
|
||||
if (pBInfo->pRes->info.rows > 0) {
|
||||
printDataBlock(pBInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
|
||||
if (BIT_FLAG_TEST_MASK(pTaskInfo->streamInfo.eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE)) {
|
||||
code = addAggResultNotifyEvent(pBInfo->pRes, pTaskInfo->streamInfo.notifyResultSchema, &pInfo->basic.windowEventSup);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
(*ppRes) = pBInfo->pRes;
|
||||
return code;
|
||||
}
|
||||
|
||||
code = buildNotifyEventBlock(pTaskInfo, &pInfo->basic.windowEventSup);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (pInfo->basic.windowEventSup.pEventBlock->info.rows > 0) {
|
||||
printDataBlock(pInfo->basic.windowEventSup.pEventBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
|
||||
(*ppRes) = pInfo->basic.windowEventSup.pEventBlock;
|
||||
return code;
|
||||
}
|
||||
|
||||
_end:
|
||||
(*ppRes) = NULL;
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s. task:%s", __func__, lino, tstrerror(code), GET_TASKID(pTaskInfo));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -957,6 +1006,7 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
|
|||
pInfo->pPkDeleted = tSimpleHashInit(64, hashFn);
|
||||
QUERY_CHECK_NULL(pInfo->pPkDeleted, code, lino, _error, terrno);
|
||||
pInfo->destHasPrimaryKey = pEventNode->window.destHasPrimaryKey;
|
||||
initStreamBasicInfo(&pInfo->basic);
|
||||
|
||||
pInfo->pOperator = pOperator;
|
||||
setOperatorInfo(pOperator, "StreamEventAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT, true, OP_NOT_OPENED,
|
||||
|
@ -989,6 +1039,12 @@ int32_t createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
|
|||
code = filterInitFromNode((SNode*)pEventNode->pEndCond, &pInfo->pEndCondInfo, 0);
|
||||
QUERY_CHECK_CODE(code, lino, _error);
|
||||
|
||||
code =
|
||||
nodesCollectColumnsFromNode((SNode*)pEventNode->pStartCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pStartCondCols);
|
||||
QUERY_CHECK_CODE(code, lino, _error);
|
||||
code = nodesCollectColumnsFromNode((SNode*)pEventNode->pEndCond, NULL, COLLECT_COL_TYPE_ALL, &pInfo->pEndCondCols);
|
||||
QUERY_CHECK_CODE(code, lino, _error);
|
||||
|
||||
*pOptrInfo = pOperator;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
|
|
|
@ -13,7 +13,19 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "streamexecutorInt.h"
|
||||
|
||||
#include "executorInt.h"
|
||||
#include "tdatablock.h"
|
||||
|
||||
#define NOTIFY_EVENT_NAME_CACHE_LIMIT_MB 16
|
||||
|
||||
typedef struct SStreamNotifyEvent {
|
||||
uint64_t gid;
|
||||
TSKEY skey;
|
||||
char* content;
|
||||
bool isEnd;
|
||||
} SStreamNotifyEvent;
|
||||
|
||||
void setStreamOperatorState(SSteamOpBasicInfo* pBasicInfo, EStreamType type) {
|
||||
if (type != STREAM_GET_ALL && type != STREAM_CHECKPOINT) {
|
||||
|
@ -29,7 +41,509 @@ void saveStreamOperatorStateComplete(SSteamOpBasicInfo* pBasicInfo) {
|
|||
pBasicInfo->updateOperatorInfo = false;
|
||||
}
|
||||
|
||||
void initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) {
|
||||
static void destroyStreamWindowEvent(void* ptr) {
|
||||
SStreamNotifyEvent* pEvent = ptr;
|
||||
if (pEvent == NULL || pEvent->content == NULL) return;
|
||||
cJSON_free(pEvent->content);
|
||||
}
|
||||
|
||||
static void destroyStreamNotifyEventSupp(SStreamNotifyEventSupp* sup) {
|
||||
if (sup == NULL) return;
|
||||
taosArrayDestroyEx(sup->pWindowEvents, destroyStreamWindowEvent);
|
||||
taosHashCleanup(sup->pTableNameHashMap);
|
||||
taosHashCleanup(sup->pResultHashMap);
|
||||
blockDataDestroy(sup->pEventBlock);
|
||||
*sup = (SStreamNotifyEventSupp){0};
|
||||
}
|
||||
|
||||
static int32_t initStreamNotifyEventSupp(SStreamNotifyEventSupp *sup) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SSDataBlock* pBlock = NULL;
|
||||
SColumnInfoData infoData = {0};
|
||||
|
||||
if (sup == NULL) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
code = createDataBlock(&pBlock);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
pBlock->info.type = STREAM_NOTIFY_EVENT;
|
||||
pBlock->info.watermark = INT64_MIN;
|
||||
|
||||
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
|
||||
infoData.info.bytes = tDataTypes[infoData.info.type].bytes;
|
||||
code = blockDataAppendColInfo(pBlock, &infoData);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
sup->pWindowEvents = taosArrayInit(0, sizeof(SStreamNotifyEvent));
|
||||
QUERY_CHECK_NULL(sup->pWindowEvents, code, lino, _end, terrno);
|
||||
sup->pTableNameHashMap = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
|
||||
QUERY_CHECK_NULL(sup->pTableNameHashMap, code, lino, _end, terrno);
|
||||
sup->pResultHashMap = taosHashInit(4096, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
|
||||
QUERY_CHECK_NULL(sup->pResultHashMap, code, lino, _end, terrno);
|
||||
taosHashSetFreeFp(sup->pResultHashMap, destroyStreamWindowEvent);
|
||||
sup->pEventBlock = pBlock;
|
||||
pBlock = NULL;
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
if (sup) {
|
||||
destroyStreamNotifyEventSupp(sup);
|
||||
}
|
||||
}
|
||||
if (pBlock != NULL) {
|
||||
blockDataDestroy(pBlock);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t initStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) {
|
||||
pBasicInfo->primaryPkIndex = -1;
|
||||
pBasicInfo->updateOperatorInfo = false;
|
||||
return initStreamNotifyEventSupp(&pBasicInfo->windowEventSup);
|
||||
}
|
||||
|
||||
void destroyStreamBasicInfo(SSteamOpBasicInfo* pBasicInfo) {
|
||||
destroyStreamNotifyEventSupp(&pBasicInfo->windowEventSup);
|
||||
}
|
||||
|
||||
static void streamNotifyGetEventWindowId(const SSessionKey* pSessionKey, char *buf) {
|
||||
uint64_t hash = 0;
|
||||
uint64_t ar[2];
|
||||
|
||||
ar[0] = pSessionKey->groupId;
|
||||
ar[1] = pSessionKey->win.skey;
|
||||
hash = MurmurHash3_64((char*)ar, sizeof(ar));
|
||||
buf = u64toaFastLut(hash, buf);
|
||||
}
|
||||
|
||||
#define JSON_CHECK_ADD_ITEM(obj, str, item) \
|
||||
QUERY_CHECK_CONDITION(cJSON_AddItemToObjectCS(obj, str, item), code, lino, _end, TSDB_CODE_OUT_OF_MEMORY)
|
||||
|
||||
static int32_t jsonAddColumnField(const char* colName, const SColumnInfoData* pColData, int32_t ri, cJSON* obj) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
char* temp = NULL;
|
||||
|
||||
QUERY_CHECK_NULL(colName, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pColData, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(obj, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
if (colDataIsNull_s(pColData, ri)) {
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNull());
|
||||
goto _end;
|
||||
}
|
||||
|
||||
switch (pColData->info.type) {
|
||||
case TSDB_DATA_TYPE_BOOL: {
|
||||
bool val = *(bool*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateBool(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_TINYINT: {
|
||||
int8_t val = *(int8_t*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_SMALLINT: {
|
||||
int16_t val = *(int16_t*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_INT: {
|
||||
int32_t val = *(int32_t*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_BIGINT:
|
||||
case TSDB_DATA_TYPE_TIMESTAMP: {
|
||||
int64_t val = *(int64_t*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
float val = *(float*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_DOUBLE: {
|
||||
double val = *(double*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_VARCHAR:
|
||||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
// cJSON requires null-terminated strings, but this data is not null-terminated,
|
||||
// so we need to manually copy the string and add null termination.
|
||||
const char* src = varDataVal(colDataGetVarData(pColData, ri));
|
||||
int32_t len = varDataLen(colDataGetVarData(pColData, ri));
|
||||
temp = cJSON_malloc(len + 1);
|
||||
QUERY_CHECK_NULL(temp, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
memcpy(temp, src, len);
|
||||
temp[len] = '\0';
|
||||
|
||||
cJSON* item = cJSON_CreateStringReference(temp);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, item);
|
||||
|
||||
// let the cjson object to free memory later
|
||||
item->type &= ~cJSON_IsReference;
|
||||
temp = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_UTINYINT: {
|
||||
uint8_t val = *(uint8_t*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_USMALLINT: {
|
||||
uint16_t val = *(uint16_t*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_UINT: {
|
||||
uint32_t val = *(uint32_t*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
case TSDB_DATA_TYPE_UBIGINT: {
|
||||
uint64_t val = *(uint64_t*)colDataGetNumData(pColData, ri);
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateNumber(val));
|
||||
break;
|
||||
}
|
||||
|
||||
default: {
|
||||
JSON_CHECK_ADD_ITEM(obj, colName, cJSON_CreateStringReference("<Unable to display this data type>"));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
if (temp) {
|
||||
cJSON_free(temp);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t addEventAggNotifyEvent(EStreamNotifyEventType eventType, const SSessionKey* pSessionKey,
|
||||
const SSDataBlock* pInputBlock, const SNodeList* pCondCols, int32_t ri,
|
||||
SStreamNotifyEventSupp* sup) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SNode* node = NULL;
|
||||
cJSON* event = NULL;
|
||||
cJSON* fields = NULL;
|
||||
cJSON* cond = NULL;
|
||||
SStreamNotifyEvent item = {0};
|
||||
char windowId[32];
|
||||
|
||||
QUERY_CHECK_NULL(pSessionKey, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pInputBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pInputBlock->pDataBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pCondCols, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
qDebug("add stream notify event from event window, type: %s, start: %" PRId64 ", end: %" PRId64,
|
||||
(eventType == SNOTIFY_EVENT_WINDOW_OPEN) ? "WINDOW_OPEN" : "WINDOW_CLOSE", pSessionKey->win.skey,
|
||||
pSessionKey->win.ekey);
|
||||
|
||||
event = cJSON_CreateObject();
|
||||
QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
|
||||
// add basic info
|
||||
streamNotifyGetEventWindowId(pSessionKey, windowId);
|
||||
if (eventType == SNOTIFY_EVENT_WINDOW_OPEN) {
|
||||
JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_OPEN"));
|
||||
} else if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) {
|
||||
JSON_CHECK_ADD_ITEM(event, "eventType", cJSON_CreateStringReference("WINDOW_CLOSE"));
|
||||
}
|
||||
JSON_CHECK_ADD_ITEM(event, "eventTime", cJSON_CreateNumber(taosGetTimestampMs()));
|
||||
JSON_CHECK_ADD_ITEM(event, "windowId", cJSON_CreateStringReference(windowId));
|
||||
JSON_CHECK_ADD_ITEM(event, "windowType", cJSON_CreateStringReference("Event"));
|
||||
JSON_CHECK_ADD_ITEM(event, "windowStart", cJSON_CreateNumber(pSessionKey->win.skey));
|
||||
if (eventType == SNOTIFY_EVENT_WINDOW_CLOSE) {
|
||||
JSON_CHECK_ADD_ITEM(event, "windowEnd", cJSON_CreateNumber(pSessionKey->win.ekey));
|
||||
}
|
||||
|
||||
// create fields object to store matched column values
|
||||
fields = cJSON_CreateObject();
|
||||
QUERY_CHECK_NULL(fields, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
FOREACH(node, pCondCols) {
|
||||
SColumnNode* pColDef = (SColumnNode*)node;
|
||||
SColumnInfoData* pColData = taosArrayGet(pInputBlock->pDataBlock, pColDef->slotId);
|
||||
code = jsonAddColumnField(pColDef->colName, pColData, ri, fields);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
|
||||
// add trigger condition
|
||||
cond = cJSON_CreateObject();
|
||||
QUERY_CHECK_NULL(cond, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
JSON_CHECK_ADD_ITEM(cond, "conditionIndex", cJSON_CreateNumber(0));
|
||||
JSON_CHECK_ADD_ITEM(cond, "fieldValues", fields);
|
||||
fields = NULL;
|
||||
JSON_CHECK_ADD_ITEM(event, "triggerConditions", cond);
|
||||
cond = NULL;
|
||||
|
||||
// convert json object to string value
|
||||
item.gid = pSessionKey->groupId;
|
||||
item.skey = pSessionKey->win.skey;
|
||||
item.isEnd = (eventType == SNOTIFY_EVENT_WINDOW_CLOSE);
|
||||
item.content = cJSON_PrintUnformatted(event);
|
||||
QUERY_CHECK_NULL(taosArrayPush(sup->pWindowEvents, &item), code, lino, _end, terrno);
|
||||
item.content = NULL;
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
destroyStreamWindowEvent(&item);
|
||||
if (cond != NULL) {
|
||||
cJSON_Delete(cond);
|
||||
}
|
||||
if (fields != NULL) {
|
||||
cJSON_Delete(fields);
|
||||
}
|
||||
if (event != NULL) {
|
||||
cJSON_Delete(event);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t addAggResultNotifyEvent(const SSDataBlock* pResultBlock, const SSchemaWrapper* pSchemaWrapper,
|
||||
SStreamNotifyEventSupp* sup) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SNode * node = NULL;
|
||||
cJSON* event = NULL;
|
||||
cJSON* result = NULL;
|
||||
SStreamNotifyEvent item = {0};
|
||||
SColumnInfoData* pWstartCol = NULL;
|
||||
|
||||
QUERY_CHECK_NULL(pResultBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pSchemaWrapper, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(sup, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
qDebug("add %" PRId64 " stream notify results from window agg", pResultBlock->info.rows);
|
||||
|
||||
pWstartCol = taosArrayGet(pResultBlock->pDataBlock, 0);
|
||||
for (int32_t i = 0; i< pResultBlock->info.rows; ++i) {
|
||||
event = cJSON_CreateObject();
|
||||
QUERY_CHECK_NULL(event, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
|
||||
// convert the result row into json
|
||||
result = cJSON_CreateObject();
|
||||
QUERY_CHECK_NULL(result, code, lino, _end, TSDB_CODE_OUT_OF_MEMORY);
|
||||
for (int32_t j = 0; j < pSchemaWrapper->nCols; ++j) {
|
||||
SSchema *pCol = pSchemaWrapper->pSchema + j;
|
||||
SColumnInfoData *pColData = taosArrayGet(pResultBlock->pDataBlock, pCol->colId - 1);
|
||||
code = jsonAddColumnField(pCol->name, pColData, i, result);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
}
|
||||
JSON_CHECK_ADD_ITEM(event, "result", result);
|
||||
result = NULL;
|
||||
|
||||
item.gid = pResultBlock->info.id.groupId;
|
||||
item.skey = *(uint64_t*)colDataGetNumData(pWstartCol, i);
|
||||
item.content = cJSON_PrintUnformatted(event);
|
||||
code = taosHashPut(sup->pResultHashMap, &item.gid, sizeof(item.gid) + sizeof(item.skey), &item, sizeof(item));
|
||||
TSDB_CHECK_CODE(code, lino, _end);
|
||||
item.content = NULL;
|
||||
|
||||
cJSON_Delete(event);
|
||||
event = NULL;
|
||||
}
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
destroyStreamWindowEvent(&item);
|
||||
if (result != NULL) {
|
||||
cJSON_Delete(result);
|
||||
}
|
||||
if (event != NULL) {
|
||||
cJSON_Delete(event);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t streamNotifyGetDestTableName(const SExecTaskInfo* pTaskInfo, uint64_t gid, char** pTableName) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
const SStorageAPI* pAPI = NULL;
|
||||
void* tbname = NULL;
|
||||
int32_t winCode = TSDB_CODE_SUCCESS;
|
||||
char parTbName[TSDB_TABLE_NAME_LEN];
|
||||
const SStreamTaskInfo* pStreamInfo = NULL;
|
||||
|
||||
QUERY_CHECK_NULL(pTaskInfo, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pTableName, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*pTableName = NULL;
|
||||
|
||||
pAPI = &pTaskInfo->storageAPI;
|
||||
code = pAPI->stateStore.streamStateGetParName((void*)pTaskInfo->streamInfo.pState, gid, &tbname, false, &winCode);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
if (winCode != TSDB_CODE_SUCCESS) {
|
||||
parTbName[0] = '\0';
|
||||
} else {
|
||||
tstrncpy(parTbName, tbname, sizeof(parTbName));
|
||||
}
|
||||
pAPI->stateStore.streamStateFreeVal(tbname);
|
||||
|
||||
pStreamInfo = &pTaskInfo->streamInfo;
|
||||
code = buildSinkDestTableName(parTbName, pStreamInfo->stbFullName, gid, pStreamInfo->newSubTableRule, pTableName);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t streamNotifyFillTableName(const char* tableName, const SStreamNotifyEvent* pEvent,
|
||||
const SStreamNotifyEvent* pResult, char** pVal) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
static const char* prefix = "{\"tableName\":\"";
|
||||
uint64_t prefixLen = 0;
|
||||
uint64_t nameLen = 0;
|
||||
uint64_t eventLen = 0;
|
||||
uint64_t resultLen = 0;
|
||||
uint64_t valLen = 0;
|
||||
char* val = NULL;
|
||||
char* p = NULL;
|
||||
|
||||
QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pEvent, code, lino , _end, TSDB_CODE_INVALID_PARA);
|
||||
QUERY_CHECK_NULL(pVal, code, lino , _end, TSDB_CODE_INVALID_PARA);
|
||||
|
||||
*pVal = NULL;
|
||||
prefixLen = strlen(prefix);
|
||||
nameLen = strlen(tableName);
|
||||
eventLen = strlen(pEvent->content);
|
||||
|
||||
if (pResult != NULL) {
|
||||
resultLen = strlen(pResult->content);
|
||||
valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + resultLen;
|
||||
} else {
|
||||
valLen = VARSTR_HEADER_SIZE + prefixLen + nameLen + eventLen + 1;
|
||||
}
|
||||
val = taosMemoryMalloc(valLen);
|
||||
QUERY_CHECK_NULL(val, code, lino, _end, terrno);
|
||||
varDataSetLen(val, valLen - VARSTR_HEADER_SIZE);
|
||||
|
||||
p = varDataVal(val);
|
||||
TAOS_STRNCPY(p, prefix, prefixLen);
|
||||
p += prefixLen;
|
||||
TAOS_STRNCPY(p, tableName, nameLen);
|
||||
p += nameLen;
|
||||
*(p++) = '\"';
|
||||
TAOS_STRNCPY(p, pEvent->content, eventLen);
|
||||
*p = ',';
|
||||
|
||||
if (pResult != NULL) {
|
||||
p += eventLen - 1;
|
||||
TAOS_STRNCPY(p, pResult->content, resultLen);
|
||||
*p = ',';
|
||||
}
|
||||
*pVal = val;
|
||||
val = NULL;
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
if (val != NULL) {
|
||||
taosMemoryFreeClear(val);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t buildNotifyEventBlock(const SExecTaskInfo* pTaskInfo, SStreamNotifyEventSupp* sup) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t lino = 0;
|
||||
SColumnInfoData* pEventStrCol = NULL;
|
||||
int32_t nWindowEvents = 0;
|
||||
int32_t nWindowResults = 0;
|
||||
char* val = NULL;
|
||||
|
||||
if (pTaskInfo == NULL || sup == NULL) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
QUERY_CHECK_NULL(sup->pEventBlock, code, lino, _end, TSDB_CODE_INVALID_PARA);
|
||||
blockDataCleanup(sup->pEventBlock);
|
||||
nWindowEvents = taosArrayGetSize(sup->pWindowEvents);
|
||||
nWindowResults = taosHashGetSize(sup->pResultHashMap);
|
||||
qDebug("start to build stream notify event block, nWindowEvents: %d, nWindowResults: %d", nWindowEvents,
|
||||
nWindowResults);
|
||||
if (nWindowEvents == 0) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
code = blockDataEnsureCapacity(sup->pEventBlock, nWindowEvents);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
|
||||
pEventStrCol = taosArrayGet(sup->pEventBlock->pDataBlock, NOTIFY_EVENT_STR_COLUMN_INDEX);
|
||||
QUERY_CHECK_NULL(pEventStrCol, code, lino, _end, terrno);
|
||||
|
||||
for (int32_t i = 0; i < nWindowEvents; ++i) {
|
||||
SStreamNotifyEvent* pResult = NULL;
|
||||
SStreamNotifyEvent* pEvent = taosArrayGet(sup->pWindowEvents, i);
|
||||
char* tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid));
|
||||
if (tableName == NULL) {
|
||||
code = streamNotifyGetDestTableName(pTaskInfo, pEvent->gid, &tableName);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
code = taosHashPut(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid), tableName, strlen(tableName) + 1);
|
||||
taosMemoryFreeClear(tableName);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
tableName = taosHashGet(sup->pTableNameHashMap, &pEvent->gid, sizeof(pEvent->gid));
|
||||
QUERY_CHECK_NULL(tableName, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
}
|
||||
if (pEvent->isEnd) {
|
||||
pResult = taosHashGet(sup->pResultHashMap, &pEvent->gid, sizeof(pEvent->gid) + sizeof(pEvent->skey));
|
||||
QUERY_CHECK_NULL(pResult, code, lino, _end, TSDB_CODE_INTERNAL_ERROR);
|
||||
}
|
||||
code = streamNotifyFillTableName(tableName, pEvent, pResult, &val);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
code = colDataSetVal(pEventStrCol, i, val, false);
|
||||
QUERY_CHECK_CODE(code, lino, _end);
|
||||
taosMemoryFreeClear(val);
|
||||
sup->pEventBlock->info.rows++;
|
||||
}
|
||||
|
||||
if (taosHashGetMemSize(sup->pTableNameHashMap) >= NOTIFY_EVENT_NAME_CACHE_LIMIT_MB * 1024 * 1024) {
|
||||
taosHashClear(sup->pTableNameHashMap);
|
||||
}
|
||||
|
||||
_end:
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
|
||||
}
|
||||
if (val != NULL) {
|
||||
taosMemoryFreeClear(val);
|
||||
}
|
||||
if (sup != NULL) {
|
||||
taosArrayClearEx(sup->pWindowEvents, destroyStreamWindowEvent);
|
||||
taosHashClear(sup->pResultHashMap);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -55,6 +55,7 @@ void destroyStreamIntervalSliceOperatorInfo(void* param) {
|
|||
pInfo->pOperator = NULL;
|
||||
}
|
||||
|
||||
destroyStreamBasicInfo(&pInfo->basic);
|
||||
clearGroupResInfo(&pInfo->groupResInfo);
|
||||
taosArrayDestroyP(pInfo->pUpdated, destroyFlusedPos);
|
||||
pInfo->pUpdated = NULL;
|
||||
|
@ -651,7 +652,8 @@ int32_t createStreamIntervalSliceOperatorInfo(SOperatorInfo* downstream, SPhysiN
|
|||
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
|
||||
setOperatorStreamStateFn(pOperator, streamIntervalSliceReleaseState, streamIntervalSliceReloadState);
|
||||
|
||||
initStreamBasicInfo(&pInfo->basic);
|
||||
code = initStreamBasicInfo(&pInfo->basic);
|
||||
QUERY_CHECK_CODE(code, lino, _error);
|
||||
if (downstream) {
|
||||
code = initIntervalSliceDownStream(downstream, &pInfo->streamAggSup, pPhyNode->type, pInfo->primaryTsIndex,
|
||||
&pInfo->twAggSup, &pInfo->basic, &pInfo->interval, pInfo->hasInterpoFunc);
|
||||
|
|
|
@ -150,6 +150,7 @@ void destroyStreamTimeSliceOperatorInfo(void* param) {
|
|||
&pInfo->groupResInfo);
|
||||
pInfo->pOperator = NULL;
|
||||
}
|
||||
destroyStreamBasicInfo(&pInfo->basic);
|
||||
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
||||
destroyStreamAggSupporter(&pInfo->streamAggSup);
|
||||
resetPrevAndNextWindow(pInfo->pFillSup);
|
||||
|
@ -2201,7 +2202,8 @@ int32_t createStreamTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode*
|
|||
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
|
||||
setOperatorStreamStateFn(pOperator, streamTimeSliceReleaseState, streamTimeSliceReloadState);
|
||||
|
||||
initStreamBasicInfo(&pInfo->basic);
|
||||
code = initStreamBasicInfo(&pInfo->basic);
|
||||
QUERY_CHECK_CODE(code, lino, _error);
|
||||
if (downstream) {
|
||||
code = initTimeSliceDownStream(downstream, &pInfo->streamAggSup, pOperator->operatorType, pInfo->primaryTsIndex,
|
||||
&pInfo->twAggSup, &pInfo->basic, pInfo->pFillSup);
|
||||
|
|
|
@ -771,7 +771,35 @@ bool getSumFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool funcNotSupportStringSma(SFunctionNode* pFunc) {
|
||||
SNode* pParam;
|
||||
switch (pFunc->funcType) {
|
||||
case FUNCTION_TYPE_MAX:
|
||||
case FUNCTION_TYPE_MIN:
|
||||
case FUNCTION_TYPE_SUM:
|
||||
case FUNCTION_TYPE_AVG:
|
||||
case FUNCTION_TYPE_AVG_PARTIAL:
|
||||
case FUNCTION_TYPE_PERCENTILE:
|
||||
case FUNCTION_TYPE_SPREAD:
|
||||
case FUNCTION_TYPE_SPREAD_PARTIAL:
|
||||
case FUNCTION_TYPE_SPREAD_MERGE:
|
||||
case FUNCTION_TYPE_TWA:
|
||||
case FUNCTION_TYPE_ELAPSED:
|
||||
pParam = nodesListGetNode(pFunc->pParameterList, 0);
|
||||
if (pParam && nodesIsExprNode(pParam) && (IS_VAR_DATA_TYPE(((SExprNode*)pParam)->resType.type))) {
|
||||
return true;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
EFuncDataRequired statisDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) {
|
||||
if(funcNotSupportStringSma(pFunc)) {
|
||||
return FUNC_DATA_REQUIRED_DATA_LOAD;
|
||||
}
|
||||
return FUNC_DATA_REQUIRED_SMA_LOAD;
|
||||
}
|
||||
|
||||
|
|
|
@ -99,6 +99,8 @@ const char* nodesNodeName(ENodeType type) {
|
|||
return "CountWindow";
|
||||
case QUERY_NODE_ANOMALY_WINDOW:
|
||||
return "AnomalyWindow";
|
||||
case QUERY_NODE_STREAM_NOTIFY_OPTIONS:
|
||||
return "StreamNotifyOptions";
|
||||
case QUERY_NODE_SET_OPERATOR:
|
||||
return "SetOperator";
|
||||
case QUERY_NODE_SELECT_STMT:
|
||||
|
@ -5812,6 +5814,45 @@ static int32_t jsonToStreamOptions(const SJson* pJson, void* pObj) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static const char* jkStreamNotifyOptionsAddrUrls = "AddrUrls";
|
||||
static const char* jkStreamNotifyOptionsEventType = "EventType";
|
||||
static const char* jkStreamNotifyOptionsErrorHandle = "ErrorHandle";
|
||||
static const char* jkStreamNotifyOptionsNotifyHistory = "NotifyHistory";
|
||||
|
||||
static int32_t streamNotifyOptionsToJson(const void* pObj, SJson* pJson) {
|
||||
const SStreamNotifyOptions* pNotifyOption = (const SStreamNotifyOptions*)pObj;
|
||||
int32_t code = nodeListToJson(pJson, jkStreamNotifyOptionsAddrUrls, pNotifyOption->pAddrUrls);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsEventType, pNotifyOption->eventTypes);
|
||||
}
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = tjsonAddIntegerToObject(pJson, jkStreamNotifyOptionsErrorHandle, pNotifyOption->errorHandle);
|
||||
}
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = tjsonAddBoolToObject(pJson, jkStreamNotifyOptionsNotifyHistory, pNotifyOption->notifyHistory);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t jsonToStreamNotifyOptions(const SJson* pJson, void* pObj) {
|
||||
SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pObj;
|
||||
int32_t code = jsonToNodeList(pJson, jkStreamNotifyOptionsAddrUrls, &pNotifyOption->pAddrUrls);
|
||||
int32_t val = 0;
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsEventType, &val);
|
||||
pNotifyOption->eventTypes = val;
|
||||
}
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = tjsonGetIntValue(pJson, jkStreamNotifyOptionsErrorHandle, &val);
|
||||
pNotifyOption->errorHandle = val;
|
||||
}
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
code = tjsonGetBoolValue(pJson, jkStreamNotifyOptionsNotifyHistory, &pNotifyOption->notifyHistory);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static const char* jkWhenThenWhen = "When";
|
||||
static const char* jkWhenThenThen = "Then";
|
||||
|
||||
|
@ -7207,6 +7248,7 @@ static const char* jkCreateStreamStmtOptions = "Options";
|
|||
static const char* jkCreateStreamStmtQuery = "Query";
|
||||
static const char* jkCreateStreamStmtTags = "Tags";
|
||||
static const char* jkCreateStreamStmtSubtable = "Subtable";
|
||||
static const char* jkCreateStreamStmtNotifyOptions = "NotifyOptions";
|
||||
|
||||
static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) {
|
||||
const SCreateStreamStmt* pNode = (const SCreateStreamStmt*)pObj;
|
||||
|
@ -7233,6 +7275,9 @@ static int32_t createStreamStmtToJson(const void* pObj, SJson* pJson) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddObject(pJson, jkCreateStreamStmtSubtable, nodeToJson, pNode->pSubtable);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddObject(pJson, jkCreateStreamStmtNotifyOptions, nodeToJson, pNode->pNotifyOptions);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -7262,6 +7307,9 @@ static int32_t jsonToCreateStreamStmt(const SJson* pJson, void* pObj) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = jsonToNodeObject(pJson, jkCreateStreamStmtSubtable, &pNode->pSubtable);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = jsonToNodeObject(pJson, jkCreateStreamStmtNotifyOptions, (SNode**)&pNode->pNotifyOptions);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -8029,6 +8077,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
|
|||
return countWindowNodeToJson(pObj, pJson);
|
||||
case QUERY_NODE_ANOMALY_WINDOW:
|
||||
return anomalyWindowNodeToJson(pObj, pJson);
|
||||
case QUERY_NODE_STREAM_NOTIFY_OPTIONS:
|
||||
return streamNotifyOptionsToJson(pObj, pJson);
|
||||
case QUERY_NODE_SET_OPERATOR:
|
||||
return setOperatorToJson(pObj, pJson);
|
||||
case QUERY_NODE_SELECT_STMT:
|
||||
|
@ -8402,6 +8452,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
|
|||
return jsonToCountWindowNode(pJson, pObj);
|
||||
case QUERY_NODE_ANOMALY_WINDOW:
|
||||
return jsonToAnomalyWindowNode(pJson, pObj);
|
||||
case QUERY_NODE_STREAM_NOTIFY_OPTIONS:
|
||||
return jsonToStreamNotifyOptions(pJson, pObj);
|
||||
case QUERY_NODE_SET_OPERATOR:
|
||||
return jsonToSetOperator(pJson, pObj);
|
||||
case QUERY_NODE_SELECT_STMT:
|
||||
|
|
|
@ -467,6 +467,9 @@ int32_t nodesMakeNode(ENodeType type, SNode** ppNodeOut) {
|
|||
case QUERY_NODE_WINDOW_OFFSET:
|
||||
code = makeNode(type, sizeof(SWindowOffsetNode), &pNode);
|
||||
break;
|
||||
case QUERY_NODE_STREAM_NOTIFY_OPTIONS:
|
||||
code = makeNode(type, sizeof(SStreamNotifyOptions), &pNode);
|
||||
break;
|
||||
case QUERY_NODE_SET_OPERATOR:
|
||||
code = makeNode(type, sizeof(SSetOperator), &pNode);
|
||||
break;
|
||||
|
@ -1267,6 +1270,11 @@ void nodesDestroyNode(SNode* pNode) {
|
|||
nodesDestroyNode(pAround->pTimepoint);
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_STREAM_NOTIFY_OPTIONS: {
|
||||
SStreamNotifyOptions* pNotifyOptions = (SStreamNotifyOptions*)pNode;
|
||||
nodesDestroyList(pNotifyOptions->pAddrUrls);
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_SET_OPERATOR: {
|
||||
SSetOperator* pStmt = (SSetOperator*)pNode;
|
||||
nodesDestroyList(pStmt->pProjectionList);
|
||||
|
@ -1479,6 +1487,7 @@ void nodesDestroyNode(SNode* pNode) {
|
|||
nodesDestroyNode(pStmt->pQuery);
|
||||
nodesDestroyList(pStmt->pTags);
|
||||
nodesDestroyNode(pStmt->pSubtable);
|
||||
nodesDestroyNode((SNode*)pStmt->pNotifyOptions);
|
||||
tFreeSCMCreateStreamReq(pStmt->pReq);
|
||||
taosMemoryFreeClear(pStmt->pReq);
|
||||
break;
|
||||
|
|
|
@ -296,8 +296,12 @@ SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, con
|
|||
SNode* createStreamOptions(SAstCreateContext* pCxt);
|
||||
SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptionsSetFlag setflag, SToken* pToken,
|
||||
SNode* pNode);
|
||||
SNode* createStreamNotifyOptions(SAstCreateContext *pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes);
|
||||
SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag,
|
||||
SToken* pToken);
|
||||
SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable,
|
||||
SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols);
|
||||
SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols,
|
||||
SNode* pNotifyOptions);
|
||||
SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName);
|
||||
SNode* createPauseStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pStreamName);
|
||||
SNode* createResumeStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, bool ignoreUntreated, SToken* pStreamName);
|
||||
|
|
|
@ -785,7 +785,7 @@ full_view_name(A) ::= db_name(B) NK_DOT view_name(C).
|
|||
/************************************************ create/drop stream **************************************************/
|
||||
cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A) stream_options(B) INTO
|
||||
full_table_name(C) col_list_opt(H) tag_def_or_ref_opt(F) subtable_opt(G)
|
||||
AS query_or_subquery(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H); }
|
||||
AS query_or_subquery(D) notify_opt(I). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D, H, I); }
|
||||
cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); }
|
||||
cmd ::= PAUSE STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createPauseStreamStmt(pCxt, A, &B); }
|
||||
cmd ::= RESUME STREAM exists_opt(A) ignore_opt(C) stream_name(B). { pCxt->pRootNode = createResumeStreamStmt(pCxt, A, C, &B); }
|
||||
|
@ -832,6 +832,26 @@ subtable_opt(A) ::= SUBTABLE NK_LP expression(B) NK_RP.
|
|||
ignore_opt(A) ::= . { A = false; }
|
||||
ignore_opt(A) ::= IGNORE UNTREATED. { A = true; }
|
||||
|
||||
notify_opt(A) ::= . { A = NULL; }
|
||||
notify_opt(A) ::= notify_def(B). { A = B; }
|
||||
|
||||
notify_def(A) ::= NOTIFY NK_LP url_def_list(B) NK_RP ON NK_LP event_def_list(C) NK_RP. { A = createStreamNotifyOptions(pCxt, B, C); }
|
||||
notify_def(A) ::= notify_def(B) ON_FAILURE DROP(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); }
|
||||
notify_def(A) ::= notify_def(B) ON_FAILURE PAUSE(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_ERROR_HANDLE_SET, &C); }
|
||||
notify_def(A) ::= notify_def(B) NOTIFY_HISTORY NK_INTEGER(C). { A = setStreamNotifyOptions(pCxt, B, SNOTIFY_OPT_NOTIFY_HISTORY_SET, &C); }
|
||||
|
||||
%type url_def_list { SNodeList* }
|
||||
%destructor url_def_list { nodesDestroyList($$); }
|
||||
url_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); }
|
||||
url_def_list(A) ::= url_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); }
|
||||
|
||||
%type event_def_list { SNodeList* }
|
||||
%destructor event_def_list { nodesDestroyList($$); }
|
||||
event_def_list(A) ::= NK_STRING(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B)); }
|
||||
event_def_list(A) ::= event_def_list(B) NK_COMMA NK_STRING(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)); }
|
||||
|
||||
|
||||
|
||||
/************************************************ kill connection/query ***********************************************/
|
||||
cmd ::= KILL CONNECTION NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &A); }
|
||||
cmd ::= KILL QUERY NK_STRING(A). { pCxt->pRootNode = createKillQueryStmt(pCxt, &A); }
|
||||
|
|
|
@ -1526,8 +1526,8 @@ SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhe
|
|||
pCaseWhen->pCase = pCase;
|
||||
pCaseWhen->pWhenThenList = pWhenThenList;
|
||||
pCaseWhen->pElse = pElse;
|
||||
pCaseWhen->tz = pCxt->pQueryCxt->timezone;
|
||||
pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt;
|
||||
pCaseWhen->tz = pCxt->pQueryCxt->timezone;
|
||||
pCaseWhen->charsetCxt = pCxt->pQueryCxt->charsetCxt;
|
||||
return (SNode*)pCaseWhen;
|
||||
_err:
|
||||
nodesDestroyNode(pCase);
|
||||
|
@ -3657,8 +3657,115 @@ SNode* setStreamOptions(SAstCreateContext* pCxt, SNode* pOptions, EStreamOptions
|
|||
return pOptions;
|
||||
}
|
||||
|
||||
static bool validateNotifyUrl(const char* url) {
|
||||
const char* prefix[] = {"http://", "https://", "ws://", "wss://"};
|
||||
const char* host = NULL;
|
||||
|
||||
if (!url || *url == '\0') return false;
|
||||
|
||||
for (int32_t i = 0; i < ARRAY_SIZE(prefix); ++i) {
|
||||
if (strncasecmp(url, prefix[i], strlen(prefix[i])) == 0) {
|
||||
host = url + strlen(prefix[i]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return (host != NULL) && (*host != '\0') && (*host != '/');
|
||||
}
|
||||
|
||||
SNode* createStreamNotifyOptions(SAstCreateContext* pCxt, SNodeList* pAddrUrls, SNodeList* pEventTypes) {
|
||||
SNode* pNode = NULL;
|
||||
EStreamNotifyEventType eventTypes = 0;
|
||||
const char* eWindowOpenStr = "WINDOW_OPEN";
|
||||
const char* eWindowCloseStr = "WINDOW_CLOSE";
|
||||
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
|
||||
if (LIST_LENGTH(pAddrUrls) == 0) {
|
||||
pCxt->errCode =
|
||||
generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "notification address cannot be empty");
|
||||
goto _err;
|
||||
}
|
||||
|
||||
FOREACH(pNode, pAddrUrls) {
|
||||
char *url = ((SValueNode*)pNode)->literal;
|
||||
if (strlen(url) >= TSDB_STREAM_NOTIFY_URL_LEN) {
|
||||
pCxt->errCode =
|
||||
generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
|
||||
"notification address \"%s\" exceed maximum length %d", url, TSDB_STREAM_NOTIFY_URL_LEN);
|
||||
goto _err;
|
||||
}
|
||||
if (!validateNotifyUrl(url)) {
|
||||
pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
|
||||
"invalid notification address \"%s\"", url);
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
if (LIST_LENGTH(pEventTypes) == 0) {
|
||||
pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
|
||||
"event types must be specified for notification");
|
||||
goto _err;
|
||||
}
|
||||
|
||||
FOREACH(pNode, pEventTypes) {
|
||||
char *eventStr = ((SValueNode *)pNode)->literal;
|
||||
if (strncasecmp(eventStr, eWindowOpenStr, strlen(eWindowOpenStr) + 1) == 0) {
|
||||
BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_OPEN);
|
||||
} else if (strncasecmp(eventStr, eWindowCloseStr, strlen(eWindowCloseStr) + 1) == 0) {
|
||||
BIT_FLAG_SET_MASK(eventTypes, SNOTIFY_EVENT_WINDOW_CLOSE);
|
||||
} else {
|
||||
pCxt->errCode = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
|
||||
"invalid event type '%s' for notification", eventStr);
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
|
||||
SStreamNotifyOptions* pNotifyOptions = NULL;
|
||||
pCxt->errCode = nodesMakeNode(QUERY_NODE_STREAM_NOTIFY_OPTIONS, (SNode**)&pNotifyOptions);
|
||||
CHECK_MAKE_NODE(pNotifyOptions);
|
||||
pNotifyOptions->pAddrUrls = pAddrUrls;
|
||||
pNotifyOptions->eventTypes = eventTypes;
|
||||
pNotifyOptions->errorHandle = SNOTIFY_ERROR_HANDLE_PAUSE;
|
||||
pNotifyOptions->notifyHistory = false;
|
||||
nodesDestroyList(pEventTypes);
|
||||
return (SNode*)pNotifyOptions;
|
||||
_err:
|
||||
nodesDestroyList(pAddrUrls);
|
||||
nodesDestroyList(pEventTypes);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SNode* setStreamNotifyOptions(SAstCreateContext* pCxt, SNode* pNode, EStreamNotifyOptionSetFlag setFlag,
|
||||
SToken* pToken) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
|
||||
SStreamNotifyOptions* pNotifyOption = (SStreamNotifyOptions*)pNode;
|
||||
if (BIT_FLAG_TEST_MASK(pNotifyOption->setFlag, setFlag)) {
|
||||
pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR,
|
||||
"stream notify options each item can only be set once");
|
||||
goto _err;
|
||||
}
|
||||
switch (setFlag) {
|
||||
case SNOTIFY_OPT_ERROR_HANDLE_SET:
|
||||
pNotifyOption->errorHandle = (pToken->type == TK_DROP) ? SNOTIFY_ERROR_HANDLE_DROP : SNOTIFY_ERROR_HANDLE_PAUSE;
|
||||
break;
|
||||
case SNOTIFY_OPT_NOTIFY_HISTORY_SET:
|
||||
pNotifyOption->notifyHistory = taosStr2Int8(pToken->z, NULL, 10);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
BIT_FLAG_SET_MASK(pNotifyOption->setFlag, setFlag);
|
||||
return pNode;
|
||||
_err:
|
||||
nodesDestroyNode(pNode);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pStreamName, SNode* pRealTable,
|
||||
SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols) {
|
||||
SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery, SNodeList* pCols,
|
||||
SNode* pNotifyOptions) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
CHECK_NAME(checkStreamName(pCxt, pStreamName));
|
||||
SCreateStreamStmt* pStmt = NULL;
|
||||
|
@ -3674,6 +3781,7 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken
|
|||
pStmt->pTags = pTags;
|
||||
pStmt->pSubtable = pSubtable;
|
||||
pStmt->pCols = pCols;
|
||||
pStmt->pNotifyOptions = (SStreamNotifyOptions*)pNotifyOptions;
|
||||
return (SNode*)pStmt;
|
||||
_err:
|
||||
nodesDestroyNode(pRealTable);
|
||||
|
@ -3682,6 +3790,7 @@ _err:
|
|||
nodesDestroyList(pTags);
|
||||
nodesDestroyNode(pSubtable);
|
||||
nodesDestroyList(pCols);
|
||||
nodesDestroyNode(pNotifyOptions);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -2751,6 +2751,9 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS
|
|||
if (TSDB_CODE_SUCCESS == code && hasData) {
|
||||
code = parseInsertTableClause(pCxt, pStmt, &token);
|
||||
}
|
||||
if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code && pCxt->preCtbname) {
|
||||
code = TSDB_CODE_TSC_STMT_TBNAME_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code && !pCxt->missCache) {
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue