Merge branch '3.0' into fix/TS-4937-2
This commit is contained in:
commit
d18caf6606
|
@ -163,4 +163,12 @@ geos_c.h
|
|||
source/libs/parser/src/sql.c
|
||||
include/common/ttokenauto.h
|
||||
!packaging/smokeTest/pytest_require.txt
|
||||
tdengine-test-dir
|
||||
tdengine-test-dir/
|
||||
localtime.c
|
||||
private.h
|
||||
strftime.c
|
||||
tzdir.h
|
||||
tzfile.h
|
||||
coverage.info
|
||||
taos
|
||||
taosd
|
||||
|
|
|
@ -131,7 +131,7 @@ IF(TD_WINDOWS)
|
|||
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
||||
ENDIF()
|
||||
|
||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
|
||||
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO /FORCE:MULTIPLE")
|
||||
|
||||
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
|
||||
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# libuv
|
||||
ExternalProject_Add(libuv
|
||||
GIT_REPOSITORY https://github.com/libuv/libuv.git
|
||||
GIT_TAG v1.48.0
|
||||
GIT_TAG v1.49.2
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||
BINARY_DIR "${TD_CONTRIB_DIR}/libuv"
|
||||
CONFIGURE_COMMAND ""
|
||||
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -112,14 +112,14 @@ Fill in the example data from the MQTT message body in **Message Body**.
|
|||
|
||||
JSON data supports JSONObject or JSONArray, and the json parser can parse the following data:
|
||||
|
||||
``` json
|
||||
```json
|
||||
{"id": 1, "message": "hello-word"}
|
||||
{"id": 2, "message": "hello-word"}
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
``` json
|
||||
```json
|
||||
[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]
|
||||
```
|
||||
|
||||
|
|
|
@ -109,7 +109,7 @@ In addition, the [Kerberos](https://web.mit.edu/kerberos/) authentication servic
|
|||
|
||||
After configuration, you can use the [kcat](https://github.com/edenhill/kcat) tool to verify Kafka topic consumption:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
kcat <topic> \
|
||||
-b <kafka-server:port> \
|
||||
-G kcat \
|
||||
|
@ -171,14 +171,14 @@ Enter sample data from the Kafka message body in **Message Body**.
|
|||
|
||||
JSON data supports JSONObject or JSONArray, and the following data can be parsed using a JSON parser:
|
||||
|
||||
``` json
|
||||
```json
|
||||
{"id": 1, "message": "hello-word"}
|
||||
{"id": 2, "message": "hello-word"}
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
``` json
|
||||
```json
|
||||
[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]
|
||||
```
|
||||
|
||||
|
|
|
@ -83,7 +83,7 @@ Parsing is the process of parsing unstructured strings into structured data. The
|
|||
|
||||
JSON parsing supports JSONObject or JSONArray. The following JSON sample data can automatically parse fields: `groupid`, `voltage`, `current`, `ts`, `inuse`, `location`.
|
||||
|
||||
``` json
|
||||
```json
|
||||
{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}
|
||||
{"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"}
|
||||
{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}
|
||||
|
@ -91,7 +91,7 @@ JSON parsing supports JSONObject or JSONArray. The following JSON sample data ca
|
|||
|
||||
Or
|
||||
|
||||
``` json
|
||||
```json
|
||||
[{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"},
|
||||
{"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"},
|
||||
{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}]
|
||||
|
@ -101,7 +101,7 @@ Subsequent examples will only explain using JSONObject.
|
|||
|
||||
The following nested JSON data can automatically parse fields `groupid`, `data_voltage`, `data_current`, `ts`, `inuse`, `location_0_province`, `location_0_city`, `location_0_datun`, and you can also choose which fields to parse and set aliases for the parsed fields.
|
||||
|
||||
``` json
|
||||
```json
|
||||
{"groupid": 170001, "data": { "voltage": "221V", "current": 12.3 }, "ts": "2023-12-18T22:12:00", "inuse": true, "location": [{"province": "beijing", "city":"chaoyang", "street": "datun"}]}
|
||||
```
|
||||
|
||||
|
@ -114,7 +114,7 @@ The following nested JSON data can automatically parse fields `groupid`, `data_v
|
|||
|
||||
You can use **named capture groups** in regular expressions to extract multiple fields from any string (text) field. As shown in the figure, extract fields such as access IP, timestamp, and accessed URL from nginx logs.
|
||||
|
||||
``` re
|
||||
```regex
|
||||
(?<ip>\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b)\s-\s-\s\[(?<ts>\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2}\s\+\d{4})\]\s"(?<method>[A-Z]+)\s(?<url>[^\s"]+).*(?<status>\d{3})\s(?<length>\d+)
|
||||
```
|
||||
|
||||
|
@ -133,7 +133,7 @@ Custom rhai syntax scripts for parsing input data (refer to `https://rhai.rs/boo
|
|||
|
||||
For example, for data reporting three-phase voltage values, which are entered into three subtables respectively, such data needs to be parsed
|
||||
|
||||
``` json
|
||||
```json
|
||||
{
|
||||
"ts": "2024-06-27 18:00:00",
|
||||
"voltage": "220.1,220.3,221.1",
|
||||
|
@ -164,7 +164,7 @@ The final parsing result is shown below:
|
|||
|
||||
The parsed data may still not meet the data requirements of the target table. For example, the original data collected by a smart meter is as follows (in json format):
|
||||
|
||||
``` json
|
||||
```json
|
||||
{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}
|
||||
{"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"}
|
||||
{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}
|
||||
|
|
|
@ -83,14 +83,14 @@ Next, create a supertable (STABLE) named `meters`, whose table structure include
|
|||
|
||||
Create Database
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
|
||||
--data 'CREATE DATABASE IF NOT EXISTS power'
|
||||
```
|
||||
|
||||
Create Table, specify the database as `power` in the URL
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql/power' \
|
||||
--data 'CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))'
|
||||
```
|
||||
|
@ -167,7 +167,7 @@ NOW is an internal system function, defaulting to the current time of the client
|
|||
|
||||
Write data
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
|
||||
--data 'INSERT INTO power.d1001 USING power.meters TAGS(2,'\''California.SanFrancisco'\'') VALUES (NOW + 1a, 10.30000, 219, 0.31000) (NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000) power.d1002 USING power.meters TAGS(3, '\''California.SanFrancisco'\'') VALUES (NOW + 1a, 10.30000, 218, 0.25000)'
|
||||
```
|
||||
|
@ -247,7 +247,7 @@ Rust connector also supports using **serde** for deserializing to get structured
|
|||
|
||||
Query Data
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
|
||||
--data 'SELECT ts, current, location FROM power.meters limit 100'
|
||||
```
|
||||
|
@ -329,7 +329,7 @@ Below are code examples of setting reqId to execute SQL in various language conn
|
|||
|
||||
Query data, specify reqId as 3
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql?req_id=3' \
|
||||
--data 'SELECT ts, current, location FROM power.meters limit 1'
|
||||
```
|
||||
|
|
|
@ -273,19 +273,19 @@ To better operate the above data structures, some convenience functions are prov
|
|||
|
||||
Create table:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
|
||||
```
|
||||
|
||||
Create custom function:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
|
||||
```
|
||||
|
||||
Use custom function:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
select max_vol(vol1, vol2, vol3, deviceid) from battery;
|
||||
```
|
||||
|
||||
|
@ -334,7 +334,7 @@ When developing UDFs in Python, you need to implement the specified interface fu
|
|||
|
||||
The interface for scalar functions is as follows.
|
||||
|
||||
```Python
|
||||
```python
|
||||
def process(input: datablock) -> tuple[output_type]:
|
||||
```
|
||||
|
||||
|
@ -347,7 +347,7 @@ The main parameters are as follows:
|
|||
|
||||
The interface for aggregate functions is as follows.
|
||||
|
||||
```Python
|
||||
```python
|
||||
def start() -> bytes:
|
||||
def reduce(inputs: datablock, buf: bytes) -> bytes
|
||||
def finish(buf: bytes) -> output_type:
|
||||
|
@ -365,7 +365,7 @@ Finally, when all row data blocks have been processed, the finish function is ca
|
|||
|
||||
The interfaces for initialization and destruction are as follows.
|
||||
|
||||
```Python
|
||||
```python
|
||||
def init()
|
||||
def destroy()
|
||||
```
|
||||
|
@ -381,7 +381,7 @@ Parameter description:
|
|||
|
||||
The template for developing scalar functions in Python is as follows.
|
||||
|
||||
```Python
|
||||
```python
|
||||
def init():
|
||||
# initialization
|
||||
def destroy():
|
||||
|
@ -393,7 +393,7 @@ def process(input: datablock) -> tuple[output_type]:
|
|||
|
||||
The template for developing aggregate functions in Python is as follows.
|
||||
|
||||
```Python
|
||||
```python
|
||||
def init():
|
||||
#initialization
|
||||
def destroy():
|
||||
|
@ -828,7 +828,7 @@ Through this example, we learned how to define aggregate functions and print cus
|
|||
<details>
|
||||
<summary>pybitand.py</summary>
|
||||
|
||||
```Python
|
||||
```python
|
||||
{{#include tests/script/sh/pybitand.py}}
|
||||
```
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ TDengine is designed for various writing scenarios, and many of these scenarios
|
|||
|
||||
### Syntax
|
||||
|
||||
```SQL
|
||||
```sql
|
||||
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
||||
SHOW COMPACTS [compact_id];
|
||||
KILL COMPACT compact_id;
|
||||
|
@ -41,7 +41,7 @@ KILL COMPACT compact_id;
|
|||
|
||||
When one or more nodes in a multi-replica cluster restart due to upgrades or other reasons, it may lead to an imbalance in the load among the various dnodes in the cluster. In extreme cases, all vgroup leaders may be located on the same dnode. To solve this problem, you can use the following commands, which were first released in version 3.0.4.0. It is recommended to use the latest version as much as possible.
|
||||
|
||||
```SQL
|
||||
```sql
|
||||
balance vgroup leader; # Rebalance all vgroup leaders
|
||||
balance vgroup leader on <vgroup_id>; # Rebalance a vgroup leader
|
||||
balance vgroup leader database <database_name>; # Rebalance all vgroup leaders within a database
|
||||
|
|
|
@ -121,7 +121,7 @@ The cost of using object storage services is related to the amount of data store
|
|||
|
||||
When the TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the related data files will be split into multiple file blocks, each with a default size of 512 MB (`s3_chunkpages * tsdb_pagesize`). Except for the last file block, which is retained on the local file system, the rest of the file blocks are uploaded to the object storage service.
|
||||
|
||||
```math
|
||||
```text
|
||||
Upload Count = Data File Size / (s3_chunkpages * tsdb_pagesize) - 1
|
||||
```
|
||||
|
||||
|
@ -135,7 +135,7 @@ During query operations, if data in object storage needs to be accessed, TSDB do
|
|||
|
||||
Adjacent multiple data pages are downloaded as a single data block from object storage to reduce the number of downloads. The size of each data page is specified by the `tsdb_pagesize` parameter when creating the database, with a default of 4 KB.
|
||||
|
||||
```math
|
||||
```text
|
||||
Download Count = Number of Data Blocks Needed for Query - Number of Cached Data Blocks
|
||||
```
|
||||
|
||||
|
@ -155,7 +155,7 @@ For deployment methods, please refer to the [Flexify](https://azuremarketplace.m
|
|||
|
||||
In the configuration file /etc/taos/taos.cfg, add parameters for S3 access:
|
||||
|
||||
```cfg
|
||||
```text
|
||||
s3EndPoint http //20.191.157.23,http://20.191.157.24,http://20.191.157.25
|
||||
s3AccessKey FLIOMMNL0:uhRNdeZMLD4wo,ABCIOMMN:uhRNdeZMD4wog,DEFOMMNL049ba:uhRNdeZMLD4wogXd
|
||||
s3BucketName td-test
|
||||
|
|
|
@ -18,14 +18,14 @@ create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
|||
The parameters are explained as follows.
|
||||
|
||||
- user_name: Up to 23 B long.
|
||||
- password: Up to 128 B long, valid characters include letters and numbers as well as special characters other than single and double quotes, apostrophes, backslashes, and spaces, and it cannot be empty.
|
||||
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
||||
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information.
|
||||
- createdb: Whether the user can create databases. 1 means they can create databases, 0 means they cannot. The default value is 0. // Supported starting from TDengine Enterprise version 3.3.2.0
|
||||
|
||||
The following SQL can create a user named test with the password 123456 who can view system information.
|
||||
The following SQL can create a user named test with the password abc123!@# who can view system information.
|
||||
|
||||
```sql
|
||||
create user test pass '123456' sysinfo 1
|
||||
create user test pass 'abc123!@#' sysinfo 1
|
||||
```
|
||||
|
||||
### Viewing Users
|
||||
|
|
|
@ -140,7 +140,7 @@ Finally, click the "Create" button at the bottom left to save the rule.
|
|||
|
||||
## Write a Mock Test Program
|
||||
|
||||
```javascript
|
||||
```js
|
||||
{{#include docs/examples/other/mock.js}}
|
||||
```
|
||||
|
||||
|
|
|
@ -95,7 +95,7 @@ curl http://localhost:8083/connectors
|
|||
|
||||
If all components have started successfully, the following output will be displayed:
|
||||
|
||||
```txt
|
||||
```text
|
||||
[]
|
||||
```
|
||||
|
||||
|
@ -181,7 +181,7 @@ If the above command is executed successfully, the following output will be disp
|
|||
|
||||
Prepare a text file with test data, content as follows:
|
||||
|
||||
```txt title="test-data.txt"
|
||||
```text title="test-data.txt"
|
||||
meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
|
||||
meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
|
||||
meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
|
||||
|
@ -303,7 +303,7 @@ kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --t
|
|||
|
||||
Output:
|
||||
|
||||
```txt
|
||||
```text
|
||||
......
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
|
||||
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
|
||||
|
|
|
@ -60,7 +60,7 @@ Click `Save & Test` to test, if successful, it will prompt: `TDengine Data sourc
|
|||
|
||||
For users using Grafana version 7.x or configuring with [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/), you can use the installation script on the Grafana server to automatically install the plugin and add the data source Provisioning configuration file.
|
||||
|
||||
```sh
|
||||
```shell
|
||||
bash -c "$(curl -fsSL \
|
||||
https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
|
||||
-a http://localhost:6041 \
|
||||
|
@ -77,7 +77,7 @@ Save the script and execute `./install.sh --help` to view detailed help document
|
|||
|
||||
Use the [`grafana-cli` command line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to install the plugin [installation](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation).
|
||||
|
||||
```bash
|
||||
```shell
|
||||
grafana-cli plugins install tdengine-datasource
|
||||
# with sudo
|
||||
sudo -u grafana grafana-cli plugins install tdengine-datasource
|
||||
|
@ -85,7 +85,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource
|
|||
|
||||
Alternatively, download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) to your local machine and unzip it into the Grafana plugins directory. Example command line download is as follows:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
GF_VERSION=3.5.1
|
||||
# from GitHub
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
|
||||
|
@ -95,13 +95,13 @@ wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tden
|
|||
|
||||
For CentOS 7.2 operating system, unzip the plugin package into the /var/lib/grafana/plugins directory and restart Grafana.
|
||||
|
||||
```bash
|
||||
```shell
|
||||
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
|
||||
```
|
||||
|
||||
If Grafana is running in a Docker environment, you can use the following environment variable to set up automatic installation of the TDengine data source plugin:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
GF_INSTALL_PLUGINS=tdengine-datasource
|
||||
```
|
||||
|
||||
|
@ -120,7 +120,7 @@ Click `Save & Test` to test, if successful, it will prompt: `TDengine Data sourc
|
|||
|
||||
Refer to [Grafana containerized installation instructions](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). Use the following command to start a container and automatically install the TDengine plugin:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
docker run -d \
|
||||
-p 3000:3000 \
|
||||
--name=grafana \
|
|
@ -28,68 +28,70 @@ After modifying configuration file parameters, you need to restart the *taosd* s
|
|||
|
||||
### Connection Related
|
||||
|
||||
|Parameter Name |Supported Version |Description|
|
||||
|-----------------------|-------------------------|------------|
|
||||
|firstEp | |Endpoint of the first dnode in the cluster that taosd actively connects to at startup, default value localhost:6030|
|
||||
|secondEp | |Endpoint of the second dnode in the cluster that taosd tries to connect to if the firstEp is unreachable, no default value|
|
||||
|fqdn | |The service address that taosd listens on, default is the first hostname configured on the server|
|
||||
|serverPort | |The port that taosd listens on, default value 6030|
|
||||
|compressMsgSize | |Whether to compress RPC messages; -1: do not compress any messages; 0: compress all messages; N (N>0): only compress messages larger than N bytes; default value -1|
|
||||
|shellActivityTimer | |Duration in seconds for the client to send heartbeat to mnode, range 1-120, default value 3 |
|
||||
|numOfRpcSessions | |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||
|numOfRpcThreads | |Number of threads for receiving and sending RPC data, range 1-50, default value is half of the CPU cores|
|
||||
|numOfTaskQueueThreads | |Number of threads for client to process RPC messages, range 4-16, default value is half of the CPU cores|
|
||||
|rpcQueueMemoryAllowed | |Maximum memory allowed for received RPC messages in dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory |
|
||||
|resolveFQDNRetryTime | Cancelled after 3.x |Number of retries when FQDN resolution fails|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|firstEp | |Not supported |Endpoint of the first dnode in the cluster that taosd actively connects to at startup, default value localhost:6030|
|
||||
|secondEp | |Not supported |Endpoint of the second dnode in the cluster that taosd tries to connect to if the firstEp is unreachable, no default value|
|
||||
|fqdn | |Not supported |The service address that taosd listens on, default is the first hostname configured on the server|
|
||||
|serverPort | |Not supported |The port that taosd listens on, default value 6030|
|
||||
|compressMsgSize | |Supported, effective after restart|Whether to compress RPC messages; -1: do not compress any messages; 0: compress all messages; N (N>0): only compress messages larger than N bytes; default value -1|
|
||||
|shellActivityTimer | |Supported, effective immediately |Duration in seconds for the client to send heartbeat to mnode, range 1-120, default value 3 |
|
||||
|numOfRpcSessions | |Supported, effective after restart|Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||
|numOfRpcThreads | |Supported, effective after restart|Number of threads for receiving and sending RPC data, range 1-50, default value is half of the CPU cores|
|
||||
|numOfTaskQueueThreads | |Supported, effective after restart|Number of threads for client to process RPC messages, range 4-16, default value is half of the CPU cores|
|
||||
|rpcQueueMemoryAllowed | |Supported, effective immediately |Maximum memory allowed for received RPC messages in dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory |
|
||||
|resolveFQDNRetryTime | Cancelled after 3.x |Not supported |Number of retries when FQDN resolution fails|
|
||||
|timeToGetAvailableConn | Cancelled after 3.3.4.x |Maximum waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||
|maxShellConns | Cancelled after 3.x |Maximum number of connections allowed|
|
||||
|maxRetryWaitTime | |Maximum timeout for reconnection, default value is 10s|
|
||||
|shareConnLimit |Added in 3.3.4.0 |Number of requests a connection can share, range 1-512, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0 |Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
||||
|maxShellConns | Cancelled after 3.x |Supported, effective after restart|Maximum number of connections allowed|
|
||||
|maxRetryWaitTime | |Supported, effective after restart|Maximum timeout for reconnection, default value is 10s|
|
||||
|shareConnLimit |Added in 3.3.4.0 |Supported, effective after restart|Number of requests a connection can share, range 1-512, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0 |Supported, effective after restart|Minimum timeout for a single request, range 64-604800, in seconds, default value 900|
|
||||
|
||||
### Monitoring Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|-----------------------|----------|-|
|
||||
|monitor | |Whether to collect and report monitoring data, 0: off; 1: on; default value 0|
|
||||
|monitorFqdn | |The FQDN of the server where the taosKeeper service is located, default value none|
|
||||
|monitorPort | |The port number listened to by the taosKeeper service, default value 6043|
|
||||
|monitorInterval | |The time interval for recording system parameters (CPU/memory) in the monitoring database, in seconds, range 1-200000, default value 30|
|
||||
|monitorMaxLogs | |Number of cached logs pending report|
|
||||
|monitorComp | |Whether to use compression when reporting monitoring logs|
|
||||
|monitorLogProtocol | |Whether to print monitoring logs|
|
||||
|monitorForceV2 | |Whether to use V2 protocol for reporting|
|
||||
|telemetryReporting | |Whether to upload telemetry, 0: do not upload, 1: upload, default value 1|
|
||||
|telemetryServer | |Telemetry server address|
|
||||
|telemetryPort | |Telemetry server port number|
|
||||
|telemetryInterval | |Telemetry upload interval, in seconds, default 43200|
|
||||
|crashReporting | |Whether to upload crash information; 0: do not upload, 1: upload; default value 1|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|monitor | |Supported, effective immediately |Whether to collect and report monitoring data, 0: off; 1: on; default value 0|
|
||||
|monitorFqdn | |Supported, effective after restart|The FQDN of the server where the taosKeeper service is located, default value none|
|
||||
|monitorPort | |Supported, effective after restart|The port number listened to by the taosKeeper service, default value 6043|
|
||||
|monitorInterval | |Supported, effective immediately |The time interval for recording system parameters (CPU/memory) in the monitoring database, in seconds, range 1-200000, default value 30|
|
||||
|monitorMaxLogs | |Supported, effective immediately |Number of cached logs pending report|
|
||||
|monitorComp | |Supported, effective after restart|Whether to use compression when reporting monitoring logs|
|
||||
|monitorLogProtocol | |Supported, effective immediately |Whether to print monitoring logs|
|
||||
|monitorForceV2 | |Supported, effective immediately |Whether to use V2 protocol for reporting|
|
||||
|telemetryReporting | |Supported, effective immediately |Whether to upload telemetry, 0: do not upload, 1: upload, default value 1|
|
||||
|telemetryServer | |Not supported |Telemetry server address|
|
||||
|telemetryPort | |Not supported |Telemetry server port number|
|
||||
|telemetryInterval | |Supported, effective immediately |Telemetry upload interval, in seconds, default 43200|
|
||||
|crashReporting | |Supported, effective immediately |Whether to upload crash information; 0: do not upload, 1: upload; default value 1|
|
||||
|
||||
### Query Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|------------------------|----------|-|
|
||||
|countAlwaysReturnValue | |Whether count/hyperloglog functions return a value when input data is empty or NULL; 0: return empty row, 1: return; default value 1; When this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; Note that this parameter should be consistent between client and server|
|
||||
|tagFilterCache | |Whether to cache tag filter results|
|
||||
|maxNumOfDistinctRes | |Maximum number of distinct results allowed to return, default value 100,000, maximum allowed value 100 million|
|
||||
|queryBufferSize | |Not effective yet|
|
||||
|queryRspPolicy | |Query response strategy|
|
||||
|filterScalarMode | |Force scalar filter mode, 0: off; 1: on, default value 0|
|
||||
|queryPlannerTrace | |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables| |Internal parameter, concurrency number of the query plan|
|
||||
|queryRsmaTolerance | |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
||||
|enableQueryHb | |Internal parameter, whether to send query heartbeat messages|
|
||||
|pqSortMemThreshold | |Internal parameter, memory threshold for sorting|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|countAlwaysReturnValue | |Supported, effective immediately |Whether count/hyperloglog functions return a value when input data is empty or NULL; 0: return empty row, 1: return; default value 1; When this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; Note that this parameter should be consistent between client and server|
|
||||
|tagFilterCache | |Not supported |Whether to cache tag filter results|
|
||||
|queryBufferSize | |Supported, effective after restart|Not effective yet|
|
||||
|queryRspPolicy | |Supported, effective immediately |Query response strategy|
|
||||
|queryUseMemoryPool | |Not supported |Whether query will use memory pool to manage memory, default value: 1 (on); 0: off, 1: on|
|
||||
|minReservedMemorySize | |Not supported |The minimum reserved system available memory size, all memory except reserved can be used for queries, unit: MB, default reserved size is 20% of system physical memory, value range 1024-1000000000|
|
||||
|singleQueryMaxMemorySize| |Not supported |The memory limit that a single query can use on a single node (dnode), exceeding this limit will return an error, unit: MB, default value: 0 (no limit), value range 0-1000000000|
|
||||
|filterScalarMode | |Not supported |Force scalar filter mode, 0: off; 1: on, default value 0|
|
||||
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables| |Not supported |Internal parameter, concurrency number of the query plan|
|
||||
|queryRsmaTolerance | |Not supported |Internal parameter, tolerance time for determining which level of rsma data to query, in milliseconds|
|
||||
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||
|pqSortMemThreshold | |Not supported |Internal parameter, memory threshold for sorting|
|
||||
|
||||
### Region Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|-----------------|----------|-|
|
||||
|timezone | |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
||||
|locale | |System locale information and encoding format, defaults to obtaining from the system|
|
||||
|charset | |Character set encoding, defaults to obtaining from the system|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|timezone | |Not supported |Time zone; defaults to dynamically obtaining the current time zone setting from the system|
|
||||
|locale | |Not supported |System locale information and encoding format, defaults to obtaining from the system|
|
||||
|charset | |Not supported |Character set encoding, defaults to obtaining from the system|
|
||||
|
||||
:::info
|
||||
|
||||
|
@ -167,152 +169,153 @@ The effective value of charset is UTF-8.
|
|||
|
||||
### Storage Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|--------------------|----------|-|
|
||||
|dataDir | |Directory for data files, all data files are written to this directory, default value /var/lib/taos|
|
||||
|tempDir | |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
||||
|minimalDataDirGB | |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
||||
|minimalTmpDirGB | |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
||||
|minDiskFreeSize |After 3.1.1.0|When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
||||
|s3MigrateIntervalSec|After 3.3.4.3|Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
||||
|s3MigrateEnabled |After 3.3.4.3|Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
||||
|s3Accesskey |After 3.3.4.3|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
||||
|s3Endpoint |After 3.3.4.3|COS service domain name in the user's region, supports http and https, the region of the bucket must match the endpoint, otherwise it cannot be accessed; Enterprise parameter|
|
||||
|s3BucketName |After 3.3.4.3|Bucket name, followed by a hyphen and the AppId of the user registered COS service, where AppId is unique to COS, not present in AWS and Alibaba Cloud, needs to be part of the bucket name, separated by a hyphen; parameter values are string type, but do not need quotes; for example test0711-1309024725; Enterprise parameter|
|
||||
|s3PageCacheSize |After 3.3.4.3|Number of S3 page cache pages, range 4-1048576, unit is pages, default value 4096; Enterprise parameter|
|
||||
|s3UploadDelaySec |After 3.3.4.3|How long a data file remains unchanged before being uploaded to S3, range 1-2592000 (30 days), in seconds, default value 60; Enterprise parameter|
|
||||
|cacheLazyLoadThreshold| |Internal parameter, cache loading strategy|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|dataDir | |Not supported |Directory for data files, all data files are written to this directory, default value /var/lib/taos|
|
||||
|tempDir | |Not supported |Specifies the directory for generating temporary files during system operation, default value /tmp|
|
||||
|minimalDataDirGB | |Not supported |Minimum space to be reserved in the time-series data storage directory specified by dataDir, in GB, default value 2|
|
||||
|minimalTmpDirGB | |Not supported |Minimum space to be reserved in the temporary file directory specified by tempDir, in GB, default value 1|
|
||||
|minDiskFreeSize |After 3.1.1.0|Supported, effective immediately |When the available space on a disk is less than or equal to this threshold, the disk will no longer be selected for generating new data files, unit is bytes, range 52428800-1073741824, default value 52428800; Enterprise parameter|
|
||||
|s3MigrateIntervalSec|After 3.3.4.3|Supported, effective immediately |Trigger cycle for automatic upload of local data files to S3, in seconds. Minimum: 600; Maximum: 100000. Default value 3600; Enterprise parameter|
|
||||
|s3MigrateEnabled |After 3.3.4.3|Supported, effective immediately |Whether to automatically perform S3 migration, default value is 0, which means auto S3 migration is off, can be set to 1; Enterprise parameter|
|
||||
|s3Accesskey |After 3.3.4.3|Supported, effective after restart|Colon-separated user SecretId:SecretKey, for example AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E; Enterprise parameter|
|
||||
|s3Endpoint |After 3.3.4.3|Supported, effective after restart|COS service domain name in the user's region, supports http and https, the region of the bucket must match the endpoint, otherwise it cannot be accessed; Enterprise parameter|
|
||||
|s3BucketName |After 3.3.4.3|Supported, effective after restart|Bucket name, followed by a hyphen and the AppId of the user registered COS service, where AppId is unique to COS, not present in AWS and Alibaba Cloud, needs to be part of the bucket name, separated by a hyphen; parameter values are string type, but do not need quotes; for example test0711-1309024725; Enterprise parameter|
|
||||
|s3PageCacheSize |After 3.3.4.3|Supported, effective after restart|Number of S3 page cache pages, range 4-1048576, unit is pages, default value 4096; Enterprise parameter|
|
||||
|s3UploadDelaySec |After 3.3.4.3|Supported, effective immediately |How long a data file remains unchanged before being uploaded to S3, range 1-2592000 (30 days), in seconds, default value 60; Enterprise parameter|
|
||||
|cacheLazyLoadThreshold| |Supported, effective immediately |Internal parameter, cache loading strategy|
|
||||
|
||||
### Cluster Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|--------------------------|----------|-|
|
||||
|supportVnodes | |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|
||||
|numOfCommitThreads | |Maximum number of commit threads, range 0-1024, default value 4|
|
||||
|numOfMnodeReadThreads | |Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfVnodeQueryThreads | |Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||
|numOfVnodeFetchThreads | |Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfVnodeRsmaThreads | |Number of Rsma threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfQnodeQueryThreads | |Number of Query threads for qnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||
|numOfSnodeSharedThreads | |Number of shared threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||
|numOfSnodeUniqueThreads | |Number of exclusive threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||
|ratioOfVnodeStreamThreads | |Ratio of stream computing using vnode threads, range 0.01-4, default value 4|
|
||||
|ttlUnit | |Unit for ttl parameter, range 1-31572500, in seconds, default value 86400|
|
||||
|ttlPushInterval | |Frequency of ttl timeout checks, range 1-100000, in seconds, default value 10|
|
||||
|ttlChangeOnWrite | |Whether ttl expiration time changes with table modification; 0: no change, 1: change; default value 0|
|
||||
|ttlBatchDropNum | |Number of subtables deleted in a batch for ttl, minimum value 0, default value 10000|
|
||||
|retentionSpeedLimitMB | |Speed limit for data migration across different levels of disks, range 0-1024, in MB, default value 0, which means no limit|
|
||||
|maxTsmaNum | |Maximum number of TSMAs that can be created in the cluster; range 0-3; default value 3|
|
||||
|tmqMaxTopicNum | |Maximum number of topics that can be established for subscription; range 1-10000; default value 20|
|
||||
|tmqRowSize | |Maximum number of records in a subscription data block, range 1-1000000, default value 4096|
|
||||
|audit | |Audit feature switch; Enterprise parameter|
|
||||
|auditInterval | |Time interval for reporting audit data; Enterprise parameter|
|
||||
|auditCreateTable | |Whether to enable audit feature for creating subtables; Enterprise parameter|
|
||||
|encryptAlgorithm | |Data encryption algorithm; Enterprise parameter|
|
||||
|encryptScope | |Encryption scope; Enterprise parameter|
|
||||
|enableWhiteList | |Switch for whitelist feature; Enterprise parameter|
|
||||
|syncLogBufferMemoryAllowed| |Maximum memory allowed for sync log cache messages for a dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory, effective from versions 3.1.3.2/3.3.2.13|
|
||||
|syncElectInterval | |Internal parameter, for debugging synchronization module|
|
||||
|syncHeartbeatInterval | |Internal parameter, for debugging synchronization module|
|
||||
|syncHeartbeatTimeout | |Internal parameter, for debugging synchronization module|
|
||||
|syncSnapReplMaxWaitN | |Internal parameter, for debugging synchronization module|
|
||||
|syncSnapReplMaxWaitN | |Internal parameter, for debugging synchronization module|
|
||||
|arbHeartBeatIntervalSec | |Internal parameter, for debugging synchronization module|
|
||||
|arbCheckSyncIntervalSec | |Internal parameter, for debugging synchronization module|
|
||||
|arbSetAssignedTimeoutSec | |Internal parameter, for debugging synchronization module|
|
||||
|mndSdbWriteDelta | |Internal parameter, for debugging mnode module|
|
||||
|mndLogRetention | |Internal parameter, for debugging mnode module|
|
||||
|skipGrant | |Internal parameter, for authorization checks|
|
||||
|trimVDbIntervalSec | |Internal parameter, for deleting expired data|
|
||||
|ttlFlushThreshold | |Internal parameter, frequency of ttl timer|
|
||||
|compactPullupInterval | |Internal parameter, frequency of data reorganization timer|
|
||||
|walFsyncDataSizeLimit | |Internal parameter, threshold for WAL to perform FSYNC|
|
||||
|transPullupInterval | |Internal parameter, retry interval for mnode to execute transactions|
|
||||
|mqRebalanceInterval | |Internal parameter, interval for consumer rebalancing|
|
||||
|uptimeInterval | |Internal parameter, for recording system uptime|
|
||||
|timeseriesThreshold | |Internal parameter, for usage statistics|
|
||||
|udf | |Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
||||
|udfdResFuncs | |Internal parameter, for setting UDF result sets|
|
||||
|udfdLdLibPath | |Internal parameter, indicates the library path for loading UDF|
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
|supportVnodes | |Supported, effective immediately |Maximum number of vnodes supported by a dnode, range 0-4096, default value is twice the number of CPU cores + 5|
|
||||
|numOfCommitThreads | |Supported, effective after restart|Maximum number of commit threads, range 0-1024, default value 4|
|
||||
|numOfMnodeReadThreads | |Supported, effective after restart|Number of Read threads for mnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfVnodeQueryThreads | |Supported, effective after restart|Number of Query threads for vnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||
|numOfVnodeFetchThreads | |Supported, effective after restart|Number of Fetch threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfVnodeRsmaThreads | |Supported, effective after restart|Number of Rsma threads for vnode, range 0-1024, default value is one quarter of the CPU cores (not exceeding 4)|
|
||||
|numOfQnodeQueryThreads | |Supported, effective after restart|Number of Query threads for qnode, range 0-1024, default value is twice the number of CPU cores (not exceeding 16)|
|
||||
|numOfSnodeSharedThreads | |Supported, effective after restart|Number of shared threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||
|numOfSnodeUniqueThreads | |Supported, effective after restart|Number of exclusive threads for snode, range 0-1024, default value is one quarter of the CPU cores (not less than 2, not exceeding 4)|
|
||||
|ratioOfVnodeStreamThreads | |Supported, effective after restart|Ratio of stream computing using vnode threads, range 0.01-4, default value 4|
|
||||
|ttlUnit | |Not supported |Unit for ttl parameter, range 1-31572500, in seconds, default value 86400|
|
||||
|ttlPushInterval | |Supported, effective immediately |Frequency of ttl timeout checks, range 1-100000, in seconds, default value 10|
|
||||
|ttlChangeOnWrite | |Supported, effective immediately |Whether ttl expiration time changes with table modification; 0: no change, 1: change; default value 0|
|
||||
|ttlBatchDropNum | |Supported, effective immediately |Number of subtables deleted in a batch for ttl, minimum value 0, default value 10000|
|
||||
|retentionSpeedLimitMB | |Supported, effective immediately |Speed limit for data migration across different levels of disks, range 0-1024, in MB, default value 0, which means no limit|
|
||||
|maxTsmaNum | |Supported, effective immediately |Maximum number of TSMAs that can be created in the cluster; range 0-3; default value 3|
|
||||
|tmqMaxTopicNum | |Supported, effective immediately |Maximum number of topics that can be established for subscription; range 1-10000; default value 20|
|
||||
|tmqRowSize | |Supported, effective immediately |Maximum number of records in a subscription data block, range 1-1000000, default value 4096|
|
||||
|audit | |Supported, effective immediately |Audit feature switch; Enterprise parameter|
|
||||
|auditInterval | |Supported, effective immediately |Time interval for reporting audit data; Enterprise parameter|
|
||||
|auditCreateTable | |Supported, effective immediately |Whether to enable audit feature for creating subtables; Enterprise parameter|
|
||||
|encryptAlgorithm | |Not supported |Data encryption algorithm; Enterprise parameter|
|
||||
|encryptScope | |Not supported |Encryption scope; Enterprise parameter|
|
||||
|enableWhiteList | |Supported, effective immediately |Switch for whitelist feature; Enterprise parameter|
|
||||
|syncLogBufferMemoryAllowed| |Supported, effective immediately |Maximum memory allowed for sync log cache messages for a dnode, in bytes, range 104857600-INT64_MAX, default value is 1/10 of server memory, effective from versions 3.1.3.2/3.3.2.13|
|
||||
|syncElectInterval | |Not supported |Internal parameter, for debugging synchronization module|
|
||||
|syncHeartbeatInterval | |Not supported |Internal parameter, for debugging synchronization module|
|
||||
|syncHeartbeatTimeout | |Not supported |Internal parameter, for debugging synchronization module|
|
||||
|syncSnapReplMaxWaitN | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||
|arbHeartBeatIntervalSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||
|arbCheckSyncIntervalSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||
|arbSetAssignedTimeoutSec | |Supported, effective immediately |Internal parameter, for debugging synchronization module|
|
||||
|mndSdbWriteDelta | |Supported, effective immediately |Internal parameter, for debugging mnode module|
|
||||
|mndLogRetention | |Supported, effective immediately |Internal parameter, for debugging mnode module|
|
||||
|skipGrant | |Not supported |Internal parameter, for authorization checks|
|
||||
|trimVDbIntervalSec | |Supported, effective immediately |Internal parameter, for deleting expired data|
|
||||
|ttlFlushThreshold | |Supported, effective immediately |Internal parameter, frequency of ttl timer|
|
||||
|compactPullupInterval | |Supported, effective immediately |Internal parameter, frequency of data reorganization timer|
|
||||
|walFsyncDataSizeLimit | |Supported, effective immediately |Internal parameter, threshold for WAL to perform FSYNC|
|
||||
|transPullupInterval | |Supported, effective immediately |Internal parameter, retry interval for mnode to execute transactions|
|
||||
|mqRebalanceInterval | |Supported, effective immediately |Internal parameter, interval for consumer rebalancing|
|
||||
|uptimeInterval | |Supported, effective immediately |Internal parameter, for recording system uptime|
|
||||
|timeseriesThreshold | |Supported, effective immediately |Internal parameter, for usage statistics|
|
||||
|udf | |Supported, effective after restart|Whether to start UDF service; 0: do not start, 1: start; default value 0 |
|
||||
|udfdResFuncs | |Supported, effective after restart|Internal parameter, for setting UDF result sets|
|
||||
|udfdLdLibPath | |Supported, effective after restart|Internal parameter, indicates the library path for loading UDF|
|
||||
|
||||
### Stream Computing Parameters
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|-----------------------|----------|-|
|
||||
| disableStream | | Switch to enable or disable stream computing |
|
||||
| streamBufferSize | | Controls the size of the window state cache in memory, default value is 128MB |
|
||||
| streamAggCnt | | Internal parameter, number of concurrent aggregation computations |
|
||||
| checkpointInterval | | Internal parameter, checkpoint synchronization interval |
|
||||
| concurrentCheckpoint | | Internal parameter, whether to check checkpoints concurrently |
|
||||
| maxStreamBackendCache | | Internal parameter, maximum cache used by stream computing |
|
||||
| streamSinkDataRate | | Internal parameter, used to control the write speed of stream computing results |
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
| disableStream | |Supported, effective immediately | Switch to enable or disable stream computing |
|
||||
| streamBufferSize | |Supported, effective immediately | Controls the size of the window state cache in memory, default value is 128MB |
|
||||
| streamAggCnt | |Not supported | Internal parameter, number of concurrent aggregation computations |
|
||||
| checkpointInterval | |Supported, effective after restart| Internal parameter, checkpoint synchronization interval |
|
||||
| concurrentCheckpoint | |Supported, effective immediately | Internal parameter, whether to check checkpoints concurrently |
|
||||
| maxStreamBackendCache | |Supported, effective immediately | Internal parameter, maximum cache used by stream computing |
|
||||
| streamSinkDataRate | |Supported, effective after restart| Internal parameter, used to control the write speed of stream computing results |
|
||||
|
||||
### Log Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|----------------|----------|-|
|
||||
| logDir | | Log file directory, operational logs will be written to this directory, default value /var/log/taos |
|
||||
| minimalLogDirGB | | Stops writing logs when the available space on the disk where the log folder is located is less than this value, unit GB, default value 1 |
|
||||
| numOfLogLines | | Maximum number of lines allowed in a single log file, default value 10,000,000 |
|
||||
| asyncLog | | Log writing mode, 0: synchronous, 1: asynchronous, default value 1 |
|
||||
| logKeepDays | | Maximum retention time for log files, unit: days, default value 0, which means unlimited retention, log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, when the log file size reaches the set limit, it will be renamed to taosdlog.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||
| slowLogThreshold| 3.3.3.0 onwards | Slow query threshold, queries taking longer than or equal to this threshold are considered slow, unit seconds, default value 3 |
|
||||
| slowLogMaxLen | 3.3.3.0 onwards | Maximum length of slow query logs, range 1-16384, default value 4096 |
|
||||
| slowLogScope | 3.3.3.0 onwards | Type of slow query records, range ALL/QUERY/INSERT/OTHERS/NONE, default value QUERY |
|
||||
| slowLogExceptDb | 3.3.3.0 onwards | Specifies the database that does not report slow queries, only supports configuring one database |
|
||||
| debugFlag | | Log switch for running logs, 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||
| tmrDebugFlag | | Log switch for the timer module, range as above |
|
||||
| uDebugFlag | | Log switch for the utility module, range as above |
|
||||
| rpcDebugFlag | | Log switch for the rpc module, range as above |
|
||||
| qDebugFlag | | Log switch for the query module, range as above |
|
||||
| dDebugFlag | | Log switch for the dnode module, range as above |
|
||||
| vDebugFlag | | Log switch for the vnode module, range as above |
|
||||
| mDebugFlag | | Log switch for the mnode module, range as above |
|
||||
| azDebugFlag | 3.3.4.3 onwards | Log switch for the S3 module, range as above |
|
||||
| sDebugFlag | | Log switch for the sync module, range as above |
|
||||
| tsdbDebugFlag | | Log switch for the tsdb module, range as above |
|
||||
| tqDebugFlag | | Log switch for the tq module, range as above |
|
||||
| fsDebugFlag | | Log switch for the fs module, range as above |
|
||||
| udfDebugFlag | | Log switch for the udf module, range as above |
|
||||
| smaDebugFlag | | Log switch for the sma module, range as above |
|
||||
| idxDebugFlag | | Log switch for the index module, range as above |
|
||||
| tdbDebugFlag | | Log switch for the tdb module, range as above |
|
||||
| metaDebugFlag | | Log switch for the meta module, range as above |
|
||||
| stDebugFlag | | Log switch for the stream module, range as above |
|
||||
| sndDebugFlag | | Log switch for the snode module, range as above |
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
| logDir | |Not supported | Log file directory, operational logs will be written to this directory, default value /var/log/taos |
|
||||
| minimalLogDirGB | |Not supported | Stops writing logs when the available space on the disk where the log folder is located is less than this value, unit GB, default value 1 |
|
||||
| numOfLogLines | |Supported, effective immediately | Maximum number of lines allowed in a single log file, default value 10,000,000 |
|
||||
| asyncLog | |Supported, effective immediately | Log writing mode, 0: synchronous, 1: asynchronous, default value 1 |
|
||||
| logKeepDays | |Supported, effective immediately | Maximum retention time for log files, unit: days, default value 0, which means unlimited retention, log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, when the log file size reaches the set limit, it will be renamed to taosdlog.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||
| slowLogThreshold| 3.3.3.0 onwards |Supported, effective immediately | Slow query threshold, queries taking longer than or equal to this threshold are considered slow, unit seconds, default value 3 |
|
||||
| slowLogMaxLen | 3.3.3.0 onwards |Supported, effective immediately | Maximum length of slow query logs, range 1-16384, default value 4096 |
|
||||
| slowLogScope | 3.3.3.0 onwards |Supported, effective immediately | Type of slow query records, range ALL/QUERY/INSERT/OTHERS/NONE, default value QUERY |
|
||||
| slowLogExceptDb | 3.3.3.0 onwards |Supported, effective immediately | Specifies the database that does not report slow queries, only supports configuring one database |
|
||||
| debugFlag | |Supported, effective immediately | Log switch for running logs, 131 (outputs error and warning logs), 135 (outputs error, warning, and debug logs), 143 (outputs error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||
| tmrDebugFlag | |Supported, effective immediately | Log switch for the timer module, range as above |
|
||||
| uDebugFlag | |Supported, effective immediately | Log switch for the utility module, range as above |
|
||||
| rpcDebugFlag | |Supported, effective immediately | Log switch for the rpc module, range as above |
|
||||
| qDebugFlag | |Supported, effective immediately | Log switch for the query module, range as above |
|
||||
| dDebugFlag | |Supported, effective immediately | Log switch for the dnode module, range as above |
|
||||
| vDebugFlag | |Supported, effective immediately | Log switch for the vnode module, range as above |
|
||||
| mDebugFlag | |Supported, effective immediately | Log switch for the mnode module, range as above |
|
||||
| azDebugFlag | 3.3.4.3 onwards |Supported, effective immediately | Log switch for the S3 module, range as above |
|
||||
| sDebugFlag | |Supported, effective immediately | Log switch for the sync module, range as above |
|
||||
| tsdbDebugFlag | |Supported, effective immediately | Log switch for the tsdb module, range as above |
|
||||
| tqDebugFlag | |Supported, effective immediately | Log switch for the tq module, range as above |
|
||||
| fsDebugFlag | |Supported, effective immediately | Log switch for the fs module, range as above |
|
||||
| udfDebugFlag | |Supported, effective immediately | Log switch for the udf module, range as above |
|
||||
| smaDebugFlag | |Supported, effective immediately | Log switch for the sma module, range as above |
|
||||
| idxDebugFlag | |Supported, effective immediately | Log switch for the index module, range as above |
|
||||
| tdbDebugFlag | |Supported, effective immediately | Log switch for the tdb module, range as above |
|
||||
| metaDebugFlag | |Supported, effective immediately | Log switch for the meta module, range as above |
|
||||
| stDebugFlag | |Supported, effective immediately | Log switch for the stream module, range as above |
|
||||
| sndDebugFlag | |Supported, effective immediately | Log switch for the snode module, range as above |
|
||||
|
||||
### Debugging Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|----------------------|-------------------|-------------|
|
||||
| enableCoreFile | | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value is 1 |
|
||||
| configDir | | Directory where the configuration files are located |
|
||||
| scriptDir | | Directory for internal test tool scripts |
|
||||
| assert | | Assertion control switch, default value is 0 |
|
||||
| randErrorChance | | Internal parameter, used for random failure testing |
|
||||
| randErrorDivisor | | Internal parameter, used for random failure testing |
|
||||
| randErrorScope | | Internal parameter, used for random failure testing |
|
||||
| safetyCheckLevel | | Internal parameter, used for random failure testing |
|
||||
| experimental | | Internal parameter, used for some experimental features |
|
||||
| simdEnable | After 3.3.4.3 | Internal parameter, used for testing SIMD acceleration |
|
||||
| AVX512Enable | After 3.3.4.3 | Internal parameter, used for testing AVX512 acceleration |
|
||||
| rsyncPort | | Internal parameter, used for debugging stream computing |
|
||||
| snodeAddress | | Internal parameter, used for debugging stream computing |
|
||||
| checkpointBackupDir | | Internal parameter, used for restoring snode data |
|
||||
| enableAuditDelete | | Internal parameter, used for testing audit functions |
|
||||
| slowLogThresholdTest | | Internal parameter, used for testing slow logs |
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
| enableCoreFile | |Supported, effective immediately | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value is 1 |
|
||||
| configDir | |Not supported | Directory where the configuration files are located |
|
||||
|forceReadConfig | |Not supported |Force the use of parameters from the configuration file, default value: 0|
|
||||
| scriptDir | |Not supported | Directory for internal test tool scripts |
|
||||
| assert | |Not supported | Assertion control switch, default value is 0 |
|
||||
| randErrorChance | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||
| randErrorDivisor | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||
| randErrorScope | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||
| safetyCheckLevel | |Supported, effective immediately | Internal parameter, used for random failure testing |
|
||||
| experimental | |Supported, effective immediately | Internal parameter, used for some experimental features |
|
||||
| simdEnable | After 3.3.4.3 |Not supported | Internal parameter, used for testing SIMD acceleration |
|
||||
| AVX512Enable | After 3.3.4.3 |Not supported | Internal parameter, used for testing AVX512 acceleration |
|
||||
| rsyncPort | |Not supported | Internal parameter, used for debugging stream computing |
|
||||
| snodeAddress | |Supported, effective immediately | Internal parameter, used for debugging stream computing |
|
||||
| checkpointBackupDir | |Supported, effective immediately | Internal parameter, used for restoring snode data |
|
||||
| enableAuditDelete | |Not supported | Internal parameter, used for testing audit functions |
|
||||
| slowLogThresholdTest | |Not supported | Internal parameter, used for testing slow logs |
|
||||
| bypassFlag |After 3.3.4.5 |Supported, effective immediately | Internal parameter, used for short-circuit testing|
|
||||
|
||||
### Compression Parameters
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|----------------|-------------------|-------------|
|
||||
| fPrecision | | Sets the compression precision for float type floating numbers, range 0.1 ~ 0.00000001, default value 0.00000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||
| dPrecision | | Sets the compression precision for double type floating numbers, range 0.1 ~ 0.0000000000000001, default value 0.0000000000000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||
| lossyColumn | Before 3.3.0.0 | Enables TSZ lossy compression for float and/or double types; range float/double/none; default value none, indicating lossless compression is off |
|
||||
| ifAdtFse | | When TSZ lossy compression is enabled, use the FSE algorithm instead of the HUFFMAN algorithm, FSE algorithm is faster in compression but slightly slower in decompression, choose this for faster compression speed; 0: off, 1: on; default value is 0 |
|
||||
| maxRange | | Internal parameter, used for setting lossy compression |
|
||||
| curRange | | Internal parameter, used for setting lossy compression |
|
||||
| compressor | | Internal parameter, used for setting lossy compression |
|
||||
|Parameter Name |Supported Version |Dynamic Modification|Description|
|
||||
|-----------------------|-------------------------|--------------------|------------|
|
||||
| fPrecision | |Supported, effective immediately | Sets the compression precision for float type floating numbers, range 0.1 ~ 0.00000001, default value 0.00000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||
| dPrecision | |Supported, effective immediately | Sets the compression precision for double type floating numbers, range 0.1 ~ 0.0000000000000001, default value 0.0000000000000001, floating numbers smaller than this value will have their mantissa truncated |
|
||||
| lossyColumn | Before 3.3.0.0 |Not supported | Enables TSZ lossy compression for float and/or double types; range float/double/none; default value none, indicating lossless compression is off |
|
||||
| ifAdtFse | |Supported, effective after restart| When TSZ lossy compression is enabled, use the FSE algorithm instead of the HUFFMAN algorithm, FSE algorithm is faster in compression but slightly slower in decompression, choose this for faster compression speed; 0: off, 1: on; default value is 0 |
|
||||
| maxRange | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||
| curRange | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||
| compressor | |Supported, effective after restart| Internal parameter, used for setting lossy compression |
|
||||
|
||||
**Additional Notes**
|
||||
|
||||
|
|
|
@ -10,107 +10,109 @@ The TDengine client driver provides all the APIs needed for application programm
|
|||
|
||||
### Connection Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|----------------------|----------|-------------|
|
||||
|firstEp | |At startup, the endpoint of the first dnode in the cluster to actively connect to, default value: hostname:6030, if the server's hostname cannot be obtained, it is assigned to localhost|
|
||||
|secondEp | |At startup, if the firstEp cannot be connected, try to connect to the endpoint of the second dnode in the cluster, no default value|
|
||||
|compressMsgSize | |Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default value -1|
|
||||
|shellActivityTimer | |The duration in seconds for the client to send heartbeats to mnode, range 1-120, default value 3|
|
||||
|numOfRpcSessions | |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||
|numOfRpcThreads | |Number of threads for RPC to send and receive data, range 1-50, default value is half of the CPU cores|
|
||||
|numOfTaskQueueThreads | |Number of threads for the client to handle RPC messages, range 4-16, default value is half of the CPU cores|
|
||||
|timeToGetAvailableConn| Cancelled after 3.3.4.* |The longest waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||
|useAdapter | |Internal parameter, whether to use taosadapter, affects CSV file import|
|
||||
|shareConnLimit |Added in 3.3.4.0|Internal parameter, the number of queries a link can share, range 1-256, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0|Internal parameter, minimum timeout, range 64-604800, in seconds, default value 900|
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
|firstEp | |Supported, effective immediately |At startup, the endpoint of the first dnode in the cluster to actively connect to, default value: hostname:6030, if the server's hostname cannot be obtained, it is assigned to localhost|
|
||||
|secondEp | |Supported, effective immediately |At startup, if the firstEp cannot be connected, try to connect to the endpoint of the second dnode in the cluster, no default value|
|
||||
|compressMsgSize | |Supported, effective immediately |Whether to compress RPC messages; -1: no messages are compressed; 0: all messages are compressed; N (N>0): only messages larger than N bytes are compressed; default value -1|
|
||||
|shellActivityTimer | |Not supported |The duration in seconds for the client to send heartbeats to mnode, range 1-120, default value 3|
|
||||
|numOfRpcSessions | |Supported, effective immediately |Maximum number of connections supported by RPC, range 100-100000, default value 30000|
|
||||
|numOfRpcThreads | |Not supported |Number of threads for RPC to send and receive data, range 1-50, default value is half of the CPU cores|
|
||||
|numOfTaskQueueThreads | |Not supported |Number of threads for the client to handle RPC messages, range 4-16, default value is half of the CPU cores|
|
||||
|timeToGetAvailableConn| Cancelled after 3.3.4.* |Not supported |The longest waiting time to get an available connection, range 10-50000000, in milliseconds, default value 500000|
|
||||
|useAdapter | |Supported, effective immediately |Internal parameter, whether to use taosadapter, affects CSV file import|
|
||||
|shareConnLimit |Added in 3.3.4.0|Not supported |Internal parameter, the number of queries a link can share, range 1-256, default value 10|
|
||||
|readTimeout |Added in 3.3.4.0|Not supported |Internal parameter, minimum timeout, range 64-604800, in seconds, default value 900|
|
||||
|
||||
### Query Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|---------------------------------|---------|-|
|
||||
|countAlwaysReturnValue | |Whether the count/hyperloglog function returns a value when the input data is empty or NULL; 0: returns an empty row, 1: returns; default value 1; when this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; note that this parameter should be consistent between client and server|
|
||||
|keepColumnName | |Automatically sets the alias to the column name (excluding the function name) when querying with Last, First, LastRow functions without specifying an alias, thus the order by clause will automatically refer to the column corresponding to the function; 1: automatically sets the alias to the column name (excluding the function name), 0: does not automatically set an alias; default value: 0|
|
||||
|multiResultFunctionStarReturnTags|After 3.3.3.0|When querying a supertable, whether last(\*)/last_row(\*)/first(\*) returns tag columns; when querying basic tables, subtables, it is not affected by this parameter; 0: does not return tag columns, 1: returns tag columns; default value: 0; when this parameter is set to 0, last(\*)/last_row(\*)/first(\*) only returns the ordinary columns of the supertable; when set to 1, it returns both the ordinary columns and tag columns of the supertable|
|
||||
|metaCacheMaxSize | |Specifies the maximum size of metadata cache for a single client, in MB; default value -1, meaning unlimited|
|
||||
|maxTsmaCalcDelay | |The allowable delay for tsma calculation by the client during query, range 600s - 86400s, i.e., 10 minutes - 1 day; default value: 600 seconds|
|
||||
|tsmaDataDeleteMark | |The retention time for intermediate results of historical data calculated by TSMA, in milliseconds; range >= 3600000, i.e., at least 1h; default value: 86400000, i.e., 1d |
|
||||
|queryPolicy | |Execution strategy for query statements, 1: only use vnode, do not use qnode; 2: subtasks without scan operators are executed on qnode, subtasks with scan operators are executed on vnode; 3: vnode only runs scan operators, all other operators are executed on qnode; default value: 1|
|
||||
|queryTableNotExistAsEmpty | |Whether to return an empty result set when the queried table does not exist; false: returns an error; true: returns an empty result set; default value false|
|
||||
|querySmaOptimize | |Optimization strategy for sma index, 0: do not use sma index, always query from original data; 1: use sma index, directly query from pre-calculated results for eligible statements; default value: 0|
|
||||
|queryPlannerTrace | |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables | |Internal parameter, concurrency number of the query plan|
|
||||
|enableQueryHb | |Internal parameter, whether to send query heartbeat messages|
|
||||
|minSlidingTime | |Internal parameter, minimum allowable value for sliding|
|
||||
|minIntervalTime | |Internal parameter, minimum allowable value for interval|
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
|countAlwaysReturnValue | |Supported, effective immediately |Whether the count/hyperloglog function returns a value when the input data is empty or NULL; 0: returns an empty row, 1: returns; default value 1; when this parameter is set to 1, if the query contains an INTERVAL clause or the query uses TSMA, and the corresponding group or window has empty or NULL data, the corresponding group or window will not return a query result; note that this parameter should be consistent between client and server|
|
||||
|keepColumnName | |Supported, effective immediately |Automatically sets the alias to the column name (excluding the function name) when querying with Last, First, LastRow functions without specifying an alias, thus the order by clause will automatically refer to the column corresponding to the function; 1: automatically sets the alias to the column name (excluding the function name), 0: does not automatically set an alias; default value: 0|
|
||||
|multiResultFunctionStarReturnTags|After 3.3.3.0|Supported, effective immediately |When querying a supertable, whether last(\*)/last_row(\*)/first(\*) returns tag columns; when querying basic tables, subtables, it is not affected by this parameter; 0: does not return tag columns, 1: returns tag columns; default value: 0; when this parameter is set to 0, last(\*)/last_row(\*)/first(\*) only returns the ordinary columns of the supertable; when set to 1, it returns both the ordinary columns and tag columns of the supertable|
|
||||
|metaCacheMaxSize | |Supported, effective immediately |Specifies the maximum size of metadata cache for a single client, in MB; default value -1, meaning unlimited|
|
||||
|maxTsmaCalcDelay | |Supported, effective immediately |The allowable delay for tsma calculation by the client during query, range 600s - 86400s, i.e., 10 minutes - 1 day; default value: 600 seconds|
|
||||
|tsmaDataDeleteMark | |Supported, effective immediately |The retention time for intermediate results of historical data calculated by TSMA, in milliseconds; range >= 3600000, i.e., at least 1h; default value: 86400000, i.e., 1d |
|
||||
|queryPolicy | |Supported, effective immediately |Execution strategy for query statements, 1: only use vnode, do not use qnode; 2: subtasks without scan operators are executed on qnode, subtasks with scan operators are executed on vnode; 3: vnode only runs scan operators, all other operators are executed on qnode; default value: 1|
|
||||
|queryTableNotExistAsEmpty | |Supported, effective immediately |Whether to return an empty result set when the queried table does not exist; false: returns an error; true: returns an empty result set; default value false|
|
||||
|querySmaOptimize | |Supported, effective immediately |Optimization strategy for sma index, 0: do not use sma index, always query from original data; 1: use sma index, directly query from pre-calculated results for eligible statements; default value: 0|
|
||||
|queryPlannerTrace | |Supported, effective immediately |Internal parameter, whether the query plan outputs detailed logs|
|
||||
|queryNodeChunkSize | |Supported, effective immediately |Internal parameter, chunk size of the query plan|
|
||||
|queryUseNodeAllocator | |Supported, effective immediately |Internal parameter, allocation method of the query plan|
|
||||
|queryMaxConcurrentTables | |Not supported |Internal parameter, concurrency number of the query plan|
|
||||
|enableQueryHb | |Supported, effective immediately |Internal parameter, whether to send query heartbeat messages|
|
||||
|minSlidingTime | |Supported, effective immediately |Internal parameter, minimum allowable value for sliding|
|
||||
|minIntervalTime | |Supported, effective immediately |Internal parameter, minimum allowable value for interval|
|
||||
|
||||
### Writing Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|---------------------------------|-------------------|-------------|
|
||||
| smlChildTableName | | Key for custom child table name in schemaless, no default value |
|
||||
| smlAutoChildTableNameDelimiter | | Delimiter between schemaless tags, concatenated as the child table name, no default value |
|
||||
| smlTagName | | Default tag name when schemaless tag is empty, default value "_tag_null" |
|
||||
| smlTsDefaultName | | Configuration for setting the time column name in schemaless auto table creation, default value "_ts" |
|
||||
| smlDot2Underline | | Converts dots in supertable names to underscores in schemaless |
|
||||
| maxInsertBatchRows | | Internal parameter, maximum number of rows per batch insert |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| smlChildTableName | |Supported, effective immediately | Key for custom child table name in schemaless, no default value |
|
||||
| smlAutoChildTableNameDelimiter | |Supported, effective immediately | Delimiter between schemaless tags, concatenated as the child table name, no default value |
|
||||
| smlTagName | |Supported, effective immediately | Default tag name when schemaless tag is empty, default value "_tag_null" |
|
||||
| smlTsDefaultName | |Supported, effective immediately | Configuration for setting the time column name in schemaless auto table creation, default value "_ts" |
|
||||
| smlDot2Underline | |Supported, effective immediately | Converts dots in supertable names to underscores in schemaless |
|
||||
| maxInsertBatchRows | |Supported, effective immediately | Internal parameter, maximum number of rows per batch insert |
|
||||
|
||||
### Region Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|----------------|-------------------|-------------|
|
||||
| timezone | | Time zone; defaults to dynamically obtaining the current system time zone setting |
|
||||
| locale | | System locale and encoding format, defaults to system settings |
|
||||
| charset | | Character set encoding, defaults to system settings |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| timezone | |Supported, effective immediately | Time zone; defaults to dynamically obtaining the current system time zone setting |
|
||||
| locale | |Supported, effective immediately | System locale and encoding format, defaults to system settings |
|
||||
| charset | |Supported, effective immediately | Character set encoding, defaults to system settings |
|
||||
|
||||
### Storage Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|-----------------|-------------------|-------------|
|
||||
| tempDir | | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
|
||||
| minimalTmpDirGB | | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| tempDir | |Supported, effective immediately | Specifies the directory for generating temporary files during operation, default on Linux platform is /tmp |
|
||||
| minimalTmpDirGB | |Supported, effective immediately | Minimum space required to be reserved in the directory specified by tempDir, in GB, default value: 1 |
|
||||
|
||||
### Log Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|------------------|-------------------|-------------|
|
||||
| logDir | | Log file directory, operational logs will be written to this directory, default value: /var/log/taos |
|
||||
| minimalLogDirGB | | Stops writing logs when the disk space available in the log directory is less than this value, in GB, default value: 1 |
|
||||
| numOfLogLines | | Maximum number of lines allowed in a single log file, default value: 10,000,000 |
|
||||
| asyncLog | | Log writing mode, 0: synchronous, 1: asynchronous, default value: 1 |
|
||||
| logKeepDays | | Maximum retention time for log files, in days, default value: 0, meaning unlimited retention. Log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, the log file will be renamed to taoslogx.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||
| debugFlag | | Log switch for running logs, 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||
| tmrDebugFlag | | Log switch for the timer module, value range as above |
|
||||
| uDebugFlag | | Log switch for the utility module, value range as above |
|
||||
| rpcDebugFlag | | Log switch for the rpc module, value range as above |
|
||||
| jniDebugFlag | | Log switch for the jni module, value range as above |
|
||||
| qDebugFlag | | Log switch for the query module, value range as above |
|
||||
| cDebugFlag | | Log switch for the client module, value range as above |
|
||||
| simDebugFlag | | Internal parameter, log switch for the test tool, value range as above |
|
||||
| tqClientDebugFlag| After 3.3.4.3 | Log switch for the client module, value range as above |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| logDir | |Not supported | Log file directory, operational logs will be written to this directory, default value: /var/log/taos |
|
||||
| minimalLogDirGB | |Supported, effective immediately | Stops writing logs when the disk space available in the log directory is less than this value, in GB, default value: 1 |
|
||||
| numOfLogLines | |Supported, effective immediately | Maximum number of lines allowed in a single log file, default value: 10,000,000 |
|
||||
| asyncLog | |Supported, effective immediately | Log writing mode, 0: synchronous, 1: asynchronous, default value: 1 |
|
||||
| logKeepDays | |Supported, effective immediately | Maximum retention time for log files, in days, default value: 0, meaning unlimited retention. Log files will not be renamed, nor will new log files be rolled out, but the content of the log files may continue to roll depending on the log file size setting; when set to a value greater than 0, the log file will be renamed to taoslogx.yyy, where yyy is the timestamp of the last modification of the log file, and a new log file will be rolled out |
|
||||
| debugFlag | |Supported, effective immediately | Log switch for running logs, 131 (output error and warning logs), 135 (output error, warning, and debug logs), 143 (output error, warning, debug, and trace logs); default value 131 or 135 (depending on the module) |
|
||||
| tmrDebugFlag | |Supported, effective immediately | Log switch for the timer module, value range as above |
|
||||
| uDebugFlag | |Supported, effective immediately | Log switch for the utility module, value range as above |
|
||||
| rpcDebugFlag | |Supported, effective immediately | Log switch for the rpc module, value range as above |
|
||||
| jniDebugFlag | |Supported, effective immediately | Log switch for the jni module, value range as above |
|
||||
| qDebugFlag | |Supported, effective immediately | Log switch for the query module, value range as above |
|
||||
| cDebugFlag | |Supported, effective immediately | Log switch for the client module, value range as above |
|
||||
| simDebugFlag | |Supported, effective immediately | Internal parameter, log switch for the test tool, value range as above |
|
||||
| tqClientDebugFlag| After 3.3.4.3 |Supported, effective immediately | Log switch for the client module, value range as above |
|
||||
|
||||
### Debugging Related
|
||||
|
||||
| Parameter Name | Supported Version | Description |
|
||||
|------------------|-------------------|-------------|
|
||||
| crashReporting | | Whether to upload crash to telemetry, 0: do not upload, 1: upload; default value: 1 |
|
||||
| enableCoreFile | | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value: 1 |
|
||||
| assert | | Assertion control switch, default value: 0 |
|
||||
| configDir | | Directory for configuration files |
|
||||
| scriptDir | | Internal parameter, directory for test cases |
|
||||
| randErrorChance | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
||||
| randErrorDivisor | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
||||
| randErrorScope | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
||||
| safetyCheckLevel | After 3.3.3.0 | Internal parameter, used for random failure testing |
|
||||
| simdEnable | After 3.3.4.3 | Internal parameter, used for testing SIMD acceleration |
|
||||
| AVX512Enable | After 3.3.4.3 | Internal parameter, used for testing AVX512 acceleration |
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
| crashReporting | |Supported, effective immediately | Whether to upload crash to telemetry, 0: do not upload, 1: upload; default value: 1 |
|
||||
| enableCoreFile | |Supported, effective immediately | Whether to generate a core file when crashing, 0: do not generate, 1: generate; default value: 1 |
|
||||
| assert | |Not supported | Assertion control switch, default value: 0 |
|
||||
| configDir | |Not supported | Directory for configuration files |
|
||||
| scriptDir | |Not supported | Internal parameter, directory for test cases |
|
||||
| randErrorChance | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||
| randErrorDivisor | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||
| randErrorScope | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||
| safetyCheckLevel | After 3.3.3.0 |Not supported | Internal parameter, used for random failure testing |
|
||||
| simdEnable | After 3.3.4.3 |Not supported | Internal parameter, used for testing SIMD acceleration |
|
||||
| AVX512Enable | After 3.3.4.3 |Not supported | Internal parameter, used for testing AVX512 acceleration |
|
||||
| bypassFlag |After 3.3.4.5 |Supported, effective immediately | Internal parameter, used for short-circuit testing|
|
||||
|
||||
|
||||
### SHELL Related
|
||||
|
||||
|Parameter Name|Supported Version|Description|
|
||||
|-----------------|----------|-|
|
||||
|enableScience | |Whether to enable scientific notation for displaying floating numbers; 0: do not enable, 1: enable; default value: 1|
|
||||
|Parameter Name|Supported Version|Dynamic Modification|Description|
|
||||
|----------------------|----------|--------------------|-------------|
|
||||
|enableScience | |Not supported |Whether to enable scientific notation for displaying floating numbers; 0: do not enable, 1: enable; default value: 1|
|
||||
|
||||
## API
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ The following parameter descriptions and examples use `<content>` as a placehold
|
|||
|
||||
In command line mode, taosX uses DSN to represent a data source (source or destination), a typical DSN is as follows:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
# url-like
|
||||
<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<object>][?<p1>=<v1>[&<p2>=<v2>]]
|
||||
|------|------------|---|-----------|-----------|------|------|----------|-----------------------|
|
||||
|
@ -390,7 +390,7 @@ You can view the log files or use the `journalctl` command to view the logs of `
|
|||
|
||||
The command to view logs under Linux using `journalctl` is as follows:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
journalctl -u taosx [-f]
|
||||
```
|
||||
|
||||
|
@ -572,7 +572,7 @@ uint32_t len: The binary length of this string (excluding `\0`).
|
|||
|
||||
**Return Value**:
|
||||
|
||||
``` c
|
||||
```c
|
||||
struct parser_resp_t {
|
||||
int e; // 0 if success.
|
||||
void* p; // Success if contains.
|
||||
|
@ -589,7 +589,7 @@ When creation is successful, e = 0, p is the parser object.
|
|||
|
||||
Parse the input payload and return the result in JSON format [u8]. The returned JSON will be fully decoded using the default JSON parser (expanding the root array and all objects).
|
||||
|
||||
``` c
|
||||
```c
|
||||
const char* parser_mutate(
|
||||
void* parser,
|
||||
const uint8_t* in_ptr, uint32_t in_len,
|
||||
|
|
|
@ -26,7 +26,7 @@ The default configuration file for `Agent` is located at `/etc/taos/agent.toml`,
|
|||
|
||||
As shown below:
|
||||
|
||||
```TOML
|
||||
```toml
|
||||
# taosX service endpoint
|
||||
#
|
||||
#endpoint = "http://localhost:6055"
|
||||
|
@ -83,7 +83,7 @@ You don't need to be confused about how to set up the configuration file. Read a
|
|||
|
||||
On Linux systems, the `Agent` can be started with the Systemd command:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
systemctl start taosx-agent
|
||||
```
|
||||
|
||||
|
@ -95,6 +95,6 @@ You can view the log files or use the `journalctl` command to view the logs of t
|
|||
|
||||
The command to view logs with `journalctl` on Linux is as follows:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
journalctl -u taosx-agent [-f]
|
||||
```
|
||||
|
|
|
@ -143,13 +143,13 @@ For details on TDengine monitoring configuration, please refer to: [TDengine Mon
|
|||
|
||||
After installation, please use the `systemctl` command to start the taoskeeper service process.
|
||||
|
||||
```bash
|
||||
```shell
|
||||
systemctl start taoskeeper
|
||||
```
|
||||
|
||||
Check if the service is working properly:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
systemctl status taoskeeper
|
||||
```
|
||||
|
||||
|
@ -261,7 +261,7 @@ Query OK, 14 row(s) in set (0.006542s)
|
|||
|
||||
You can view the most recent report record of a supertable, such as:
|
||||
|
||||
``` shell
|
||||
```shell
|
||||
taos> select last_row(*) from taosd_dnodes_info;
|
||||
last_row(_ts) | last_row(disk_engine) | last_row(system_net_in) | last_row(vnodes_num) | last_row(system_net_out) | last_row(uptime) | last_row(has_mnode) | last_row(io_read_disk) | last_row(error_log_count) | last_row(io_read) | last_row(cpu_cores) | last_row(has_qnode) | last_row(has_snode) | last_row(disk_total) | last_row(mem_engine) | last_row(info_log_count) | last_row(cpu_engine) | last_row(io_write_disk) | last_row(debug_log_count) | last_row(disk_used) | last_row(mem_total) | last_row(io_write) | last_row(masters) | last_row(cpu_system) | last_row(trace_log_count) | last_row(mem_free) |
|
||||
======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||
|
|
|
@ -14,7 +14,7 @@ taosExplorer does not require separate installation. Starting from TDengine vers
|
|||
|
||||
Before starting taosExplorer, please make sure the content in the configuration file is correct.
|
||||
|
||||
```TOML
|
||||
```toml
|
||||
# This is an automatically generated configuration file for Explorer in [TOML](https://toml.io/) format.
|
||||
#
|
||||
# Here is a full list of available options.
|
||||
|
@ -148,7 +148,7 @@ Description:
|
|||
|
||||
Then start taosExplorer, you can directly execute taos-explorer in the command line or use the systemctl command:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
systemctl start taos-explorer # Linux
|
||||
sc.exe start taos-explorer # Windows
|
||||
```
|
||||
|
|
|
@ -171,7 +171,37 @@ Metric details:
|
|||
5. **Writes**: Total number of writes
|
||||
6. **Other**: Total number of other requests
|
||||
|
||||
There are also line charts for the above categories.
|
||||
There are also line charts for the above categories.
|
||||
|
||||
### Automatic import of preconfigured alert rules
|
||||
|
||||
After summarizing user experience, 14 commonly used alert rules are sorted out. These alert rules can monitor key indicators of the TDengine cluster and report alerts, such as abnormal and exceeded indicators.
|
||||
Starting from TDengine-Server 3.3.4.3 (TDengine-datasource 3.6.3), TDengine Datasource supports automatic import of preconfigured alert rules. You can import 14 alert rules to Grafana (version 11 or later) with one click.
|
||||
In the TDengine-datasource setting interface, turn on the "Load TDengine Alert" switch, click the "Save & test" button, the plugin will automatically load the mentioned 14 alert rules. The rules will be placed in the Grafana alerts directory. If not required, turn off the "Load TDengine Alert" switch, and click the button next to "Clear TDengine Alert" to clear all the alert rules imported into this data source.
|
||||
|
||||
After importing, click on "Alert rules" on the left side of the Grafana interface to view all current alert rules. By configuring contact points, users can receive alert notifications.
|
||||
|
||||
The specific configuration of the 14 alert rules is as follows:
|
||||
|
||||
| alert rule| Rule threshold| Behavior when no data | Data scanning interval |Duration | SQL |
|
||||
| ------ | --------- | ---------------- | ----------- |------- |----------------------|
|
||||
|CPU load of dnode node|average > 80%|Trigger alert|5 minutes|5 minutes |`select now(), dnode_id, last(cpu_system) as cup_use from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `|
|
||||
|Memory of dnode node |average > 60%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(mem_engine) / last(mem_total) * 100 as taosd from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts <now partition by dnode_id`|
|
||||
|Disk capacity occupancy of dnode nodes | > 80%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, data_dir_level, data_dir_name, last(used) / last(total) * 100 as used from log.taosd_dnodes_data_dirs where _ts >= (now - 5m) and _ts < now partition by dnode_id, data_dir_level, data_dir_name`|
|
||||
|Authorization expires |< 60 days|Trigger alert|1 day|0 seconds|`select now(), cluster_id, last(grants_expire_time) / 86400 as expire_time from log.taosd_cluster_info where _ts >= (now - 24h) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|
||||
|The number of used measurement points has reached the authorized number|>= 90%|Trigger alert|1 day|0 seconds|`select now(), cluster_id, CASE WHEN max(grants_timeseries_total) > 0.0 THEN max(grants_timeseries_used) /max(grants_timeseries_total) * 100.0 ELSE 0.0 END AS result from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1m) > 0`|
|
||||
|Number of concurrent query requests | > 100|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries`|
|
||||
|Maximum time for slow query execution (no time window) |> 300 seconds|Do not trigger alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries where exec_usec>300000000`|
|
||||
|dnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(dnodes_total) - last(dnodes_alive) as dnode_offline from log.taosd_cluster_info where _ts >= (now -30s) and _ts < now partition by cluster_id having first(_ts) > 0`|
|
||||
|vnode offline |total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(vnodes_total) - last(vnodes_alive) as vnode_offline from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|
||||
|Number of data deletion requests |> 0|Do not trigger alert|30 seconds|0 seconds|``select now(), count(`count`) as `delete_count` from log.taos_sql_req where sql_type = 'delete' and _ts >= (now -30s) and _ts < now``|
|
||||
|Adapter RESTful request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=0 and ts >= (now -30s) and ts < now``|
|
||||
|Adapter WebSocket request fail |> 5|Do not trigger alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=1 and ts >= (now -30s) and ts < now``|
|
||||
|Dnode data reporting is missing |< 3|Trigger alert|180 seconds|0 seconds|`select now(), cluster_id, count(*) as dnode_report from log.taosd_cluster_info where _ts >= (now -180s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1h) > 0`|
|
||||
|Restart dnode |max(update_time) > last(update_time)|Trigger alert|90 seconds|0 seconds|`select now(), dnode_id, max(uptime) - last(uptime) as dnode_restart from log.taosd_dnodes_info where _ts >= (now - 90s) and _ts < now partition by dnode_id`|
|
||||
|
||||
TDengine users can modify and improve these alert rules according to their own business needs. In Grafana 7.5 and below versions, the Dashboard and Alert rules functions are combined, while in subsequent new versions, the two functions are separated. To be compatible with Grafana 7.5 and below versions, an Alert Used Only panel has been added to the TDinsight panel, which is only required for Grafana 7.5 and below versions.
|
||||
|
||||
|
||||
## Upgrade
|
||||
|
||||
|
@ -248,13 +278,13 @@ The new version of the plugin uses the Grafana unified alerting feature, the `-E
|
|||
|
||||
Assuming you start the TDengine database on the host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
||||
```
|
||||
|
||||
If you want to monitor multiple TDengine clusters, you need to set up multiple TDinsight dashboards. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and if using the built-in SMS alert feature, `-N` and `-L` should also be changed.
|
||||
|
||||
```bash
|
||||
```shell
|
||||
sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
|
||||
```
|
||||
|
|
@ -10,7 +10,7 @@ The TDengine command line program (hereinafter referred to as TDengine CLI) is t
|
|||
|
||||
To enter the TDengine CLI, simply execute `taos` in the terminal.
|
||||
|
||||
```bash
|
||||
```shell
|
||||
taos
|
||||
```
|
||||
|
||||
|
@ -81,7 +81,7 @@ There are many other parameters:
|
|||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
taos -h h1.taos.com -s "use db; show tables;"
|
||||
```
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ taosBenchmark supports comprehensive performance testing for TDengine, and the T
|
|||
|
||||
Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration.
|
||||
|
||||
```bash
|
||||
```shell
|
||||
taosBenchmark
|
||||
```
|
||||
|
||||
|
@ -38,7 +38,7 @@ When running without parameters, taosBenchmark by default connects to the TDengi
|
|||
|
||||
When running taosBenchmark using command line parameters and controlling its behavior, the `-f <json file>` parameter cannot be used. All configuration parameters must be specified through the command line. Below is an example of using command line mode to test the write performance of taosBenchmark.
|
||||
|
||||
```bash
|
||||
```shell
|
||||
taosBenchmark -I stmt -n 200 -t 100
|
||||
```
|
||||
|
||||
|
@ -50,7 +50,7 @@ The taosBenchmark installation package includes examples of configuration files,
|
|||
|
||||
Use the following command line to run taosBenchmark and control its behavior through a configuration file.
|
||||
|
||||
```bash
|
||||
```shell
|
||||
taosBenchmark -f <json file>
|
||||
```
|
||||
|
||||
|
|
|
@ -170,7 +170,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause
|
|||
|
||||
alter_table_clause: {
|
||||
alter_table_options
|
||||
| SET tag tag_name = new_tag_value,tag_name2=new_tag2_value...
|
||||
| SET tag tag_name = new_tag_value, tag_name2=new_tag2_value ...
|
||||
}
|
||||
|
||||
alter_table_options:
|
||||
|
@ -194,7 +194,7 @@ alter_table_option: {
|
|||
### Modify Subtable Tag Value
|
||||
|
||||
```sql
|
||||
ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1,tag_name2=new_tag_value2...;
|
||||
ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...;
|
||||
```
|
||||
|
||||
### Modify Table Lifespan
|
||||
|
|
|
@ -210,19 +210,19 @@ However, renaming individual columns is not supported for `first(*)`, `last(*)`,
|
|||
|
||||
Retrieve all subtable names and related tag information from a supertable:
|
||||
|
||||
```mysql
|
||||
```sql
|
||||
SELECT TAGS TBNAME, location FROM meters;
|
||||
```
|
||||
|
||||
It is recommended that users query the subtable tag information of supertables using the INS_TAGS system table under INFORMATION_SCHEMA, for example, to get all subtable names and tag values of the supertable meters:
|
||||
|
||||
```mysql
|
||||
```sql
|
||||
SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tags WHERE stable_name='meters';
|
||||
```
|
||||
|
||||
Count the number of subtables under a supertable:
|
||||
|
||||
```mysql
|
||||
```sql
|
||||
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
|
||||
```
|
||||
|
||||
|
@ -385,7 +385,7 @@ SELECT CURRENT_USER();
|
|||
|
||||
### Syntax
|
||||
|
||||
```txt
|
||||
```text
|
||||
WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_
|
||||
```
|
||||
|
||||
|
@ -403,7 +403,7 @@ The length of the regular match string cannot exceed 128 bytes. You can set and
|
|||
|
||||
### Syntax
|
||||
|
||||
```txt
|
||||
```text
|
||||
CASE value WHEN compare_value THEN result [WHEN compare_value THEN result ...] [ELSE result] END
|
||||
CASE WHEN condition THEN result [WHEN condition THEN result ...] [ELSE result] END
|
||||
```
|
||||
|
@ -493,7 +493,7 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
|
|||
|
||||
## UNION ALL Clause
|
||||
|
||||
```txt title=Syntax
|
||||
```text title=Syntax
|
||||
SELECT ...
|
||||
UNION ALL SELECT ...
|
||||
[UNION ALL SELECT ...]
|
||||
|
|
|
@ -417,7 +417,7 @@ MOD(expr1, expr2)
|
|||
|
||||
**Example**:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
taos> select mod(10,3);
|
||||
mod(10,3) |
|
||||
============================
|
||||
|
@ -454,7 +454,7 @@ RAND([seed])
|
|||
|
||||
**Example**:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
taos> select rand();
|
||||
rand() |
|
||||
============================
|
||||
|
|
|
@ -41,38 +41,28 @@ If there is a single replica on the node and the node is offline, to forcibly de
|
|||
ALTER DNODE dnode_id dnode_option
|
||||
|
||||
ALTER ALL DNODES dnode_option
|
||||
|
||||
dnode_option: {
|
||||
'resetLog'
|
||||
| 'balance' 'value'
|
||||
| 'monitor' 'value'
|
||||
| 'debugFlag' 'value'
|
||||
| 'monDebugFlag' 'value'
|
||||
| 'vDebugFlag' 'value'
|
||||
| 'mDebugFlag' 'value'
|
||||
| 'cDebugFlag' 'value'
|
||||
| 'httpDebugFlag' 'value'
|
||||
| 'qDebugflag' 'value'
|
||||
| 'sdbDebugFlag' 'value'
|
||||
| 'uDebugFlag' 'value'
|
||||
| 'tsdbDebugFlag' 'value'
|
||||
| 'sDebugflag' 'value'
|
||||
| 'rpcDebugFlag' 'value'
|
||||
| 'dDebugFlag' 'value'
|
||||
| 'mqttDebugFlag' 'value'
|
||||
| 'wDebugFlag' 'value'
|
||||
| 'tmrDebugFlag' 'value'
|
||||
| 'cqDebugFlag' 'value'
|
||||
}
|
||||
```
|
||||
|
||||
The modifiable configuration items in the syntax above are configured in the same way as in the dnode configuration file, the difference being that modifications are dynamic, take immediate effect, and do not require restarting the dnode.
|
||||
For configuration parameters that support dynamic modification, you can use the ALTER DNODE or ALTER ALL DNODES syntax to modify the values of configuration parameters in a dnode. Starting from version 3.3.4.0, the modified configuration parameters will be automatically persisted and will remain effective even after the database service is restarted.
|
||||
|
||||
`value` is the value of the parameter, which needs to be in string format. For example, to change the log output level of dnode 1 to debug:
|
||||
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosd Reference](../01-components/01-taosd.md)
|
||||
|
||||
The value is the parameter's value and needs to be in character format. For example, to change the log output level of dnode 1 to debug:
|
||||
|
||||
```sql
|
||||
ALTER DNODE 1 'debugFlag' '143';
|
||||
```
|
||||
### Additional Notes:
|
||||
Configuration parameters in a dnode are divided into global configuration parameters and local configuration parameters. You can check the category field in SHOW VARIABLES or SHOW DNODE dnode_id VARIABLE to determine whether a configuration parameter is a global configuration parameter or a local configuration parameter:
|
||||
|
||||
Local configuration parameters: You can use ALTER DNODE or ALTER ALL DNODES to update the local configuration parameters of a specific dnode or all dnodes.
|
||||
Global configuration parameters: Global configuration parameters require consistency across all dnodes, so you can only use ALTER ALL DNODES to update the global configuration parameters of all dnodes.
|
||||
There are three cases for whether a configuration parameter can be dynamically modified:
|
||||
|
||||
Supports dynamic modification, effective immediately
|
||||
Supports dynamic modification, effective after restart
|
||||
Does not support dynamic modification
|
||||
For configuration parameters that take effect after a restart, you can see the modified values through SHOW VARIABLES or SHOW DNODE dnode_id VARIABLE, but you need to restart the database service to make them effective.
|
||||
|
||||
## Add Management Node
|
||||
|
||||
|
@ -136,18 +126,12 @@ If the client is also considered as part of the cluster in a broader sense, the
|
|||
|
||||
```sql
|
||||
ALTER LOCAL local_option
|
||||
|
||||
local_option: {
|
||||
'resetLog'
|
||||
| 'rpcDebugFlag' 'value'
|
||||
| 'tmrDebugFlag' 'value'
|
||||
| 'cDebugFlag' 'value'
|
||||
| 'uDebugFlag' 'value'
|
||||
| 'debugFlag' 'value'
|
||||
}
|
||||
```
|
||||
|
||||
The parameters in the syntax above are used in the same way as in the configuration file for the client, but do not require a restart of the client, and the changes take effect immediately.
|
||||
You can use the above syntax to modify the client's configuration parameters, and there is no need to restart the client. The changes take effect immediately.
|
||||
|
||||
To check whether a configuration parameter supports dynamic modification, please refer to the following page: [taosc Reference](../01-components/02-taosc.md)
|
||||
|
||||
|
||||
## View Client Configuration
|
||||
|
||||
|
|
|
@ -342,3 +342,18 @@ Note: Users with SYSINFO property set to 0 cannot view this table.
|
|||
| 10 | raw_data | BIGINT | Estimated size of raw data, in KB |
|
||||
|
||||
note:
|
||||
|
||||
## INS_FILESETS
|
||||
|
||||
Provides information about file sets.
|
||||
|
||||
| # | **Column** | **Data Type** | **Description** |
|
||||
| --- | :-----------: | ------------- | ---------------------------------------------------- |
|
||||
| 1 | db_name | VARCHAR(65) | Database name |
|
||||
| 2 | vgroup_id | INT | Vgroup ID |
|
||||
| 3 | fileset_id | INT | File set ID |
|
||||
| 4 | start_time | TIMESTAMP | Start time of the time range covered by the file set |
|
||||
| 5 | end_time | TIMESTAMP | End time of the time range covered by the file set |
|
||||
| 6 | total_size | BIGINT | Total size of the file set |
|
||||
| 7 | last_compact | TIMESTAMP | Time of the last compaction |
|
||||
| 8 | shold_compact | bool | Whether the file set should be compacted |
|
||||
|
|
|
@ -13,16 +13,16 @@ CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
|
|||
|
||||
The username can be up to 23 bytes long.
|
||||
|
||||
The password can be up to 31 bytes long. The password can include letters, numbers, and special characters except for single quotes, double quotes, backticks, backslashes, and spaces, and it cannot be an empty string.
|
||||
The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
|
||||
|
||||
`SYSINFO` indicates whether the user can view system information. `1` means they can view, `0` means they have no permission to view. System information includes service configuration, dnode, vnode, storage, etc. The default value is `1`.
|
||||
|
||||
`CREATEDB` indicates whether the user can create databases. `1` means they can create databases, `0` means they have no permission to create databases. The default value is `0`. // Supported starting from TDengine Enterprise version 3.3.2.0
|
||||
|
||||
In the example below, we create a user with the password `123456` who can view system information.
|
||||
In the example below, we create a user with the password `abc123!@#` who can view system information.
|
||||
|
||||
```sql
|
||||
taos> create user test pass '123456' sysinfo 1;
|
||||
taos> create user test pass 'abc123!@#' sysinfo 1;
|
||||
Query OK, 0 of 0 rows affected (0.001254s)
|
||||
```
|
||||
|
||||
|
|
|
@ -29,13 +29,13 @@ In this document, it specifically refers to the internal levels of the second-le
|
|||
- Default compression algorithms list and applicable range for each data type
|
||||
|
||||
| Data Type |Available Encoding Algorithms | Default Encoding Algorithm | Available Compression Algorithms | Default Compression Algorithm | Default Compression Level |
|
||||
|:------------------------------------:|:----------------:|:-----------:|:--------------------:|:----:|:------:|
|
||||
| int/uint | simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium |
|
||||
| tinyint/untinyint/smallint/usmallint | simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium |
|
||||
| bigint/ubigint/timestamp | simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium |
|
||||
| float/double | delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| bool | bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||
|:------------------------------------:|:-------------------------:|:-----------:|:--------------------:|:----:|:------:|
|
||||
| int/uint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium |
|
||||
| tinyint/untinyint/smallint/usmallint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium |
|
||||
| bigint/ubigint/timestamp | disabled/simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium |
|
||||
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||
|
||||
## SQL Syntax
|
||||
|
||||
|
|
|
@ -108,7 +108,7 @@ For the source code of the example programs, please refer to: [Example Programs]
|
|||
|
||||
The Data Source Name has a generic format, similar to [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without the type prefix (brackets indicate optional):
|
||||
|
||||
``` text
|
||||
```text
|
||||
[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...¶mN=valueN]
|
||||
```
|
||||
|
|
@ -21,7 +21,7 @@ Below is an example using the `curl` tool in an Ubuntu environment (please confi
|
|||
|
||||
The following example lists all databases, please replace `h1.tdengine.com` and 6041 (default value) with the actual running TDengine service FQDN and port number:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
|
||||
-d "select name, ntables, status from information_schema.ins_databases;" \
|
||||
h1.tdengine.com:6041/rest/sql
|
||||
|
@ -100,13 +100,13 @@ The BODY of the HTTP request contains a complete SQL statement. The data table i
|
|||
|
||||
Use `curl` to initiate an HTTP Request with custom authentication as follows:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]]
|
||||
```
|
||||
|
||||
Or,
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]]
|
||||
```
|
||||
|
||||
|
@ -279,7 +279,7 @@ Column types use the following strings:
|
|||
|
||||
Prepare data
|
||||
|
||||
```bash
|
||||
```shell
|
||||
create database demo
|
||||
use demo
|
||||
create table t(ts timestamp,c1 varbinary(20),c2 geometry(100))
|
||||
|
@ -288,7 +288,7 @@ insert into t values(now,'\x7f8290','point(100 100)')
|
|||
|
||||
Execute query
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl --location 'http://<fqdn>:<port>/rest/sql' \
|
||||
--header 'Content-Type: text/plain' \
|
||||
--header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \
|
||||
|
@ -428,7 +428,7 @@ Data Query Return Example
|
|||
|
||||
HTTP requests need to include an authorization code `<TOKEN>`, used for identity verification. The authorization code is usually provided by the administrator and can be simply obtained by sending an `HTTP GET` request as follows:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl http://<fqnd>:<port>/rest/login/<username>/<password>
|
||||
```
|
||||
|
||||
|
@ -440,7 +440,7 @@ Here, `fqdn` is the FQDN or IP address of the TDengine database, `port` is the p
|
|||
|
||||
Example of obtaining an authorization code:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl http://192.168.0.1:6041/rest/login/root/taosdata
|
||||
```
|
||||
|
||||
|
@ -457,7 +457,7 @@ Return value:
|
|||
|
||||
- Query all records of table d1001 in the demo database:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
|
||||
curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
|
||||
```
|
||||
|
@ -509,7 +509,7 @@ Return value:
|
|||
|
||||
- Create database demo:
|
||||
|
||||
```bash
|
||||
```shell
|
||||
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
|
||||
curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "create database demo" 192.168.0.1:6041/rest/sql
|
||||
```
|
||||
|
@ -560,7 +560,7 @@ Return value:
|
|||
|
||||
#### TDengine 2.x response codes and message bodies
|
||||
|
||||
```JSON
|
||||
```json
|
||||
{
|
||||
"status": "succ",
|
||||
"head": [
|
||||
|
@ -624,7 +624,7 @@ Return value:
|
|||
|
||||
#### TDengine 3.0 Response Codes and Message Body
|
||||
|
||||
```JSON
|
||||
```json
|
||||
{
|
||||
"code": 0,
|
||||
"column_meta": [
|
|
@ -72,6 +72,8 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000133 | Invalid operation | Invalid or unsupported operation | 1. Modify to confirm the current operation is legal and supported, check parameter validity 2. If the problem persists, preserve the scene and logs, report issue on github |
|
||||
| 0x80000134 | Invalid value | Invalid value | Preserve the scene and logs, report issue on github |
|
||||
| 0x80000135 | Invalid fqdn | Invalid FQDN | Check if the configured or input FQDN value is correct |
|
||||
| 0x8000013C | Invalid disk id | Invalid disk id | Check whether the mounted disk is invalid, or use the parameter diskIDCheckEnabled to skip the disk check. |
|
||||
|
||||
|
||||
## tsc
|
||||
|
||||
|
@ -129,7 +131,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000350 | User already exists | Create user, duplicate creation | Confirm if the operation is correct |
|
||||
| 0x80000351 | Invalid user | User does not exist | Confirm if the operation is correct |
|
||||
| 0x80000352 | Invalid user format | Incorrect format | Confirm if the operation is correct |
|
||||
| 0x80000353 | Invalid password format | Incorrect format | Confirm if the operation is correct |
|
||||
| 0x80000353 | Invalid password format | The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. | Confirm the format of the password string |
|
||||
| 0x80000354 | Can not get user from conn | Internal error | Report issue |
|
||||
| 0x80000355 | Too many users | (Enterprise only) Exceeding user limit | Adjust configuration |
|
||||
| 0x80000357 | Authentication failure | Incorrect password | Confirm if the operation is correct |
|
||||
|
@ -251,6 +253,7 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000529 | Vnode is stopped | Vnode is closed | Report issue |
|
||||
| 0x80000530 | Duplicate write request | Duplicate write request, internal error | Report issue |
|
||||
| 0x80000531 | Vnode query is busy | Query is busy | Report issue |
|
||||
| 0x80000540 | Vnode already exist but Dbid not match | Internal error | Report issue |
|
||||
|
||||
## tsdb
|
||||
|
||||
|
@ -283,6 +286,9 @@ This document details the server error codes that may be encountered when using
|
|||
| 0x80000729 | Task message error | Query message error | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x8000072B | Task status error | Subquery status error | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x8000072F | Job not exist | Query JOB no longer exists | Preserve the scene and logs, report issue on GitHub |
|
||||
| 0x80000739 | Query memory upper limit is reached | Single query memory upper limit is reached | Modify memory upper limit size or optimize SQL |
|
||||
| 0x8000073A | Query memory exhausted | Query memory in dnode is exhausted | Limit concurrent queries or add more physical memory |
|
||||
| 0x8000073B | Timeout for long time no fetch | Query without fetch for a long time | Correct application to fetch data asap |
|
||||
|
||||
## grant
|
||||
|
||||
|
|
|
@ -90,7 +90,7 @@ Batch insertion. Each insert statement can insert multiple records into one tabl
|
|||
|
||||
When inserting nchar type data containing Chinese characters on Windows, first ensure that the system's regional settings are set to China (this can be set in the Control Panel). At this point, the `taos` client in cmd should already be working properly; if developing a Java application in an IDE, such as Eclipse or IntelliJ, ensure that the file encoding in the IDE is set to GBK (which is the default encoding type for Java), then initialize the client configuration when creating the Connection, as follows:
|
||||
|
||||
```JAVA
|
||||
```java
|
||||
Class.forName("com.taosdata.jdbc.TSDBDriver");
|
||||
Properties properties = new Properties();
|
||||
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
|
||||
|
@ -145,7 +145,7 @@ Version 3.0 of TDengine includes a standalone component developed in Go called `
|
|||
|
||||
The Go language version requirement is 1.14 or higher. If there are Go compilation errors, often due to issues accessing Go mod in China, they can be resolved by setting Go environment variables:
|
||||
|
||||
```sh
|
||||
```shell
|
||||
go env -w GO111MODULE=on
|
||||
go env -w GOPROXY=https://goproxy.cn,direct
|
||||
```
|
||||
|
@ -196,7 +196,7 @@ Here are the solutions:
|
|||
|
||||
1. Create a file /Library/LaunchDaemons/limit.maxfiles.plist, write the following content (the example changes limit and maxfiles to 100,000, modify as needed):
|
||||
|
||||
```plist
|
||||
```xml
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
|
||||
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
|
@ -286,4 +286,14 @@ This connection only reports the most basic information that does not involve an
|
|||
This feature is an optional configuration item, which is enabled by default in the open-source version. The specific parameter is telemetryReporting, as explained in the [official documentation](../tdengine-reference/components/taosd/).
|
||||
You can disable this parameter at any time by modifying telemetryReporting to 0 in taos.cfg, then restarting the database service.
|
||||
Code located at: [https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c](https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c).
|
||||
Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational.
|
||||
Additionally, for the highly secure enterprise version, TDengine Enterprise, this parameter will not be operational.
|
||||
|
||||
### 31 What should I do if I encounter 'Sync leader is unreachable' when connecting to the cluster for the first time?
|
||||
|
||||
Reporting this error indicates that the first connection to the cluster was successful, but the IP address accessed for the first time was not the leader of mnode. An error occurred when the client attempted to establish a connection with the leader. The client searches for the leader node through EP, which specifies the fqdn and port number. There are two common reasons for this error:
|
||||
|
||||
- The ports of other dnodes in the cluster are not open
|
||||
- The client's hosts file is not configured correctly
|
||||
|
||||
Therefore, first, check whether all ports on the server and cluster (default 6030 for native connections and 6041 for HTTP connections) are open; Next, check if the client's hosts file has configured the fqdn and IP information for all dnodes in the cluster.
|
||||
If the issue still cannot be resolved, it is necessary to contact TDengine technical support.
|
||||
|
|
|
@ -26,7 +26,6 @@ async function createDbAndTable() {
|
|||
let conf = new taos.WSConfig(dsn);
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
conf.setDb('power');
|
||||
wsSql = await taos.sqlConnect(conf);
|
||||
console.log("Connected to " + dsn + " successfully.");
|
||||
// create database
|
||||
|
|
|
@ -40,7 +40,6 @@ async function prepare() {
|
|||
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
conf.setDb('power');
|
||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||
|
||||
|
|
|
@ -34,10 +34,10 @@ async function createConsumer() {
|
|||
}
|
||||
|
||||
async function prepare() {
|
||||
let conf = new taos.WSConfig('ws://192.168.1.98:6041');
|
||||
let conf = new taos.WSConfig('ws://localhost:6041');
|
||||
conf.setUser('root');
|
||||
conf.setPwd('taosdata');
|
||||
conf.setDb('power');
|
||||
|
||||
const createDB = `CREATE DATABASE IF NOT EXISTS ${db}`;
|
||||
const createStable = `CREATE STABLE IF NOT EXISTS ${db}.${stable} (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);`;
|
||||
|
||||
|
|
|
@ -4,11 +4,10 @@ sidebar_label: "安装部署"
|
|||
---
|
||||
|
||||
### 环境准备
|
||||
使用 TDgpt 的高级时序数据分析功能需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 可以运行在 Linux/Windows/MacOS 等平台上,同时需要 3.10 或以上版本的 Python 环境支持。
|
||||
使用 TDgpt 的高级时序数据分析功能需要在 TDengine 集群中安装部署 AI node(Anode)。Anode 运行在 Linux 平台上,并需要 3.10 或以上版本的 Python 环境支持。
|
||||
> 部署 Anode 需要 TDengine Enterprise 3.3.4.3 及以后版本,请首先确认搭配 Anode 使用的 TDengine 能够支持 Anode。
|
||||
|
||||
### 安装及卸载
|
||||
不同操作系统上安装及部署 Anode 有一些差异,主要是卸载操作、安装路径、服务启停等方面。本文以 Linux 系统为例,说明安装部署的流程。
|
||||
使用 Linux 环境下的安装包 TDengine-enterprise-anode-1.x.x.tar.gz 可进行 Anode 的安装部署工作,命令如下:
|
||||
|
||||
```bash
|
||||
|
@ -37,7 +36,7 @@ systemctl status taosanoded
|
|||
|/usr/local/taos/taosanode/bin|可执行文件目录|
|
||||
|/usr/local/taos/taosanode/resource|资源文件目录,链接到文件夹 /var/lib/taos/taosanode/resource/|
|
||||
|/usr/local/taos/taosanode/lib|库文件目录|
|
||||
|/var/lib/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model|
|
||||
|/usr/local/taos/taosanode/model/|模型文件目录,链接到文件夹 /var/lib/taos/taosanode/model|
|
||||
|/var/log/taos/taosanode/|日志文件目录|
|
||||
|/etc/taos/taosanode.ini|配置文件|
|
||||
|
||||
|
@ -64,7 +63,7 @@ pidfile = /usr/local/taos/taosanode/taosanode.pid
|
|||
# conflict with systemctl, so do NOT uncomment this
|
||||
# daemonize = /var/log/taos/taosanode/taosanode.log
|
||||
|
||||
# log directory
|
||||
# uWSGI log files
|
||||
logto = /var/log/taos/taosanode/taosanode.log
|
||||
|
||||
# wWSGI monitor port
|
||||
|
@ -74,7 +73,7 @@ stats = 127.0.0.1:8387
|
|||
virtualenv = /usr/local/taos/taosanode/venv/
|
||||
|
||||
[taosanode]
|
||||
# default app log file
|
||||
# default taosanode log file
|
||||
app-log = /var/log/taos/taosanode/taosanode.app.log
|
||||
|
||||
# model storage directory
|
||||
|
|
|
@ -12,7 +12,7 @@ import wndata from './pic/white-noise-data.png'
|
|||
<img src={activity} width="560" alt="预处理流程" />
|
||||
|
||||
TDgpt 首先对输入数据进行白噪声检查(White Noise Data check), 检查通过以后针对预测分析,还要进行输入(历史)数据的重采样和时间戳对齐处理(异常检测跳过数据重采样和时间戳对齐步骤)。
|
||||
预处理完成以后,再进行预测或异常检测操作。预处理过程部署于预测或异常检测处理逻辑的一部分。
|
||||
预处理完成以后,再进行预测或异常检测操作。预处理过程不属于预测或异常检测处理逻辑的一部分。
|
||||
|
||||
### 白噪声检查
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ description: 预测算法
|
|||
|
||||
```bash
|
||||
taos> select * from foo;
|
||||
ts | k |
|
||||
ts | i32 |
|
||||
========================================
|
||||
2020-01-01 00:00:12.681 | 13 |
|
||||
2020-01-01 00:00:13.727 | 14 |
|
||||
|
@ -42,7 +42,7 @@ algo=expr1
|
|||
|
||||
```
|
||||
1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型列输入。
|
||||
2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测支持 `conf`, `every`, `rows`, `start`, `rows` 几个控制参数,其含义如下:
|
||||
2. `options`:预测函数的参数。字符串类型,其中使用 K=V 方式调用算法及相关参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。预测支持 `conf`, `every`, `rows`, `start`, `rows` 几个控制参数,其含义如下:
|
||||
|
||||
### 参数说明
|
||||
|
||||
|
|
|
@ -34,7 +34,8 @@ return {
|
|||
|
||||
```python
|
||||
import numpy as np
|
||||
from service import AbstractForecastService
|
||||
from taosanalytics.service import AbstractForecastService
|
||||
|
||||
|
||||
# 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束
|
||||
class _MyForecastService(AbstractForecastService):
|
||||
|
@ -51,12 +52,12 @@ class _MyForecastService(AbstractForecastService):
|
|||
super().__init__()
|
||||
|
||||
def execute(self):
|
||||
""" 算法逻辑的核心实现"""
|
||||
""" 算法逻辑的核心实现"""
|
||||
res = []
|
||||
|
||||
"""这个预测算法固定返回 1 作为预测值,预测值的数量是用户通过 self.fc_rows 指定"""
|
||||
ts_list = [self.start_ts + i * self.time_step for i in range(self.fc_rows)]
|
||||
res.app(ts_list) # 设置预测结果时间戳列
|
||||
res.append(ts_list) # 设置预测结果时间戳列
|
||||
|
||||
"""生成全部为 1 的预测结果 """
|
||||
res_list = [1] * self.fc_rows
|
||||
|
@ -64,18 +65,18 @@ class _MyForecastService(AbstractForecastService):
|
|||
|
||||
"""检查用户输入,是否要求返回预测置信区间上下界"""
|
||||
if self.return_conf:
|
||||
"""对于没有计算预测置信区间上下界的算法,直接返回预测值作为上下界即可"""
|
||||
bound_list = [1] * self.fc_rows
|
||||
res.append(bound_list) # 预测结果置信区间下界
|
||||
res.append(bound_list) # 预测结果执行区间上界
|
||||
"""对于没有计算预测置信区间上下界的算法,直接返回预测值作为上下界即可"""
|
||||
bound_list = [1] * self.fc_rows
|
||||
res.append(bound_list) # 预测结果置信区间下界
|
||||
res.append(bound_list) # 预测结果执行区间上界
|
||||
|
||||
"""返回结果"""
|
||||
return { "res": res, "mse": 0}
|
||||
return {"res": res, "mse": 0}
|
||||
|
||||
|
||||
def set_params(self, params):
|
||||
"""该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑"""
|
||||
pass
|
||||
"""该算法无需任何输入参数,直接调用父类函数,不处理算法参数设置逻辑"""
|
||||
return super().set_params(params)
|
||||
|
||||
```
|
||||
|
||||
将该文件保存在 `./taosanalytics/algo/fc/` 目录下,然后重启 taosanode 服务。在 TDengine 命令行接口中执行 `SHOW ANODES FULL` 能够看到新加入的算法。应用就可以通过 SQL 语句调用该预测算法。
|
||||
|
|
|
@ -16,7 +16,7 @@ sidebar_label: "异常检测"
|
|||
|
||||
```python
|
||||
import numpy as np
|
||||
from service import AbstractAnomalyDetectionService
|
||||
from taosanalytics.service import AbstractAnomalyDetectionService
|
||||
|
||||
# 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束
|
||||
class _MyAnomalyDetectionService(AbstractAnomalyDetectionService):
|
||||
|
|
|
@ -19,24 +19,25 @@ Anode的主要目录结构如下图所示
|
|||
|
||||
```bash
|
||||
.
|
||||
├── bin
|
||||
├── cfg
|
||||
├── model
|
||||
│ └── ad_autoencoder
|
||||
├── release
|
||||
├── script
|
||||
└── taosanalytics
|
||||
├── algo
|
||||
│ ├── ad
|
||||
│ └── fc
|
||||
├── misc
|
||||
└── test
|
||||
├── lib
|
||||
│ └── taosanalytics
|
||||
│ ├── algo
|
||||
│ │ ├── ad
|
||||
│ │ └── fc
|
||||
│ ├── misc
|
||||
│ └── test
|
||||
├── log -> /var/log/taos/taosanode
|
||||
├── model -> /var/lib/taos/taosanode/model
|
||||
└── venv -> /var/lib/taos/taosanode/venv
|
||||
|
||||
```
|
||||
|
||||
|目录|说明|
|
||||
|---|---|
|
||||
|taosanalytics| 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc,单元测试和集成测试目录 test。 algo 目录下 ad 保存异常检测算法代码,fc 目录保存预测算法代码|
|
||||
|script|是安装脚本和发布脚本放置目录|
|
||||
|venv| Python 虚拟环境|
|
||||
|model|放置针对数据集完成的训练模型|
|
||||
|cfg|配置文件目录|
|
||||
|
||||
|
@ -63,7 +64,8 @@ Anode采用算法自动加载模式,因此只识别符合命名约定的 Pytho
|
|||
|
||||
```SQL
|
||||
--- algo 后面的参数 name 即为类属性 `name`
|
||||
SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name')
|
||||
SELECT COUNT(*)
|
||||
FROM foo ANOMALY_WINDOW(col_name, 'algo=name')
|
||||
```
|
||||
|
||||
## 添加具有模型的分析算法
|
||||
|
@ -76,19 +78,10 @@ SELECT COUNT(*) FROM foo ANOMALY_WINDOW(col_name, 'algo=name')
|
|||
|
||||
```bash
|
||||
.
|
||||
├── cfg
|
||||
├── model
|
||||
│ └── ad_autoencoder
|
||||
│ ├── ad_autoencoder_foo.dat
|
||||
│ └── ad_autoencoder_foo.info
|
||||
├── release
|
||||
├── script
|
||||
└── taosanalytics
|
||||
├── algo
|
||||
│ ├── ad
|
||||
│ └── fc
|
||||
├── misc
|
||||
└── test
|
||||
└── model
|
||||
└── ad_autoencoder
|
||||
├── ad_autoencoder_foo.dat
|
||||
└── ad_autoencoder_foo.info
|
||||
|
||||
```
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@ TDengine 面向多种写入场景,而很多写入场景下,TDengine 的存
|
|||
|
||||
```SQL
|
||||
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
|
||||
COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
|
||||
SHOW COMPACTS [compact_id];
|
||||
KILL COMPACT compact_id;
|
||||
```
|
||||
|
@ -25,6 +26,7 @@ KILL COMPACT compact_id;
|
|||
### 效果
|
||||
|
||||
- 扫描并压缩指定的 DB 中所有 VGROUP 中 VNODE 的所有数据文件
|
||||
- 扫描并压缩 DB 中指定的 VGROUP 列表中 VNODE 的所有数据文件, 若 db_name 为空,则默认为当前数据库
|
||||
- COMPACT 会删除被删除数据以及被删除的表的数据
|
||||
- COMPACT 会合并多个 STT 文件
|
||||
- 可通过 start with 关键字指定 COMPACT 数据的起始时间
|
||||
|
|
|
@ -68,19 +68,19 @@ dataDir /mnt/data6 2 0
|
|||
|
||||
在配置文件 /etc/taos/taos.cfg 中,添加用于 S3 访问的参数:
|
||||
|
||||
|参数名称 | 参数含义 |
|
||||
|:-------------|:-----------------------------------------------|
|
||||
|s3EndPoint | 用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 的保持一致,否则无法访问。 |
|
||||
|s3AccessKey |冒号分隔的用户 SecretId:SecretKey。例如:AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E |
|
||||
|s3BucketName | 存储桶名称,减号后面是用户注册 COS 服务的 AppId。其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔。参数值均为字符串类型,但不需要引号。例如:test0711-1309024725 |
|
||||
|s3UploadDelaySec | data 文件持续多长时间不再变动后上传至 s3,单位:秒。最小值:1;最大值:2592000 (30天),默认值 60 秒 |
|
||||
|s3PageCacheSize |s3 page cache 缓存页数目,单位:页。最小值:4;最大值:1024*1024\*1024。 ,默认值 4096|
|
||||
|s3MigrateIntervalSec | 本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600 |
|
||||
|s3MigrateEnabled | 是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1。 |
|
||||
| 参数名称 | 参数含义 |
|
||||
|:---------------------|:-----------------------------------------------|
|
||||
| s3EndPoint | 用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 的保持一致,否则无法访问。 |
|
||||
| s3AccessKey | 冒号分隔的用户 SecretId:SecretKey。例如:AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E |
|
||||
| s3BucketName | 存储桶名称,减号后面是用户注册 COS 服务的 AppId。其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔。参数值均为字符串类型,但不需要引号。例如:test0711-1309024725 |
|
||||
| s3UploadDelaySec | data 文件持续多长时间不再变动后上传至 s3,单位:秒。最小值:1;最大值:2592000(30天),默认值 60 秒 |
|
||||
| s3PageCacheSize | S3 page cache 缓存页数目,单位:页。最小值:4;最大值:1024*1024*1024。 ,默认值 4096|
|
||||
| s3MigrateIntervalSec | 本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600 |
|
||||
| s3MigrateEnabled | 是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1。 |
|
||||
|
||||
### 检查配置参数可用性
|
||||
|
||||
在 taos.cfg 中完成对 s3 的配置后,通过 taosd 命令的 checks3 参数可以检查所配置的 S3 服务是否可用:
|
||||
在 taos.cfg 中完成对 S3 的配置后,通过 taosd 命令的 checks3 参数可以检查所配置的 S3 服务是否可用:
|
||||
|
||||
```
|
||||
taosd --checks3
|
||||
|
@ -106,11 +106,11 @@ s3migrate database <db_name>;
|
|||
|
||||
详细的 DB 参数见下表:
|
||||
|
||||
| # | 参数 | 默认值 | 最小值 | 最大值 | 描述 |
|
||||
| :--- | :----------- | :----- | :----- | :------ | :----------------------------------------------------------- |
|
||||
| 1 | s3_keeplocal | 365 | 1 | 365000 | 数据在本地保留的天数,即 data 文件在本地磁盘保留多长时间后可以上传到 S3。默认单位:天,支持 m(分钟)、h(小时)和 d(天)三个单位 |
|
||||
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | 上传对象的大小阈值,与 tsdb_pagesize 参数一样,不可修改,单位为 TSDB 页 |
|
||||
| 3 | s3_compact | 1 | 0 | 1 | TSDB 文件组首次上传 S3 时,是否自动进行 compact 操作。 |
|
||||
| # | 参数 | 默认值 | 最小值 | 最大值 | 描述 |
|
||||
|:--|:--------------|:-------|:------ |:------- | :----------------------------------------------------------- |
|
||||
| 1 | s3_keeplocal | 365 | 1 | 365000 | 数据在本地保留的天数,即 data 文件在本地磁盘保留多长时间后可以上传到 S3。默认单位:天,支持 m(分钟)、h(小时)和 d(天)三个单位 |
|
||||
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | 上传对象的大小阈值,与 tsdb_pagesize 参数一样,不可修改,单位为 TSDB 页 |
|
||||
| 3 | s3_compact | 1 | 0 | 1 | TSDB 文件组首次上传 S3 时,是否自动进行 compact 操作 |
|
||||
|
||||
### 对象存储读写次数估算
|
||||
|
||||
|
@ -168,10 +168,10 @@ s3BucketName td-test
|
|||
|
||||
用户界面同 S3,不同的地方在于下面三个参数的配置:
|
||||
|
||||
| # | 参数 | 示例值 | 描述 |
|
||||
| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- |
|
||||
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
|
||||
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | 冒号分隔的用户 accountId:accountKey |
|
||||
| 3 | s3BucketName | test-container | Container name |
|
||||
| # | 参数 | 示例值 | 描述 |
|
||||
|:--|:-------------|:-----------------------------------------|:----------------------------------|
|
||||
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
|
||||
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | 冒号分隔的用户 accountId:accountKey |
|
||||
| 3 | s3BucketName | test-container | Container name |
|
||||
|
||||
其中 fd2d01c73 是账户 ID;微软 Blob 存储服务只支持 Https 协议,不支持 Http。
|
||||
|
|
|
@ -16,15 +16,15 @@ create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
|
|||
```
|
||||
|
||||
相关参数说明如下。
|
||||
- user_name:最长为 23 B。
|
||||
- password:最长为 128 B,合法字符包括字母和数字以及单双引号、撇号、反斜杠和空格以外的特殊字符,且不可以为空。
|
||||
- user_name:用户名最长不超过 23 个字节。
|
||||
- password:密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。
|
||||
- sysinfo :用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息,如 dnode、查询节点(qnode)等,以及与存储相关的信息等。默认为可以查看系统信息。
|
||||
- createdb:用户是否可以创建数据库。1 表示可以创建,0 表示不可以创建。缺省值为 0。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||
|
||||
如下 SQL 可以创建密码为 123456 且可以查看系统信息的用户 test。
|
||||
如下 SQL 可以创建密码为 abc123!@# 且可以查看系统信息的用户 test。
|
||||
|
||||
```sql
|
||||
create user test pass '123456' sysinfo 1
|
||||
create user test pass 'abc123!@#' sysinfo 1
|
||||
```
|
||||
|
||||
### 查看用户
|
||||
|
|
|
@ -26,65 +26,67 @@ taosd 命令行参数如下
|
|||
:::
|
||||
|
||||
### 连接相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------------|-------------------------|------------|
|
||||
|firstEp | |taosd 启动时,主动连接的集群中首个 dnode 的 end point,默认值 localhost:6030|
|
||||
|secondEp | |taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,无默认值|
|
||||
|fqdn | |taosd 监听的服务地址,默认为所在服务器上配置的第一个 hostname|
|
||||
|serverPort | |taosd 监听的端口,默认值 6030|
|
||||
|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;默认值 -1|
|
||||
|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3 |
|
||||
|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,默认值 30000|
|
||||
|numOfRpcThreads | |RPC 收发数据线程数目,取值范围1-50,默认值为 CPU 核数的一半|
|
||||
|numOfTaskQueueThreads | |客户端处理 RPC 消息的线程数取值, 范围4-16,默认值为 CPU 核数的一半|
|
||||
|rpcQueueMemoryAllowed | |dnode允许的已经收到的RPC消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值为服务器内存的 1/10 |
|
||||
|resolveFQDNRetryTime | 3.x 之后取消 |FQDN 解析失败时的重试次数|
|
||||
|timeToGetAvailableConn | 3.3.4.x之后取消 |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,默认值 500000|
|
||||
|maxShellConns | 3.x 后取消 |允许创建的最大链接数|
|
||||
|maxRetryWaitTime | |重连最大超时时间, 默认值是 10s|
|
||||
|shareConnLimit |3.3.4.0 新增 |一个链接可以共享的请求的数目,取值范围 1-512,默认值 10|
|
||||
|readTimeout |3.3.4.0 新增 |单个请求最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|-----------------------|-------------------------|-------------------------|------------|
|
||||
|firstEp | |不支持动态修改 |taosd 启动时,主动连接的集群中首个 dnode 的 end point,默认值 localhost:6030|
|
||||
|secondEp | |不支持动态修改 |taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,无默认值|
|
||||
|fqdn | |不支持动态修改 |taosd 监听的服务地址,默认为所在服务器上配置的第一个 hostname|
|
||||
|serverPort | |不支持动态修改 |taosd 监听的端口,默认值 6030|
|
||||
|compressMsgSize | |支持动态修改 重启生效 |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;默认值 -1|
|
||||
|shellActivityTimer | |支持动态修改 立即生效 |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3 |
|
||||
|numOfRpcSessions | |支持动态修改 重启生效 |RPC 支持的最大连接数,取值范围 100-100000,默认值 30000|
|
||||
|numOfRpcThreads | |支持动态修改 重启生效 |RPC 收发数据线程数目,取值范围1-50,默认值为 CPU 核数的一半|
|
||||
|numOfTaskQueueThreads | |支持动态修改 重启生效 |客户端处理 RPC 消息的线程数取值, 范围4-16,默认值为 CPU 核数的一半|
|
||||
|rpcQueueMemoryAllowed | |支持动态修改 立即生效 |dnode允许的已经收到的RPC消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值为服务器内存的 1/10 |
|
||||
|resolveFQDNRetryTime | 3.x 之后取消 |不支持动态修改 |FQDN 解析失败时的重试次数|
|
||||
|timeToGetAvailableConn | 3.3.4.x之后取消 |支持动态修改 重启生效 |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,默认值 500000|
|
||||
|maxShellConns | 3.x 后取消 |支持动态修改 重启生效 |允许创建的最大链接数|
|
||||
|maxRetryWaitTime | |支持动态修改 重启生效 |重连最大超时时间, 默认值是 10s|
|
||||
|shareConnLimit |3.3.4.0 新增 |支持动态修改 重启生效 |一个链接可以共享的请求的数目,取值范围 1-512,默认值 10|
|
||||
|readTimeout |3.3.4.0 新增 |支持动态修改 重启生效 |单个请求最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
|
||||
|
||||
### 监控相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------------|----------|-|
|
||||
|monitor | |是否收集监控数据并上报,0:关闭;1:打开;默认值 0|
|
||||
|monitorFqdn | |taosKeeper 服务所在服务器的 FQDN,默认值 无|
|
||||
|monitorPort | |taosKeeper 服务所监听的端口号,默认值 6043|
|
||||
|monitorInterval | |监控数据库记录系统参数(CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,默认值 30|
|
||||
|monitorMaxLogs | |缓存的待上报日志条数|
|
||||
|monitorComp | |是否采用压缩方式上报监控日志时|
|
||||
|monitorLogProtocol | |是否打印监控日志|
|
||||
|monitorForceV2 | |是否使用 V2 版本协议上报|
|
||||
|telemetryReporting | |是否上传 telemetry,0:不上传,1:上传,默认值 1|
|
||||
|telemetryServer | |telemetry 服务器地址|
|
||||
|telemetryPort | |telemetry 服务器端口编号|
|
||||
|telemetryInterval | |telemetry 上传时间间隔,单位为秒,默认 43200|
|
||||
|crashReporting | |是否上传 crash 信息;0:不上传,1:上传;默认值 1|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|-----------------------|----------|-------------------------|-|
|
||||
|monitor | |支持动态修改 立即生效 |是否收集监控数据并上报,0:关闭;1:打开;默认值 0|
|
||||
|monitorFqdn | |支持动态修改 重启生效 |taosKeeper 服务所在服务器的 FQDN,默认值 无|
|
||||
|monitorPort | |支持动态修改 重启生效 |taosKeeper 服务所监听的端口号,默认值 6043|
|
||||
|monitorInterval | |支持动态修改 立即生效 |监控数据库记录系统参数(CPU/内存)的时间间隔,单位是秒,取值范围 1-200000 ,默认值 30|
|
||||
|monitorMaxLogs | |支持动态修改 立即生效 |缓存的待上报日志条数|
|
||||
|monitorComp | |支持动态修改 重启生效 |是否采用压缩方式上报监控日志时|
|
||||
|monitorLogProtocol | |支持动态修改 立即生效 |是否打印监控日志|
|
||||
|monitorForceV2 | |支持动态修改 立即生效 |是否使用 V2 版本协议上报|
|
||||
|telemetryReporting | |支持动态修改 立即生效 |是否上传 telemetry,0:不上传,1:上传,默认值 1|
|
||||
|telemetryServer | |不支持动态修改 |telemetry 服务器地址|
|
||||
|telemetryPort | |不支持动态修改 |telemetry 服务器端口编号|
|
||||
|telemetryInterval | |支持动态修改 立即生效 |telemetry 上传时间间隔,单位为秒,默认 43200|
|
||||
|crashReporting | |支持动态修改 立即生效 |是否上传 crash 信息;0:不上传,1:上传;默认值 1|
|
||||
|
||||
### 查询相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|------------------------|----------|-|
|
||||
|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
|
||||
|tagFilterCache | |是否缓存标签过滤结果|
|
||||
|maxNumOfDistinctRes | |允许返回的 distinct 结果最大行数,默认值 10 万,最大允许值 1 亿|
|
||||
|queryBufferSize | |暂不生效|
|
||||
|queryRspPolicy | |查询响应策略|
|
||||
|filterScalarMode | |强制使用标量过滤模式,0:关闭;1:开启,默认值 0|
|
||||
|queryPlannerTrace | |内部参数,查询计划是否输出详细日志|
|
||||
|queryNodeChunkSize | |内部参数,查询计划的块大小|
|
||||
|queryUseNodeAllocator | |内部参数,查询计划的分配方法|
|
||||
|queryMaxConcurrentTables| |内部参数,查询计划的并发数目|
|
||||
|queryRsmaTolerance | |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒|
|
||||
|enableQueryHb | |内部参数,是否发送查询心跳消息|
|
||||
|pqSortMemThreshold | |内部参数,排序使用的内存阈值|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|------------------------|----------|-------------------------|-|
|
||||
|countAlwaysReturnValue | |支持动态修改 立即生效 |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
|
||||
|tagFilterCache | |不支持动态修改 |是否缓存标签过滤结果|
|
||||
|queryBufferSize | |支持动态修改 重启生效 |暂不生效|
|
||||
|queryRspPolicy | |支持动态修改 立即生效 |查询响应策略|
|
||||
|queryUseMemoryPool | |不支持动态修改 |查询是否使用内存池管理内存,默认值:1(打开); 0: 关闭,1: 打开|
|
||||
|minReservedMemorySize | |不支持动态修改 |最小预留的系统可用内存数量,除预留外的内存都可以被用于查询,单位:MB,默认预留大小为系统物理内存的 20%,取值范围 1024 - 1000000000|
|
||||
|singleQueryMaxMemorySize| |不支持动态修改 |单个查询在单个节点(dnode)上可以使用的内存上限,超过该上限将返回错误,单位:MB,默认值:0(无上限),取值范围 0 - 1000000000|
|
||||
|filterScalarMode | |不支持动态修改 |强制使用标量过滤模式,0:关闭;1:开启,默认值 0|
|
||||
|queryPlannerTrace | |支持动态修改 立即生效 |内部参数,查询计划是否输出详细日志|
|
||||
|queryNodeChunkSize | |支持动态修改 立即生效 |内部参数,查询计划的块大小|
|
||||
|queryUseNodeAllocator | |支持动态修改 立即生效 |内部参数,查询计划的分配方法|
|
||||
|queryMaxConcurrentTables| |不支持动态修改 |内部参数,查询计划的并发数目|
|
||||
|queryRsmaTolerance | |不支持动态修改 |内部参数,用于判定查询哪一级 rsma 数据时的容忍时间,单位为毫秒|
|
||||
|enableQueryHb | |支持动态修改 立即生效 |内部参数,是否发送查询心跳消息|
|
||||
|pqSortMemThreshold | |不支持动态修改 |内部参数,排序使用的内存阈值|
|
||||
|
||||
### 区域相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------|----------|-|
|
||||
|timezone | |时区;缺省从系统中动态获取当前的时区设置|
|
||||
|locale | |系统区位信息及编码格式,缺省从系统中获取|
|
||||
|charset | |字符集编码,缺省从系统中获取|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|-----------------|----------|-------------------------|-|
|
||||
|timezone | |不支持动态修改 |时区;缺省从系统中动态获取当前的时区设置|
|
||||
|locale | |不支持动态修改 |系统区位信息及编码格式,缺省从系统中获取|
|
||||
|charset | |不支持动态修改 |字符集编码,缺省从系统中获取|
|
||||
|
||||
:::info
|
||||
1. 为应对多时区的数据写入和查询问题,TDengine 采用 Unix 时间戳(Unix Timestamp)来记录和存储时间戳。Unix 时间戳的特点决定了任一时刻不论在任何时区,产生的时间戳均一致。需要注意的是,Unix 时间戳是在客户端完成转换和记录。为了确保客户端其他形式的时间转换为正确的 Unix 时间戳,需要设置正确的时区。
|
||||
|
@ -162,159 +164,161 @@ charset 的有效值是 UTF-8。
|
|||
:::
|
||||
|
||||
### 存储相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|--------------------|----------|-|
|
||||
|dataDir | |数据文件目录,所有的数据文件都将写入该目录,默认值 /var/lib/taos|
|
||||
|tempDir | |指定所有系统运行过程中的临时文件生成的目录,默认值 /tmp|
|
||||
|minimalDataDirGB | |dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,默认值 2|
|
||||
|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,默认值 1|
|
||||
|minDiskFreeSize |3.1.1.0 后|当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件,单位为字节,取值范围 52428800-1073741824,默认值为 52428800;企业版参数|
|
||||
|s3MigrateIntervalSec|3.3.4.3 后|本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600;企业版参数|
|
||||
|s3MigrateEnabled |3.3.4.3 后|是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1;企业版参数|
|
||||
|s3Accesskey |3.3.4.3 后|冒号分隔的用户 SecretId:SecretKey,例如 AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E;企业版参数|
|
||||
|s3Endpoint |3.3.4.3 后|用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 保持一致,否则无法访问;企业版参数|
|
||||
|s3BucketName |3.3.4.3 后|存储桶名称,减号后面是用户注册 COS 服务的 AppId,其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔;参数值均为字符串类型,但不需要引号;例如 test0711-1309024725;企业版参数|
|
||||
|s3PageCacheSize |3.3.4.3 后|S3 page cache 缓存页数目,取值范围 4-1048576,单位为页,默认值 4096;企业版参数|
|
||||
|s3UploadDelaySec |3.3.4.3 后|data 文件持续多长时间不再变动后上传至 S3,取值范围 1-2592000 (30天),单位为秒,默认值 60;企业版参数|
|
||||
|cacheLazyLoadThreshold| |内部参数,缓存的装载策略|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|--------------------|----------|-------------------------|-|
|
||||
|dataDir | |不支持动态修改 |数据文件目录,所有的数据文件都将写入该目录,默认值 /var/lib/taos|
|
||||
|diskIDCheckEnabled | |不支持动态修改 |在 3.3.4.3 后,在重启 dnode 时增加了检查 dataDir 所在磁盘 id 是否发生改变,0:进行检查,1:不进行检查;默认值:1|
|
||||
|tempDir | |不支持动态修改 |指定所有系统运行过程中的临时文件生成的目录,默认值 /tmp|
|
||||
|minimalDataDirGB | |不支持动态修改 |dataDir 指定的时序数据存储目录所需要保留的最小空间,单位 GB,默认值 2|
|
||||
|minimalTmpDirGB | |不支持动态修改 |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,默认值 1|
|
||||
|minDiskFreeSize |3.1.1.0 后|支持动态修改 立即生效 |当某块磁盘上的可用空间小于等于这个阈值时,该磁盘将不再被选择用于生成新的数据文件,单位为字节,取值范围 52428800-1073741824,默认值为 52428800;企业版参数|
|
||||
|s3MigrateIntervalSec|3.3.4.3 后|支持动态修改 立即生效 |本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600;企业版参数|
|
||||
|s3MigrateEnabled |3.3.4.3 后|支持动态修改 立即生效 |是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1;企业版参数|
|
||||
|s3Accesskey |3.3.4.3 后|支持动态修改 重启生效 |冒号分隔的用户 SecretId:SecretKey,例如 AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E;企业版参数|
|
||||
|s3Endpoint |3.3.4.3 后|支持动态修改 重启生效 |用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 保持一致,否则无法访问;企业版参数|
|
||||
|s3BucketName |3.3.4.3 后|支持动态修改 重启生效 |存储桶名称,减号后面是用户注册 COS 服务的 AppId,其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔;参数值均为字符串类型,但不需要引号;例如 test0711-1309024725;企业版参数|
|
||||
|s3PageCacheSize |3.3.4.3 后|支持动态修改 重启生效 |S3 page cache 缓存页数目,取值范围 4-1048576,单位为页,默认值 4096;企业版参数|
|
||||
|s3UploadDelaySec |3.3.4.3 后|支持动态修改 立即生效 |data 文件持续多长时间不再变动后上传至 S3,取值范围 1-2592000 (30天),单位为秒,默认值 60;企业版参数|
|
||||
|cacheLazyLoadThreshold| |支持动态修改 立即生效 |内部参数,缓存的装载策略|
|
||||
|
||||
### 集群相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|--------------------------|----------|-|
|
||||
|supportVnodes | |dnode 支持的最大 vnode 数目,取值范围 0-4096,默认值 CPU 核数的 2 倍 + 5|
|
||||
|numOfCommitThreads | |落盘线程的最大数量,取值范围 0-1024,默认值为 4|
|
||||
|numOfMnodeReadThreads | |mnode 的 Read 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||
|numOfVnodeQueryThreads | |vnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
||||
|numOfVnodeFetchThreads | |vnode 的 Fetch 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||
|numOfVnodeRsmaThreads | |vnode 的 Rsma 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||
|numOfQnodeQueryThreads | |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
||||
|numOfSnodeSharedThreads | |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
||||
|numOfSnodeUniqueThreads | |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
||||
|ratioOfVnodeStreamThreads | |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4|
|
||||
|ttlUnit | |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400|
|
||||
|ttlPushInterval | |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10|
|
||||
|ttlChangeOnWrite | |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0|
|
||||
|ttlBatchDropNum | |ttl 一批删除子表的数目,最小值为 0,默认值 10000|
|
||||
|retentionSpeedLimitMB | |数据在不同级别硬盘上迁移时的速度限制,取值范围 0-1024,单位 MB,默认值 0,表示不限制|
|
||||
|maxTsmaNum | |集群内可创建的TSMA个数;取值范围 0-3;默认值 3|
|
||||
|tmqMaxTopicNum | |订阅最多可建立的 topic 数量;取值范围 1-10000;默认值为 20|
|
||||
|tmqRowSize | |订阅数据块的最大记录条数,取值范围 1-1000000,默认值 4096|
|
||||
|audit | |审计功能开关;企业版参数|
|
||||
|auditInterval | |审计数据上报的时间间隔;企业版参数|
|
||||
|auditCreateTable | |是否针对创建子表开启申计功能;企业版参数|
|
||||
|encryptAlgorithm | |数据加密算法;企业版参数|
|
||||
|encryptScope | |加密范围;企业版参数|
|
||||
|enableWhiteList | |白名单功能开关;企业版参数|
|
||||
|syncLogBufferMemoryAllowed| |一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 |
|
||||
|syncElectInterval | |内部参数,用于同步模块调试|
|
||||
|syncHeartbeatInterval | |内部参数,用于同步模块调试|
|
||||
|syncHeartbeatTimeout | |内部参数,用于同步模块调试|
|
||||
|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试|
|
||||
|syncSnapReplMaxWaitN | |内部参数,用于同步模块调试|
|
||||
|arbHeartBeatIntervalSec | |内部参数,用于同步模块调试|
|
||||
|arbCheckSyncIntervalSec | |内部参数,用于同步模块调试|
|
||||
|arbSetAssignedTimeoutSec | |内部参数,用于同步模块调试|
|
||||
|mndSdbWriteDelta | |内部参数,用于 mnode 模块调试|
|
||||
|mndLogRetention | |内部参数,用于 mnode 模块调试|
|
||||
|skipGrant | |内部参数,用于授权检查|
|
||||
|trimVDbIntervalSec | |内部参数,用于删除过期数据|
|
||||
|ttlFlushThreshold | |内部参数,ttl 定时器的频率|
|
||||
|compactPullupInterval | |内部参数,数据重整定时器的频率|
|
||||
|walFsyncDataSizeLimit | |内部参数,WAL 进行 FSYNC 的阈值|
|
||||
|transPullupInterval | |内部参数,mnode 执行事务的重试间隔|
|
||||
|mqRebalanceInterval | |内部参数,消费者再平衡的时间间隔|
|
||||
|uptimeInterval | |内部参数,用于记录系统启动时间|
|
||||
|timeseriesThreshold | |内部参数,用于统计用量|
|
||||
|udf | |是否启动 UDF 服务;0:不启动,1:启动;默认值为 0 |
|
||||
|udfdResFuncs | |内部参数,用于 UDF 结果集设置|
|
||||
|udfdLdLibPath | |内部参数,表示 UDF 装载的库路径|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|--------------------------|----------|-------------------------|-|
|
||||
|supportVnodes | |支持动态修改 立即生效 |dnode 支持的最大 vnode 数目,取值范围 0-4096,默认值 CPU 核数的 2 倍 + 5|
|
||||
|numOfCommitThreads | |支持动态修改 重启生效 |落盘线程的最大数量,取值范围 0-1024,默认值为 4|
|
||||
|numOfMnodeReadThreads | |支持动态修改 重启生效 |mnode 的 Read 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||
|numOfVnodeQueryThreads | |支持动态修改 重启生效 |vnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
||||
|numOfVnodeFetchThreads | |支持动态修改 重启生效 |vnode 的 Fetch 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||
|numOfVnodeRsmaThreads | |支持动态修改 重启生效 |vnode 的 Rsma 线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不超过 4)|
|
||||
|numOfQnodeQueryThreads | |支持动态修改 重启生效 |qnode 的 Query 线程数目,取值范围 0-1024,默认值为 CPU 核数的两倍(不超过 16)|
|
||||
|numOfSnodeSharedThreads | |支持动态修改 重启生效 |snode 的共享线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
||||
|numOfSnodeUniqueThreads | |支持动态修改 重启生效 |snode 的独占线程数目,取值范围 0-1024,默认值为 CPU 核数的四分之一(不小于 2,不超过 4)|
|
||||
|ratioOfVnodeStreamThreads | |支持动态修改 重启生效 |流计算使用 vnode 线程的比例,取值范围 0.01-4,默认值 4|
|
||||
|ttlUnit | |不支持动态修改 |ttl 参数的单位,取值范围 1-31572500,单位为秒,默认值 86400|
|
||||
|ttlPushInterval | |支持动态修改 立即生效 |ttl 检测超时频率,取值范围 1-100000,单位为秒,默认值 10|
|
||||
|ttlChangeOnWrite | |支持动态修改 立即生效 |ttl 到期时间是否伴随表的修改操作改变;0:不改变,1:改变;默认值为 0|
|
||||
|ttlBatchDropNum | |支持动态修改 立即生效 |ttl 一批删除子表的数目,最小值为 0,默认值 10000|
|
||||
|retentionSpeedLimitMB | |支持动态修改 立即生效 |数据在不同级别硬盘上迁移时的速度限制,取值范围 0-1024,单位 MB,默认值 0,表示不限制|
|
||||
|maxTsmaNum | |支持动态修改 立即生效 |集群内可创建的TSMA个数;取值范围 0-3;默认值 3|
|
||||
|tmqMaxTopicNum | |支持动态修改 立即生效 |订阅最多可建立的 topic 数量;取值范围 1-10000;默认值为 20|
|
||||
|tmqRowSize | |支持动态修改 立即生效 |订阅数据块的最大记录条数,取值范围 1-1000000,默认值 4096|
|
||||
|audit | |支持动态修改 立即生效 |审计功能开关;企业版参数|
|
||||
|auditInterval | |支持动态修改 立即生效 |审计数据上报的时间间隔;企业版参数|
|
||||
|auditCreateTable | |支持动态修改 立即生效 |是否针对创建子表开启审计功能;企业版参数|
|
||||
|encryptAlgorithm | |不支持动态修改 |数据加密算法;企业版参数|
|
||||
|encryptScope | |不支持动态修改 |加密范围;企业版参数|
|
||||
|enableWhiteList | |支持动态修改 立即生效 |白名单功能开关;企业版参数|
|
||||
|syncLogBufferMemoryAllowed| |支持动态修改 立即生效 |一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围 104857600-INT64_MAX,默认值 服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 |
|
||||
|syncElectInterval | |不支持动态修改 |内部参数,用于同步模块调试|
|
||||
|syncHeartbeatInterval | |不支持动态修改 |内部参数,用于同步模块调试|
|
||||
|syncHeartbeatTimeout | |不支持动态修改 |内部参数,用于同步模块调试|
|
||||
|syncSnapReplMaxWaitN | |支持动态修改 立即生效 |内部参数,用于同步模块调试|
|
||||
|arbHeartBeatIntervalSec | |支持动态修改 立即生效 |内部参数,用于同步模块调试|
|
||||
|arbCheckSyncIntervalSec | |支持动态修改 立即生效 |内部参数,用于同步模块调试|
|
||||
|arbSetAssignedTimeoutSec | |支持动态修改 立即生效 |内部参数,用于同步模块调试|
|
||||
|mndSdbWriteDelta | |支持动态修改 立即生效 |内部参数,用于 mnode 模块调试|
|
||||
|mndLogRetention | |支持动态修改 立即生效 |内部参数,用于 mnode 模块调试|
|
||||
|skipGrant | |不支持动态修改 |内部参数,用于授权检查|
|
||||
|trimVDbIntervalSec | |支持动态修改 立即生效 |内部参数,用于删除过期数据|
|
||||
|ttlFlushThreshold | |支持动态修改 立即生效 |内部参数,ttl 定时器的频率|
|
||||
|compactPullupInterval | |支持动态修改 立即生效 |内部参数,数据重整定时器的频率|
|
||||
|walFsyncDataSizeLimit | |支持动态修改 立即生效 |内部参数,WAL 进行 FSYNC 的阈值|
|
||||
|transPullupInterval | |支持动态修改 立即生效 |内部参数,mnode 执行事务的重试间隔|
|
||||
|mqRebalanceInterval | |支持动态修改 立即生效 |内部参数,消费者再平衡的时间间隔|
|
||||
|uptimeInterval | |支持动态修改 立即生效 |内部参数,用于记录系统启动时间|
|
||||
|timeseriesThreshold | |支持动态修改 立即生效 |内部参数,用于统计用量|
|
||||
|udf | |支持动态修改 重启生效 |是否启动 UDF 服务;0:不启动,1:启动;默认值为 0 |
|
||||
|udfdResFuncs | |支持动态修改 重启生效 |内部参数,用于 UDF 结果集设置|
|
||||
|udfdLdLibPath | |支持动态修改 重启生效 |内部参数,表示 UDF 装载的库路径|
|
||||
|
||||
|
||||
### 流计算参数
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------------|----------|-|
|
||||
|disableStream | |流计算的启动开关|
|
||||
|streamBufferSize | |控制内存中窗口状态缓存的大小,默认值为 128MB|
|
||||
|streamAggCnt | |内部参数,并发进行聚合计算的数目|
|
||||
|checkpointInterval | |内部参数,checkpoint 同步间隔|
|
||||
|concurrentCheckpoint | |内部参数,是否并发检查 checkpoint|
|
||||
|maxStreamBackendCache | |内部参数,流计算使用的最大缓存|
|
||||
|streamSinkDataRate | |内部参数,用于控制流计算结果的写入速度|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|-----------------------|-------------------------|----------|-|
|
||||
|disableStream | |支持动态修改 重启生效 |流计算的启动开关|
|
||||
|streamBufferSize | |支持动态修改 重启生效 |控制内存中窗口状态缓存的大小,默认值为 128MB|
|
||||
|streamAggCnt | |不支持动态修改 |内部参数,并发进行聚合计算的数目|
|
||||
|checkpointInterval | |支持动态修改 重启生效 |内部参数,checkpoint 同步间隔|
|
||||
|concurrentCheckpoint | |支持动态修改 立即生效 |内部参数,是否并发检查 checkpoint|
|
||||
|maxStreamBackendCache | |支持动态修改 立即生效 |内部参数,流计算使用的最大缓存|
|
||||
|streamSinkDataRate | |支持动态修改 重启生效 |内部参数,用于控制流计算结果的写入速度|
|
||||
|
||||
### 日志相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|----------------|----------|-|
|
||||
|logDir | |日志文件目录,运行日志将写入该目录,默认值 /var/log/taos|
|
||||
|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,默认值 1|
|
||||
|numOfLogLines | |单个日志文件允许的最大行数,默认值 10,000,000|
|
||||
|asyncLog | |日志写入模式,0:同步,1:异步,默认值 1|
|
||||
|logKeepDays | |日志文件的最长保存时间,单位:天,默认值 0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|
||||
|slowLogThreshold|3.3.3.0 后|慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值 3 |
|
||||
|slowLogMaxLen |3.3.3.0 后|慢查询日志最大长度,取值范围 1-16384,默认值 4096|
|
||||
|slowLogScope |3.3.3.0 后|慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE,默认值 QUERY|
|
||||
|slowLogExceptDb |3.3.3.0 后|指定的数据库不上报慢查询,仅支持配置一个数据库|
|
||||
|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
||||
|tmrDebugFlag | |定时器模块的日志开关,取值范围同上|
|
||||
|uDebugFlag | |共用功能模块的日志开关,取值范围同上|
|
||||
|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上|
|
||||
|qDebugFlag | |query 模块的日志开关,取值范围同上|
|
||||
|dDebugFlag | |dnode 模块的日志开关,取值范围同上|
|
||||
|vDebugFlag | |vnode 模块的日志开关,取值范围同上|
|
||||
|mDebugFlag | |mnode 模块的日志开关,取值范围同上|
|
||||
|azDebugFlag |3.3.4.3 后|S3 模块的日志开关,取值范围同上|
|
||||
|sDebugFlag | |sync 模块的日志开关,取值范围同上|
|
||||
|tsdbDebugFlag | |tsdb 模块的日志开关,取值范围同上|
|
||||
|tqDebugFlag | |tq 模块的日志开关,取值范围同上|
|
||||
|fsDebugFlag | |fs 模块的日志开关,取值范围同上|
|
||||
|udfDebugFlag | |udf 模块的日志开关,取值范围同上|
|
||||
|smaDebugFlag | |sma 模块的日志开关,取值范围同上|
|
||||
|idxDebugFlag | |index 模块的日志开关,取值范围同上|
|
||||
|tdbDebugFlag | |tdb 模块的日志开关,取值范围同上|
|
||||
|metaDebugFlag | |meta 模块的日志开关,取值范围同上|
|
||||
|stDebugFlag | |stream 模块的日志开关,取值范围同上|
|
||||
|sndDebugFlag | |snode 模块的日志开关,取值范围同上|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------|-------------------------|----------|-|
|
||||
|logDir | |不支持动态修改 |日志文件目录,运行日志将写入该目录,默认值 /var/log/taos|
|
||||
|minimalLogDirGB | |不支持动态修改 |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,默认值 1|
|
||||
|numOfLogLines | |支持动态修改 立即生效 |单个日志文件允许的最大行数,默认值 10,000,000|
|
||||
|asyncLog | |支持动态修改 立即生效 |日志写入模式,0:同步,1:异步,默认值 1|
|
||||
|logKeepDays | |支持动态修改 立即生效 |日志文件的最长保存时间,单位:天,默认值 0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taosdlog.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|
||||
|slowLogThreshold|3.3.3.0 后|支持动态修改 立即生效 |慢查询门限值,大于等于门限值认为是慢查询,单位秒,默认值 3 |
|
||||
|slowLogMaxLen |3.3.3.0 后|支持动态修改 立即生效 |慢查询日志最大长度,取值范围 1-16384,默认值 4096|
|
||||
|slowLogScope |3.3.3.0 后|支持动态修改 立即生效 |慢查询记录类型,取值范围 ALL/QUERY/INSERT/OTHERS/NONE,默认值 QUERY|
|
||||
|slowLogExceptDb |3.3.3.0 后|支持动态修改 立即生效 |指定的数据库不上报慢查询,仅支持配置一个数据库|
|
||||
|debugFlag | |支持动态修改 立即生效 |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
||||
|tmrDebugFlag | |支持动态修改 立即生效 |定时器模块的日志开关,取值范围同上|
|
||||
|uDebugFlag | |支持动态修改 立即生效 |共用功能模块的日志开关,取值范围同上|
|
||||
|rpcDebugFlag | |支持动态修改 立即生效 |rpc 模块的日志开关,取值范围同上|
|
||||
|qDebugFlag | |支持动态修改 立即生效 |query 模块的日志开关,取值范围同上|
|
||||
|dDebugFlag | |支持动态修改 立即生效 |dnode 模块的日志开关,取值范围同上|
|
||||
|vDebugFlag | |支持动态修改 立即生效 |vnode 模块的日志开关,取值范围同上|
|
||||
|mDebugFlag | |支持动态修改 立即生效 |mnode 模块的日志开关,取值范围同上|
|
||||
|azDebugFlag |3.3.4.3 后|支持动态修改 立即生效 |S3 模块的日志开关,取值范围同上|
|
||||
|sDebugFlag | |支持动态修改 立即生效 |sync 模块的日志开关,取值范围同上|
|
||||
|tsdbDebugFlag | |支持动态修改 立即生效 |tsdb 模块的日志开关,取值范围同上|
|
||||
|tqDebugFlag | |支持动态修改 立即生效 |tq 模块的日志开关,取值范围同上|
|
||||
|fsDebugFlag | |支持动态修改 立即生效 |fs 模块的日志开关,取值范围同上|
|
||||
|udfDebugFlag | |支持动态修改 立即生效 |udf 模块的日志开关,取值范围同上|
|
||||
|smaDebugFlag | |支持动态修改 立即生效 |sma 模块的日志开关,取值范围同上|
|
||||
|idxDebugFlag | |支持动态修改 立即生效 |index 模块的日志开关,取值范围同上|
|
||||
|tdbDebugFlag | |支持动态修改 立即生效 |tdb 模块的日志开关,取值范围同上|
|
||||
|metaDebugFlag | |支持动态修改 立即生效 |meta 模块的日志开关,取值范围同上|
|
||||
|stDebugFlag | |支持动态修改 立即生效 |stream 模块的日志开关,取值范围同上|
|
||||
|sndDebugFlag | |支持动态修改 立即生效 |snode 模块的日志开关,取值范围同上|
|
||||
|
||||
### 调试相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|--------------------|----------|-|
|
||||
|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;默认值 1|
|
||||
|configDir | |配置文件所在目录|
|
||||
|scriptDir | |内部测试工具的脚本目录|
|
||||
|assert | |断言控制开关,默认值 0|
|
||||
|randErrorChance | |内部参数,用于随机失败测试|
|
||||
|randErrorDivisor | |内部参数,用于随机失败测试|
|
||||
|randErrorScope | |内部参数,用于随机失败测试|
|
||||
|safetyCheckLevel | |内部参数,用于随机失败测试|
|
||||
|experimental | |内部参数,用于一些实验特性|
|
||||
|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速|
|
||||
|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速|
|
||||
|rsyncPort | |内部参数,用于调试流计算|
|
||||
|snodeAddress | |内部参数,用于调试流计算|
|
||||
|checkpointBackupDir | |内部参数,用于恢复 snode 数据|
|
||||
|enableAuditDelete | |内部参数,用于测试审计功能|
|
||||
|slowLogThresholdTest| |内部参数,用于测试慢日志|
|
||||
|bypassFlag |3.3.4.5 后|内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;默认值 0|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|--------------------|-------------------------|----------|-|
|
||||
|enableCoreFile | |支持动态修改 立即生效 |crash 时是否生成 core 文件,0:不生成,1:生成;默认值 1|
|
||||
|configDir | |不支持动态修改 |配置文件所在目录|
|
||||
|forceReadConfig | |不支持动态修改 |强制使用配置文件中的参数,0:使用持久化的配置参数,1:使用配置文件中的配置参数;默认值 0|
|
||||
|scriptDir | |不支持动态修改 |内部测试工具的脚本目录|
|
||||
|assert | |不支持动态修改 |断言控制开关,默认值 0|
|
||||
|randErrorChance | |支持动态修改 立即生效 |内部参数,用于随机失败测试|
|
||||
|randErrorDivisor | |支持动态修改 立即生效 |内部参数,用于随机失败测试|
|
||||
|randErrorScope | |支持动态修改 立即生效 |内部参数,用于随机失败测试|
|
||||
|safetyCheckLevel | |支持动态修改 立即生效 |内部参数,用于随机失败测试|
|
||||
|experimental | |支持动态修改 立即生效 |内部参数,用于一些实验特性|
|
||||
|simdEnable |3.3.4.3 后|不支持动态修改 |内部参数,用于测试 SIMD 加速|
|
||||
|AVX512Enable |3.3.4.3 后|不支持动态修改 |内部参数,用于测试 AVX512 加速|
|
||||
|rsyncPort | |不支持动态修改 |内部参数,用于调试流计算|
|
||||
|snodeAddress | |支持动态修改 重启生效 |内部参数,用于调试流计算|
|
||||
|checkpointBackupDir | |支持动态修改 重启生效 |内部参数,用于恢复 snode 数据|
|
||||
|enableAuditDelete | |不支持动态修改 |内部参数,用于测试审计功能|
|
||||
|slowLogThresholdTest| |不支持动态修改 |内部参数,用于测试慢日志|
|
||||
|bypassFlag |3.3.4.5 后|支持动态修改 立即生效 |内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;默认值 0|
|
||||
|
||||
### 压缩参数
|
||||
|参数名称|支持版本|参数含义|
|
||||
|------------|----------|-|
|
||||
|fPrecision | |设置 float 类型浮点数压缩精度 ,取值范围 0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断|
|
||||
|dPrecision | |设置 double 类型浮点数压缩精度 , 取值范围 0.1 ~ 0.0000000000000001 , 默认值 0.0000000000000001 , 小于此值的浮点数尾数部分将被截取|
|
||||
|lossyColumn |3.3.0.0 前|对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围 float/double/none;默认值 none,表示关闭有损压缩|
|
||||
|ifAdtFse | |在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法,FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法;0:关闭,1:打开;默认值为 0|
|
||||
|maxRange | |内部参数,用于有损压缩设置|
|
||||
|curRange | |内部参数,用于有损压缩设置|
|
||||
|compressor | |内部参数,用于有损压缩设置|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|------------|----------|-------------------------|-|
|
||||
|fPrecision | |支持动态修改 立即生效 |设置 float 类型浮点数压缩精度 ,取值范围 0.1 ~ 0.00000001 ,默认值 0.00000001 , 小于此值的浮点数尾数部分将被截断|
|
||||
|dPrecision | |支持动态修改 立即生效 |设置 double 类型浮点数压缩精度 , 取值范围 0.1 ~ 0.0000000000000001 , 默认值 0.0000000000000001 , 小于此值的浮点数尾数部分将被截取|
|
||||
|lossyColumn |3.3.0.0 前|不支持动态修改 |对 float 和/或 double 类型启用 TSZ 有损压缩;取值范围 float/double/none;默认值 none,表示关闭有损压缩|
|
||||
|ifAdtFse | |支持动态修改 重启生效 |在启用 TSZ 有损压缩时,使用 FSE 算法替换 HUFFMAN 算法,FSE 算法压缩速度更快,但解压稍慢,追求压缩速度可选用此算法;0:关闭,1:打开;默认值为 0|
|
||||
|maxRange | |支持动态修改 重启生效 |内部参数,用于有损压缩设置|
|
||||
|curRange | |支持动态修改 重启生效 |内部参数,用于有损压缩设置|
|
||||
|compressor | |支持动态修改 重启生效 |内部参数,用于有损压缩设置|
|
||||
|
||||
**补充说明**
|
||||
1. 在 3.2.0.0 ~ 3.3.0.0(不包含)版本生效,启用该参数后不能回退到升级前的版本
|
||||
2. TSZ 压缩算法是通过数据预测技术完成的压缩,所以更适合有规律变化的数据
|
||||
3. TSZ 压缩时间会更长一些,如果您的服务器 CPU 空闲多,存储空间小的情况下适合选用
|
||||
4. 示例:对 float 和 double 类型都启用有损压缩
|
||||
1. 在 3.4.0.0 之后,所有配置参数都将被持久化到本地存储,重启数据库服务后,将默认使用持久化的配置参数列表;如果您希望继续使用 config 文件中配置的参数,需设置 forceReadConfig 为 1。
|
||||
2. 在 3.2.0.0 ~ 3.3.0.0(不包含)版本生效,启用该参数后不能回退到升级前的版本
|
||||
3. TSZ 压缩算法是通过数据预测技术完成的压缩,所以更适合有规律变化的数据
|
||||
4. TSZ 压缩时间会更长一些,如果您的服务器 CPU 空闲多,存储空间小的情况下适合选用
|
||||
5. 示例:对 float 和 double 类型都启用有损压缩
|
||||
```shell
|
||||
lossyColumns float|double
|
||||
```
|
||||
5. 配置需重启服务生效,重启如果在 taosd 日志中看到以下内容,表明配置已生效:
|
||||
6. 配置需重启服务生效,重启如果在 taosd 日志中看到以下内容,表明配置已生效:
|
||||
```sql
|
||||
02/22 10:49:27.607990 00002933 UTL lossyColumns float|double
|
||||
```
|
||||
|
|
|
@ -9,101 +9,102 @@ TDengine 客户端驱动提供了应用编程所需要的全部 API,并且在
|
|||
## 配置参数
|
||||
|
||||
### 连接相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|----------------------|----------|-------------|
|
||||
|firstEp | |启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost|
|
||||
|secondEp | |启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值|
|
||||
|compressMsgSize | |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;缺省值 -1|
|
||||
|shellActivityTimer | |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3|
|
||||
|numOfRpcSessions | |RPC 支持的最大连接数,取值范围 100-100000,缺省值 30000|
|
||||
|numOfRpcThreads | |RPC 收发数据线程数目,取值范围1-50,默认值为 CPU 核数的一半|
|
||||
|numOfTaskQueueThreads | |客户端处理 RPC消息的线程数, 范围4-16,默认值为 CPU 核数的一半|
|
||||
|timeToGetAvailableConn| 3.3.4.*之后取消 |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值 500000|
|
||||
|useAdapter | |内部参数,是否使用 taosadapter,影响 CSV 文件导入|
|
||||
|shareConnLimit |3.3.4.0 新增|内部参数,一个链接可以共享的查询数目,取值范围 1-256,默认值 10|
|
||||
|readTimeout |3.3.4.0 新增|内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------------|----------|-------------------------|-------------|
|
||||
|firstEp | |支持动态修改 立即生效 |启动时,主动连接的集群中首个 dnode 的 endpoint,缺省值:hostname:6030,若无法获取该服务器的 hostname,则赋值为 localhost|
|
||||
|secondEp | |支持动态修改 立即生效 |启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,没有缺省值|
|
||||
|serverPort | |支持动态修改 立即生效 |taosd 监听的端口,默认值 6030|
|
||||
|compressMsgSize | |支持动态修改 立即生效 |是否对 RPC 消息进行压缩;-1:所有消息都不压缩;0:所有消息都压缩;N (N>0):只有大于 N 个字节的消息才压缩;缺省值 -1|
|
||||
|shellActivityTimer | |不支持动态修改 |客户端向 mnode 发送心跳的时长,单位为秒,取值范围 1-120,默认值 3|
|
||||
|numOfRpcSessions | |支持动态修改 立即生效 |RPC 支持的最大连接数,取值范围 100-100000,缺省值 30000|
|
||||
|numOfRpcThreads | |不支持动态修改 |RPC 收发数据线程数目,取值范围1-50,默认值为 CPU 核数的一半|
|
||||
|numOfTaskQueueThreads | |不支持动态修改 |客户端处理 RPC消息的线程数, 范围4-16,默认值为 CPU 核数的一半|
|
||||
|timeToGetAvailableConn| 3.3.4.*之后取消 |不支持动态修改 |获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值 500000|
|
||||
|useAdapter | |支持动态修改 立即生效 |内部参数,是否使用 taosadapter,影响 CSV 文件导入|
|
||||
|shareConnLimit |3.3.4.0 新增|不支持动态修改 |内部参数,一个链接可以共享的查询数目,取值范围 1-256,默认值 10|
|
||||
|readTimeout |3.3.4.0 新增|不支持动态修改 |内部参数,最小超时时间,取值范围 64-604800,单位为秒,默认值 900|
|
||||
|
||||
### 查询相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|---------------------------------|---------|-|
|
||||
|countAlwaysReturnValue | |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
|
||||
|keepColumnName | |Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数;1:表示自动设置别名为列名(不包含函数名),0:表示不自动设置别名;缺省值:0|
|
||||
|multiResultFunctionStarReturnTags|3.3.3.0 后|查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响;0:不返回标签列,1:返回标签列;缺省值:0;该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列|
|
||||
|metaCacheMaxSize | |指定单个客户端元数据缓存大小的最大值,单位 MB;缺省值 -1,表示无限制|
|
||||
|maxTsmaCalcDelay | |查询时客户端可允许的 tsma 计算延迟,若 tsma 的计算延迟大于配置值,则该 TSMA 将不会被使用;取值范围 600s - 86400s,即 10 分钟 - 1 小时;缺省值:600 秒|
|
||||
|tsmaDataDeleteMark | |TSMA 计算的历史数据中间结果保存时间,单位为毫秒;取值范围 >= 3600000,即大于等于1h;缺省值:86400000,即 1d |
|
||||
|queryPolicy | |查询语句的执行策略,1:只使用 vnode,不使用 qnode;2:没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行;3:vnode 只运行扫描算子,其余算子均在 qnode 执行;缺省值:1|
|
||||
|queryTableNotExistAsEmpty | |查询表不存在时是否返回空结果集;false:返回错误;true:返回空结果集;缺省值 false|
|
||||
|querySmaOptimize | |sma index 的优化策略,0:表示不使用 sma index,永远从原始数据进行查询;1:表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0|
|
||||
|queryPlannerTrace | |内部参数,查询计划是否输出详细日志|
|
||||
|queryNodeChunkSize | |内部参数,查询计划的块大小|
|
||||
|queryUseNodeAllocator | |内部参数,查询计划的分配方法|
|
||||
|queryMaxConcurrentTables | |内部参数,查询计划的并发数目|
|
||||
|enableQueryHb | |内部参数,是否发送查询心跳消息|
|
||||
|minSlidingTime | |内部参数,sliding 的最小允许值|
|
||||
|minIntervalTime | |内部参数,interval 的最小允许值|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------------|----------|-------------------------|-------------|
|
||||
|countAlwaysReturnValue | |支持动态修改 立即生效 |count/hyperloglog 函数在输入数据为空或者 NULL 的情况下是否返回值;0:返回空行,1:返回;默认值 1;该参数设置为 1 时,如果查询中含有 INTERVAL 子句或者该查询使用了 TSMA 时,且相应的组或窗口内数据为空或者 NULL,对应的组或窗口将不返回查询结果;注意此参数客户端和服务端值应保持一致|
|
||||
|keepColumnName | |支持动态修改 立即生效 |Last、First、LastRow 函数查询且未指定别名时,自动设置别名为列名(不含函数名),因此 order by 子句如果引用了该列名将自动引用该列对应的函数;1:表示自动设置别名为列名(不包含函数名),0:表示不自动设置别名;缺省值:0|
|
||||
|multiResultFunctionStarReturnTags|3.3.3.0 后|支持动态修改 立即生效 |查询超级表时,last(\*)/last_row(\*)/first(\*) 是否返回标签列;查询普通表、子表时,不受该参数影响;0:不返回标签列,1:返回标签列;缺省值:0;该参数设置为 0 时,last(\*)/last_row(\*)/first(\*) 只返回超级表的普通列;为 1 时,返回超级表的普通列和标签列|
|
||||
|metaCacheMaxSize | |支持动态修改 立即生效 |指定单个客户端元数据缓存大小的最大值,单位 MB;缺省值 -1,表示无限制|
|
||||
|maxTsmaCalcDelay | |支持动态修改 立即生效 |查询时客户端可允许的 tsma 计算延迟,若 tsma 的计算延迟大于配置值,则该 TSMA 将不会被使用;取值范围 600s - 86400s,即 10 分钟 - 1 小时;缺省值:600 秒|
|
||||
|tsmaDataDeleteMark | |支持动态修改 立即生效 |TSMA 计算的历史数据中间结果保存时间,单位为毫秒;取值范围 >= 3600000,即大于等于1h;缺省值:86400000,即 1d |
|
||||
|queryPolicy | |支持动态修改 立即生效 |查询语句的执行策略,1:只使用 vnode,不使用 qnode;2:没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行;3:vnode 只运行扫描算子,其余算子均在 qnode 执行;缺省值:1|
|
||||
|queryTableNotExistAsEmpty | |支持动态修改 立即生效 |查询表不存在时是否返回空结果集;false:返回错误;true:返回空结果集;缺省值 false|
|
||||
|querySmaOptimize | |支持动态修改 立即生效 |sma index 的优化策略,0:表示不使用 sma index,永远从原始数据进行查询;1:表示使用 sma index,对符合的语句,直接从预计算的结果进行查询;缺省值:0|
|
||||
|queryPlannerTrace | |支持动态修改 立即生效 |内部参数,查询计划是否输出详细日志|
|
||||
|queryNodeChunkSize | |支持动态修改 立即生效 |内部参数,查询计划的块大小|
|
||||
|queryUseNodeAllocator | |支持动态修改 立即生效 |内部参数,查询计划的分配方法|
|
||||
|queryMaxConcurrentTables | |不支持动态修改 |内部参数,查询计划的并发数目|
|
||||
|enableQueryHb | |支持动态修改 立即生效 |内部参数,是否发送查询心跳消息|
|
||||
|minSlidingTime | |支持动态修改 立即生效 |内部参数,sliding 的最小允许值|
|
||||
|minIntervalTime | |支持动态修改 立即生效 |内部参数,interval 的最小允许值|
|
||||
|
||||
### 写入相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|------------------------------|----------|-|
|
||||
|smlChildTableName | |schemaless 自定义的子表名的 key,无缺省值|
|
||||
|smlAutoChildTableNameDelimiter| |schemaless tag 之间的连接符,连起来作为子表名,无缺省值|
|
||||
|smlTagName | |schemaless tag 为空时默认的 tag 名字,缺省值 "_tag_null"|
|
||||
|smlTsDefaultName | |schemaless 自动建表的时间列名字通过该配置设置,缺省值 "_ts"|
|
||||
|smlDot2Underline | |schemaless 把超级表名中的 dot 转成下划线|
|
||||
|maxInsertBatchRows | |内部参数,一批写入的最大条数|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------------|----------|-------------------------|-------------|
|
||||
|smlChildTableName | |支持动态修改 立即生效 |schemaless 自定义的子表名的 key,无缺省值|
|
||||
|smlAutoChildTableNameDelimiter| |支持动态修改 立即生效 |schemaless tag 之间的连接符,连起来作为子表名,无缺省值|
|
||||
|smlTagName | |支持动态修改 立即生效 |schemaless tag 为空时默认的 tag 名字,缺省值 "_tag_null"|
|
||||
|smlTsDefaultName | |支持动态修改 立即生效 |schemaless 自动建表的时间列名字通过该配置设置,缺省值 "_ts"|
|
||||
|smlDot2Underline | |支持动态修改 立即生效 |schemaless 把超级表名中的 dot 转成下划线|
|
||||
|maxInsertBatchRows | |支持动态修改 立即生效 |内部参数,一批写入的最大条数|
|
||||
|
||||
### 区域相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------|----------|-|
|
||||
|timezone | |时区;缺省从系统中动态获取当前的时区设置|
|
||||
|locale | |系统区位信息及编码格式,缺省从系统中获取|
|
||||
|charset | |字符集编码,缺省从系统中获取|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------------|----------|-------------------------|-------------|
|
||||
|timezone | |支持动态修改 立即生效 |时区;缺省从系统中动态获取当前的时区设置|
|
||||
|locale | |支持动态修改 立即生效 |系统区位信息及编码格式,缺省从系统中获取|
|
||||
|charset | |支持动态修改 立即生效 |字符集编码,缺省从系统中获取|
|
||||
|
||||
### 存储相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------|----------|-|
|
||||
|tempDir | |指定所有运行过程中的临时文件生成的目录,Linux 平台默认值为 /tmp|
|
||||
|minimalTmpDirGB | |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值:1|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------------|----------|-------------------------|-------------|
|
||||
|tempDir | |支持动态修改 立即生效 |指定所有运行过程中的临时文件生成的目录,Linux 平台默认值为 /tmp|
|
||||
|minimalTmpDirGB | |支持动态修改 立即生效 |tempDir 所指定的临时文件目录所需要保留的最小空间,单位 GB,缺省值:1|
|
||||
|
||||
### 日志相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------|----------|-|
|
||||
|logDir | |日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos|
|
||||
|minimalLogDirGB | |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,缺省值:1|
|
||||
|numOfLogLines | |单个日志文件允许的最大行数,缺省值:10,000,000|
|
||||
|asyncLog | |日志写入模式,0:同步,1:异步,缺省值:1|
|
||||
|logKeepDays | |日志文件的最长保存时间,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taoslogx.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|
||||
|debugFlag | |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
||||
|tmrDebugFlag | |定时器模块的日志开关,取值范围同上|
|
||||
|uDebugFlag | |共用功能模块的日志开关,取值范围同上|
|
||||
|rpcDebugFlag | |rpc 模块的日志开关,取值范围同上|
|
||||
|jniDebugFlag | |jni 模块的日志开关,取值范围同上|
|
||||
|qDebugFlag | |query 模块的日志开关,取值范围同上|
|
||||
|cDebugFlag | |客户端模块的日志开关,取值范围同上|
|
||||
|simDebugFlag | |内部参数,测试工具的日志开关,取值范围同上|
|
||||
|tqClientDebugFlag|3.3.4.3 后|客户端模块的日志开关,取值范围同上|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------------|----------|-------------------------|-------------|
|
||||
|logDir | |不支持动态修改 |日志文件目录,运行日志将写入该目录,缺省值:/var/log/taos|
|
||||
|minimalLogDirGB | |支持动态修改 立即生效 |日志文件夹所在磁盘可用空间大小小于该值时,停止写日志,单位 GB,缺省值:1|
|
||||
|numOfLogLines | |支持动态修改 立即生效 |单个日志文件允许的最大行数,缺省值:10,000,000|
|
||||
|asyncLog | |支持动态修改 立即生效 |日志写入模式,0:同步,1:异步,缺省值:1|
|
||||
|logKeepDays | |支持动态修改 立即生效 |日志文件的最长保存时间,单位:天,缺省值:0,意味着无限保存,日志文件不会被重命名,也不会有新的日志文件滚动产生,但日志文件的内容有可能会不断滚动,取决于日志文件大小的设置;当设置为大于 0 的值时,当日志文件大小达到设置的上限时会被重命名为 taoslogx.yyy,其中 yyy 为日志文件最后修改的时间戳,并滚动产生新的日志文件|
|
||||
|debugFlag | |支持动态修改 立即生效 |运行日志开关,131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志);默认值 131 或 135 (取决于不同模块)|
|
||||
|tmrDebugFlag | |支持动态修改 立即生效 |定时器模块的日志开关,取值范围同上|
|
||||
|uDebugFlag | |支持动态修改 立即生效 |共用功能模块的日志开关,取值范围同上|
|
||||
|rpcDebugFlag | |支持动态修改 立即生效 |rpc 模块的日志开关,取值范围同上|
|
||||
|jniDebugFlag | |支持动态修改 立即生效 |jni 模块的日志开关,取值范围同上|
|
||||
|qDebugFlag | |支持动态修改 立即生效 |query 模块的日志开关,取值范围同上|
|
||||
|cDebugFlag | |支持动态修改 立即生效 |客户端模块的日志开关,取值范围同上|
|
||||
|simDebugFlag | |支持动态修改 立即生效 |内部参数,测试工具的日志开关,取值范围同上|
|
||||
|tqClientDebugFlag|3.3.4.3 后|支持动态修改 立即生效 |客户端模块的日志开关,取值范围同上|
|
||||
|
||||
### 调试相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------|-----------|-|
|
||||
|crashReporting | |是否上传 crash 到 telemetry,0:不上传,1:上传;缺省值:1|
|
||||
|enableCoreFile | |crash 时是否生成 core 文件,0:不生成,1:生成;缺省值:1|
|
||||
|assert | |断言控制开关,缺省值:0|
|
||||
|configDir | |配置文件所在目录|
|
||||
|scriptDir | |内部参数,测试用例的目录|
|
||||
|randErrorChance |3.3.3.0 后|内部参数,用于随机失败测试|
|
||||
|randErrorDivisor |3.3.3.0 后|内部参数,用于随机失败测试|
|
||||
|randErrorScope |3.3.3.0 后|内部参数,用于随机失败测试|
|
||||
|safetyCheckLevel |3.3.3.0 后|内部参数,用于随机失败测试|
|
||||
|simdEnable |3.3.4.3 后|内部参数,用于测试 SIMD 加速|
|
||||
|AVX512Enable |3.3.4.3 后|内部参数,用于测试 AVX512 加速|
|
||||
|bypassFlag |3.3.4.5 后|内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;缺省值:0|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------------|----------|-------------------------|-------------|
|
||||
|crashReporting | |支持动态修改 立即生效 |是否上传 crash 到 telemetry,0:不上传,1:上传;缺省值:1|
|
||||
|enableCoreFile | |支持动态修改 立即生效 |crash 时是否生成 core 文件,0:不生成,1:生成;缺省值:1|
|
||||
|assert | |不支持动态修改 |断言控制开关,缺省值:0|
|
||||
|configDir | |不支持动态修改 |配置文件所在目录|
|
||||
|scriptDir | |不支持动态修改 |内部参数,测试用例的目录|
|
||||
|randErrorChance |3.3.3.0 后|不支持动态修改 |内部参数,用于随机失败测试|
|
||||
|randErrorDivisor |3.3.3.0 后|不支持动态修改 |内部参数,用于随机失败测试|
|
||||
|randErrorScope |3.3.3.0 后|不支持动态修改 |内部参数,用于随机失败测试|
|
||||
|safetyCheckLevel |3.3.3.0 后|不支持动态修改 |内部参数,用于随机失败测试|
|
||||
|simdEnable |3.3.4.3 后|不支持动态修改 |内部参数,用于测试 SIMD 加速|
|
||||
|AVX512Enable |3.3.4.3 后|不支持动态修改 |内部参数,用于测试 AVX512 加速|
|
||||
|bypassFlag |3.3.4.5 后|支持动态修改 立即生效 |内部参数,用于短路测试,0:正常写入,1:写入消息在 taos 客户端发送 RPC 消息前返回,2:写入消息在 taosd 服务端收到 RPC 消息后返回,4:写入消息在 taosd 服务端写入内存缓存前返回,8:写入消息在 taosd 服务端数据落盘前返回;缺省值:0|
|
||||
|
||||
### SHELL 相关
|
||||
|参数名称|支持版本|参数含义|
|
||||
|-----------------|----------|-|
|
||||
|enableScience | |是否开启科学计数法显示浮点数;0:不开启,1:开启;缺省值:1|
|
||||
|参数名称|支持版本|动态修改|参数含义|
|
||||
|----------------------|----------|-------------------------|-------------|
|
||||
|enableScience | |不支持动态修改 |是否开启科学计数法显示浮点数;0:不开启,1:开启;缺省值:1|
|
||||
|
||||
## API
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
|
|||
- -h HOST: 要连接的 TDengine 服务端所在服务器的 FQDN, 默认为连接本地服务
|
||||
- -P PORT: 指定服务端所用端口号
|
||||
- -u USER: 连接时使用的用户名
|
||||
- -p PASSWORD: 连接服务端时使用的密码
|
||||
- -p PASSWORD: 连接服务端时使用的密码,特殊字符如 `! & ( ) < > ; |` 需使用字符 `\` 进行转义处理
|
||||
- -?, --help: 打印出所有命令行参数
|
||||
|
||||
还有更多其他参数:
|
||||
|
|
|
@ -64,7 +64,8 @@ database_option: {
|
|||
- DURATION:数据文件存储数据的时间跨度。可以使用加单位的表示形式,如 DURATION 100h、DURATION 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。不加时间单位时默认单位为天,如 DURATION 50 表示 50 天。
|
||||
- MAXROWS:文件块中记录的最大条数,默认为 4096 条。
|
||||
- MINROWS:文件块中记录的最小条数,默认为 100 条。
|
||||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/)
|
||||
- KEEP:表示数据文件保存的天数,缺省值为 3650,取值范围 [1, 365000],且必须大于或等于 3 倍的 DURATION 参数值。数据库会自动删除保存时间超过 KEEP 值的数据从而释放存储空间。KEEP 可以使用加单位的表示形式,如 KEEP 100h、KEEP 10d 等,支持 m(分钟)、h(小时)和 d(天)三个单位。也可以不写单位,如 KEEP 50,此时默认单位为天。企业版支持[多级存储](../../operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8)功能, 因此, 可以设置多个保存时间(多个以英文逗号分隔,最多 3 个,满足 keep 0 \<= keep 1 \<= keep 2,如 KEEP 100h,100d,3650d); 社区版不支持多级存储功能(即使配置了多个保存时间, 也不会生效, KEEP 会取最大的保存时间)。了解更多,请点击 [关于主键时间戳](https://docs.taosdata.com/reference/taos-sql/insert/)
|
||||
|
||||
- KEEP_TIME_OFFSET:自 3.2.0.0 版本生效。删除或迁移保存时间超过 KEEP 值的数据的延迟执行时间,默认值为 0 (小时)。在数据文件保存时间超过 KEEP 后,删除或迁移操作不会立即执行,而会额外等待本参数指定的时间间隔,以实现与业务高峰期错开的目的。
|
||||
- STT_TRIGGER:表示落盘文件触发文件合并的个数。开源版本固定为 1,企业版本可设置范围为 1 到 16。对于少表高频写入场景,此参数建议使用默认配置;而对于多表低频写入场景,此参数建议配置较大的值。
|
||||
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
|
||||
|
@ -135,11 +136,11 @@ alter_database_option: {
|
|||
|
||||
1. 如何查看 cachesize?
|
||||
|
||||
通过 select * from information_schema.ins_databases; 可以查看这些 cachesize 的具体值。
|
||||
通过 select * from information_schema.ins_databases; 可以查看这些 cachesize 的具体值(单位为 MB)。
|
||||
|
||||
2. 如何查看 cacheload?
|
||||
|
||||
通过 show \<db_name>.vgroups; 可以查看 cacheload
|
||||
通过 show \<db_name>.vgroups; 可以查看 cacheload(单位为字节)。
|
||||
|
||||
3. 判断 cachesize 是否够用
|
||||
|
||||
|
|
|
@ -171,7 +171,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause
|
|||
|
||||
alter_table_clause: {
|
||||
alter_table_options
|
||||
| SET TAG tag_name = new_tag_value,tag_name2=new_tag2_value...
|
||||
| SET TAG tag_name = new_tag_value, tag_name2=new_tag2_value ...
|
||||
}
|
||||
|
||||
alter_table_options:
|
||||
|
@ -195,7 +195,7 @@ alter_table_option: {
|
|||
### 修改子表标签值
|
||||
|
||||
```
|
||||
ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1,tag_name2=new_tag_value2...;
|
||||
ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...;
|
||||
```
|
||||
|
||||
### 修改表生命周期
|
||||
|
|
|
@ -6,7 +6,7 @@ title: "删除数据"
|
|||
|
||||
删除数据是 TDengine 提供的根据指定时间段删除指定表或超级表中数据记录的功能,方便用户清理由于设备故障等原因产生的异常数据。
|
||||
|
||||
**注意**:删除数据并不会立即释放该表所占用的磁盘空间,而是把该表的数据标记为已删除,在查询时这些数据将不会再出现,但释放磁盘空间会延迟到系统自动或用户手动进行数据重整时。
|
||||
**注意**:删除数据并不会立即释放该表所占用的磁盘空间,而是把该表的数据标记为已删除,在查询时这些数据将不会再出现,但释放磁盘空间会延迟到系统自动清理(建库参数 keep 生效)或用户手动进行数据重整时(企业版功能 compact)。
|
||||
|
||||
**语法:**
|
||||
|
||||
|
|
|
@ -1817,7 +1817,7 @@ ignore_null_values: {
|
|||
}
|
||||
```
|
||||
|
||||
**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为0。
|
||||
**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为 0。
|
||||
|
||||
**返回数据类型**:同字段类型。
|
||||
|
||||
|
@ -1838,9 +1838,9 @@ ignore_null_values: {
|
|||
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0 版本以后支持)。
|
||||
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0 版本以后支持)。
|
||||
- INTERP 对于带复合主键的表的查询,若存在相同时间戳的数据,则只有对应的复合主键最小的数据参与运算。
|
||||
- INTERP 查询支持NEAR FILL模式, 即当需要FILL时, 使用距离当前时间点最近的数据进行插值, 当前后时间戳与当前时间断面一样近时, FILL 前一行的值. 此模式在流计算中和窗口查询中不支持。例如: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR)。(3.3.4.9版本及以后支持)。
|
||||
- INTERP 只有在使用FILL PREV/NEXT/NEAR 模式时才可以使用伪列 `_irowts_origin`。`_irowts_origin`在3.3.4.9版本及以后支持。
|
||||
- INTERP `RANEG`子句支持时间范围的扩展(3.3.4.9版本及以后支持), 如`RANGE('2023-01-01 00:00:00', 10s)`表示在时间点'2023-01-01 00:00:00'查找前后10s的数据进行插值, FILL PREV/NEXT/NEAR分别表示从时间点向前/向后/前后查找数据, 若时间点周围没有数据, 则使用FILL指定的值进行插值, 因此此时FILL子句必须指定值。例如: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). 目前仅支持时间点和时间范围的组合, 不支持时间区间和时间范围的组合, 即不支持RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h)。所指定的时间范围规则与EVERY类似, 单位不能是年或月, 值不能为0, 不能带引号。使用该扩展时, 不支持除FILL PREV/NEXT/NEAR外的其他FILL模式, 且不能指定EVERY子句。
|
||||
- INTERP 查询支持 NEAR FILL 模式, 即当需要 FILL 时, 使用距离当前时间点最近的数据进行插值, 当前后时间戳与当前时间断面一样近时, FILL 前一行的值. 此模式在流计算中和窗口查询中不支持。例如: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR)(3.3.4.9 版本及以后支持)。
|
||||
- INTERP 只有在使用 FILL PREV/NEXT/NEAR 模式时才可以使用伪列 `_irowts_origin`。`_irowts_origin`在 3.3.4.9 版本及以后支持。
|
||||
- INTERP `RANGE`子句支持时间范围的扩展(3.3.4.9 版本及以后支持), 如`RANGE('2023-01-01 00:00:00', 10s)`表示在时间点 '2023-01-01 00:00:00' 查找前后 10s 的数据进行插值, FILL PREV/NEXT/NEAR 分别表示从时间点向前/向后/前后查找数据, 若时间点周围没有数据, 则使用 FILL 指定的值进行插值, 因此此时 FILL 子句必须指定值。例如: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1)。目前仅支持时间点和时间范围的组合, 不支持时间区间和时间范围的组合, 即不支持 RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h)。所指定的时间范围规则与 EVERY 类似, 单位不能是年或月, 值不能为 0, 不能带引号。使用该扩展时, 不支持除FILL PREV/NEXT/NEAR外的其他 FILL 模式, 且不能指定 EVERY 子句。
|
||||
|
||||
### LAST
|
||||
|
||||
|
|
|
@ -42,32 +42,11 @@ DROP DNODE dnode_id [force] [unsafe]
|
|||
ALTER DNODE dnode_id dnode_option
|
||||
|
||||
ALTER ALL DNODES dnode_option
|
||||
|
||||
dnode_option: {
|
||||
'resetLog'
|
||||
| 'balance' 'value'
|
||||
| 'monitor' 'value'
|
||||
| 'debugFlag' 'value'
|
||||
| 'monDebugFlag' 'value'
|
||||
| 'vDebugFlag' 'value'
|
||||
| 'mDebugFlag' 'value'
|
||||
| 'cDebugFlag' 'value'
|
||||
| 'httpDebugFlag' 'value'
|
||||
| 'qDebugflag' 'value'
|
||||
| 'sdbDebugFlag' 'value'
|
||||
| 'uDebugFlag' 'value'
|
||||
| 'tsdbDebugFlag' 'value'
|
||||
| 'sDebugflag' 'value'
|
||||
| 'rpcDebugFlag' 'value'
|
||||
| 'dDebugFlag' 'value'
|
||||
| 'mqttDebugFlag' 'value'
|
||||
| 'wDebugFlag' 'value'
|
||||
| 'tmrDebugFlag' 'value'
|
||||
| 'cqDebugFlag' 'value'
|
||||
}
|
||||
```
|
||||
|
||||
上面语法中的这些可修改配置项其配置方式与 dnode 配置文件中的配置方式相同,区别是修改是动态的立即生效,且不需要重启 dnode。
|
||||
对于支持动态修改的配置参数,您可以使用 ALTER DNODE 或 ALTER ALL DNODES 语法修改 dnode 中配置参数的值,自 3.3.4.0 后,修改的配置参数将自动持久化,即便数据库服务重启后仍然生效。
|
||||
|
||||
对于一个配置参数是否支持动态修改,请您参考以下页面:[taosd 参考手册](../01-components/01-taosd.md)
|
||||
|
||||
value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输出级别为 debug:
|
||||
|
||||
|
@ -75,6 +54,18 @@ value 是参数的值,需要是字符格式。如修改 dnode 1 的日志输
|
|||
ALTER DNODE 1 'debugFlag' '143';
|
||||
```
|
||||
|
||||
### 补充说明:
|
||||
配置参数在 dnode 中被分为全局配置参数与局部配置参数,您可以查看 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 中的 category 字段来确认配置参数属于全局配置参数还是局部配置参数:
|
||||
1. 局部配置参数:您可以使用 ALTER DNODE 或 ALTER ALL DNODES 来更新某一个 dnode 或全部 dnodes 的局部配置参数。
|
||||
2. 全局配置参数:全局配置参数要求各个 dnode 保持一致,所以您只可以使用 ALTER ALL DNODES 来更新全部 dnodes 的全局配置参数。
|
||||
|
||||
配置参数是否可以动态修改,有以下三种情况:
|
||||
1. 支持动态修改 立即生效
|
||||
2. 支持动态修改 重启生效
|
||||
3. 不支持动态修改
|
||||
|
||||
对于重启后生效的配置参数,您可以通过 SHOW VARIABLES 或 SHOW DNODE dnode_id VARIABLE 看到修改后的值,但是需要重启数据库服务才使其生效。
|
||||
|
||||
## 添加管理节点
|
||||
|
||||
```sql
|
||||
|
@ -137,18 +128,12 @@ SHOW CLUSTER ALIVE;
|
|||
|
||||
```sql
|
||||
ALTER LOCAL local_option
|
||||
|
||||
local_option: {
|
||||
'resetLog'
|
||||
| 'rpcDebugFlag' 'value'
|
||||
| 'tmrDebugFlag' 'value'
|
||||
| 'cDebugFlag' 'value'
|
||||
| 'uDebugFlag' 'value'
|
||||
| 'debugFlag' 'value'
|
||||
}
|
||||
```
|
||||
|
||||
上面语法中的参数与在配置文件中配置客户端的用法相同,但不需要重启客户端,修改后立即生效。
|
||||
您可以使用以上语法更改客户端的配置参数,并且不需要重启客户端,修改后立即生效。
|
||||
|
||||
对于一个配置参数是否支持动态修改,请您参考以下页面:[taosc 参考手册](../01-components/02-taosc.md)
|
||||
|
||||
|
||||
## 查看客户端配置
|
||||
|
||||
|
|
|
@ -343,3 +343,17 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
|
|||
| 10 | raw_data | BIGINT | 预估的原始数据的大小, 单位为KB |
|
||||
|
||||
|
||||
## INS_FILESETS
|
||||
|
||||
提供当前数据存储的文件组的相关信息。
|
||||
|
||||
| # | **列名** | **数据类型** | **说明** |
|
||||
| --- | :-----------: | ------------ | --------------------------------------- |
|
||||
| 1 | db_name | VARCHAR(65) | 数据库名 |
|
||||
| 2 | vgroup_id | INT | vgroup id |
|
||||
| 3 | fileset_id | INT | 文件组 id |
|
||||
| 4 | start_time | TIMESTAMP | 文件组的覆盖数据的开始时间 |
|
||||
| 5 | end_time | TIMESTAMP | 文件组的覆盖数据的结束时间 |
|
||||
| 6 | total_size | BIGINT | 文件组的总大小 |
|
||||
| 7 | last_compact | TIMESTAMP | 最后一次压缩的时间 |
|
||||
| 8 | shold_compact | bool | 是否需要压缩,true:需要,false:不需要 |
|
||||
|
|
|
@ -14,16 +14,16 @@ CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
|
|||
|
||||
用户名最长不超过 23 个字节。
|
||||
|
||||
密码最长不超过 31 个字节。密码可以包含字母、数字以及除单引号、双引号、反引号、反斜杠和空格以外的特殊字符,密码不能为空字符串。
|
||||
密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类。特殊字符包括 `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`。
|
||||
|
||||
`SYSINFO` 表示该用户是否能够查看系统信息。`1` 表示可以查看,`0` 表示无权查看。系统信息包括服务配置、dnode、vnode、存储等信息。缺省值为 `1`。
|
||||
|
||||
`CREATEDB` 表示该用户是否能够创建数据库。`1` 表示可以创建,`0` 表示无权创建。缺省值为 `0`。// 从 TDengine 企业版 3.3.2.0 开始支持
|
||||
|
||||
在下面的示例中,我们创建一个密码为 `123456` 且可以查看系统信息的用户。
|
||||
在下面的示例中,我们创建一个密码为 `abc123!@#` 且可以查看系统信息的用户。
|
||||
|
||||
```sql
|
||||
taos> create user test pass '123456' sysinfo 1;
|
||||
taos> create user test pass 'abc123!@#' sysinfo 1;
|
||||
Query OK, 0 of 0 rows affected (0.001254s)
|
||||
```
|
||||
|
||||
|
|
|
@ -30,13 +30,13 @@ description: 可配置压缩算法
|
|||
- 各个数据类型的默认压缩算法列表和适用范围
|
||||
|
||||
| 数据类型 | 可选编码算法 | 编码算法默认值 | 可选压缩算法 | 压缩算法默认值 |压缩等级默认值|
|
||||
|:------------------------------------:|:----------------:|:-----------:|:--------------------:|:----:|:------:|
|
||||
| int/uint | simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium |
|
||||
| tinyint/untinyint/smallint/usmallint | simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium |
|
||||
| bigint/ubigint/timestamp | simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium |
|
||||
| float/double | delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| bool | bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||
|:------------------------------------:|:-------------------------:|:-----------:|:--------------------:|:----:|:------:|
|
||||
| int/uint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium |
|
||||
| tinyint/untinyint/smallint/usmallint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium |
|
||||
| bigint/ubigint/timestamp | disabled/simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium |
|
||||
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||
|
||||
|
||||
## SQL 语法
|
||||
|
|
|
@ -172,7 +172,7 @@ WKB规范请参考[Well-Known Binary (WKB)](https://libgeos.org/specifications/w
|
|||
|
||||
**原因**:程序没有找到依赖的本地函数库 taos。
|
||||
|
||||
**解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可,macOS 下需要建立软链 `ln -s /usr/local/lib/libtaos.dylib`。
|
||||
**解决方法**:Windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,Linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可,macOS 下需要建立软链 `ln -s /usr/local/lib/libtaos.dylib /usr/lib/libtaos.dylib`。
|
||||
|
||||
3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
|
||||
|
||||
|
|
|
@ -75,6 +75,7 @@ description: TDengine 服务端的错误码列表和详细说明
|
|||
| 0x80000133 | Invalid operation | 无效的或不支持的操作 | 1. 修改确认当前操作为合法有效支持的操作,检查参数有效性 2. 如果问题还未解决,保留现场和日志,github上报issue |
|
||||
| 0x80000134 | Invalid value | 无效值 | 保留现场和日志,github上报issue |
|
||||
| 0x80000135 | Invalid fqdn | 无效FQDN | 检查配置或输入的FQDN值是否正确 |
|
||||
| 0x8000013C | Invalid disk id | 不合法的disk id | 建议用户检查挂载磁盘是否失效或者使用参数 diskIDCheckEnabled 来跳过磁盘检查 |
|
||||
|
||||
|
||||
|
||||
|
@ -136,7 +137,7 @@ description: TDengine 服务端的错误码列表和详细说明
|
|||
| 0x80000350 | User already exists | Create user, 重复创建 | 确认操作是否正确 |
|
||||
| 0x80000351 | Invalid user | 用户不存在 | 确认操作是否正确 |
|
||||
| 0x80000352 | Invalid user format | 格式不正确 | 确认操作是否正确 |
|
||||
| 0x80000353 | Invalid password format | 格式不正确 | 确认操作是否正确 |
|
||||
| 0x80000353 | Invalid password format | 密码长度必须为 8 到 16 位,并且至少包含大写字母、小写字母、数字、特殊字符中的三类 | 确认密码字符串的格式 |
|
||||
| 0x80000354 | Can not get user from conn | 内部错误 | 上报issue |
|
||||
| 0x80000355 | Too many users | (仅企业版)用户数量超限 | 调整配置 |
|
||||
| 0x80000357 | Authentication failure | 密码不正确 | 确认操作是否正确 |
|
||||
|
@ -261,6 +262,7 @@ description: TDengine 服务端的错误码列表和详细说明
|
|||
| 0x80000529 | Vnode is stopped | Vnode 已经关闭 | 上报问题 |
|
||||
| 0x80000530 | Duplicate write request | 重复写入请求,内部错误 | 上报问题 |
|
||||
| 0x80000531 | Vnode query is busy | 查询忙碌 | 上报问题 |
|
||||
| 0x80000540 | Vnode already exist but Dbid not match | 内部错误 | 上报问题 |
|
||||
|
||||
|
||||
## tsdb
|
||||
|
@ -294,6 +296,9 @@ description: TDengine 服务端的错误码列表和详细说明
|
|||
| 0x80000729 | Task message error | 查询消息错误 | 保留现场和日志,github上报issue |
|
||||
| 0x8000072B | Task status error | 子查询状态错误 | 保留现场和日志,github上报issue |
|
||||
| 0x8000072F | Job not exist | 查询JOB已经不存在 | 保留现场和日志,github上报issue |
|
||||
| 0x80000739 | Query memory upper limit is reached | 单个查询达到内存使用上限 | 设置合理的内存上限或调整 SQL 语句 |
|
||||
| 0x8000073A | Query memory exhausted | dnode查询内存到达使用上限 | 设置合理的内存上限或调整并发查询量或增大系统内存 |
|
||||
| 0x8000073B | Timeout for long time no fetch | 查询被长时间中断未恢复 | 调整应用实现尽快 fetch 数据 |
|
||||
|
||||
## grant
|
||||
|
||||
|
|
|
@ -280,4 +280,21 @@ TDinsight插件中展示的数据是通过taosKeeper和taosAdapter服务收集
|
|||
https://docs.taosdata.com/reference/components/taosd/#%E7%9B%91%E6%8E%A7%E7%9B%B8%E5%85%B3
|
||||
您可以随时关闭该参数,只需要在taos.cfg 中修改telemetryReporting为 0,然后重启数据库服务即可。
|
||||
代码位于:https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c
|
||||
此外,对于安全性要求极高的企业版 TDengine Enterprise 来说,此参数不会工作。
|
||||
此外,对于安全性要求极高的企业版 TDengine Enterprise 来说,此参数不会工作。
|
||||
### 31 第一次连接集群时遇到“Sync leader is unreachable”怎么办?
|
||||
报这个错,说明第一次向集群的连接是成功的,但第一次访问的IP不是mnode的leader节点,客户端试图与leader建立连接时发生错误。客户端通过EP,也就是指定的fqdn与端口号寻找leader节点,常见的报错原因有两个:
|
||||
|
||||
- 集群中其他节点的端口没有打开
|
||||
- 客户端的hosts未正确配置
|
||||
|
||||
因此用户首先要检查服务端,集群的所有端口(原生连接默认6030,http连接默认6041)有无打开;其次是客户端的hosts文件中是否配置了集群所有节点的fqdn与IP信息。
|
||||
如仍无法解决,则需要联系涛思技术人员支持。
|
||||
|
||||
### 32 同一台服务器,数据库的数据目录 dataDir 不变,为什么原有数据库丢失且集群 ID 发生了变化?
|
||||
背景知识:TDengine 服务端进程(taosd)在启动时,若数据目录(dataDir,该目录在配置文件 taos.cfg 中指定)下不存在有效的数据文件子目录(如 mnode、dnode 和 vnode 等),则会自动创建这些目录。在创建新的 mnode 目录的同时,会分配一个新的集群 ID,从而产生一个新的集群。
|
||||
|
||||
原因分析:taosd 的数据目录 dataDir 可以指向多个不同的挂载点。如果这些挂载点未在 fstab 文件中配置自动挂载,服务器重启后,dataDir 将仅作为一个本地磁盘的普通目录存在,而未能按预期指向挂载的磁盘。此时,若 taosd 服务启动,它将在 dataDir 下新建目录,从而产生一个新的集群。
|
||||
|
||||
问题影响:服务器重启后,原有数据库丢失(注:并非真正丢失,只是原有的数据磁盘未挂载,暂时看不到)且集群 ID 发生变化,导致无法访问原有数据库。对于企业版用户,如果已针对集群 ID 进行授权,还会发现集群服务器的机器码未变,但原有的授权已失效。如果未针对该问题进行监控或者未及时发现并进行处理,则用户不会注意到原有数据库已经丢失,从而造成损失,增加运维成本。
|
||||
|
||||
问题解决:应在 fstab 文件中配置 dataDir 目录的自动挂载,确保 dataDir 始终指向预期的挂载点和目录,此时,再重启服务器,会找回原有的数据库和集群。在后续的版本中,我们将开发一个功能,使 taosd 在检测到启动前后 dataDir 发生变化时,在启动阶段退出,同时提供相应的错误提示。
|
|
@ -62,6 +62,7 @@ extern "C" {
|
|||
#define TSDB_INS_TABLE_ENCRYPTIONS "ins_encryptions"
|
||||
#define TSDB_INS_TABLE_TSMAS "ins_tsmas"
|
||||
#define TSDB_INS_DISK_USAGE "ins_disk_usage"
|
||||
#define TSDB_INS_TABLE_FILESETS "ins_filesets"
|
||||
|
||||
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
|
||||
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include "tarray.h"
|
||||
#include "tconfig.h"
|
||||
#include "tdef.h"
|
||||
#include "tmsg.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -30,6 +31,9 @@ extern "C" {
|
|||
#define SLOW_LOG_TYPE_OTHERS 0x4
|
||||
#define SLOW_LOG_TYPE_ALL 0x7
|
||||
|
||||
#define GLOBAL_CONFIG_FILE_VERSION 1
|
||||
#define LOCAL_CONFIG_FILE_VERSION 1
|
||||
|
||||
typedef enum {
|
||||
DND_CA_SM4 = 1,
|
||||
} EEncryptAlgor;
|
||||
|
@ -41,6 +45,8 @@ typedef enum {
|
|||
DND_CS_MNODE_WAL = 8,
|
||||
} EEncryptScope;
|
||||
|
||||
extern SConfig *tsCfg;
|
||||
|
||||
// cluster
|
||||
extern char tsFirst[];
|
||||
extern char tsSecond[];
|
||||
|
@ -49,6 +55,9 @@ extern char tsLocalEp[];
|
|||
extern char tsVersionName[];
|
||||
extern uint16_t tsServerPort;
|
||||
extern int32_t tsVersion;
|
||||
extern int32_t tsForceReadConfig;
|
||||
extern int32_t tsdmConfigVersion;
|
||||
extern int32_t tsConfigInited;
|
||||
extern int32_t tsStatusInterval;
|
||||
extern int32_t tsNumOfSupportVnodes;
|
||||
extern char tsEncryptAlgorithm[];
|
||||
|
@ -70,12 +79,23 @@ extern int32_t tsTagFilterResCacheSize;
|
|||
extern int32_t tsBypassFlag;
|
||||
|
||||
// queue & threads
|
||||
extern int32_t tsQueryMinConcurrentTaskNum;
|
||||
extern int32_t tsQueryMaxConcurrentTaskNum;
|
||||
extern int32_t tsQueryConcurrentTaskNum;
|
||||
extern int32_t tsSingleQueryMaxMemorySize;
|
||||
extern int8_t tsQueryUseMemoryPool;
|
||||
extern int8_t tsMemPoolFullFunc;
|
||||
//extern int32_t tsQueryBufferPoolSize;
|
||||
extern int32_t tsMinReservedMemorySize;
|
||||
extern int64_t tsCurrentAvailMemorySize;
|
||||
extern int8_t tsNeedTrim;
|
||||
extern int32_t tsQueryNoFetchTimeoutSec;
|
||||
extern int32_t tsNumOfQueryThreads;
|
||||
extern int32_t tsNumOfRpcThreads;
|
||||
extern int32_t tsNumOfRpcSessions;
|
||||
extern int32_t tsShareConnLimit;
|
||||
extern int32_t tsReadTimeout;
|
||||
extern int32_t tsTimeToGetAvailableConn;
|
||||
extern int32_t tsKeepAliveIdle;
|
||||
extern int32_t tsNumOfCommitThreads;
|
||||
extern int32_t tsNumOfTaskQueueThreads;
|
||||
extern int32_t tsNumOfMnodeQueryThreads;
|
||||
|
@ -92,6 +112,9 @@ extern int32_t tsNumOfSnodeWriteThreads;
|
|||
extern int64_t tsQueueMemoryAllowed;
|
||||
extern int32_t tsRetentionSpeedLimitMB;
|
||||
|
||||
extern const char *tsAlterCompactTaskKeywords;
|
||||
extern int32_t tsNumOfCompactThreads;
|
||||
|
||||
// sync raft
|
||||
extern int32_t tsElectInterval;
|
||||
extern int32_t tsHeartbeatInterval;
|
||||
|
@ -244,6 +267,7 @@ extern int64_t tsmaDataDeleteMark;
|
|||
extern int64_t tsWalFsyncDataSizeLimit;
|
||||
|
||||
// internal
|
||||
extern bool tsDiskIDCheckEnabled;
|
||||
extern int32_t tsTransPullupInterval;
|
||||
extern int32_t tsCompactPullupInterval;
|
||||
extern int32_t tsMqRebalanceInterval;
|
||||
|
@ -289,6 +313,16 @@ void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
|
|||
int8_t taosGranted(int8_t type);
|
||||
int32_t taosSetSlowLogScope(char *pScopeStr, int32_t *pScope);
|
||||
|
||||
int32_t taosPersistGlobalConfig(SArray *array, const char *path, int32_t version);
|
||||
int32_t taosPersistLocalConfig(const char *path);
|
||||
int32_t localConfigSerialize(SArray *array, char **serialized);
|
||||
int32_t tSerializeSConfigArray(SEncoder *pEncoder, SArray *array);
|
||||
int32_t tDeserializeSConfigArray(SDecoder *pDecoder, SArray *array);
|
||||
int32_t setAllConfigs(SConfig *pCfg);
|
||||
void printConfigNotMatch(SArray *array);
|
||||
|
||||
int32_t compareSConfigItemArrays(SArray *mArray, const SArray *dArray, SArray *diffArray);
|
||||
bool isConifgItemLazyMode(SConfigItem *item);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -162,6 +162,7 @@ typedef enum _mgmt_table {
|
|||
TSDB_MGMT_TABLE_ANODE,
|
||||
TSDB_MGMT_TABLE_ANODE_FULL,
|
||||
TSDB_MGMT_TABLE_USAGE,
|
||||
TSDB_MGMT_TABLE_FILESETS,
|
||||
TSDB_MGMT_TABLE_MAX,
|
||||
} EShowType;
|
||||
|
||||
|
@ -310,6 +311,7 @@ typedef enum ENodeType {
|
|||
QUERY_NODE_DESCRIBE_STMT,
|
||||
QUERY_NODE_RESET_QUERY_CACHE_STMT,
|
||||
QUERY_NODE_COMPACT_DATABASE_STMT,
|
||||
QUERY_NODE_COMPACT_VGROUPS_STMT,
|
||||
QUERY_NODE_CREATE_FUNCTION_STMT,
|
||||
QUERY_NODE_DROP_FUNCTION_STMT,
|
||||
QUERY_NODE_CREATE_STREAM_STMT,
|
||||
|
@ -402,6 +404,7 @@ typedef enum ENodeType {
|
|||
QUERY_NODE_CREATE_TSMA_STMT,
|
||||
QUERY_NODE_SHOW_CREATE_TSMA_STMT,
|
||||
QUERY_NODE_DROP_TSMA_STMT,
|
||||
QUERY_NODE_SHOW_FILESETS_STMT,
|
||||
|
||||
// logic plan node
|
||||
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
|
||||
|
@ -681,7 +684,7 @@ typedef struct {
|
|||
int32_t tsSlowLogThreshold;
|
||||
int32_t tsSlowLogMaxLen;
|
||||
int32_t tsSlowLogScope;
|
||||
int32_t tsSlowLogThresholdTest; //Obsolete
|
||||
int32_t tsSlowLogThresholdTest; // Obsolete
|
||||
char tsSlowLogExceptDb[TSDB_DB_NAME_LEN];
|
||||
} SMonitorParas;
|
||||
|
||||
|
@ -1347,6 +1350,11 @@ typedef struct {
|
|||
int8_t withArbitrator;
|
||||
int8_t encryptAlgorithm;
|
||||
char dnodeListStr[TSDB_DNODE_LIST_LEN];
|
||||
// 1. add auto-compact parameters
|
||||
int32_t compactInterval; // minutes
|
||||
int32_t compactStartTime; // minutes
|
||||
int32_t compactEndTime; // minutes
|
||||
int8_t compactTimeOffset; // hour
|
||||
} SCreateDbReq;
|
||||
|
||||
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
|
||||
|
@ -1378,6 +1386,11 @@ typedef struct {
|
|||
int32_t sqlLen;
|
||||
char* sql;
|
||||
int8_t withArbitrator;
|
||||
// 1. add auto-compact parameters
|
||||
int32_t compactInterval;
|
||||
int32_t compactStartTime;
|
||||
int32_t compactEndTime;
|
||||
int8_t compactTimeOffset;
|
||||
} SAlterDbReq;
|
||||
|
||||
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
|
||||
|
@ -1510,6 +1523,10 @@ typedef struct {
|
|||
int32_t s3ChunkSize;
|
||||
int32_t s3KeepLocal;
|
||||
int8_t s3Compact;
|
||||
int8_t compactTimeOffset;
|
||||
int32_t compactInterval;
|
||||
int32_t compactStartTime;
|
||||
int32_t compactEndTime;
|
||||
int32_t tsdbPageSize;
|
||||
int32_t walRetentionPeriod;
|
||||
int32_t walRollPeriod;
|
||||
|
@ -1617,6 +1634,7 @@ typedef struct {
|
|||
STimeWindow timeRange;
|
||||
int32_t sqlLen;
|
||||
char* sql;
|
||||
SArray* vgroupIds;
|
||||
} SCompactDbReq;
|
||||
|
||||
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
|
||||
|
@ -1827,6 +1845,16 @@ int32_t tSerializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
|
|||
int32_t tDeserializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
|
||||
void tFreeSStatusReq(SStatusReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int32_t forceReadConfig;
|
||||
int32_t cver;
|
||||
SArray* array;
|
||||
} SConfigReq;
|
||||
|
||||
int32_t tSerializeSConfigReq(void* buf, int32_t bufLen, SConfigReq* pReq);
|
||||
int32_t tDeserializeSConfigReq(void* buf, int32_t bufLen, SConfigReq* pReq);
|
||||
void tFreeSConfigReq(SConfigReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int32_t dnodeId;
|
||||
char machineId[TSDB_MACHINE_ID_LEN + 1];
|
||||
|
@ -1904,6 +1932,18 @@ int32_t tSerializeSStatusRsp(void* buf, int32_t bufLen, SStatusRsp* pRsp);
|
|||
int32_t tDeserializeSStatusRsp(void* buf, int32_t bufLen, SStatusRsp* pRsp);
|
||||
void tFreeSStatusRsp(SStatusRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
int32_t forceReadConfig;
|
||||
int32_t isConifgVerified;
|
||||
int32_t isVersionVerified;
|
||||
int32_t cver;
|
||||
SArray* array;
|
||||
} SConfigRsp;
|
||||
|
||||
int32_t tSerializeSConfigRsp(void* buf, int32_t bufLen, SConfigRsp* pRsp);
|
||||
int32_t tDeserializeSConfigRsp(void* buf, int32_t bufLen, SConfigRsp* pRsp);
|
||||
void tFreeSConfigRsp(SConfigRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
int32_t reserved;
|
||||
} SMTimerReq;
|
||||
|
@ -2002,6 +2042,8 @@ typedef struct {
|
|||
int32_t dnodeId;
|
||||
int32_t numberFileset;
|
||||
int32_t finished;
|
||||
int32_t progress;
|
||||
int64_t remainingTime;
|
||||
} SQueryCompactProgressRsp;
|
||||
|
||||
int32_t tSerializeSQueryCompactProgressRsp(void* buf, int32_t bufLen, SQueryCompactProgressRsp* pReq);
|
||||
|
@ -2209,6 +2251,7 @@ typedef struct {
|
|||
char name[TSDB_CONFIG_OPTION_LEN + 1];
|
||||
char value[TSDB_CONFIG_PATH_LEN + 1];
|
||||
char scope[TSDB_CONFIG_SCOPE_LEN + 1];
|
||||
char category[TSDB_CONFIG_CATEGORY_LEN + 1];
|
||||
char info[TSDB_CONFIG_INFO_LEN + 1];
|
||||
} SVariablesInfo;
|
||||
|
||||
|
@ -2417,8 +2460,9 @@ int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq
|
|||
void tFreeSMCfgDnodeReq(SMCfgDnodeReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
char config[TSDB_DNODE_CONFIG_LEN];
|
||||
char value[TSDB_DNODE_VALUE_LEN];
|
||||
char config[TSDB_DNODE_CONFIG_LEN];
|
||||
char value[TSDB_DNODE_VALUE_LEN];
|
||||
int32_t version;
|
||||
} SDCfgDnodeReq;
|
||||
|
||||
int32_t tSerializeSDCfgDnodeReq(void* buf, int32_t bufLen, SDCfgDnodeReq* pReq);
|
||||
|
@ -2758,7 +2802,7 @@ int32_t tDeserializeSResFetchReq(void* buf, int32_t bufLen, SResFetchReq* pReq);
|
|||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
uint64_t sId;
|
||||
uint64_t clientId;
|
||||
} SSchTasksStatusReq;
|
||||
|
||||
typedef struct {
|
||||
|
@ -2788,7 +2832,7 @@ typedef struct SQueryNodeEpId {
|
|||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
uint64_t sId;
|
||||
uint64_t clientId;
|
||||
SQueryNodeEpId epId;
|
||||
SArray* taskAction; // SArray<STaskAction>
|
||||
} SSchedulerHbReq;
|
||||
|
|
|
@ -47,7 +47,7 @@ typedef int32_t (*GetQueueSizeFp)(void* pMgmt, int32_t vgId, EQueueType qtype);
|
|||
typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg);
|
||||
typedef void (*SendRspFp)(SRpcMsg* pMsg);
|
||||
typedef void (*RegisterBrokenLinkArgFp)(struct SRpcMsg* pMsg);
|
||||
typedef void (*ReleaseHandleFp)(SRpcHandleInfo* pHandle, int8_t type);
|
||||
typedef void (*ReleaseHandleFp)(SRpcHandleInfo* pHandle, int8_t type, int32_t status);
|
||||
typedef void (*ReportStartup)(const char* name, const char* desc);
|
||||
|
||||
typedef struct {
|
||||
|
@ -76,7 +76,7 @@ int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg);
|
|||
int32_t tmsgSendSyncReq(const SEpSet* epSet, SRpcMsg* pMsg);
|
||||
void tmsgSendRsp(SRpcMsg* pMsg);
|
||||
void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg);
|
||||
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type);
|
||||
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type, int32_t code);
|
||||
void tmsgReportStartup(const char* name, const char* desc);
|
||||
bool tmsgUpdateDnodeInfo(int32_t* dnodeId, int64_t* clusterId, char* fqdn, uint16_t* port);
|
||||
void tmsgUpdateDnodeEpSet(SEpSet* epset);
|
||||
|
|
|
@ -260,6 +260,7 @@
|
|||
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_MND_CONFIG, "init-config", NULL, NULL)
|
||||
TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG)
|
||||
|
||||
TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8
|
||||
|
|
|
@ -17,10 +17,10 @@
|
|||
#define _TD_MND_H_
|
||||
|
||||
#include "monitor.h"
|
||||
#include "sync.h"
|
||||
#include "tmsg.h"
|
||||
#include "tmsgcb.h"
|
||||
#include "trpc.h"
|
||||
#include "sync.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -73,7 +73,7 @@ int32_t mndStart(SMnode *pMnode);
|
|||
*/
|
||||
void mndStop(SMnode *pMnode);
|
||||
|
||||
int32_t mndIsCatchUp(SMnode *pMnode);
|
||||
int32_t mndIsCatchUp(SMnode *pMnode);
|
||||
ESyncRole mndGetRole(SMnode *pMnode);
|
||||
int64_t mndGetTerm(SMnode *pMnode);
|
||||
|
||||
|
@ -109,7 +109,7 @@ int64_t mndGetRoleTimeMs(SMnode *pMnode);
|
|||
* @param pMsg The request msg.
|
||||
* @return int32_t 0 for success, -1 for failure.
|
||||
*/
|
||||
int32_t mndProcessRpcMsg(SRpcMsg *pMsg, SQueueInfo* pQueueInfo);
|
||||
int32_t mndProcessRpcMsg(SRpcMsg *pMsg, SQueueInfo *pQueueInfo);
|
||||
int32_t mndProcessSyncMsg(SRpcMsg *pMsg);
|
||||
int32_t mndPreProcessQueryMsg(SRpcMsg *pMsg);
|
||||
void mndPostProcessQueryMsg(SRpcMsg *pMsg);
|
||||
|
|
|
@ -29,6 +29,9 @@ extern "C" {
|
|||
#define DS_BUF_FULL 2
|
||||
#define DS_BUF_EMPTY 3
|
||||
|
||||
#define DS_FLAG_USE_MEMPOOL (1 << 0)
|
||||
|
||||
|
||||
struct SSDataBlock;
|
||||
|
||||
typedef struct SDeleterRes {
|
||||
|
@ -84,7 +87,7 @@ typedef struct SOutputData {
|
|||
* @param pHandle output
|
||||
* @return error code
|
||||
*/
|
||||
int32_t dsCreateDataSinker(void* pSinkManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void* pParam, const char* id);
|
||||
int32_t dsCreateDataSinker(void* pSinkManager, SDataSinkNode** ppDataSink, DataSinkHandle* pHandle, void* pParam, const char* id);
|
||||
|
||||
int32_t dsDataSinkGetCacheSize(SDataSinkStat* pStat);
|
||||
|
||||
|
@ -131,6 +134,9 @@ void dsScheduleProcess(void* ahandle, void* pItem);
|
|||
*/
|
||||
void dsDestroyDataSinker(DataSinkHandle handle);
|
||||
|
||||
int32_t dsGetSinkFlags(DataSinkHandle handle, uint64_t* pFlags);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -58,6 +58,7 @@ typedef struct {
|
|||
|
||||
struct SStorageAPI api;
|
||||
void* pWorkerCb;
|
||||
bool localExec;
|
||||
} SReadHandle;
|
||||
|
||||
// in queue mode, data streams are seperated by msg
|
||||
|
@ -167,6 +168,7 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
|
|||
|
||||
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);
|
||||
|
||||
int32_t qExecutorInit(void);
|
||||
void qResetTaskCode(qTaskInfo_t tinfo);
|
||||
|
||||
void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo);
|
||||
|
|
|
@ -171,6 +171,8 @@ typedef union {
|
|||
|
||||
typedef void (*TsdReaderNotifyCbFn)(ETsdReaderNotifyType type, STsdReaderNotifyInfo* info, void* param);
|
||||
|
||||
struct SFileSetReader;
|
||||
|
||||
typedef struct TsdReader {
|
||||
int32_t (*tsdReaderOpen)(void* pVnode, SQueryTableDataCond* pCond, void* pTableList, int32_t numOfTables,
|
||||
SSDataBlock* pResBlock, void** ppReader, const char* idstr, SHashObj** pIgnoreTables);
|
||||
|
@ -191,6 +193,13 @@ typedef struct TsdReader {
|
|||
|
||||
void (*tsdSetFilesetDelimited)(void* pReader);
|
||||
void (*tsdSetSetNotifyCb)(void* pReader, TsdReaderNotifyCbFn notifyFn, void* param);
|
||||
|
||||
// for fileset query
|
||||
int32_t (*fileSetReaderOpen)(void *pVnode, struct SFileSetReader **ppReader);
|
||||
int32_t (*fileSetReadNext)(struct SFileSetReader *);
|
||||
int32_t (*fileSetGetEntryField)(struct SFileSetReader *, const char *, void *);
|
||||
void (*fileSetReaderClose)(struct SFileSetReader **);
|
||||
|
||||
} TsdReader;
|
||||
|
||||
typedef struct SStoreCacheReader {
|
||||
|
@ -431,7 +440,7 @@ typedef struct SStateStore {
|
|||
int32_t (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
|
||||
GetTsFun fp, void* pFile, TSKEY delMark, const char* id, int64_t ckId, int8_t type,
|
||||
struct SStreamFileState** ppFileState);
|
||||
|
||||
|
||||
int32_t (*streamStateGroupPut)(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
|
||||
SStreamStateCur* (*streamStateGroupGetCur)(SStreamState* pState);
|
||||
void (*streamStateGroupCurNext)(SStreamStateCur* pCur);
|
||||
|
|
|
@ -42,11 +42,12 @@ extern "C" {
|
|||
#define SHOW_CREATE_VIEW_RESULT_FIELD1_LEN (TSDB_VIEW_FNAME_LEN + 4 + VARSTR_HEADER_SIZE)
|
||||
#define SHOW_CREATE_VIEW_RESULT_FIELD2_LEN (TSDB_MAX_ALLOWED_SQL_LEN + VARSTR_HEADER_SIZE)
|
||||
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_COLS 4
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_COLS 5
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_PATH_LEN + VARSTR_HEADER_SIZE)
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD3_LEN (TSDB_CONFIG_SCOPE_LEN + VARSTR_HEADER_SIZE)
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD4_LEN (TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE)
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD4_LEN (TSDB_CONFIG_CATEGORY_LEN + VARSTR_HEADER_SIZE)
|
||||
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD5_LEN (TSDB_CONFIG_INFO_LEN + VARSTR_HEADER_SIZE)
|
||||
|
||||
#define COMPACT_DB_RESULT_COLS 3
|
||||
#define COMPACT_DB_RESULT_FIELD1_LEN 32
|
||||
|
@ -110,6 +111,16 @@ typedef struct SDatabaseOptions {
|
|||
SValueNode* s3KeepLocalStr;
|
||||
int8_t s3Compact;
|
||||
int8_t withArbitrator;
|
||||
// for auto-compact
|
||||
int8_t compactTimeOffset; // hours
|
||||
int32_t compactInterval; // minutes
|
||||
int32_t compactStartTime; // minutes
|
||||
int32_t compactEndTime; // minutes
|
||||
SValueNode* pCompactTimeOffsetNode;
|
||||
SValueNode* pCompactIntervalNode;
|
||||
SNodeList* pCompactTimeRangeList;
|
||||
// for cache
|
||||
SDbCfgInfo* pDbCfg;
|
||||
} SDatabaseOptions;
|
||||
|
||||
typedef struct SCreateDatabaseStmt {
|
||||
|
@ -159,6 +170,14 @@ typedef struct SCompactDatabaseStmt {
|
|||
SNode* pEnd;
|
||||
} SCompactDatabaseStmt;
|
||||
|
||||
typedef struct SCompactVgroupsStmt {
|
||||
ENodeType type;
|
||||
SNode* pDbName;
|
||||
SNodeList* vgidList;
|
||||
SNode* pStart;
|
||||
SNode* pEnd;
|
||||
} SCompactVgroupsStmt;
|
||||
|
||||
typedef struct STableOptions {
|
||||
ENodeType type;
|
||||
bool commentNull;
|
||||
|
|
|
@ -631,7 +631,7 @@ typedef struct SDownstreamSourceNode {
|
|||
SQueryNodeAddr addr;
|
||||
uint64_t clientId;
|
||||
uint64_t taskId;
|
||||
uint64_t schedId;
|
||||
uint64_t sId;
|
||||
int32_t execId;
|
||||
int32_t fetchMsgType;
|
||||
bool localExec;
|
||||
|
|
|
@ -113,6 +113,11 @@ int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64
|
|||
|
||||
int32_t qWorkerDbgEnableDebug(char *option);
|
||||
|
||||
void qWorkerRetireJob(uint64_t jobId, uint64_t clientId, int32_t errCode);
|
||||
|
||||
void qWorkerRetireJobs(int64_t retireSize, int32_t errCode);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -171,7 +171,7 @@ void *rpcReallocCont(void *ptr, int64_t contLen);
|
|||
int32_t rpcSendRequest(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid);
|
||||
int32_t rpcSendResponse(const SRpcMsg *pMsg);
|
||||
int32_t rpcRegisterBrokenLinkArg(SRpcMsg *msg);
|
||||
int32_t rpcReleaseHandle(void *handle, int8_t type); // just release conn to rpc instance, no close sock
|
||||
int32_t rpcReleaseHandle(void *handle, int8_t type, int32_t code); // just release conn to rpc instance, no close sock
|
||||
|
||||
// These functions will not be called in the child process
|
||||
int32_t rpcSendRequestWithCtx(void *thandle, const SEpSet *pEpSet, SRpcMsg *pMsg, int64_t *rid, SRpcCtx *ctx);
|
||||
|
|
|
@ -110,6 +110,7 @@ extern "C" {
|
|||
#include "osLz4.h"
|
||||
#include "osMath.h"
|
||||
#include "osMemory.h"
|
||||
#include "osMemPool.h"
|
||||
#include "osRand.h"
|
||||
#include "osSemaphore.h"
|
||||
#include "osSignal.h"
|
||||
|
|
|
@ -83,6 +83,8 @@ int32_t taosUnLockFile(TdFilePtr pFile);
|
|||
int32_t taosUmaskFile(int32_t maskVal);
|
||||
|
||||
int32_t taosStatFile(const char *path, int64_t *size, int64_t *mtime, int64_t *atime);
|
||||
int32_t taosGetFileDiskID(const char *path, int64_t *diskid);
|
||||
bool taosCheckFileDiskID(const char *path, int64_t *actDiskID, int64_t expDiskID);
|
||||
int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno);
|
||||
int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int64_t *mtime);
|
||||
bool taosCheckExistFile(const char *pathname);
|
||||
|
|
|
@ -0,0 +1,206 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef _TD_OS_MEMPOOL_H_
|
||||
#define _TD_OS_MEMPOOL_H_
|
||||
|
||||
#include "os.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define MEMPOOL_MAX_CHUNK_SIZE (1 << 30)
|
||||
#define MEMPOOL_MIN_CHUNK_SIZE (1 << 20)
|
||||
|
||||
typedef enum MemPoolEvictPolicy {
|
||||
E_EVICT_ALL = 1,
|
||||
E_EVICT_NONE,
|
||||
E_EVICT_AUTO,
|
||||
E_EVICT_MAX_VALUE, // no used
|
||||
} MemPoolEvictPolicy;
|
||||
|
||||
typedef struct SMemPoolJob {
|
||||
uint64_t jobId;
|
||||
uint64_t clientId;
|
||||
|
||||
int32_t remainSession;
|
||||
|
||||
int64_t allocMemSize;
|
||||
int64_t maxAllocMemSize;
|
||||
} SMemPoolJob;
|
||||
|
||||
typedef struct SMPStatItem {
|
||||
int64_t inErr;
|
||||
int64_t exec;
|
||||
int64_t succ;
|
||||
int64_t fail;
|
||||
} SMPStatItem;
|
||||
|
||||
typedef struct SMPStatItemExt {
|
||||
int64_t inErr;
|
||||
int64_t exec;
|
||||
int64_t succ;
|
||||
int64_t fail;
|
||||
int64_t origExec;
|
||||
int64_t origSucc;
|
||||
int64_t origFail;
|
||||
} SMPStatItemExt;
|
||||
|
||||
typedef struct SMPMemoryStat {
|
||||
SMPStatItem memMalloc;
|
||||
SMPStatItem memCalloc;
|
||||
SMPStatItemExt memRealloc;
|
||||
SMPStatItem memStrdup;
|
||||
SMPStatItem memStrndup;
|
||||
SMPStatItem memFree;
|
||||
SMPStatItem memTrim;
|
||||
|
||||
SMPStatItem chunkMalloc;
|
||||
SMPStatItem chunkRecycle;
|
||||
SMPStatItem chunkReUse;
|
||||
SMPStatItem chunkFree;
|
||||
} SMPMemoryStat;
|
||||
|
||||
typedef struct SMPStatDetail {
|
||||
SMPMemoryStat times;
|
||||
SMPMemoryStat bytes;
|
||||
} SMPStatDetail;
|
||||
|
||||
|
||||
typedef void (*mpDecConcSessionNum)(void);
|
||||
typedef void (*mpIncConcSessionNum)(void);
|
||||
typedef void (*mpSetConcSessionNum)(int32_t);
|
||||
typedef void (*mpReserveFailFp)(int64_t, int32_t);
|
||||
typedef void (*mpReserveReachFp)(uint64_t, uint64_t, int32_t);
|
||||
typedef void (*mpCfgUpdate)(void*, void*);
|
||||
|
||||
typedef struct SMemPoolCallBack {
|
||||
//mpDecConcSessionNum decSessFp;
|
||||
//mpIncConcSessionNum incSessFp;
|
||||
//mpSetConcSessionNum setSessFp;
|
||||
mpReserveFailFp failFp;
|
||||
mpReserveReachFp reachFp;
|
||||
//mpCfgUpdate cfgUpdateFp;
|
||||
} SMemPoolCallBack;
|
||||
|
||||
|
||||
typedef struct SMemPoolCfg {
|
||||
//bool reserveMode;
|
||||
int64_t reserveSize;
|
||||
//int32_t *upperLimitSize; //MB
|
||||
//int64_t retireUnitSize;
|
||||
int32_t *jobQuota; //MB
|
||||
int32_t chunkSize;
|
||||
int32_t threadNum;
|
||||
MemPoolEvictPolicy evicPolicy;
|
||||
SMemPoolCallBack cb;
|
||||
} SMemPoolCfg;
|
||||
|
||||
#define MEMPOOL_GET_ALLOC_SIZE(_dstat) ((_dstat)->bytes.memMalloc.succ + (_dstat)->bytes.memCalloc.succ + (_dstat)->bytes.memRealloc.succ + (_dstat)->bytes.memStrdup.succ + (_dstat)->bytes.memStrndup.succ)
|
||||
#define MEMPOOL_GET_FREE_SIZE(_dstat) ((_dstat)->bytes.memRealloc.origSucc + (_dstat)->bytes.memFree.succ)
|
||||
#define MEMPOOL_GET_USED_SIZE(_dstat) (MEMPOOL_GET_ALLOC_SIZE(_dstat) - MEMPOOL_GET_FREE_SIZE(_dstat))
|
||||
|
||||
|
||||
int32_t taosMemPoolOpen(char* poolName, SMemPoolCfg* cfg, void** poolHandle);
|
||||
void *taosMemPoolMalloc(void* poolHandle, void* session, int64_t size, char* fileName, int32_t lineNo);
|
||||
void *taosMemPoolCalloc(void* poolHandle, void* session, int64_t num, int64_t size, char* fileName, int32_t lineNo);
|
||||
void *taosMemPoolRealloc(void* poolHandle, void* session, void *ptr, int64_t size, char* fileName, int32_t lineNo);
|
||||
char *taosMemPoolStrdup(void* poolHandle, void* session, const char *ptr, char* fileName, int32_t lineNo);
|
||||
char *taosMemPoolStrndup(void* poolHandle, void* session, const char *ptr, int64_t size, char* fileName, int32_t lineNo);
|
||||
void taosMemPoolFree(void* poolHandle, void* session, void *ptr, char* fileName, int32_t lineNo);
|
||||
int64_t taosMemPoolGetMemorySize(void* poolHandle, void* session, void *ptr, char* fileName, int32_t lineNo);
|
||||
int32_t taosMemPoolTrim(void* poolHandle, void* session, int32_t size, char* fileName, int32_t lineNo, bool* trimed);
|
||||
void *taosMemPoolMallocAlign(void* poolHandle, void* session, uint32_t alignment, int64_t size, char* fileName, int32_t lineNo);
|
||||
void taosMemPoolClose(void* poolHandle);
|
||||
void taosMemPoolModDestroy(void);
|
||||
void taosAutoMemoryFree(void *ptr);
|
||||
int32_t taosMemPoolInitSession(void* poolHandle, void** ppSession, void* pJob, char *sessionId);
|
||||
void taosMemPoolDestroySession(void* poolHandle, void* session);
|
||||
int32_t taosMemPoolCallocJob(uint64_t jobId, uint64_t cId, void** ppJob);
|
||||
void taosMemPoolCfgUpdate(void* poolHandle, SMemPoolCfg* pCfg);
|
||||
void taosMemPoolPrintStat(void* poolHandle, void* session, char* procName);
|
||||
int32_t taosMemPoolTryLockPool(void* poolHandle, bool readLock);
|
||||
void taosMemPoolUnLockPool(void* poolHandle, bool readLock);
|
||||
void taosMemPoolGetUsedSizeBegin(void* poolHandle, int64_t* usedSize, bool* needEnd);
|
||||
void taosMemPoolGetUsedSizeEnd(void* poolHandle);
|
||||
int32_t taosMemPoolGetSessionStat(void* session, SMPStatDetail** ppStat, int64_t* allocSize, int64_t* maxAllocSize);
|
||||
void taosMemPoolSchedTrim(void);
|
||||
int32_t taosMemoryPoolInit(mpReserveFailFp, mpReserveReachFp);
|
||||
|
||||
|
||||
#define taosMemPoolFreeClear(ptr) \
|
||||
do { \
|
||||
if (ptr) { \
|
||||
taosMemPoolFree((void *)ptr); \
|
||||
(ptr) = NULL; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
#ifndef BUILD_TEST
|
||||
extern void* gMemPoolHandle;
|
||||
extern threadlocal void* threadPoolSession;
|
||||
extern threadlocal bool threadPoolEnabled;
|
||||
extern int8_t tsMemPoolFullFunc;
|
||||
|
||||
|
||||
#define taosEnableMemPoolUsage(_session) do { threadPoolSession = _session; tsEnableRandErr = true;} while (0)
|
||||
#define taosDisableMemPoolUsage() do { threadPoolSession = NULL; tsEnableRandErr = false;} while (0)
|
||||
|
||||
#define taosSaveDisableMemPoolUsage(_enable, _randErr) do { (_enable) = threadPoolEnabled; (_randErr) = tsEnableRandErr; threadPoolEnabled = false; tsEnableRandErr = false;} while (0)
|
||||
#define taosRestoreEnableMemPoolUsage(_enable, _randErr) do { threadPoolEnabled = (_enable); tsEnableRandErr = (_randErr);} while (0)
|
||||
|
||||
|
||||
#define taosMemoryMalloc(_size) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolMalloc(gMemPoolHandle, threadPoolSession, _size, (char*)__FILE__, __LINE__)) : (taosMemMalloc(_size)))
|
||||
#define taosMemoryCalloc(_num, _size) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolCalloc(gMemPoolHandle, threadPoolSession, _num, _size, (char*)__FILE__, __LINE__)) : (taosMemCalloc(_num, _size)))
|
||||
#define taosMemoryRealloc(_ptr, _size) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolRealloc(gMemPoolHandle, threadPoolSession, _ptr, _size, (char*)__FILE__, __LINE__)) : (taosMemRealloc(_ptr, _size)))
|
||||
#define taosStrdup(_ptr) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolStrdup(gMemPoolHandle, threadPoolSession, _ptr, (char*)__FILE__, __LINE__)) : (taosStrdupi(_ptr)))
|
||||
#define taosStrndup(_ptr, _size) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolStrndup(gMemPoolHandle, threadPoolSession, _ptr, _size, (char*)__FILE__, __LINE__)) : (taosStrndupi(_ptr, _size)))
|
||||
#define taosMemoryFree(_ptr) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolFree(gMemPoolHandle, threadPoolSession, _ptr, (char*)__FILE__, __LINE__)) : (taosMemFree(_ptr)))
|
||||
#define taosMemorySize(_ptr) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolGetMemorySize(gMemPoolHandle, threadPoolSession, _ptr, (char*)__FILE__, __LINE__)) : (taosMemSize(_ptr)))
|
||||
#define taosMemoryTrim(_size, _trimed) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolTrim(gMemPoolHandle, threadPoolSession, _size, (char*)__FILE__, __LINE__, _trimed)) : (taosMemTrim(_size, _trimed)))
|
||||
#define taosMemoryMallocAlign(_alignment, _size) ((threadPoolEnabled && threadPoolSession) ? (taosMemPoolMallocAlign(gMemPoolHandle, threadPoolSession, _alignment, _size, (char*)__FILE__, __LINE__)) : (taosMemMallocAlign(_alignment, _size)))
|
||||
#else
|
||||
#define taosEnableMemoryPoolUsage(_pool, _session)
|
||||
#define taosDisableMemoryPoolUsage()
|
||||
#define taosSaveDisableMemoryPoolUsage()
|
||||
#define taosRestoreEnableMemoryPoolUsage()
|
||||
|
||||
#define taosMemoryMalloc(_size) taosMemMalloc(_size)
|
||||
#define taosMemoryCalloc(_num, _size) taosMemCalloc(_num, _size)
|
||||
#define taosMemoryRealloc(_ptr, _size) taosMemRealloc(_ptr, _size)
|
||||
#define taosStrdup(_ptr) taosStrdupi(_ptr)
|
||||
#define taosStrndup(_ptr, _size) taosStrndupi(_ptr, _size)
|
||||
#define taosMemoryFree(_ptr) taosMemFree(_ptr)
|
||||
#define taosMemorySize(_ptr) taosMemSize(_ptr)
|
||||
#define taosMemoryTrim(_size, _trimed) taosMemTrim(_size, _trimed)
|
||||
#define taosMemoryMallocAlign(_alignment, _size) taosMemMallocAlign(_alignment, _size)
|
||||
|
||||
#endif
|
||||
|
||||
#define taosMemoryFreeClear(ptr) \
|
||||
do { \
|
||||
if (ptr) { \
|
||||
taosMemoryFree((void *)ptr); \
|
||||
(ptr) = NULL; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*_TD_OS_MEMPOOL_H_*/
|
|
@ -36,35 +36,32 @@ extern "C" {
|
|||
#endif // ifndef ALLOW_FORBID_FUNC
|
||||
#endif // if !defined(WINDOWS)
|
||||
|
||||
// #define taosMemoryMalloc malloc
|
||||
// #define taosMemoryCalloc calloc
|
||||
// #define taosMemoryRealloc realloc
|
||||
// #define taosMemoryFree free
|
||||
|
||||
int32_t taosMemoryDbgInit();
|
||||
int32_t taosMemoryDbgInitRestore();
|
||||
void *taosMemoryMalloc(int64_t size);
|
||||
void *taosMemoryCalloc(int64_t num, int64_t size);
|
||||
void *taosMemoryRealloc(void *ptr, int64_t size);
|
||||
char *taosStrdup(const char *ptr);
|
||||
void taosMemoryFree(void *ptr);
|
||||
int64_t taosMemorySize(void *ptr);
|
||||
void *taosMemMalloc(int64_t size);
|
||||
void *taosMemCalloc(int64_t num, int64_t size);
|
||||
void *taosMemRealloc(void *ptr, int64_t size);
|
||||
char *taosStrdupi(const char *ptr);
|
||||
char *taosStrndupi(const char *ptr, int64_t size);
|
||||
void taosMemFree(void *ptr);
|
||||
int64_t taosMemSize(void *ptr);
|
||||
void taosPrintBackTrace();
|
||||
void taosMemoryTrim(int32_t size);
|
||||
void *taosMemoryMallocAlign(uint32_t alignment, int64_t size);
|
||||
int32_t taosMemTrim(int32_t size, bool* trimed);
|
||||
void *taosMemMallocAlign(uint32_t alignment, int64_t size);
|
||||
|
||||
#define TAOS_MEMSET(_s, _c, _n) ((void)memset(_s, _c, _n))
|
||||
#define TAOS_MEMCPY(_d, _s, _n) ((void)memcpy(_d, _s, _n))
|
||||
#define TAOS_MEMMOVE(_d, _s, _n) ((void)memmove(_d, _s, _n))
|
||||
|
||||
#define taosMemoryFreeClear(ptr) \
|
||||
#define taosMemFreeClear(ptr) \
|
||||
do { \
|
||||
if (ptr) { \
|
||||
taosMemoryFree((void *)ptr); \
|
||||
taosMemFree((void *)ptr); \
|
||||
(ptr) = NULL; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#include "osMemPool.h"
|
||||
#define TAOS_MEMORY_REALLOC(ptr, len) \
|
||||
do { \
|
||||
void *tmp = taosMemoryRealloc(ptr, (len)); \
|
||||
|
|
|
@ -25,7 +25,7 @@ typedef int32_t TdUcs4;
|
|||
#if !defined(DISALLOW_NCHAR_WITHOUT_ICONV)// && defined(DARWIN)
|
||||
#include "iconv.h"
|
||||
#else
|
||||
typedef void *iconv_t;
|
||||
typedef void *iconv_t;
|
||||
#endif
|
||||
typedef enum { M2C = 0, C2M, CM_NUM } ConvType;
|
||||
|
||||
|
@ -67,29 +67,35 @@ typedef struct {
|
|||
#ifdef strndup
|
||||
#undef strndup
|
||||
#endif
|
||||
#define strndup STR_TO_F_FUNC_TAOS_FORBID
|
||||
#define strndup STR_TO_F_FUNC_TAOS_FORBID
|
||||
|
||||
#endif
|
||||
|
||||
#define tstrncpy(dst, src, size) \
|
||||
do { \
|
||||
(void)strncpy((dst), (src), (size)); \
|
||||
(dst)[(size) - 1] = 0; \
|
||||
(dst)[(size)-1] = 0; \
|
||||
} while (0)
|
||||
|
||||
int64_t tsnprintf(char *dst, int64_t size, const char *format, ...);
|
||||
#define TAOS_STRCPY(_dst, _src) ((void)strcpy(_dst, _src))
|
||||
#define TAOS_STRCPY(_dst, _src) ((void)strcpy(_dst, _src))
|
||||
#define TAOS_STRNCPY(_dst, _src, _size) ((void)strncpy(_dst, _src, _size))
|
||||
#define TAOS_STRCAT(_dst, _src) ((void)strcat(_dst, _src))
|
||||
#define TAOS_STRNCAT(_dst, _src, len) ((void)strncat(_dst, _src, len))
|
||||
#define TAOS_STRCAT(_dst, _src) ((void)strcat(_dst, _src))
|
||||
#define TAOS_STRNCAT(_dst, _src, len) ((void)strncat(_dst, _src, len))
|
||||
|
||||
char *tstrdup(const char *src);
|
||||
char *tstrndup(const char *str, int64_t size);
|
||||
int32_t taosUcs4len(TdUcs4 *ucs4);
|
||||
int32_t taosStr2int64(const char *str, int64_t *val);
|
||||
int32_t taosStr2int16(const char *str, int16_t *val);
|
||||
int32_t taosStr2int32(const char *str, int32_t *val);
|
||||
int32_t taosStr2int16(const char *str, int16_t *val);
|
||||
int32_t taosStr2int8(const char *str, int8_t *val);
|
||||
|
||||
int32_t taosStr2Uint64(const char *str, uint64_t *val);
|
||||
int32_t taosStr2Uint32(const char *str, uint32_t *val);
|
||||
int32_t taosStr2Uint16(const char *str, uint16_t *val);
|
||||
int32_t taosStr2Uint8(const char *str, uint8_t *val);
|
||||
|
||||
iconv_t taosAcquireConv(int32_t *idx, ConvType type, void* charsetCxt);
|
||||
void taosReleaseConv(int32_t idx, iconv_t conv, ConvType type, void* charsetCxt);
|
||||
int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, void* charsetCxt);
|
||||
|
@ -121,10 +127,9 @@ double taosStr2Double(const char *str, char **pEnd);
|
|||
float taosStr2Float(const char *str, char **pEnd);
|
||||
int32_t taosHex2Ascii(const char *z, uint32_t n, void **data, uint32_t *size);
|
||||
int32_t taosAscii2Hex(const char *z, uint32_t n, void **data, uint32_t *size);
|
||||
char *taosStrndup(const char *s, int n);
|
||||
//int32_t taosBin2Ascii(const char *z, uint32_t n, void** data, uint32_t* size);
|
||||
bool isHex(const char* z, uint32_t n);
|
||||
bool isValidateHex(const char* z, uint32_t n);
|
||||
// int32_t taosBin2Ascii(const char *z, uint32_t n, void** data, uint32_t* size);
|
||||
bool isHex(const char *z, uint32_t n);
|
||||
bool isValidateHex(const char *z, uint32_t n);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -45,6 +45,7 @@ int32_t taosGetCpuInstructions(char* sse42, char* avx, char* avx2, char* fma, ch
|
|||
int32_t taosGetTotalMemory(int64_t *totalKB);
|
||||
int32_t taosGetProcMemory(int64_t *usedKB);
|
||||
int32_t taosGetSysMemory(int64_t *usedKB);
|
||||
int32_t taosGetSysAvailMemory(int64_t *availSize);
|
||||
int32_t taosGetDiskSize(char *dataDir, SDiskSize *diskSize);
|
||||
int32_t taosGetProcIODelta(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, int64_t *write_bytes);
|
||||
void taosSetDefaultProcIODelta(int64_t *rchars, int64_t *wchars, int64_t *read_bytes, int64_t *write_bytes);
|
||||
|
|
|
@ -159,6 +159,7 @@ int32_t taosGetErrSize();
|
|||
#define TSDB_CODE_SOCKET_ERROR TAOS_DEF_ERROR_CODE(0, 0x0139)
|
||||
#define TSDB_CODE_UNSUPPORT_OS TAOS_DEF_ERROR_CODE(0, 0x013A)
|
||||
#define TSDB_CODE_TIME_ERROR TAOS_DEF_ERROR_CODE(0, 0x013B)
|
||||
#define TSDB_CODE_INVALID_DISK_ID TAOS_DEF_ERROR_CODE(0, 0x013C)
|
||||
|
||||
//client
|
||||
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
|
||||
|
@ -480,6 +481,7 @@ int32_t taosGetErrSize();
|
|||
#define TSDB_CODE_DNODE_INVALID_EN_WHITELIST TAOS_DEF_ERROR_CODE(0, 0x0428)
|
||||
#define TSDB_CODE_DNODE_INVALID_MONITOR_PARAS TAOS_DEF_ERROR_CODE(0, 0x0429)
|
||||
#define TSDB_CODE_MNODE_STOPPED TAOS_DEF_ERROR_CODE(0, 0x042A)
|
||||
#define TSDB_CODE_DNODE_INVALID_COMPACT_TASKS TAOS_DEF_ERROR_CODE(0, 0x042B)
|
||||
|
||||
// anode
|
||||
#define TSDB_CODE_MND_ANODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0430)
|
||||
|
@ -565,6 +567,7 @@ int32_t taosGetErrSize();
|
|||
#define TSDB_CODE_VND_ARB_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0537) // internal
|
||||
#define TSDB_CODE_VND_WRITE_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0538) // internal
|
||||
#define TSDB_CODE_VND_TTL_FLUSH_INCOMPLETION TAOS_DEF_ERROR_CODE(0, 0x0539) // internal
|
||||
#define TSDB_CODE_VND_ALREADY_EXIST_BUT_NOT_MATCH TAOS_DEF_ERROR_CODE(0, 0x0540)
|
||||
|
||||
// tsdb
|
||||
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
||||
|
@ -643,6 +646,9 @@ int32_t taosGetErrSize();
|
|||
#define TSDB_CODE_QRY_FILTER_RANGE_ERROR TAOS_DEF_ERROR_CODE(0, 0x0736)
|
||||
#define TSDB_CODE_QRY_FILTER_INVALID_TYPE TAOS_DEF_ERROR_CODE(0, 0x0737)
|
||||
#define TSDB_CODE_QRY_TASK_SUCC_TO_PARTSUSS TAOS_DEF_ERROR_CODE(0, 0x0738)
|
||||
#define TSDB_CODE_QRY_REACH_QMEM_THRESHOLD TAOS_DEF_ERROR_CODE(0, 0x0739)
|
||||
#define TSDB_CODE_QRY_QUERY_MEM_EXHAUSTED TAOS_DEF_ERROR_CODE(0, 0x073A)
|
||||
#define TSDB_CODE_QRY_NO_FETCH_TIMEOUT TAOS_DEF_ERROR_CODE(0, 0x073B)
|
||||
|
||||
// grant
|
||||
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800)
|
||||
|
@ -899,6 +905,7 @@ int32_t taosGetErrSize();
|
|||
#define TSDB_CODE_PAR_INVALID_ANOMALY_WIN_COL TAOS_DEF_ERROR_CODE(0, 0x2683)
|
||||
#define TSDB_CODE_PAR_INVALID_ANOMALY_WIN_OPT TAOS_DEF_ERROR_CODE(0, 0x2684)
|
||||
#define TSDB_CODE_PAR_INVALID_FORECAST_CLAUSE TAOS_DEF_ERROR_CODE(0, 0x2685)
|
||||
#define TSDB_CODE_PAR_INVALID_VGID_LIST TAOS_DEF_ERROR_CODE(0, 0x2686)
|
||||
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
|
||||
|
||||
//planner
|
||||
|
|
|
@ -53,19 +53,30 @@ typedef enum {
|
|||
} ECfgDataType;
|
||||
|
||||
typedef enum { CFG_SCOPE_SERVER, CFG_SCOPE_CLIENT, CFG_SCOPE_BOTH } ECfgScopeType;
|
||||
typedef enum { CFG_CATEGORY_GLOBAL, CFG_CATEGORY_LOCAL } ECfgCategoryType;
|
||||
typedef enum { CFG_ALTER_LOCAL, CFG_ALTER_DNODE, CFG_ALTER_ALL_DNODES } CfgAlterType;
|
||||
|
||||
typedef enum {
|
||||
CFG_DYN_NONE = 0,
|
||||
CFG_DYN_SERVER = 1,
|
||||
CFG_DYN_CLIENT = 2,
|
||||
CFG_DYN_BOTH = 3,
|
||||
CFG_DYN_SERVER_LAZY = 3,
|
||||
CFG_DYN_CLIENT_LAZY = 4,
|
||||
CFG_DYN_BOTH_LAZY = 5,
|
||||
CFG_DYN_BOTH = 6,
|
||||
#ifdef TD_ENTERPRISE
|
||||
CFG_DYN_ENT_SERVER = CFG_DYN_SERVER,
|
||||
CFG_DYN_ENT_CLIENT = CFG_DYN_CLIENT,
|
||||
CFG_DYN_ENT_SERVER_LAZY = CFG_DYN_SERVER_LAZY,
|
||||
CFG_DYN_ENT_CLIENT_LAZY = CFG_DYN_CLIENT_LAZY,
|
||||
CFG_DYN_ENT_BOTH_LAZY = CFG_DYN_BOTH_LAZY,
|
||||
CFG_DYN_ENT_BOTH = CFG_DYN_BOTH,
|
||||
#else
|
||||
CFG_DYN_ENT_SERVER = CFG_DYN_NONE,
|
||||
CFG_DYN_ENT_CLIENT = CFG_DYN_NONE,
|
||||
CFG_DYN_ENT_SERVER_LAZY = CFG_DYN_NONE,
|
||||
CFG_DYN_ENT_CLIENT_LAZY = CFG_DYN_NONE,
|
||||
CFG_DYN_ENT_BOTH_LAZY = CFG_DYN_NONE,
|
||||
CFG_DYN_ENT_BOTH = CFG_DYN_NONE,
|
||||
#endif
|
||||
} ECfgDynType;
|
||||
|
@ -75,6 +86,7 @@ typedef struct SConfigItem {
|
|||
ECfgDataType dtype;
|
||||
int8_t scope;
|
||||
int8_t dynScope;
|
||||
int8_t category;
|
||||
char *name;
|
||||
union {
|
||||
bool bval;
|
||||
|
@ -99,17 +111,21 @@ typedef struct {
|
|||
const char *value;
|
||||
} SConfigPair;
|
||||
|
||||
typedef struct SConfig SConfig;
|
||||
typedef struct SConfig SConfig;
|
||||
typedef struct SConfigIter SConfigIter;
|
||||
|
||||
int32_t cfgInit(SConfig **ppCfg);
|
||||
int32_t cfgLoad(SConfig *pCfg, ECfgSrcType cfgType, const void *sourceStr);
|
||||
int32_t cfgLoadFromArray(SConfig *pCfg, SArray *pArgs); // SConfigPair
|
||||
int32_t cfgLoadFromArray(SConfig *pCfg, SArray *pArgs); // SConfigPair
|
||||
int32_t cfgUpdateFromArray(SConfig *pCfg, SArray *pArgs); // SConfigItem
|
||||
void cfgCleanup(SConfig *pCfg);
|
||||
int32_t cfgGetSize(SConfig *pCfg);
|
||||
SConfigItem *cfgGetItem(SConfig *pCfg, const char *pName);
|
||||
int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcType stype, bool lock);
|
||||
int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *pVal, bool isServer);
|
||||
int32_t cfgGetAndSetItem(SConfig *pCfg, SConfigItem **ppItem, const char *name, const char *value, ECfgSrcType stype,
|
||||
bool lock);
|
||||
int32_t cfgCheckRangeForDynUpdate(SConfig *pCfg, const char *name, const char *pVal, bool isServer,
|
||||
CfgAlterType alterType);
|
||||
|
||||
int32_t cfgCreateIter(SConfig *pConf, SConfigIter **ppIter);
|
||||
SConfigItem *cfgNextIter(SConfigIter *pIter);
|
||||
|
@ -118,15 +134,16 @@ void cfgLock(SConfig *pCfg);
|
|||
void cfgUnLock(SConfig *pCfg);
|
||||
|
||||
// clang-format off
|
||||
int32_t cfgAddBool(SConfig *pCfg, const char *name, bool defaultVal, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddInt32(SConfig *pCfg, const char *name, int32_t defaultVal, int64_t minval, int64_t maxval, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddInt64(SConfig *pCfg, const char *name, int64_t defaultVal, int64_t minval, int64_t maxval, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddFloat(SConfig *pCfg, const char *name, float defaultVal, float minval, float maxval, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddString(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddDir(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddLocale(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddCharset(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddTimezone(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope);
|
||||
int32_t cfgAddBool(SConfig *pCfg, const char *name, bool defaultVal, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddInt32(SConfig *pCfg, const char *name, int32_t defaultVal, int64_t minval, int64_t maxval, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddInt32Ex(SConfig *pCfg, const char *name, int32_t defaultVal, int64_t minval, int64_t maxval, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddInt64(SConfig *pCfg, const char *name, int64_t defaultVal, int64_t minval, int64_t maxval, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddFloat(SConfig *pCfg, const char *name, float defaultVal, float minval, float maxval, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddString(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddDir(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddLocale(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddCharset(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope,int8_t category);
|
||||
int32_t cfgAddTimezone(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope, int8_t dynScope,int8_t category);
|
||||
// clang-format on
|
||||
|
||||
const char *cfgStypeStr(ECfgSrcType type);
|
||||
|
@ -134,12 +151,17 @@ const char *cfgDtypeStr(ECfgDataType type);
|
|||
|
||||
int32_t cfgDumpItemValue(SConfigItem *pItem, char *buf, int32_t bufSize, int32_t *pLen);
|
||||
int32_t cfgDumpItemScope(SConfigItem *pItem, char *buf, int32_t bufSize, int32_t *pLen);
|
||||
int32_t cfgDumpItemCategory(SConfigItem *pItem, char *buf, int32_t bufSize, int32_t *pLen);
|
||||
|
||||
void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump);
|
||||
void cfgDumpCfgS3(SConfig *pCfg, bool tsc, bool dump);
|
||||
|
||||
int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char *apolloUrl);
|
||||
SArray *taosGetLocalCfg(SConfig *pCfg);
|
||||
SArray *taosGetGlobalCfg(SConfig *pCfg);
|
||||
|
||||
void taosSetLocalCfg(SConfig *pCfg, SArray *pArray);
|
||||
void taosSetGlobalCfg(SConfig *pCfg, SArray *pArray);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -190,45 +190,22 @@ typedef enum EOperatorType {
|
|||
} EOperatorType;
|
||||
|
||||
static const EOperatorType OPERATOR_ARRAY[] = {
|
||||
OP_TYPE_ADD,
|
||||
OP_TYPE_SUB,
|
||||
OP_TYPE_MULTI,
|
||||
OP_TYPE_DIV,
|
||||
OP_TYPE_REM,
|
||||
OP_TYPE_ADD, OP_TYPE_SUB, OP_TYPE_MULTI, OP_TYPE_DIV, OP_TYPE_REM,
|
||||
|
||||
OP_TYPE_MINUS,
|
||||
OP_TYPE_MINUS,
|
||||
|
||||
OP_TYPE_BIT_AND,
|
||||
OP_TYPE_BIT_OR,
|
||||
OP_TYPE_BIT_AND, OP_TYPE_BIT_OR,
|
||||
|
||||
OP_TYPE_GREATER_THAN,
|
||||
OP_TYPE_GREATER_EQUAL,
|
||||
OP_TYPE_LOWER_THAN,
|
||||
OP_TYPE_LOWER_EQUAL,
|
||||
OP_TYPE_EQUAL,
|
||||
OP_TYPE_NOT_EQUAL,
|
||||
OP_TYPE_IN,
|
||||
OP_TYPE_NOT_IN,
|
||||
OP_TYPE_LIKE,
|
||||
OP_TYPE_NOT_LIKE,
|
||||
OP_TYPE_MATCH,
|
||||
OP_TYPE_NMATCH,
|
||||
OP_TYPE_GREATER_THAN, OP_TYPE_GREATER_EQUAL, OP_TYPE_LOWER_THAN, OP_TYPE_LOWER_EQUAL, OP_TYPE_EQUAL,
|
||||
OP_TYPE_NOT_EQUAL, OP_TYPE_IN, OP_TYPE_NOT_IN, OP_TYPE_LIKE, OP_TYPE_NOT_LIKE, OP_TYPE_MATCH, OP_TYPE_NMATCH,
|
||||
|
||||
OP_TYPE_IS_NULL,
|
||||
OP_TYPE_IS_NOT_NULL,
|
||||
OP_TYPE_IS_TRUE,
|
||||
OP_TYPE_IS_FALSE,
|
||||
OP_TYPE_IS_UNKNOWN,
|
||||
OP_TYPE_IS_NOT_TRUE,
|
||||
OP_TYPE_IS_NOT_FALSE,
|
||||
OP_TYPE_IS_NOT_UNKNOWN,
|
||||
//OP_TYPE_COMPARE_MAX_VALUE,
|
||||
OP_TYPE_IS_NULL, OP_TYPE_IS_NOT_NULL, OP_TYPE_IS_TRUE, OP_TYPE_IS_FALSE, OP_TYPE_IS_UNKNOWN, OP_TYPE_IS_NOT_TRUE,
|
||||
OP_TYPE_IS_NOT_FALSE, OP_TYPE_IS_NOT_UNKNOWN,
|
||||
// OP_TYPE_COMPARE_MAX_VALUE,
|
||||
|
||||
OP_TYPE_JSON_GET_VALUE,
|
||||
OP_TYPE_JSON_CONTAINS,
|
||||
OP_TYPE_JSON_GET_VALUE, OP_TYPE_JSON_CONTAINS,
|
||||
|
||||
OP_TYPE_ASSIGN
|
||||
};
|
||||
OP_TYPE_ASSIGN};
|
||||
|
||||
#define OP_TYPE_CALC_MAX OP_TYPE_BIT_OR
|
||||
|
||||
|
@ -318,6 +295,8 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_MAX_JSON_KEY_LEN 256
|
||||
|
||||
#define TSDB_AUTH_LEN 16
|
||||
#define TSDB_PASSWORD_MIN_LEN 8
|
||||
#define TSDB_PASSWORD_MAX_LEN 16
|
||||
#define TSDB_PASSWORD_LEN 32
|
||||
#define TSDB_USET_PASSWORD_LEN 129
|
||||
#define TSDB_VERSION_LEN 32
|
||||
|
@ -529,6 +508,15 @@ typedef enum ELogicConditionType {
|
|||
#define TSDB_MIN_TABLE_TTL 0
|
||||
#define TSDB_DEFAULT_TABLE_TTL 0
|
||||
|
||||
#define TSDB_DEFAULT_COMPACT_INTERVAL 0
|
||||
#define TSDB_MIN_COMPACT_INTERVAL 10 // unit minute
|
||||
#define TSDB_MAX_COMPACT_INTERVAL TSDB_MAX_KEEP // unit minute
|
||||
#define TSDB_DEFAULT_COMPACT_START_TIME 0
|
||||
#define TSDB_DEFAULT_COMPACT_END_TIME 0
|
||||
#define TSDB_MIN_COMPACT_TIME_OFFSET 0
|
||||
#define TSDB_MAX_COMPACT_TIME_OFFSET 23
|
||||
#define TSDB_DEFAULT_COMPACT_TIME_OFFSET 0
|
||||
|
||||
#define TSDB_MIN_EXPLAIN_RATIO 0
|
||||
#define TSDB_MAX_EXPLAIN_RATIO 1
|
||||
#define TSDB_DEFAULT_EXPLAIN_RATIO 0.001
|
||||
|
@ -603,6 +591,7 @@ enum { ENCRYPT_KEY_STAT_UNKNOWN = 0, ENCRYPT_KEY_STAT_UNSET, ENCRYPT_KEY_STAT_SE
|
|||
typedef struct {
|
||||
char dir[TSDB_FILENAME_LEN];
|
||||
int32_t level;
|
||||
int64_t diskId;
|
||||
int32_t primary;
|
||||
int8_t disable; // disable create new file
|
||||
} SDiskCfg;
|
||||
|
@ -638,12 +627,13 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 };
|
|||
#define VNODE_HANDLE -3
|
||||
#define CLIENT_HANDLE -5
|
||||
|
||||
#define TSDB_CONFIG_OPTION_LEN 32
|
||||
#define TSDB_CONFIG_VALUE_LEN 64
|
||||
#define TSDB_CONFIG_SCOPE_LEN 8
|
||||
#define TSDB_CONFIG_NUMBER 16
|
||||
#define TSDB_CONFIG_PATH_LEN 4096
|
||||
#define TSDB_CONFIG_INFO_LEN 64
|
||||
#define TSDB_CONFIG_OPTION_LEN 32
|
||||
#define TSDB_CONFIG_VALUE_LEN 64
|
||||
#define TSDB_CONFIG_SCOPE_LEN 8
|
||||
#define TSDB_CONFIG_NUMBER 16
|
||||
#define TSDB_CONFIG_PATH_LEN 4096
|
||||
#define TSDB_CONFIG_INFO_LEN 64
|
||||
#define TSDB_CONFIG_CATEGORY_LEN 8
|
||||
|
||||
#define QUERY_ID_SIZE 20
|
||||
#define QUERY_OBJ_ID_SIZE 18
|
||||
|
@ -674,6 +664,8 @@ typedef enum {
|
|||
ANAL_ALGO_TYPE_END,
|
||||
} EAnalAlgoType;
|
||||
|
||||
#define MIN_RESERVE_MEM_SIZE 1024 // MB
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -88,6 +88,7 @@ static int32_t tEncodeU64v(SEncoder* pCoder, uint64_t val);
|
|||
static int32_t tEncodeI64v(SEncoder* pCoder, int64_t val);
|
||||
static int32_t tEncodeFloat(SEncoder* pCoder, float val);
|
||||
static int32_t tEncodeDouble(SEncoder* pCoder, double val);
|
||||
static int32_t tEncodeBool(SEncoder* pCoder, bool val);
|
||||
static int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len);
|
||||
static int32_t tEncodeBinaryEx(SEncoder* pCoder, const uint8_t* val, uint32_t len);
|
||||
static int32_t tEncodeCStrWithLen(SEncoder* pCoder, const char* val, uint32_t len);
|
||||
|
@ -116,6 +117,7 @@ static int32_t tDecodeU64v(SDecoder* pCoder, uint64_t* val);
|
|||
static int32_t tDecodeI64v(SDecoder* pCoder, int64_t* val);
|
||||
static int32_t tDecodeFloat(SDecoder* pCoder, float* val);
|
||||
static int32_t tDecodeDouble(SDecoder* pCoder, double* val);
|
||||
static int32_t tDecodeBool(SDecoder* pCoder, bool* val);
|
||||
static int32_t tDecodeBinary(SDecoder* pCoder, uint8_t** val, uint32_t* len);
|
||||
static int32_t tDecodeCStrAndLen(SDecoder* pCoder, char** val, uint32_t* len);
|
||||
static int32_t tDecodeCStr(SDecoder* pCoder, char** val);
|
||||
|
@ -205,6 +207,8 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) {
|
|||
return tEncodeU64(pCoder, v.ui);
|
||||
}
|
||||
|
||||
static int32_t tEncodeBool(SEncoder* pCoder, bool val) { return tEncodeU8(pCoder, val ? 1 : 0); }
|
||||
|
||||
static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) {
|
||||
TAOS_CHECK_RETURN(tEncodeU32v(pCoder, len));
|
||||
if (len) {
|
||||
|
@ -391,6 +395,15 @@ static FORCE_INLINE int32_t tDecodeDouble(SDecoder* pCoder, double* val) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t tDecodeBool(SDecoder* pCoder, bool* val) {
|
||||
uint8_t v;
|
||||
TAOS_CHECK_RETURN(tDecodeU8(pCoder, &v));
|
||||
if (val) {
|
||||
*val = v ? true : false;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static FORCE_INLINE int32_t tDecodeBinary(SDecoder* pCoder, uint8_t** val, uint32_t* len) {
|
||||
uint32_t length = 0;
|
||||
|
||||
|
|
|
@ -87,6 +87,8 @@ BoundedQueue* createBoundedQueue(uint32_t maxSize, pq_comp_fn fn, FDelete delete
|
|||
|
||||
void taosBQSetFn(BoundedQueue* q, pq_comp_fn fn);
|
||||
|
||||
void taosBQClear(BoundedQueue* q);
|
||||
|
||||
void destroyBoundedQueue(BoundedQueue* q);
|
||||
|
||||
/*
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue