commit 32c370f368
Merge remote-tracking branch 'origin/main' into enh/useSafySysFunc
@@ -112,14 +112,14 @@ Fill in the example data from the MQTT message body in **Message Body**.
 
 JSON data supports JSONObject or JSONArray, and the json parser can parse the following data:
 
-``` json
+```json
 {"id": 1, "message": "hello-word"}
 {"id": 2, "message": "hello-word"}
 ```
 
 or
 
-``` json
+```json
 [{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]
 ```
 
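The two payload shapes above (newline-delimited JSONObjects, or one JSONArray) can be handled with a single dual-mode parse step. A minimal Python sketch of that logic, for illustration only (the real parsing is built into the product):

```python
import json

def parse_payload(payload: str) -> list:
    """Accept either a JSONArray or newline-delimited JSONObjects."""
    text = payload.strip()
    if text.startswith("["):
        return json.loads(text)  # one JSONArray of objects
    # otherwise: one JSONObject per line
    return [json.loads(line) for line in text.splitlines() if line.strip()]

rows = parse_payload('{"id": 1, "message": "hello-word"}\n{"id": 2, "message": "hello-word"}')
same = parse_payload('[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]')
assert rows == same
```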
@@ -109,7 +109,7 @@ In addition, the [Kerberos](https://web.mit.edu/kerberos/) authentication servic
 
 After configuration, you can use the [kcat](https://github.com/edenhill/kcat) tool to verify Kafka topic consumption:
 
-```bash
+```shell
 kcat <topic> \
     -b <kafka-server:port> \
     -G kcat \
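The same consumption check can also be scripted instead of using kcat. A sketch using the third-party kafka-python package; the topic and broker address are placeholders:

```python
from kafka import KafkaConsumer  # pip install kafka-python

# Mirrors `kcat <topic> -b <kafka-server:port> -G kcat`: join a consumer
# group and print whatever arrives on the topic.
consumer = KafkaConsumer(
    "my-topic",                             # placeholder topic
    bootstrap_servers="kafka-server:9092",  # placeholder broker
    group_id="kcat",
    auto_offset_reset="earliest",
)
for record in consumer:
    print(record.value.decode("utf-8", errors="replace"))
```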
@@ -171,14 +171,14 @@ Enter sample data from the Kafka message body in **Message Body**.
 
 JSON data supports JSONObject or JSONArray, and the following data can be parsed using a JSON parser:
 
-``` json
+```json
 {"id": 1, "message": "hello-word"}
 {"id": 2, "message": "hello-word"}
 ```
 
 or
 
-``` json
+```json
 [{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]
 ```
 
@@ -83,7 +83,7 @@ Parsing is the process of parsing unstructured strings into structured data. The
 
 JSON parsing supports JSONObject or JSONArray. The following JSON sample data can automatically parse fields: `groupid`, `voltage`, `current`, `ts`, `inuse`, `location`.
 
-``` json
+```json
 {"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}
 {"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"}
 {"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}
@@ -91,7 +91,7 @@ JSON parsing supports JSONObject or JSONArray. The following JSON sample data ca
 
 Or
 
-``` json
+```json
 [{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"},
 {"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"},
 {"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}]
@@ -101,7 +101,7 @@ Subsequent examples will only explain using JSONObject.
 
 The following nested JSON data can automatically parse fields `groupid`, `data_voltage`, `data_current`, `ts`, `inuse`, `location_0_province`, `location_0_city`, `location_0_datun`, and you can also choose which fields to parse and set aliases for the parsed fields.
 
-``` json
+```json
 {"groupid": 170001, "data": { "voltage": "221V", "current": 12.3 }, "ts": "2023-12-18T22:12:00", "inuse": true, "location": [{"province": "beijing", "city":"chaoyang", "street": "datun"}]}
 ```
 
@@ -114,7 +114,7 @@ The following nested JSON data can automatically parse fields `groupid`, `data_v
 
 You can use **named capture groups** in regular expressions to extract multiple fields from any string (text) field. As shown in the figure, extract fields such as access IP, timestamp, and accessed URL from nginx logs.
 
-``` re
+```regex
 (?<ip>\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b)\s-\s-\s\[(?<ts>\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2}\s\+\d{4})\]\s"(?<method>[A-Z]+)\s(?<url>[^\s"]+).*(?<status>\d{3})\s(?<length>\d+)
 ```
 
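A capture-group pattern like the one above can be sanity-checked offline before it is configured. A Python sketch (note that Python spells named groups `(?P<name>...)`; the sample log line is invented):

```python
import re

PATTERN = re.compile(
    r'(?P<ip>\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b)\s-\s-\s'
    r'\[(?P<ts>\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2}\s\+\d{4})\]\s'
    r'"(?P<method>[A-Z]+)\s(?P<url>[^\s"]+).*(?P<status>\d{3})\s(?P<length>\d+)'
)

line = '192.168.0.1 - - [18/Dec/2023:22:12:00 +0800] "GET /api/v1/health HTTP/1.1" 200 512'
match = PATTERN.search(line)
if match:
    print(match.groupdict())
    # {'ip': '192.168.0.1', 'ts': '18/Dec/2023:22:12:00 +0800', 'method': 'GET',
    #  'url': '/api/v1/health', 'status': '200', 'length': '512'}
```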
@@ -133,7 +133,7 @@ Custom rhai syntax scripts for parsing input data (refer to `https://rhai.rs/boo
 
 For example, for data reporting three-phase voltage values, which are entered into three subtables respectively, such data needs to be parsed
 
-``` json
+```json
 {
     "ts": "2024-06-27 18:00:00",
     "voltage": "220.1,220.3,221.1",
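What such a script has to produce is one record per phase. The intended transformation, sketched in Python rather than rhai (the output field names are illustrative, not the documented schema):

```python
import json

payload = json.loads('{"ts": "2024-06-27 18:00:00", "voltage": "220.1,220.3,221.1"}')

# Split the comma-separated three-phase reading into one row per phase,
# so each row can be routed to its own subtable.
rows = [
    {"ts": payload["ts"], "phase": i + 1, "voltage": float(v)}
    for i, v in enumerate(payload["voltage"].split(","))
]
print(rows)
# [{'ts': '2024-06-27 18:00:00', 'phase': 1, 'voltage': 220.1}, ...]
```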
@@ -164,7 +164,7 @@ The final parsing result is shown below:
 
 The parsed data may still not meet the data requirements of the target table. For example, the original data collected by a smart meter is as follows (in json format):
 
-``` json
+```json
 {"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}
 {"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"}
 {"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}
@@ -83,14 +83,14 @@ Next, create a supertable (STABLE) named `meters`, whose table structure include
 
 Create Database
 
-```bash
+```shell
 curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
 --data 'CREATE DATABASE IF NOT EXISTS power'
 ```
 
 Create Table, specify the database as `power` in the URL
 
-```bash
+```shell
 curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql/power' \
 --data 'CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))'
 ```
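The two `curl` calls above translate directly into any HTTP client. A sketch with Python's requests library, assuming the same local endpoint and default root/taosdata credentials:

```python
import requests

BASE = "http://127.0.0.1:6041"
AUTH = ("root", "taosdata")  # default credentials from the example

def exec_sql(sql: str, db: str = "") -> dict:
    """POST one SQL statement to the REST endpoint, optionally scoped to a database."""
    url = f"{BASE}/rest/sql/{db}" if db else f"{BASE}/rest/sql"
    resp = requests.post(url, data=sql, auth=AUTH)
    resp.raise_for_status()
    return resp.json()

print(exec_sql("CREATE DATABASE IF NOT EXISTS power"))
print(exec_sql(
    "CREATE STABLE IF NOT EXISTS meters "
    "(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
    "TAGS (groupId INT, location BINARY(24))",
    db="power",
))
```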
@@ -167,7 +167,7 @@ NOW is an internal system function, defaulting to the current time of the client
 
 Write data
 
-```bash
+```shell
 curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
 --data 'INSERT INTO power.d1001 USING power.meters TAGS(2,'\''California.SanFrancisco'\'') VALUES (NOW + 1a, 10.30000, 219, 0.31000) (NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000) power.d1002 USING power.meters TAGS(3, '\''California.SanFrancisco'\'') VALUES (NOW + 1a, 10.30000, 218, 0.25000)'
 ```
@@ -247,7 +247,7 @@ Rust connector also supports using **serde** for deserializing to get structured
 
 Query Data
 
-```bash
+```shell
 curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
 --data 'SELECT ts, current, location FROM power.meters limit 100'
 ```
@@ -329,7 +329,7 @@ Below are code examples of setting reqId to execute SQL in various language conn
 
 Query data, specify reqId as 3
 
-```bash
+```shell
 curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql?req_id=3' \
 --data 'SELECT ts, current, location FROM power.meters limit 1'
 ```
@@ -273,19 +273,19 @@ To better operate the above data structures, some convenience functions are prov
 
 Create table:
 
-```bash
+```shell
 create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
 ```
 
 Create custom function:
 
-```bash
+```shell
 create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
 ```
 
 Use custom function:
 
-```bash
+```shell
 select max_vol(vol1, vol2, vol3, deviceid) from battery;
 ```
 
@@ -334,7 +334,7 @@ When developing UDFs in Python, you need to implement the specified interface fu
 
 The interface for scalar functions is as follows.
 
-```Python
+```python
 def process(input: datablock) -> tuple[output_type]:
 ```
 
@@ -347,7 +347,7 @@ The main parameters are as follows:
 
 The interface for aggregate functions is as follows.
 
-```Python
+```python
 def start() -> bytes:
 def reduce(inputs: datablock, buf: bytes) -> bytes
 def finish(buf: bytes) -> output_type:
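To illustrate the start/reduce/finish contract, here is a hypothetical aggregate that tracks a running maximum, carrying its intermediate state through `buf` as pickled bytes. The `shape()`/`data()` block accessors are assumptions for illustration, not confirmed by the text above:

```python
import pickle

def start() -> bytes:
    # Initial intermediate state: no value seen yet.
    return pickle.dumps(None)

def reduce(inputs, buf: bytes) -> bytes:
    # Fold one data block into the running maximum.
    acc = pickle.loads(buf)
    rows, _cols = inputs.shape()   # assumed datablock accessor
    for r in range(rows):
        v = inputs.data(r, 0)      # assumed datablock accessor
        if v is not None and (acc is None or v > acc):
            acc = v
    return pickle.dumps(acc)

def finish(buf: bytes):
    # Produce the final scalar result from the last intermediate state.
    return pickle.loads(buf)
```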
@@ -365,7 +365,7 @@ Finally, when all row data blocks have been processed, the finish function is ca
 
 The interfaces for initialization and destruction are as follows.
 
-```Python
+```python
 def init()
 def destroy()
 ```
@@ -381,7 +381,7 @@ Parameter description:
 
 The template for developing scalar functions in Python is as follows.
 
-```Python
+```python
 def init():
     # initialization
 def destroy():
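Filling in that template, a hypothetical scalar function that doubles its numeric input could look like this (again, the block accessors are assumptions for illustration):

```python
def init():
    # One-time setup; nothing to allocate in this toy example.
    pass

def destroy():
    # Symmetric teardown for anything allocated in init().
    pass

def process(block):
    # Return one output value per input row, per the template's contract.
    rows, _cols = block.shape()                         # assumed accessor
    return [block.data(r, 0) * 2 for r in range(rows)]  # assumed accessor
```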
@@ -393,7 +393,7 @@ def process(input: datablock) -> tuple[output_type]:
 
 The template for developing aggregate functions in Python is as follows.
 
-```Python
+```python
 def init():
     #initialization
 def destroy():
@@ -828,7 +828,7 @@ Through this example, we learned how to define aggregate functions and print cus
 <details>
 <summary>pybitand.py</summary>
 
-```Python
+```python
 {{#include tests/script/sh/pybitand.py}}
 ```
 
@@ -15,7 +15,7 @@ TDengine is designed for various writing scenarios, and many of these scenarios
 
 ### Syntax
 
-```SQL
+```sql
 COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
 SHOW COMPACTS [compact_id];
 KILL COMPACT compact_id;
@@ -41,7 +41,7 @@ KILL COMPACT compact_id;
 
 When one or more nodes in a multi-replica cluster restart due to upgrades or other reasons, it may lead to an imbalance in the load among the various dnodes in the cluster. In extreme cases, all vgroup leaders may be located on the same dnode. To solve this problem, you can use the following commands, which were first released in version 3.0.4.0. It is recommended to use the latest version as much as possible.
 
-```SQL
+```sql
 balance vgroup leader; # Rebalance all vgroup leaders
 balance vgroup leader on <vgroup_id>; # Rebalance a vgroup leader
 balance vgroup leader database <database_name>; # Rebalance all vgroup leaders within a database
@@ -121,7 +121,7 @@ The cost of using object storage services is related to the amount of data store
 
 When the TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the related data files will be split into multiple file blocks, each with a default size of 512 MB (`s3_chunkpages * tsdb_pagesize`). Except for the last file block, which is retained on the local file system, the rest of the file blocks are uploaded to the object storage service.
 
-```math
+```text
 Upload Count = Data File Size / (s3_chunkpages * tsdb_pagesize) - 1
 ```
 
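Plugging the defaults into that formula: with 512 MB blocks, a hypothetical 5 GiB data file yields nine uploads, because the last block stays on the local file system:

```python
s3_chunkpages = 131072     # default, in TSDB pages
tsdb_pagesize = 4 * 1024   # default 4 KB page, in bytes
block_bytes = s3_chunkpages * tsdb_pagesize   # 536870912 bytes = 512 MB

data_file_bytes = 5 * 1024**3                       # hypothetical 5 GiB data file
upload_count = data_file_bytes // block_bytes - 1   # last block stays local
print(upload_count)  # 9
```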
@@ -135,7 +135,7 @@ During query operations, if data in object storage needs to be accessed, TSDB do
 
 Adjacent multiple data pages are downloaded as a single data block from object storage to reduce the number of downloads. The size of each data page is specified by the `tsdb_pagesize` parameter when creating the database, with a default of 4 KB.
 
-```math
+```text
 Download Count = Number of Data Blocks Needed for Query - Number of Cached Data Blocks
 ```
 
@@ -155,7 +155,7 @@ For deployment methods, please refer to the [Flexify](https://azuremarketplace.m
 
 In the configuration file /etc/taos/taos.cfg, add parameters for S3 access:
 
-```cfg
+```text
 s3EndPoint http //20.191.157.23,http://20.191.157.24,http://20.191.157.25
 s3AccessKey FLIOMMNL0:uhRNdeZMLD4wo,ABCIOMMN:uhRNdeZMD4wog,DEFOMMNL049ba:uhRNdeZMLD4wogXd
 s3BucketName td-test
@@ -140,7 +140,7 @@ Finally, click the "Create" button at the bottom left to save the rule.
 
 ## Write a Mock Test Program
 
-```javascript
+```js
 {{#include docs/examples/other/mock.js}}
 ```
 
@@ -95,7 +95,7 @@ curl http://localhost:8083/connectors
 
 If all components have started successfully, the following output will be displayed:
 
-```txt
+```text
 []
 ```
 
@@ -181,7 +181,7 @@ If the above command is executed successfully, the following output will be disp
 
 Prepare a text file with test data, content as follows:
 
-```txt title="test-data.txt"
+```text title="test-data.txt"
 meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
 meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
 meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
@@ -303,7 +303,7 @@ kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --t
 
 Output:
 
-```txt
+```text
 ......
 meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
 meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
@@ -60,7 +60,7 @@ Click `Save & Test` to test, if successful, it will prompt: `TDengine Data sourc
 
 For users using Grafana version 7.x or configuring with [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/), you can use the installation script on the Grafana server to automatically install the plugin and add the data source Provisioning configuration file.
 
-```sh
+```shell
 bash -c "$(curl -fsSL \
   https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
   -a http://localhost:6041 \
@@ -77,7 +77,7 @@ Save the script and execute `./install.sh --help` to view detailed help document
 
 Use the [`grafana-cli` command line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to install the plugin [installation](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation).
 
-```bash
+```shell
 grafana-cli plugins install tdengine-datasource
 # with sudo
 sudo -u grafana grafana-cli plugins install tdengine-datasource
@@ -85,7 +85,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource
 
 Alternatively, download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) to your local machine and unzip it into the Grafana plugins directory. Example command line download is as follows:
 
-```bash
+```shell
 GF_VERSION=3.5.1
 # from GitHub
 wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
@@ -95,13 +95,13 @@ wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tden
 
 For CentOS 7.2 operating system, unzip the plugin package into the /var/lib/grafana/plugins directory and restart Grafana.
 
-```bash
+```shell
 sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
 ```
 
 If Grafana is running in a Docker environment, you can use the following environment variable to set up automatic installation of the TDengine data source plugin:
 
-```bash
+```shell
 GF_INSTALL_PLUGINS=tdengine-datasource
 ```
 
@@ -120,7 +120,7 @@ Click `Save & Test` to test, if successful, it will prompt: `TDengine Data sourc
 
 Refer to [Grafana containerized installation instructions](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). Use the following command to start a container and automatically install the TDengine plugin:
 
-```bash
+```shell
 docker run -d \
   -p 3000:3000 \
   --name=grafana \
@@ -31,7 +31,7 @@ The following parameter descriptions and examples use `<content>` as a placehold
 
 In command line mode, taosX uses DSN to represent a data source (source or destination), a typical DSN is as follows:
 
-```bash
+```shell
 # url-like
 <driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<object>][?<p1>=<v1>[&<p2>=<v2>]]
 |------|------------|---|-----------|-----------|------|------|----------|-----------------------|
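Because the DSN is URL-like, it decomposes with a standard URL parser. A Python sketch; the concrete DSN below is an invented example:

```python
from urllib.parse import urlsplit, parse_qs

dsn = "taos+ws://root:taosdata@localhost:6041/test?timezone=UTC"  # invented example
parts = urlsplit(dsn)

driver, _, protocol = parts.scheme.partition("+")
print(driver, protocol or "(default)")  # taos ws
print(parts.username, parts.password)   # root taosdata
print(parts.hostname, parts.port)       # localhost 6041
print(parts.path.lstrip("/"))           # test  (the <object> part)
print(parse_qs(parts.query))            # {'timezone': ['UTC']}
```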
@@ -390,7 +390,7 @@ You can view the log files or use the `journalctl` command to view the logs of `
 
 The command to view logs under Linux using `journalctl` is as follows:
 
-```bash
+```shell
 journalctl -u taosx [-f]
 ```
 
@@ -572,7 +572,7 @@ uint32_t len: The binary length of this string (excluding `\0`).
 
 **Return Value**:
 
-``` c
+```c
 struct parser_resp_t {
     int e;   // 0 if success.
     void* p; // Success if contains.
@@ -589,7 +589,7 @@ When creation is successful, e = 0, p is the parser object.
 
 Parse the input payload and return the result in JSON format [u8]. The returned JSON will be fully decoded using the default JSON parser (expanding the root array and all objects).
 
-``` c
+```c
 const char* parser_mutate(
     void* parser,
     const uint8_t* in_ptr, uint32_t in_len,
@@ -26,7 +26,7 @@ The default configuration file for `Agent` is located at `/etc/taos/agent.toml`,
 
 As shown below:
 
-```TOML
+```toml
 # taosX service endpoint
 #
 #endpoint = "http://localhost:6055"
|
||||||
|
|
||||||
On Linux systems, the `Agent` can be started with the Systemd command:
|
On Linux systems, the `Agent` can be started with the Systemd command:
|
||||||
|
|
||||||
```bash
|
```shell
|
||||||
systemctl start taosx-agent
|
systemctl start taosx-agent
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@@ -95,6 +95,6 @@ You can view the log files or use the `journalctl` command to view the logs of t
 
 The command to view logs with `journalctl` on Linux is as follows:
 
-```bash
+```shell
 journalctl -u taosx-agent [-f]
 ```
@@ -143,13 +143,13 @@ For details on TDengine monitoring configuration, please refer to: [TDengine Mon
 
 After installation, please use the `systemctl` command to start the taoskeeper service process.
 
-```bash
+```shell
 systemctl start taoskeeper
 ```
 
 Check if the service is working properly:
 
-```bash
+```shell
 systemctl status taoskeeper
 ```
 
@@ -261,7 +261,7 @@ Query OK, 14 row(s) in set (0.006542s)
 
 You can view the most recent report record of a supertable, such as:
 
-``` shell
+```shell
 taos> select last_row(*) from taosd_dnodes_info;
       last_row(_ts)       | last_row(disk_engine) | last_row(system_net_in) | last_row(vnodes_num) | last_row(system_net_out) | last_row(uptime) | last_row(has_mnode) | last_row(io_read_disk) | last_row(error_log_count) | last_row(io_read) | last_row(cpu_cores) | last_row(has_qnode) | last_row(has_snode) | last_row(disk_total) | last_row(mem_engine) | last_row(info_log_count) | last_row(cpu_engine) | last_row(io_write_disk) | last_row(debug_log_count) | last_row(disk_used) | last_row(mem_total) | last_row(io_write) | last_row(masters) | last_row(cpu_system) | last_row(trace_log_count) | last_row(mem_free) |
 ======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
@@ -14,7 +14,7 @@ taosExplorer does not require separate installation. Starting from TDengine vers
 
 Before starting taosExplorer, please make sure the content in the configuration file is correct.
 
-```TOML
+```toml
 # This is an automatically generated configuration file for Explorer in [TOML](https://toml.io/) format.
 #
 # Here is a full list of available options.
@@ -148,7 +148,7 @@ Description:
 
 Then start taosExplorer, you can directly execute taos-explorer in the command line or use the systemctl command:
 
-```bash
+```shell
 systemctl start taos-explorer  # Linux
 sc.exe start taos-explorer     # Windows
 ```
@@ -248,13 +248,13 @@ The new version of the plugin uses the Grafana unified alerting feature, the `-E
 
 Assuming you start the TDengine database on the host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script:
 
-```bash
+```shell
 ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
 ```
 
 If you want to monitor multiple TDengine clusters, you need to set up multiple TDinsight dashboards. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and if using the built-in SMS alert feature, `-N` and `-L` should also be changed.
 
-```bash
+```shell
 sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
 ```
 
@@ -10,7 +10,7 @@ The TDengine command line program (hereinafter referred to as TDengine CLI) is t
 
 To enter the TDengine CLI, simply execute `taos` in the terminal.
 
-```bash
+```shell
 taos
 ```
 
@@ -81,7 +81,7 @@ There are many other parameters:
 
 Example:
 
-```bash
+```shell
 taos -h h1.taos.com -s "use db; show tables;"
 ```
 
@@ -28,7 +28,7 @@ taosBenchmark supports comprehensive performance testing for TDengine, and the T
 
 Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration.
 
-```bash
+```shell
 taosBenchmark
 ```
 
@@ -38,7 +38,7 @@ When running without parameters, taosBenchmark by default connects to the TDengi
 
 When running taosBenchmark using command line parameters and controlling its behavior, the `-f <json file>` parameter cannot be used. All configuration parameters must be specified through the command line. Below is an example of using command line mode to test the write performance of taosBenchmark.
 
-```bash
+```shell
 taosBenchmark -I stmt -n 200 -t 100
 ```
 
@@ -50,7 +50,7 @@ The taosBenchmark installation package includes examples of configuration files,
 
 Use the following command line to run taosBenchmark and control its behavior through a configuration file.
 
-```bash
+```shell
 taosBenchmark -f <json file>
 ```
 
@@ -170,7 +170,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause
 
 alter_table_clause: {
     alter_table_options
-  | SET tag tag_name = new_tag_value,tag_name2=new_tag2_value...
+  | SET tag tag_name = new_tag_value, tag_name2=new_tag2_value ...
 }
 
 alter_table_options:
@@ -194,7 +194,7 @@ alter_table_option: {
 ### Modify Subtable Tag Value
 
 ```sql
-ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1,tag_name2=new_tag_value2...;
+ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...;
 ```
 
 ### Modify Table Lifespan
@@ -210,19 +210,19 @@ However, renaming individual columns is not supported for `first(*)`, `last(*)`,
 
 Retrieve all subtable names and related tag information from a supertable:
 
-```mysql
+```sql
 SELECT TAGS TBNAME, location FROM meters;
 ```
 
 It is recommended that users query the subtable tag information of supertables using the INS_TAGS system table under INFORMATION_SCHEMA, for example, to get all subtable names and tag values of the supertable meters:
 
-```mysql
+```sql
 SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tags WHERE stable_name='meters';
 ```
 
 Count the number of subtables under a supertable:
 
-```mysql
+```sql
 SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
 ```
 
@@ -385,7 +385,7 @@ SELECT CURRENT_USER();
 
 ### Syntax
 
-```txt
+```text
 WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_
 ```
 
@@ -403,7 +403,7 @@ The length of the regular match string cannot exceed 128 bytes. You can set and
 
 ### Syntax
 
-```txt
+```text
 CASE value WHEN compare_value THEN result [WHEN compare_value THEN result ...] [ELSE result] END
 CASE WHEN condition THEN result [WHEN condition THEN result ...] [ELSE result] END
 ```
@@ -493,7 +493,7 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
 
 ## UNION ALL Clause
 
-```txt title=Syntax
+```text title=Syntax
 SELECT ...
 UNION ALL SELECT ...
 [UNION ALL SELECT ...]
@@ -417,7 +417,7 @@ MOD(expr1, expr2)
 
 **Example**:
 
-``` sql
+```sql
 taos> select mod(10,3);
        mod(10,3) |
 ============================
@@ -454,7 +454,7 @@ RAND([seed])
 
 **Example**:
 
-``` sql
+```sql
 taos> select rand();
            rand() |
 ============================
@@ -28,13 +28,14 @@ In this document, it specifically refers to the internal levels of the second-le
 
 - Default compression algorithms list and applicable range for each data type
 
-| Data Type | Available Encoding Algorithms | Default Encoding Algorithm | Available Compression Algorithms|Default Compression Algorithm| Default Compression Level|
-| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:|
-| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
-| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
-|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium|
-|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium|
-|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium|
+| Data Type |Available Encoding Algorithms | Default Encoding Algorithm | Available Compression Algorithms | Default Compression Algorithm | Default Compression Level |
+|:------------------------------------:|:-------------------------:|:-----------:|:--------------------:|:----:|:------:|
+| int/uint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium |
+| tinyint/untinyint/smallint/usmallint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium |
+| bigint/ubigint/timestamp | disabled/simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium |
+| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
+| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
+| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
 
 ## SQL Syntax
 
@@ -108,7 +108,7 @@ For the source code of the example programs, please refer to: [Example Programs]
 
 The Data Source Name has a generic format, similar to [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without the type prefix (brackets indicate optional):
 
-``` text
+```text
 [username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...&paramN=valueN]
 ```
 
@@ -21,7 +21,7 @@ Below is an example using the `curl` tool in an Ubuntu environment (please confi
 
 The following example lists all databases, please replace `h1.tdengine.com` and 6041 (default value) with the actual running TDengine service FQDN and port number:
 
-```bash
+```shell
 curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
   -d "select name, ntables, status from information_schema.ins_databases;" \
   h1.tdengine.com:6041/rest/sql
@@ -100,13 +100,13 @@ The BODY of the HTTP request contains a complete SQL statement. The data table i
 
 Use `curl` to initiate an HTTP Request with custom authentication as follows:
 
-```bash
+```shell
 curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]]
 ```
 
 Or,
 
-```bash
+```shell
 curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]]
 ```
 
@@ -279,7 +279,7 @@ Column types use the following strings:
 
 Prepare data
 
-```bash
+```shell
 create database demo
 use demo
 create table t(ts timestamp,c1 varbinary(20),c2 geometry(100))
@@ -288,7 +288,7 @@ insert into t values(now,'\x7f8290','point(100 100)')
 
 Execute query
 
-```bash
+```shell
 curl --location 'http://<fqdn>:<port>/rest/sql' \
 --header 'Content-Type: text/plain' \
 --header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \
@@ -428,7 +428,7 @@ Data Query Return Example
 
 HTTP requests need to include an authorization code `<TOKEN>`, used for identity verification. The authorization code is usually provided by the administrator and can be simply obtained by sending an `HTTP GET` request as follows:
 
-```bash
+```shell
 curl http://<fqnd>:<port>/rest/login/<username>/<password>
 ```
 
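Fetching the token and then using it in the `Authorization: Taosd ...` header, sketched with Python's requests library (host, port, and credentials are the placeholders from the text; reading the token from the `desc` field is an assumption):

```python
import requests

host, port = "192.168.0.1", 6041  # placeholders from the example
login = requests.get(f"http://{host}:{port}/rest/login/root/taosdata")
token = login.json()["desc"]      # assumed field carrying the token

resp = requests.post(
    f"http://{host}:{port}/rest/sql",
    headers={"Authorization": f"Taosd {token}"},
    data="show databases",
)
print(resp.json())
```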
@@ -440,7 +440,7 @@ Here, `fqdn` is the FQDN or IP address of the TDengine database, `port` is the p
 
 Example of obtaining an authorization code:
 
-```bash
+```shell
 curl http://192.168.0.1:6041/rest/login/root/taosdata
 ```
 
@@ -457,7 +457,7 @@ Return value:
 
 - Query all records of table d1001 in the demo database:
 
-```bash
+```shell
 curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
 curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
 ```
@@ -509,7 +509,7 @@ Return value:
 
 - Create database demo:
 
-```bash
+```shell
 curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
 curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "create database demo" 192.168.0.1:6041/rest/sql
 ```
@@ -560,7 +560,7 @@ Return value:
 
 #### TDengine 2.x response codes and message bodies
 
-```JSON
+```json
 {
   "status": "succ",
   "head": [
@@ -624,7 +624,7 @@ Return value:
 
 #### TDengine 3.0 Response Codes and Message Body
 
-```JSON
+```json
 {
   "code": 0,
   "column_meta": [
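A 3.0-format response can be consumed generically by zipping the `column_meta` names against the `data` rows. A sketch; only the fields visible above (`code`, `column_meta`, plus the conventional `data` array) are assumed:

```python
def rows_as_dicts(resp: dict) -> list:
    """Turn a TDengine 3.0 REST response into a list of column->value dicts."""
    if resp.get("code") != 0:
        raise RuntimeError(f"query failed: {resp}")
    names = [meta[0] for meta in resp["column_meta"]]  # meta begins with the column name
    return [dict(zip(names, row)) for row in resp["data"]]

sample = {  # hypothetical response
    "code": 0,
    "column_meta": [["name", "VARCHAR", 64], ["ntables", "BIGINT", 8]],
    "data": [["power", 3]],
}
print(rows_as_dicts(sample))  # [{'name': 'power', 'ntables': 3}]
```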
@@ -90,7 +90,7 @@ Batch insertion. Each insert statement can insert multiple records into one tabl
 
 When inserting nchar type data containing Chinese characters on Windows, first ensure that the system's regional settings are set to China (this can be set in the Control Panel). At this point, the `taos` client in cmd should already be working properly; if developing a Java application in an IDE, such as Eclipse or IntelliJ, ensure that the file encoding in the IDE is set to GBK (which is the default encoding type for Java), then initialize the client configuration when creating the Connection, as follows:
 
-```JAVA
+```java
 Class.forName("com.taosdata.jdbc.TSDBDriver");
 Properties properties = new Properties();
 properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
@@ -145,7 +145,7 @@ Version 3.0 of TDengine includes a standalone component developed in Go called `
 
 The Go language version requirement is 1.14 or higher. If there are Go compilation errors, often due to issues accessing Go mod in China, they can be resolved by setting Go environment variables:
 
-```sh
+```shell
 go env -w GO111MODULE=on
 go env -w GOPROXY=https://goproxy.cn,direct
 ```
@@ -196,7 +196,7 @@ Here are the solutions:
 
 1. Create a file /Library/LaunchDaemons/limit.maxfiles.plist, write the following content (the example changes limit and maxfiles to 100,000, modify as needed):
 
-```plist
+```xml
 <?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
   "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
@ -68,19 +68,19 @@ dataDir /mnt/data6 2 0
|
||||||
|
|
||||||
在配置文件 /etc/taos/taos.cfg 中,添加用于 S3 访问的参数:
|
在配置文件 /etc/taos/taos.cfg 中,添加用于 S3 访问的参数:
|
||||||
|
|
||||||
|参数名称 | 参数含义 |
|
| 参数名称 | 参数含义 |
|
||||||
|:-------------|:-----------------------------------------------|
|
|:---------------------|:-----------------------------------------------|
|
||||||
|s3EndPoint | 用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 的保持一致,否则无法访问。 |
|
| s3EndPoint | 用户所在地域的 COS 服务域名,支持 http 和 https,bucket 的区域需要与 endpoint 的保持一致,否则无法访问。 |
|
||||||
|s3AccessKey |冒号分隔的用户 SecretId:SecretKey。例如:AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E |
|
| s3AccessKey | 冒号分隔的用户 SecretId:SecretKey。例如:AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E |
|
||||||
|s3BucketName | 存储桶名称,减号后面是用户注册 COS 服务的 AppId。其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔。参数值均为字符串类型,但不需要引号。例如:test0711-1309024725 |
|
| s3BucketName | 存储桶名称,减号后面是用户注册 COS 服务的 AppId。其中 AppId 是 COS 特有,AWS 和阿里云都没有,配置时需要作为 bucket name 的一部分,使用减号分隔。参数值均为字符串类型,但不需要引号。例如:test0711-1309024725 |
|
||||||
|s3UploadDelaySec | How long a data file must remain unchanged before it is uploaded to S3, in seconds. Minimum: 1; maximum: 2592000 (30 days); default: 60 |
|
| s3UploadDelaySec | How long a data file must remain unchanged before it is uploaded to S3, in seconds. Minimum: 1; maximum: 2592000 (30 days); default: 60 |
|
||||||
|s3PageCacheSize |Number of S3 page cache pages, in pages. Minimum: 4; maximum: 1024*1024*1024; default: 4096|
|
| s3PageCacheSize | Number of S3 page cache pages, in pages. Minimum: 4; maximum: 1024*1024*1024; default: 4096 |
|
||||||
|s3MigrateIntervalSec | Trigger interval for automatically uploading local data files to S3, in seconds. Minimum: 600; maximum: 100000; default: 3600 |
|
| s3MigrateIntervalSec | Trigger interval for automatically uploading local data files to S3, in seconds. Minimum: 600; maximum: 100000; default: 3600 |
|
||||||
|s3MigrateEnabled | Whether to perform S3 migration automatically. Default: 0 (automatic S3 migration disabled); set to 1 to enable. |
|
| s3MigrateEnabled | Whether to perform S3 migration automatically. Default: 0 (automatic S3 migration disabled); set to 1 to enable. |
|
||||||
|
|
||||||
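Putting the table together, a taos.cfg fragment might look like the following sketch (the endpoint and credentials are placeholders, not values from this commit):

```
s3EndPoint            https://cos.ap-beijing.myqcloud.com
s3AccessKey           <SecretId>:<SecretKey>
s3BucketName          test0711-1309024725
s3UploadDelaySec      60
s3MigrateIntervalSec  3600
s3MigrateEnabled      1
```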
### Verifying Configuration Parameters
|
### Verifying Configuration Parameters
|
||||||
|
|
||||||
After completing the s3 configuration in taos.cfg, you can check whether the configured S3 service is available with the checks3 option of the taosd command:
|
After completing the S3 configuration in taos.cfg, you can check whether the configured S3 service is available with the checks3 option of the taosd command:
|
||||||
|
|
||||||
```
|
```
|
||||||
taosd --checks3
|
taosd --checks3
|
||||||
|
@ -106,11 +106,11 @@ s3migrate database <db_name>;
|
||||||
|
|
||||||
The detailed DB parameters are listed in the table below:
|
The detailed DB parameters are listed in the table below:
|
||||||
|
|
||||||
| # | Parameter | Default | Minimum | Maximum | Description |
|
| # | Parameter | Default | Minimum | Maximum | Description |
|
||||||
| :--- | :----------- | :----- | :----- | :------ | :----------------------------------------------------------- |
|
|:--|:--------------|:-------|:------ |:------- | :----------------------------------------------------------- |
|
||||||
| 1 | s3_keeplocal | 365 | 1 | 365000 | Number of days data is kept locally, i.e. how long a data file stays on local disk before it may be uploaded to S3. Default unit: days; the units m (minutes), h (hours), and d (days) are supported |
|
| 1 | s3_keeplocal | 365 | 1 | 365000 | Number of days data is kept locally, i.e. how long a data file stays on local disk before it may be uploaded to S3. Default unit: days; the units m (minutes), h (hours), and d (days) are supported |
|
||||||
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | Size threshold for uploaded objects; like the tsdb_pagesize parameter, it cannot be modified. Unit: TSDB pages |
|
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | Size threshold for uploaded objects; like the tsdb_pagesize parameter, it cannot be modified. Unit: TSDB pages |
|
||||||
| 3 | s3_compact | 1 | 0 | 1 | Whether the compact operation is performed automatically when a TSDB file group is first uploaded to S3. |
|
| 3 | s3_compact | 1 | 0 | 1 | Whether the compact operation is performed automatically when a TSDB file group is first uploaded to S3 |
|
||||||
|
|
||||||
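As a hedged illustration of these options (the database name is made up; `s3migrate database` appears in the hunk header above, and the option names follow the table, though the exact CREATE DATABASE spelling is an assumption):

```sql
-- Keep 30 days of data locally before it becomes eligible for S3 upload,
-- and disable compaction on first upload (assumed option syntax).
CREATE DATABASE test_s3 S3_KEEPLOCAL 30d S3_COMPACT 0;
-- Trigger migration manually instead of waiting for s3MigrateIntervalSec.
s3migrate database test_s3;
```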
### Estimating Object Storage Reads and Writes
|
### Estimating Object Storage Reads and Writes
|
||||||
|
|
||||||
|
@ -168,10 +168,10 @@ s3BucketName td-test
|
||||||
|
|
||||||
The user interface is the same as for S3; the difference lies in the configuration of the following three parameters:
|
The user interface is the same as for S3; the difference lies in the configuration of the following three parameters:
|
||||||
|
|
||||||
| # | Parameter | Example Value | Description |
|
| # | Parameter | Example Value | Description |
|
||||||
| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- |
|
|:--|:-------------|:-----------------------------------------|:----------------------------------|
|
||||||
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
|
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
|
||||||
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | Colon-separated user accountId:accountKey |
|
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | Colon-separated user accountId:accountKey |
|
||||||
| 3 | s3BucketName | test-container | Container name |
|
| 3 | s3BucketName | test-container | Container name |
|
||||||
|
|
||||||
Here fd2d01c73 is the account ID; Microsoft Blob storage supports only the HTTPS protocol, not HTTP.
|
Here fd2d01c73 is the account ID; Microsoft Blob storage supports only the HTTPS protocol, not HTTP.
|
||||||
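Expressed as a taos.cfg fragment, using the example values from the table above:

```
s3EndPoint   https://fd2d01c73.blob.core.windows.net
s3AccessKey  fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg==
s3BucketName test-container
```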
|
|
Binary file not shown.
After Width: | Height: | Size: 212 KiB |
Binary file not shown.
After Width: | Height: | Size: 222 KiB |
Binary file not shown.
After Width: | Height: | Size: 95 KiB |
|
@ -145,6 +145,44 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源的使用情况和状态,
|
||||||
|
|
||||||
There are also line charts broken down by finer-grained dimensions of the categories above.
|
There are also line charts broken down by finer-grained dimensions of the categories above.
|
||||||
|
|
||||||
|
### Automatic Import of Preconfigured Alert Rules
|
||||||
|
|
||||||
|
Drawing on accumulated user experience, TAOS Data has compiled 14 commonly used alert rules that monitor key cluster metrics and promptly report anomalies, threshold violations, and similar alert conditions.
|
||||||
|
Starting with TDengine-server 3.3.4.3 (tdengine-datasource 3.6.3), TDengine Datasource supports automatic import of preconfigured alert rules: the 14 alert rules can be imported into Grafana (version 11 or later) with one click and used directly.
|
||||||
|
The import procedure is shown in the figure below. On the tdengine-datasource settings page, turn on the "Load TDengine Alert" switch and click the "Save & test" button; the plugin then loads the alert rules automatically and places them in a Grafana alert folder named after the data source plus "-alert". If they are not needed, turn off the "Load TDengine Alert" switch and click the button next to "Clear TDengine Alert" to remove all alert rules imported for this data source.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
After importing, click "Alert rules" in the left-hand Grafana menu to view all current alert rules.
|
||||||
|
Users only need to configure contact points to receive alert notifications. For how to configure contact points, see [Alert Configuration](https://docs.taosdata.com/third-party/visual/grafana/#%E5%91%8A%E8%AD%A6%E9%85%8D%E7%BD%AE).
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
The 14 alert rules are configured as follows:
|
||||||
|
|
||||||
|
| Rule Name | Threshold | Behavior When No Data | Scan Interval | Duration | SQL |
|
||||||
|
| ------ | --------- | ---------------- | ----------- |------- |----------------------|
|
||||||
|
|CPU load of a dnode|Average > 80%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(cpu_system) as cup_use from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `|
|
||||||
|
|Memory usage of a dnode|Average > 60%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(mem_engine) / last(mem_total) * 100 as taosd from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts <now partition by dnode_id`|
|
||||||
|
|Disk usage of a dnode| > 80%|Trigger alert|5 minutes|5 minutes|`select now(), dnode_id, data_dir_level, data_dir_name, last(used) / last(total) * 100 as used from log.taosd_dnodes_data_dirs where _ts >= (now - 5m) and _ts < now partition by dnode_id, data_dir_level, data_dir_name`|
|
||||||
|
|Cluster license expiry|< 60 days|Trigger alert|1 day|0 seconds|`select now(), cluster_id, last(grants_expire_time) / 86400 as expire_time from log.taosd_cluster_info where _ts >= (now - 24h) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|
||||||
|
|Data points reaching the licensed limit|>= 90%|Trigger alert|1 day|0 seconds|`select now(), cluster_id, CASE WHEN max(grants_timeseries_total) > 0.0 THEN max(grants_timeseries_used) /max(grants_timeseries_total) * 100.0 ELSE 0.0 END AS result from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1m) > 0`|
|
||||||
|
|Concurrent query requests| > 100|No alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries`|
|
||||||
|
|Longest slow-query execution time (no time window)|> 300 seconds|No alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries where exec_usec>300000000`|
|
||||||
|
|dnode offline|total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(dnodes_total) - last(dnodes_alive) as dnode_offline from log.taosd_cluster_info where _ts >= (now -30s) and _ts < now partition by cluster_id having first(_ts) > 0`|
|
||||||
|
|vnode offline|total != alive|Trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(vnodes_total) - last(vnodes_alive) as vnode_offline from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|
||||||
|
|Data deletion requests|> 0|No alert|30 seconds|0 seconds|``select now(), count(`count`) as `delete_count` from log.taos_sql_req where sql_type = 'delete' and _ts >= (now -30s) and _ts < now``|
|
||||||
|
|Adapter RESTful request failures|> 5|No alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=0 and ts >= (now -30s) and ts < now``|
|
||||||
|
|Adapter WebSocket request failures|> 5|No alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=1 and ts >= (now -30s) and ts < now``|
|
||||||
|
|Missing dnode metric reports|< 3|Trigger alert|180 seconds|0 seconds|`select now(), cluster_id, count(*) as dnode_report from log.taosd_cluster_info where _ts >= (now -180s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1h) > 0`|
|
||||||
|
|dnode restart|max(update_time) > last(update_time)|Trigger alert|90 seconds|0 seconds|`select now(), dnode_id, max(uptime) - last(uptime) as dnode_restart from log.taosd_dnodes_info where _ts >= (now - 90s) and _ts < now partition by dnode_id`|
|
||||||
|
|
||||||
|
Users can take the above alert rules as a reference and adapt or extend them to their own business needs.
|
||||||
|
In Grafana 7.5 and earlier, Dashboards and Alert rules were a combined feature, while later versions separate the two. For compatibility with Grafana 7.5 and earlier, an Alert Used Only panel has been added to the TDinsight dashboard; it is needed only on Grafana 7.5 and earlier.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
## Upgrading
|
## Upgrading
|
||||||
You can upgrade in any of the following three ways:
|
You can upgrade in any of the following three ways:
|
||||||
- Using the GUI: if a new version is available, click update on the "TDengine Datasource" plugin page to upgrade.
|
- Using the GUI: if a new version is available, click update on the "TDengine Datasource" plugin page to upgrade.
|
||||||
|
@ -155,10 +193,11 @@ TDinsight 仪表盘旨在提供 TDengine 相关资源的使用情况和状态,
|
||||||
Depending on how TDinsight was installed, uninstall as follows:
|
Depending on how TDinsight was installed, uninstall as follows:
|
||||||
- Using the GUI: click "Uninstall" on the "TDengine Datasource" plugin page.
|
- Using the GUI: click "Uninstall" on the "TDengine Datasource" plugin page.
|
||||||
- For TDinsight installed via the `TDinsight.sh` script, run `TDinsight.sh -R` on the command line to clean up the related resources.
|
- For TDinsight installed via the `TDinsight.sh` script, run `TDinsight.sh -R` on the command line to clean up the related resources.
|
||||||
- For manually installed TDinsight, a complete uninstall requires cleaning up the following:
|
- For manually installed TDinsight, a complete uninstall requires cleaning up the following, in order:
|
||||||
1. The TDinsight Dashboard in Grafana.
|
1. The TDinsight Dashboard in Grafana.
|
||||||
2. The Data Source in Grafana.
|
2. The Alert rules in Grafana.
|
||||||
3. Delete the `tdengine-datasource` plugin from the plugin installation directory.
|
3. The Data Source in Grafana.
|
||||||
|
4. Delete the `tdengine-datasource` plugin from the plugin installation directory.
|
||||||
|
|
||||||
## Appendix
|
## Appendix
|
||||||
|
|
||||||
|
|
|
@ -171,7 +171,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause
|
||||||
|
|
||||||
alter_table_clause: {
|
alter_table_clause: {
|
||||||
alter_table_options
|
alter_table_options
|
||||||
| SET TAG tag_name = new_tag_value,tag_name2=new_tag2_value...
|
| SET TAG tag_name = new_tag_value, tag_name2=new_tag2_value ...
|
||||||
}
|
}
|
||||||
|
|
||||||
alter_table_options:
|
alter_table_options:
|
||||||
|
@ -195,7 +195,7 @@ alter_table_option: {
|
||||||
### Changing the Tag Values of a Subtable
|
### Changing the Tag Values of a Subtable
|
||||||
|
|
||||||
```
|
```
|
||||||
ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1,tag_name2=new_tag_value2...;
|
ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...;
|
||||||
```
|
```
|
||||||
|
|
||||||
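A hedged usage example (the child table and tag names follow the common meters sample schema, not this document):

```sql
-- Update two tag values of child table d1001 in a single statement.
ALTER TABLE d1001 SET TAG location='beijing.chaoyang', groupId=2;
```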
### Changing the Table Lifecycle
|
### Changing the Table Lifecycle
|
||||||
|
|
|
@ -6,7 +6,7 @@ title: "删除数据"
|
||||||
|
|
||||||
Data deletion is a TDengine feature that removes records from a specified table or supertable within a specified time range, making it easy to clean up abnormal data produced by device failures and similar causes.
|
Data deletion is a TDengine feature that removes records from a specified table or supertable within a specified time range, making it easy to clean up abnormal data produced by device failures and similar causes.
|
||||||
|
|
||||||
**Note**: Deleting data does not immediately free the disk space occupied by the table. Instead, the data is marked as deleted; it will no longer appear in queries, but reclaiming the disk space is deferred until the system reorganizes the data automatically or the user does so manually.
|
**Note**: Deleting data does not immediately free the disk space occupied by the table. Instead, the data is marked as deleted; it will no longer appear in queries, but reclaiming the disk space is deferred until the system cleans up automatically (when the database keep parameter takes effect) or the user reorganizes the data manually (the enterprise-edition compact feature).
|
||||||
|
|
||||||
**Syntax:**
|
**Syntax:**
|
||||||
|
|
||||||
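The syntax block itself is truncated in this hunk; a hedged example of the feature described above (table name and cutoff time are illustrative):

```sql
-- Remove all records earlier than the cutoff from table d1001.
DELETE FROM d1001 WHERE ts < '2023-11-30 00:00:00';
```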
|
|
|
@ -1817,7 +1817,7 @@ ignore_null_values: {
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: Returns the recorded or interpolated value of the specified column at the specified time slice. The ignore_null_values parameter can be 0 or 1, where 1 means NULL values are ignored; the default is 0.
|
**Description**: Returns the recorded or interpolated value of the specified column at the specified time slice. The ignore_null_values parameter can be 0 or 1, where 1 means NULL values are ignored; the default is 0.
|
||||||
|
|
||||||
**Return type**: Same as the field type.
|
**Return type**: Same as the field type.
|
||||||
|
|
||||||
|
@ -1838,9 +1838,9 @@ ignore_null_values: {
|
||||||
- INTERP can be used together with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported since version 3.0.2.0).
|
- INTERP can be used together with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported since version 3.0.2.0).
|
||||||
- INTERP can be used together with the pseudocolumn _isfilled to indicate whether a returned row is an original record or data produced by the interpolation algorithm (supported since version 3.0.3.0).
|
- INTERP can be used together with the pseudocolumn _isfilled to indicate whether a returned row is an original record or data produced by the interpolation algorithm (supported since version 3.0.3.0).
|
||||||
- For INTERP queries on tables with a composite primary key, if rows share the same timestamp, only the row with the smallest composite primary key takes part in the calculation.
|
- For INTERP queries on tables with a composite primary key, if rows share the same timestamp, only the row with the smallest composite primary key takes part in the calculation.
|
||||||
- INTERP queries support the NEAR FILL mode: when filling is needed, the data closest to the current time point is used for interpolation; when the timestamps before and after are equally close to the current time slice, the previous row's value is filled. This mode is not supported in stream processing or window queries. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR) (supported since version 3.3.4.9).
|
- INTERP queries support the NEAR FILL mode: when filling is needed, the data closest to the current time point is used for interpolation; when the timestamps before and after are equally close to the current time slice, the previous row's value is filled. This mode is not supported in stream processing or window queries. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR) (supported since version 3.3.4.9).
|
||||||
- INTERP can use the pseudocolumn `_irowts_origin` only with the FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported since version 3.3.4.9.
|
- INTERP can use the pseudocolumn `_irowts_origin` only with the FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported since version 3.3.4.9.
|
||||||
- The INTERP `RANEG` clause supports extending the time range (supported since version 3.3.4.9). For example, `RANGE('2023-01-01 00:00:00', 10s)` means searching for data within 10s before and after the time point '2023-01-01 00:00:00' for interpolation. FILL PREV/NEXT/NEAR search backward, forward, or in both directions from the time point, respectively; if there is no data around the time point, the value specified by FILL is used, so the FILL clause must specify a value in this case. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). Currently only the combination of a time point and a time range is supported; the combination of a time interval and a time range is not, i.e. RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h) is not supported. The specified time range follows rules similar to EVERY: the unit cannot be year or month, the value cannot be 0, and it cannot be quoted. When this extension is used, FILL modes other than FILL PREV/NEXT/NEAR are not supported, and the EVERY clause cannot be specified.
|
- The INTERP `RANGE` clause supports extending the time range (supported since version 3.3.4.9). For example, `RANGE('2023-01-01 00:00:00', 10s)` means searching for data within 10s before and after the time point '2023-01-01 00:00:00' for interpolation. FILL PREV/NEXT/NEAR search backward, forward, or in both directions from the time point, respectively; if there is no data around the time point, the value specified by FILL is used, so the FILL clause must specify a value in this case. For example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). Currently only the combination of a time point and a time range is supported; the combination of a time interval and a time range is not, i.e. RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h) is not supported. The specified time range follows rules similar to EVERY: the unit cannot be year or month, the value cannot be 0, and it cannot be quoted. When this extension is used, FILL modes other than FILL PREV/NEXT/NEAR are not supported, and the EVERY clause cannot be specified.
|
||||||
|
|
||||||
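Two hedged examples combining the notes above (table and column names are illustrative):

```sql
-- NEAR fill over an explicit time range (3.3.4.9 and later).
SELECT _irowts, _isfilled, INTERP(current) FROM d1001
  RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') EVERY(1m) FILL(NEAR);
-- RANGE extension with a 10s search radius; FILL must carry a value here,
-- and EVERY cannot be specified.
SELECT _irowts, _irowts_origin, INTERP(current) FROM d1001
  RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1);
```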
### LAST
|
### LAST
|
||||||
|
|
||||||
|
|
|
@ -29,13 +29,15 @@ description: 可配置压缩算法
|
||||||
|
|
||||||
- Default encoding and compression algorithms, and their applicable ranges, for each data type
|
- Default encoding and compression algorithms, and their applicable ranges, for each data type
|
||||||
|
|
||||||
| Data Type | Available Encodings | Default Encoding | Available Compression | Default Compression | Default Level |
|
| Data Type | Available Encodings | Default Encoding | Available Compression | Default Compression | Default Level |
|
||||||
| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:|
|
|:------------------------------------:|:-------------------------:|:-----------:|:--------------------:|:----:|:------:|
|
||||||
| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
|
| int/uint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium |
|
||||||
| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
|
| tinyint/untinyint/smallint/usmallint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium |
|
||||||
|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium|
|
| bigint/ubigint/timestamp | disabled/simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium |
|
||||||
|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium|
|
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
|
||||||
|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium|
|
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
|
||||||
|
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
|
||||||
|
|
||||||
|
|
||||||
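A hedged example of overriding these defaults per column (the ENCODE/COMPRESS/LEVEL column options follow the SQL syntax section below; table and column names are illustrative):

```sql
-- Store a float column with tsz compression instead of the default,
-- keeping the default delta-d encoding.
CREATE TABLE sensors (
    ts  TIMESTAMP,
    val FLOAT ENCODE 'delta-d' COMPRESS 'tsz' LEVEL 'high'
);
```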
## SQL Syntax
|
## SQL Syntax
|
||||||
|
|
||||||
|
|
|
@ -52,6 +52,22 @@
|
||||||
|
|
||||||
#define TMQ_META_VERSION "1.0"
|
#define TMQ_META_VERSION "1.0"
|
||||||
|
|
||||||
|
static bool tmqAddJsonObjectItem(cJSON *object, const char *string, cJSON *item){
|
||||||
|
bool ret = cJSON_AddItemToObject(object, string, item);
|
||||||
|
if (!ret){
|
||||||
|
cJSON_Delete(item);
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
static bool tmqAddJsonArrayItem(cJSON *array, cJSON *item){
|
||||||
|
bool ret = cJSON_AddItemToArray(array, item);
|
||||||
|
if (!ret){
|
||||||
|
cJSON_Delete(item);
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen);
|
static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen);
|
||||||
static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); }
|
static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); }
|
||||||
static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t,
|
static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t,
|
||||||
|
@ -68,41 +84,43 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche
|
||||||
cJSON* type = cJSON_CreateString("create");
|
cJSON* type = cJSON_CreateString("create");
|
||||||
RAW_NULL_CHECK(type);
|
RAW_NULL_CHECK(type);
|
||||||
|
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
|
||||||
cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
|
cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
|
||||||
RAW_NULL_CHECK(tableType);
|
RAW_NULL_CHECK(tableType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
|
||||||
cJSON* tableName = cJSON_CreateString(name);
|
cJSON* tableName = cJSON_CreateString(name);
|
||||||
RAW_NULL_CHECK(tableName);
|
RAW_NULL_CHECK(tableName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
|
||||||
|
|
||||||
cJSON* columns = cJSON_CreateArray();
|
cJSON* columns = cJSON_CreateArray();
|
||||||
RAW_NULL_CHECK(columns);
|
RAW_NULL_CHECK(columns);
|
||||||
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "columns", columns));
|
||||||
|
|
||||||
for (int i = 0; i < schemaRow->nCols; i++) {
|
for (int i = 0; i < schemaRow->nCols; i++) {
|
||||||
cJSON* column = cJSON_CreateObject();
|
cJSON* column = cJSON_CreateObject();
|
||||||
RAW_NULL_CHECK(column);
|
RAW_NULL_CHECK(column);
|
||||||
|
RAW_FALSE_CHECK(tmqAddJsonArrayItem(columns, column));
|
||||||
SSchema* s = schemaRow->pSchema + i;
|
SSchema* s = schemaRow->pSchema + i;
|
||||||
cJSON* cname = cJSON_CreateString(s->name);
|
cJSON* cname = cJSON_CreateString(s->name);
|
||||||
RAW_NULL_CHECK(cname);
|
RAW_NULL_CHECK(cname);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "name", cname));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "name", cname));
|
||||||
cJSON* ctype = cJSON_CreateNumber(s->type);
|
cJSON* ctype = cJSON_CreateNumber(s->type);
|
||||||
RAW_NULL_CHECK(ctype);
|
RAW_NULL_CHECK(ctype);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "type", ctype));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "type", ctype));
|
||||||
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "length", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "length", cbytes));
|
||||||
} else if (s->type == TSDB_DATA_TYPE_NCHAR) {
|
} else if (s->type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "length", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "length", cbytes));
|
||||||
}
|
}
|
||||||
cJSON* isPk = cJSON_CreateBool(s->flags & COL_IS_KEY);
|
cJSON* isPk = cJSON_CreateBool(s->flags & COL_IS_KEY);
|
||||||
RAW_NULL_CHECK(isPk);
|
RAW_NULL_CHECK(isPk);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "isPrimarykey", isPk));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "isPrimarykey", isPk));
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToArray(columns, column));
|
|
||||||
|
|
||||||
if (pColCmprRow == NULL) {
|
if (pColCmprRow == NULL) {
|
||||||
continue;
|
continue;
|
||||||
|
@ -124,44 +142,44 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche
|
||||||
|
|
||||||
cJSON* encodeJson = cJSON_CreateString(encode);
|
cJSON* encodeJson = cJSON_CreateString(encode);
|
||||||
RAW_NULL_CHECK(encodeJson);
|
RAW_NULL_CHECK(encodeJson);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "encode", encodeJson));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "encode", encodeJson));
|
||||||
|
|
||||||
cJSON* compressJson = cJSON_CreateString(compress);
|
cJSON* compressJson = cJSON_CreateString(compress);
|
||||||
RAW_NULL_CHECK(compressJson);
|
RAW_NULL_CHECK(compressJson);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "compress", compressJson));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "compress", compressJson));
|
||||||
|
|
||||||
cJSON* levelJson = cJSON_CreateString(level);
|
cJSON* levelJson = cJSON_CreateString(level);
|
||||||
RAW_NULL_CHECK(levelJson);
|
RAW_NULL_CHECK(levelJson);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "level", levelJson));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "level", levelJson));
|
||||||
}
|
}
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "columns", columns));
|
|
||||||
|
|
||||||
cJSON* tags = cJSON_CreateArray();
|
cJSON* tags = cJSON_CreateArray();
|
||||||
RAW_NULL_CHECK(tags);
|
RAW_NULL_CHECK(tags);
|
||||||
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags));
|
||||||
|
|
||||||
for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
|
for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
|
||||||
cJSON* tag = cJSON_CreateObject();
|
cJSON* tag = cJSON_CreateObject();
|
||||||
RAW_NULL_CHECK(tag);
|
RAW_NULL_CHECK(tag);
|
||||||
|
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag));
|
||||||
SSchema* s = schemaTag->pSchema + i;
|
SSchema* s = schemaTag->pSchema + i;
|
||||||
cJSON* tname = cJSON_CreateString(s->name);
|
cJSON* tname = cJSON_CreateString(s->name);
|
||||||
RAW_NULL_CHECK(tname);
|
RAW_NULL_CHECK(tname);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname));
|
||||||
cJSON* ttype = cJSON_CreateNumber(s->type);
|
cJSON* ttype = cJSON_CreateNumber(s->type);
|
||||||
RAW_NULL_CHECK(ttype);
|
RAW_NULL_CHECK(ttype);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype));
|
||||||
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "length", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "length", cbytes));
|
||||||
} else if (s->type == TSDB_DATA_TYPE_NCHAR) {
|
} else if (s->type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "length", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "length", cbytes));
|
||||||
}
|
}
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag));
|
|
||||||
}
|
}
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags));
|
|
||||||
|
|
||||||
end:
|
end:
|
||||||
*pJson = json;
|
*pJson = json;
|
||||||
|
@ -175,7 +193,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) {
|
||||||
RAW_NULL_CHECK(encodeStr);
|
RAW_NULL_CHECK(encodeStr);
|
||||||
cJSON* encodeJson = cJSON_CreateString(encodeStr);
|
cJSON* encodeJson = cJSON_CreateString(encodeStr);
|
||||||
RAW_NULL_CHECK(encodeJson);
|
RAW_NULL_CHECK(encodeJson);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "encode", encodeJson));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "encode", encodeJson));
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
uint8_t compress = COMPRESS_L2_TYPE_U32(para);
|
uint8_t compress = COMPRESS_L2_TYPE_U32(para);
|
||||||
|
@ -184,7 +202,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) {
|
||||||
RAW_NULL_CHECK(compressStr);
|
RAW_NULL_CHECK(compressStr);
|
||||||
cJSON* compressJson = cJSON_CreateString(compressStr);
|
cJSON* compressJson = cJSON_CreateString(compressStr);
|
||||||
RAW_NULL_CHECK(compressJson);
|
RAW_NULL_CHECK(compressJson);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "compress", compressJson));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "compress", compressJson));
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
uint8_t level = COMPRESS_L2_TYPE_LEVEL_U32(para);
|
uint8_t level = COMPRESS_L2_TYPE_LEVEL_U32(para);
|
||||||
|
@ -193,7 +211,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) {
|
||||||
RAW_NULL_CHECK(levelStr);
|
RAW_NULL_CHECK(levelStr);
|
||||||
cJSON* levelJson = cJSON_CreateString(levelStr);
|
cJSON* levelJson = cJSON_CreateString(levelStr);
|
||||||
RAW_NULL_CHECK(levelJson);
|
RAW_NULL_CHECK(levelJson);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "level", levelJson));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "level", levelJson));
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -214,19 +232,19 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
|
||||||
RAW_NULL_CHECK(json);
|
RAW_NULL_CHECK(json);
|
||||||
cJSON* type = cJSON_CreateString("alter");
|
cJSON* type = cJSON_CreateString("alter");
|
||||||
RAW_NULL_CHECK(type);
|
RAW_NULL_CHECK(type);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
|
||||||
SName name = {0};
|
SName name = {0};
|
||||||
RAW_RETURN_CHECK(tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE));
|
RAW_RETURN_CHECK(tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE));
|
||||||
cJSON* tableType = cJSON_CreateString("super");
|
cJSON* tableType = cJSON_CreateString("super");
|
||||||
RAW_NULL_CHECK(tableType);
|
RAW_NULL_CHECK(tableType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
|
||||||
cJSON* tableName = cJSON_CreateString(name.tname);
|
cJSON* tableName = cJSON_CreateString(name.tname);
|
||||||
RAW_NULL_CHECK(tableName);
|
RAW_NULL_CHECK(tableName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
|
||||||
|
|
||||||
cJSON* alterType = cJSON_CreateNumber(req.alterType);
|
cJSON* alterType = cJSON_CreateNumber(req.alterType);
|
||||||
RAW_NULL_CHECK(alterType);
|
RAW_NULL_CHECK(alterType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "alterType", alterType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "alterType", alterType));
|
||||||
switch (req.alterType) {
|
switch (req.alterType) {
|
||||||
case TSDB_ALTER_TABLE_ADD_TAG:
|
case TSDB_ALTER_TABLE_ADD_TAG:
|
||||||
case TSDB_ALTER_TABLE_ADD_COLUMN: {
|
case TSDB_ALTER_TABLE_ADD_COLUMN: {
|
||||||
|
@ -234,22 +252,22 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
|
||||||
RAW_NULL_CHECK(field);
|
RAW_NULL_CHECK(field);
|
||||||
cJSON* colName = cJSON_CreateString(field->name);
|
cJSON* colName = cJSON_CreateString(field->name);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
cJSON* colType = cJSON_CreateNumber(field->type);
|
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||||
RAW_NULL_CHECK(colType);
|
RAW_NULL_CHECK(colType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
|
||||||
|
|
||||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
|
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
|
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -258,22 +276,22 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
|
||||||
RAW_NULL_CHECK(field);
|
RAW_NULL_CHECK(field);
|
||||||
cJSON* colName = cJSON_CreateString(field->name);
|
cJSON* colName = cJSON_CreateString(field->name);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
cJSON* colType = cJSON_CreateNumber(field->type);
|
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||||
RAW_NULL_CHECK(colType);
|
RAW_NULL_CHECK(colType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
|
||||||
|
|
||||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
|
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
|
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
}
|
}
|
||||||
RAW_RETURN_CHECK(setCompressOption(json, field->compress));
|
RAW_RETURN_CHECK(setCompressOption(json, field->compress));
|
||||||
break;
|
break;
|
||||||
|
@ -284,7 +302,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
|
||||||
RAW_NULL_CHECK(field);
|
RAW_NULL_CHECK(field);
|
||||||
cJSON* colName = cJSON_CreateString(field->name);
|
cJSON* colName = cJSON_CreateString(field->name);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
|
case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
|
||||||
|
@ -293,21 +311,21 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
|
||||||
RAW_NULL_CHECK(field);
|
RAW_NULL_CHECK(field);
|
||||||
cJSON* colName = cJSON_CreateString(field->name);
|
cJSON* colName = cJSON_CreateString(field->name);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
cJSON* colType = cJSON_CreateNumber(field->type);
|
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||||
RAW_NULL_CHECK(colType);
|
RAW_NULL_CHECK(colType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
|
||||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
|
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
|
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -319,10 +337,10 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
|
||||||
RAW_NULL_CHECK(newField);
|
RAW_NULL_CHECK(newField);
|
||||||
cJSON* colName = cJSON_CreateString(oldField->name);
|
cJSON* colName = cJSON_CreateString(oldField->name);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
cJSON* colNewName = cJSON_CreateString(newField->name);
|
cJSON* colNewName = cJSON_CreateString(newField->name);
|
||||||
RAW_NULL_CHECK(colNewName);
|
RAW_NULL_CHECK(colNewName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colNewName", colNewName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colNewName", colNewName));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: {
|
case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: {
|
||||||
|
@ -330,7 +348,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
|
||||||
RAW_NULL_CHECK(field);
|
RAW_NULL_CHECK(field);
|
||||||
cJSON* colName = cJSON_CreateString(field->name);
|
cJSON* colName = cJSON_CreateString(field->name);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
RAW_RETURN_CHECK(setCompressOption(json, field->bytes));
|
RAW_RETURN_CHECK(setCompressOption(json, field->bytes));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -391,51 +409,47 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
||||||
int64_t id = pCreateReq->uid;
|
int64_t id = pCreateReq->uid;
|
||||||
uint8_t tagNum = pCreateReq->ctb.tagNum;
|
uint8_t tagNum = pCreateReq->ctb.tagNum;
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
cJSON* tags = NULL;
|
SArray* pTagVals = NULL;
|
||||||
|
char* pJson = NULL;
|
||||||
|
|
||||||
cJSON* tableName = cJSON_CreateString(name);
|
cJSON* tableName = cJSON_CreateString(name);
|
||||||
RAW_NULL_CHECK(tableName);
|
RAW_NULL_CHECK(tableName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
|
||||||
cJSON* using = cJSON_CreateString(sname);
|
cJSON* using = cJSON_CreateString(sname);
|
||||||
RAW_NULL_CHECK(using);
|
RAW_NULL_CHECK(using);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "using", using));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "using", using));
|
||||||
cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
|
cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
|
||||||
RAW_NULL_CHECK(tagNumJson);
|
RAW_NULL_CHECK(tagNumJson);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tagNum", tagNumJson));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tagNum", tagNumJson));
|
||||||
|
|
||||||
tags = cJSON_CreateArray();
|
cJSON* tags = cJSON_CreateArray();
|
||||||
RAW_NULL_CHECK(tags);
|
RAW_NULL_CHECK(tags);
|
||||||
SArray* pTagVals = NULL;
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags));
|
||||||
RAW_RETURN_CHECK(tTagToValArray(pTag, &pTagVals));
|
RAW_RETURN_CHECK(tTagToValArray(pTag, &pTagVals));
|
||||||
|
|
||||||
if (tTagIsJson(pTag)) {
|
if (tTagIsJson(pTag)) {
|
||||||
STag* p = (STag*)pTag;
|
STag* p = (STag*)pTag;
|
||||||
if (p->nTag == 0) {
|
if (p->nTag == 0) {
|
||||||
uError("p->nTag == 0");
|
uError("p->nTag == 0");
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
char* pJson = NULL;
|
|
||||||
parseTagDatatoJson(pTag, &pJson);
|
parseTagDatatoJson(pTag, &pJson);
|
||||||
if (pJson == NULL) {
|
RAW_NULL_CHECK(pJson);
|
||||||
uError("parseTagDatatoJson failed, pJson == NULL");
|
|
||||||
goto end;
|
|
||||||
}
|
|
||||||
cJSON* tag = cJSON_CreateObject();
|
cJSON* tag = cJSON_CreateObject();
|
||||||
RAW_NULL_CHECK(tag);
|
RAW_NULL_CHECK(tag);
|
||||||
|
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag));
|
||||||
STagVal* pTagVal = taosArrayGet(pTagVals, 0);
|
STagVal* pTagVal = taosArrayGet(pTagVals, 0);
|
||||||
RAW_NULL_CHECK(pTagVal);
|
RAW_NULL_CHECK(pTagVal);
|
||||||
char* ptname = taosArrayGet(tagName, 0);
|
char* ptname = taosArrayGet(tagName, 0);
|
||||||
RAW_NULL_CHECK(ptname);
|
RAW_NULL_CHECK(ptname);
|
||||||
cJSON* tname = cJSON_CreateString(ptname);
|
cJSON* tname = cJSON_CreateString(ptname);
|
||||||
RAW_NULL_CHECK(tname);
|
RAW_NULL_CHECK(tname);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname));
|
||||||
cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
|
cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
|
||||||
RAW_NULL_CHECK(ttype);
|
RAW_NULL_CHECK(ttype);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype));
|
||||||
cJSON* tvalue = cJSON_CreateString(pJson);
|
cJSON* tvalue = cJSON_CreateString(pJson);
|
||||||
RAW_NULL_CHECK(tvalue);
|
RAW_NULL_CHECK(tvalue);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "value", tvalue));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "value", tvalue));
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag));
|
|
||||||
taosMemoryFree(pJson);
|
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -444,36 +458,34 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
||||||
RAW_NULL_CHECK(pTagVal);
|
RAW_NULL_CHECK(pTagVal);
|
||||||
cJSON* tag = cJSON_CreateObject();
|
cJSON* tag = cJSON_CreateObject();
|
||||||
RAW_NULL_CHECK(tag);
|
RAW_NULL_CHECK(tag);
|
||||||
|
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag));
|
||||||
char* ptname = taosArrayGet(tagName, i);
|
char* ptname = taosArrayGet(tagName, i);
|
||||||
RAW_NULL_CHECK(ptname);
|
RAW_NULL_CHECK(ptname);
|
||||||
cJSON* tname = cJSON_CreateString(ptname);
|
cJSON* tname = cJSON_CreateString(ptname);
|
||||||
RAW_NULL_CHECK(tname);
|
RAW_NULL_CHECK(tname);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname));
|
||||||
cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
|
cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
|
||||||
RAW_NULL_CHECK(ttype);
|
RAW_NULL_CHECK(ttype);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype));
|
||||||
|
|
||||||
cJSON* tvalue = NULL;
|
cJSON* tvalue = NULL;
|
||||||
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
|
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
|
||||||
char* buf = NULL;
|
|
||||||
int64_t bufSize = 0;
|
int64_t bufSize = 0;
|
||||||
if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) {
|
if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) {
|
||||||
bufSize = pTagVal->nData * 2 + 2 + 3;
|
bufSize = pTagVal->nData * 2 + 2 + 3;
|
||||||
} else {
|
} else {
|
||||||
bufSize = pTagVal->nData + 3;
|
bufSize = pTagVal->nData + 3;
|
||||||
}
|
}
|
||||||
buf = taosMemoryCalloc(bufSize, 1);
|
char* buf = taosMemoryCalloc(bufSize, 1);
|
||||||
|
|
||||||
RAW_NULL_CHECK(buf);
|
RAW_NULL_CHECK(buf);
|
||||||
if (!buf) goto end;
|
|
||||||
if (dataConverToStr(buf, bufSize, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL) != TSDB_CODE_SUCCESS) {
|
if (dataConverToStr(buf, bufSize, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL) != TSDB_CODE_SUCCESS) {
|
||||||
taosMemoryFree(buf);
|
taosMemoryFree(buf);
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
|
|
||||||
tvalue = cJSON_CreateString(buf);
|
tvalue = cJSON_CreateString(buf);
|
||||||
RAW_NULL_CHECK(tvalue);
|
|
||||||
taosMemoryFree(buf);
|
taosMemoryFree(buf);
|
||||||
|
RAW_NULL_CHECK(tvalue);
|
||||||
} else {
|
} else {
|
||||||
double val = 0;
|
double val = 0;
|
||||||
GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
|
GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
|
||||||
|
@ -481,12 +493,11 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
||||||
RAW_NULL_CHECK(tvalue);
|
RAW_NULL_CHECK(tvalue);
|
||||||
}
|
}
|
||||||
|
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "value", tvalue));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "value", tvalue));
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
end:
|
end:
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags));
|
taosMemoryFree(pJson);
|
||||||
taosArrayDestroy(pTagVals);
|
taosArrayDestroy(pTagVals);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -497,22 +508,23 @@ static void buildCreateCTableJson(SVCreateTbReq* pCreateReq, int32_t nReqs, cJSO
|
||||||
RAW_NULL_CHECK(json);
|
RAW_NULL_CHECK(json);
|
||||||
cJSON* type = cJSON_CreateString("create");
|
cJSON* type = cJSON_CreateString("create");
|
||||||
RAW_NULL_CHECK(type);
|
RAW_NULL_CHECK(type);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
|
||||||
|
|
||||||
cJSON* tableType = cJSON_CreateString("child");
|
cJSON* tableType = cJSON_CreateString("child");
|
||||||
RAW_NULL_CHECK(tableType);
|
RAW_NULL_CHECK(tableType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
|
||||||
|
|
||||||
buildChildElement(json, pCreateReq);
|
buildChildElement(json, pCreateReq);
|
||||||
cJSON* createList = cJSON_CreateArray();
|
cJSON* createList = cJSON_CreateArray();
|
||||||
RAW_NULL_CHECK(createList);
|
RAW_NULL_CHECK(createList);
|
||||||
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "createList", createList));
|
||||||
|
|
||||||
for (int i = 0; nReqs > 1 && i < nReqs; i++) {
|
for (int i = 0; nReqs > 1 && i < nReqs; i++) {
|
||||||
cJSON* create = cJSON_CreateObject();
|
cJSON* create = cJSON_CreateObject();
|
||||||
RAW_NULL_CHECK(create);
|
RAW_NULL_CHECK(create);
|
||||||
buildChildElement(create, pCreateReq + i);
|
buildChildElement(create, pCreateReq + i);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToArray(createList, create));
|
RAW_FALSE_CHECK(tmqAddJsonArrayItem(createList, create));
|
||||||
}
|
}
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "createList", createList));
|
|
||||||
|
|
||||||
end:
|
end:
|
||||||
*pJson = json;
|
*pJson = json;
|
||||||
|
@ -619,62 +631,62 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
|
||||||
RAW_NULL_CHECK(json);
|
RAW_NULL_CHECK(json);
|
||||||
cJSON* type = cJSON_CreateString("alter");
|
cJSON* type = cJSON_CreateString("alter");
|
||||||
RAW_NULL_CHECK(type);
|
RAW_NULL_CHECK(type);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
|
||||||
cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ||
|
cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ||
|
||||||
vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL
|
vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL
|
||||||
? "child"
|
? "child"
|
||||||
: "normal");
|
: "normal");
|
||||||
RAW_NULL_CHECK(tableType);
|
RAW_NULL_CHECK(tableType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
|
||||||
cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
|
cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
|
||||||
RAW_NULL_CHECK(tableName);
|
RAW_NULL_CHECK(tableName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
|
||||||
cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
|
cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
|
||||||
RAW_NULL_CHECK(alterType);
|
RAW_NULL_CHECK(alterType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "alterType", alterType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "alterType", alterType));
|
||||||
|
|
||||||
switch (vAlterTbReq.action) {
|
switch (vAlterTbReq.action) {
|
||||||
case TSDB_ALTER_TABLE_ADD_COLUMN: {
|
case TSDB_ALTER_TABLE_ADD_COLUMN: {
|
||||||
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
||||||
RAW_NULL_CHECK(colType);
|
RAW_NULL_CHECK(colType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
|
||||||
|
|
||||||
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
|
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
} else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
|
} else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TSDB_ALTER_TABLE_ADD_COLUMN_WITH_COMPRESS_OPTION: {
|
case TSDB_ALTER_TABLE_ADD_COLUMN_WITH_COMPRESS_OPTION: {
|
||||||
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
||||||
RAW_NULL_CHECK(colType);
|
RAW_NULL_CHECK(colType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
|
||||||
|
|
||||||
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
|
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
} else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
|
} else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
}
|
}
|
||||||
RAW_RETURN_CHECK(setCompressOption(json, vAlterTbReq.compress));
|
RAW_RETURN_CHECK(setCompressOption(json, vAlterTbReq.compress));
|
||||||
break;
|
break;
|
||||||
|
@ -682,43 +694,43 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
|
||||||
case TSDB_ALTER_TABLE_DROP_COLUMN: {
|
case TSDB_ALTER_TABLE_DROP_COLUMN: {
|
||||||
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
|
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
|
||||||
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
|
||||||
RAW_NULL_CHECK(colName);
|
RAW_NULL_CHECK(colName);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
|
||||||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
|
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
|
||||||
RAW_NULL_CHECK(colType);
|
RAW_NULL_CHECK(colType);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
|
||||||
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY ||
|
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY ||
|
||||||
vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
|
vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
|
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
} else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
|
} else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
|
||||||
int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
RAW_NULL_CHECK(cbytes);
|
RAW_NULL_CHECK(cbytes);
|
||||||
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
|
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
|
||||||
}
|
}
|
||||||
```diff
       break;
     }
     case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
       cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
       RAW_NULL_CHECK(colName);
-      RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
+      RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
       cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName);
       RAW_NULL_CHECK(colNewName);
-      RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colNewName", colNewName));
+      RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colNewName", colNewName));
       break;
     }
     case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: {
       cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName);
       RAW_NULL_CHECK(tagName);
-      RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", tagName));
+      RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", tagName));

       bool isNull = vAlterTbReq.isNull;
       if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
@@ -757,12 +769,12 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
         cJSON* colValue = cJSON_CreateString(buf);
         taosMemoryFree(buf);
         RAW_NULL_CHECK(colValue);
-        RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValue", colValue));
+        RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colValue", colValue));
       }

       cJSON* isNullCJson = cJSON_CreateBool(isNull);
       RAW_NULL_CHECK(isNullCJson);
-      RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValueNull", isNullCJson));
+      RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colValueNull", isNullCJson));
       break;
     }
     case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: {
@@ -774,14 +786,17 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {

       cJSON* tags = cJSON_CreateArray();
       RAW_NULL_CHECK(tags);
+      RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags));

       for (int32_t i = 0; i < nTags; i++) {
         cJSON* member = cJSON_CreateObject();
         RAW_NULL_CHECK(member);
+        RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, member));

         SMultiTagUpateVal* pTagVal = taosArrayGet(vAlterTbReq.pMultiTag, i);
         cJSON* tagName = cJSON_CreateString(pTagVal->tagName);
         RAW_NULL_CHECK(tagName);
-        RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colName", tagName));
+        RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colName", tagName));

         if (pTagVal->tagType == TSDB_DATA_TYPE_JSON) {
           uError("processAlterTable isJson false");
@@ -789,14 +804,13 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
         }
         bool isNull = pTagVal->isNull;
         if (!isNull) {
-          char* buf = NULL;
           int64_t bufSize = 0;
           if (pTagVal->tagType == TSDB_DATA_TYPE_VARBINARY) {
             bufSize = pTagVal->nTagVal * 2 + 2 + 3;
           } else {
             bufSize = pTagVal->nTagVal + 3;
           }
-          buf = taosMemoryCalloc(bufSize, 1);
+          char* buf = taosMemoryCalloc(bufSize, 1);
           RAW_NULL_CHECK(buf);
           if (dataConverToStr(buf, bufSize, pTagVal->tagType, pTagVal->pTagVal, pTagVal->nTagVal, NULL) !=
               TSDB_CODE_SUCCESS) {
@@ -806,21 +820,19 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
           cJSON* colValue = cJSON_CreateString(buf);
           taosMemoryFree(buf);
           RAW_NULL_CHECK(colValue);
-          RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValue", colValue));
+          RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colValue", colValue));
         }
         cJSON* isNullCJson = cJSON_CreateBool(isNull);
         RAW_NULL_CHECK(isNullCJson);
-        RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValueNull", isNullCJson));
+        RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colValueNull", isNullCJson));
-        RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, member));
       }
-      RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags));
       break;
     }

     case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: {
       cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
       RAW_NULL_CHECK(colName);
-      RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
+      RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
       RAW_RETURN_CHECK(setCompressOption(json, vAlterTbReq.compress));
       break;
     }
```
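The `tmqAddJsonObjectItem` calls above replace bare `cJSON_AddItemToObject`; the helper itself is outside this hunk. A minimal sketch of what such a leak-safe wrapper presumably looks like, assuming a cJSON version (1.7.13 or later) whose add functions return a status and leave ownership of a rejected item with the caller:

```c
#include <stdbool.h>
#include "cJSON.h"

// Sketch only: if attaching fails, cJSON does not take ownership of the item,
// so free it here instead of relying on every call site to remember to.
static bool tmqAddJsonObjectItem(cJSON *object, const char *name, cJSON *item) {
  bool ok = cJSON_AddItemToObject(object, name, item);
  if (!ok) cJSON_Delete(item);  // reclaim the orphaned node
  return ok;
}

static bool tmqAddJsonArrayItem(cJSON *array, cJSON *item) {
  bool ok = cJSON_AddItemToArray(array, item);
  if (!ok) cJSON_Delete(item);
  return ok;
}
```

Since `RAW_FALSE_CHECK` jumps to `end:` on failure, a raw add call that failed would otherwise strand the freshly created node with no owner.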
```diff
@@ -858,13 +870,13 @@ static void processDropSTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
   RAW_NULL_CHECK(json);
   cJSON* type = cJSON_CreateString("drop");
   RAW_NULL_CHECK(type);
-  RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
+  RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
   cJSON* tableType = cJSON_CreateString("super");
   RAW_NULL_CHECK(tableType);
-  RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
+  RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
   cJSON* tableName = cJSON_CreateString(req.name);
   RAW_NULL_CHECK(tableName);
-  RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
+  RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));

 end:
   uDebug("processDropSTable return");
@@ -897,10 +909,10 @@ static void processDeleteTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
   RAW_NULL_CHECK(json);
   cJSON* type = cJSON_CreateString("delete");
   RAW_NULL_CHECK(type);
-  RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
+  RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
   cJSON* sqlJson = cJSON_CreateString(sql);
   RAW_NULL_CHECK(sqlJson);
-  RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", sqlJson));
+  RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "sql", sqlJson));

 end:
   uDebug("processDeleteTable return");
@@ -928,16 +940,17 @@ static void processDropTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
   RAW_NULL_CHECK(json);
   cJSON* type = cJSON_CreateString("drop");
   RAW_NULL_CHECK(type);
-  RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
+  RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
   cJSON* tableNameList = cJSON_CreateArray();
   RAW_NULL_CHECK(tableNameList);
+  RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableNameList", tableNameList));

   for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
     SVDropTbReq* pDropTbReq = req.pReqs + iReq;
     cJSON* tableName = cJSON_CreateString(pDropTbReq->name);
     RAW_NULL_CHECK(tableName);
-    RAW_FALSE_CHECK(cJSON_AddItemToArray(tableNameList, tableName));
+    RAW_FALSE_CHECK(tmqAddJsonArrayItem(tableNameList, tableName));
   }
-  RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableNameList", tableNameList));

 end:
   uDebug("processDropTable return");
@@ -2183,6 +2196,8 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) {
   RAW_FALSE_CHECK(cJSON_AddStringToObject(pJson, "tmq_meta_version", TMQ_META_VERSION));
   cJSON* pMetaArr = cJSON_CreateArray();
   RAW_NULL_CHECK(pMetaArr);
+  RAW_FALSE_CHECK(tmqAddJsonObjectItem(pJson, "metas", pMetaArr));

   int32_t num = taosArrayGetSize(rsp.batchMetaReq);
   for (int32_t i = 0; i < num; i++) {
     int32_t* len = taosArrayGet(rsp.batchMetaLen, i);
@@ -2198,10 +2213,9 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) {
     cJSON* pItem = NULL;
     processSimpleMeta(&metaRsp, &pItem);
     tDeleteMqMetaRsp(&metaRsp);
-    RAW_FALSE_CHECK(cJSON_AddItemToArray(pMetaArr, pItem));
+    RAW_FALSE_CHECK(tmqAddJsonArrayItem(pMetaArr, pItem));
   }

-  RAW_FALSE_CHECK(cJSON_AddItemToObject(pJson, "metas", pMetaArr));
   tDeleteMqBatchMetaRsp(&rsp);
   char* fullStr = cJSON_PrintUnformatted(pJson);
   cJSON_Delete(pJson);
```
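Note the recurring reordering in `processAlterTable`, `processDropTable`, and `processBatchMetaToJson`: arrays and members are now attached to their parent right after creation instead of after being filled. A self-contained sketch of the pattern, with plain cJSON calls standing in for the `RAW_*` macros (`buildDropTableJson` is a hypothetical helper for illustration):

```c
#include <stddef.h>
#include "cJSON.h"

// Attach containers to the root as soon as they exist; then one delete of the
// root on any failure path frees the whole tree, with nothing left dangling.
static cJSON *buildDropTableJson(const char **names, size_t n) {
  cJSON *json = cJSON_CreateObject();
  if (json == NULL) return NULL;
  cJSON *list = cJSON_AddArrayToObject(json, "tableNameList");  // owned by json immediately
  if (list == NULL) goto fail;
  for (size_t i = 0; i < n; i++) {
    cJSON *name = cJSON_CreateString(names[i]);
    if (name == NULL || !cJSON_AddItemToArray(list, name)) {
      cJSON_Delete(name);  // safe on NULL; everything else hangs off json
      goto fail;
    }
  }
  return json;
fail:
  cJSON_Delete(json);  // frees json, list, and all attached names
  return NULL;
}
```

Under goto-style error handling, attach-before-fill means an early exit between creating and populating a container can no longer leak it.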
```diff
@@ -52,10 +52,17 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans, bool t
 static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool topHalf);
 static bool mndTransPerformFinishStage(SMnode *pMnode, STrans *pTrans, bool topHalf);

-static bool mndCannotExecuteTransAction(SMnode *pMnode, bool topHalf) {
-  return (!pMnode->deploy && !mndIsLeader(pMnode)) || !topHalf;
+static inline bool mndTransIsInSyncContext(bool topHalf) { return !topHalf; }
+
+static bool mndCannotExecuteTrans(SMnode *pMnode, bool topHalf) {
+  bool isLeader = mndIsLeader(pMnode);
+  bool ret = (!pMnode->deploy && !isLeader) || mndTransIsInSyncContext(topHalf);
+  if (ret) mDebug("cannot execute trans action, deploy:%d, isLeader:%d, topHalf:%d", pMnode->deploy, isLeader, topHalf);
+  return ret;
 }

+static inline char *mndStrExecutionContext(bool topHalf) { return topHalf ? "transContext" : "syncContext"; }
+
 static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans);
 static int32_t mndProcessTransTimer(SRpcMsg *pReq);
 static int32_t mndProcessTtl(SRpcMsg *pReq);
@@ -1339,7 +1346,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi
 // execute in trans context
 static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction, bool topHalf) {
   if (pAction->msgSent) return 0;
-  if (mndCannotExecuteTransAction(pMnode, topHalf)) {
+  if (mndCannotExecuteTrans(pMnode, topHalf)) {
     TAOS_RETURN(TSDB_CODE_MND_TRANS_CTX_SWITCH);
   }

@@ -1485,8 +1492,8 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA
 static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) {
   int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->redoActions, topHalf);
   if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) {
-    mError("trans:%d, failed to execute redoActions since:%s, code:0x%x, topHalf(TransContext):%d", pTrans->id,
-           terrstr(), terrno, topHalf);
+    mError("trans:%d, failed to execute redoActions since:%s, code:0x%x, in %s", pTrans->id, terrstr(), terrno,
+           mndStrExecutionContext(topHalf));
   }
   return code;
 }
@@ -1494,8 +1501,8 @@ static int32_t mndTransExecuteRedoActions(SMnode *pMnode, STrans *pTrans, bool t
 static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool topHalf) {
   int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->undoActions, topHalf);
   if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) {
-    mError("trans:%d, failed to execute undoActions since %s. topHalf(TransContext):%d", pTrans->id, terrstr(),
-           topHalf);
+    mError("trans:%d, failed to execute undoActions since %s. in %s", pTrans->id, terrstr(),
+           mndStrExecutionContext(topHalf));
   }
   return code;
 }
@@ -1503,8 +1510,8 @@ static int32_t mndTransExecuteUndoActions(SMnode *pMnode, STrans *pTrans, bool t
 static int32_t mndTransExecuteCommitActions(SMnode *pMnode, STrans *pTrans, bool topHalf) {
   int32_t code = mndTransExecuteActions(pMnode, pTrans, pTrans->commitActions, topHalf);
   if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) {
-    mError("trans:%d, failed to execute commitActions since %s. topHalf(TransContext):%d", pTrans->id, terrstr(),
-           topHalf);
+    mError("trans:%d, failed to execute commitActions since %s. in %s", pTrans->id, terrstr(),
+           mndStrExecutionContext(topHalf));
   }
   return code;
 }
@@ -1524,7 +1531,7 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr
   for (int32_t action = pTrans->actionPos; action < numOfActions; ++action) {
     STransAction *pAction = taosArrayGet(pActions, action);

-    mInfo("trans:%d, current action:%d, stage:%s, actionType(0:log,1:msg):%d", pTrans->id, pTrans->actionPos,
+    mInfo("trans:%d, current action:%d, stage:%s, actionType(1:msg,2:log):%d", pTrans->id, pTrans->actionPos,
          mndTransStr(pAction->stage), pAction->actionType);

    code = mndTransExecSingleAction(pMnode, pTrans, pAction, topHalf);
@@ -1555,11 +1562,11 @@ static int32_t mndTransExecuteActionsSerial(SMnode *pMnode, STrans *pTrans, SArr
     }
     mndSetTransLastAction(pTrans, pAction);

-    if (mndCannotExecuteTransAction(pMnode, topHalf)) {
+    if (mndCannotExecuteTrans(pMnode, topHalf)) {
       pTrans->lastErrorNo = code;
       pTrans->code = code;
-      mInfo("trans:%d, %s:%d, topHalf(TransContext):%d, not execute next action, code:%s", pTrans->id,
-            mndTransStr(pAction->stage), action, topHalf, tstrerror(code));
+      mInfo("trans:%d, %s:%d, cannot execute next action in %s, code:%s", pTrans->id, mndTransStr(pAction->stage),
+            action, mndStrExecutionContext(topHalf), tstrerror(code));
       break;
     }

@@ -1660,20 +1667,21 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool
     code = mndTransExecuteRedoActions(pMnode, pTrans, topHalf);
   }

-  if (mndCannotExecuteTransAction(pMnode, topHalf)) {
+  if (code != 0 && code != TSDB_CODE_MND_TRANS_CTX_SWITCH && mndTransIsInSyncContext(topHalf)) {
     pTrans->lastErrorNo = code;
     pTrans->code = code;
-    bool continueExec = true;
-    if (code != 0 && code != TSDB_CODE_MND_TRANS_CTX_SWITCH) {
-      taosMsleep(100);
-      continueExec = true;
-    } else {
-      continueExec = false;
+    mInfo(
+        "trans:%d, failed to execute, will retry redo action stage in 100 ms , in %s, "
+        "continueExec:%d, code:%s",
+        pTrans->id, mndStrExecutionContext(topHalf), continueExec, tstrerror(code));
+    taosMsleep(100);
+    return true;
+  } else {
+    if (mndCannotExecuteTrans(pMnode, topHalf)) {
+      mInfo("trans:%d, cannot continue to execute redo action stage in %s, continueExec:%d, code:%s", pTrans->id,
+            mndStrExecutionContext(topHalf), continueExec, tstrerror(code));
+      return false;
     }
-    mInfo("trans:%d, cannot execute redo action stage, topHalf(TransContext):%d, continueExec:%d, code:%s", pTrans->id,
-          topHalf, continueExec, tstrerror(code));
-
-    return continueExec;
   }
   terrno = code;

@@ -1716,9 +1724,9 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans, bool
   return continueExec;
 }

-// in trans context
+// execute in trans context
 static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans, bool topHalf) {
-  if (mndCannotExecuteTransAction(pMnode, topHalf)) return false;
+  if (mndCannotExecuteTrans(pMnode, topHalf)) return false;

   bool continueExec = true;
   int32_t code = mndTransCommit(pMnode, pTrans);
@@ -1772,7 +1780,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans, bool
     code = mndTransExecuteUndoActions(pMnode, pTrans, topHalf);
   }

-  if (mndCannotExecuteTransAction(pMnode, topHalf)) return false;
+  if (mndCannotExecuteTrans(pMnode, topHalf)) return false;
   terrno = code;

   if (code == 0) {
@@ -1793,7 +1801,7 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans, bool

 // in trans context
 static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool topHalf) {
-  if (mndCannotExecuteTransAction(pMnode, topHalf)) return false;
+  if (mndCannotExecuteTrans(pMnode, topHalf)) return false;

   bool continueExec = true;
   int32_t code = mndTransRollback(pMnode, pTrans);
@@ -1810,8 +1818,9 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans, bool to
   return continueExec;
 }

+// excute in trans context
 static bool mndTransPerformPreFinishStage(SMnode *pMnode, STrans *pTrans, bool topHalf) {
-  if (mndCannotExecuteTransAction(pMnode, topHalf)) return false;
+  if (mndCannotExecuteTrans(pMnode, topHalf)) return false;

   bool continueExec = true;
   int32_t code = mndTransPreFinish(pMnode, pTrans);
@@ -1854,8 +1863,8 @@ void mndTransExecuteImp(SMnode *pMnode, STrans *pTrans, bool topHalf) {
   bool continueExec = true;

   while (continueExec) {
-    mInfo("trans:%d, continue to execute, stage:%s createTime:%" PRId64 " topHalf(TransContext):%d", pTrans->id,
-          mndTransStr(pTrans->stage), pTrans->createdTime, topHalf);
+    mInfo("trans:%d, continue to execute stage:%s in %s, createTime:%" PRId64 "", pTrans->id,
+          mndTransStr(pTrans->stage), mndStrExecutionContext(topHalf), pTrans->createdTime);
     pTrans->lastExecTime = taosGetTimestampMs();
     switch (pTrans->stage) {
       case TRN_STAGE_PREPARE:
```
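For readers outside the mnode module: `topHalf` says where the transaction scheduler is currently running, and the rename splits the old catch-all predicate into two named ones. A standalone model of the logic, with plain booleans standing in for `SMnode` and `mndIsLeader` (everything here is a simplification for illustration):

```c
#include <stdbool.h>
#include <stdio.h>

// topHalf == true  -> "transContext" (timer/worker thread on the mnode)
// topHalf == false -> "syncContext"  (raft log-apply callback)
static bool transIsInSyncContext(bool topHalf) { return !topHalf; }

// Actions may only be executed by a deploying mnode or by the leader,
// and only from the trans context.
static bool cannotExecuteTrans(bool deploy, bool isLeader, bool topHalf) {
  return (!deploy && !isLeader) || transIsInSyncContext(topHalf);
}

int main(void) {
  // Enumerate all combinations to show when execution is refused.
  for (int d = 0; d <= 1; d++)
    for (int l = 0; l <= 1; l++)
      for (int t = 0; t <= 1; t++)
        printf("deploy=%d leader=%d topHalf=%d -> cannotExecute=%d\n", d, l, t,
               cannotExecuteTrans(d, l, t));
  return 0;
}
```

The predicate itself is behavior-preserving; what the refactor adds is the `mDebug` trace on the deferral path and log lines that name the context (`transContext`/`syncContext`) instead of printing a bare `topHalf` flag. The redo-action stage additionally gains a retry-after-100 ms path for sync-context failures.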
```diff
@@ -612,10 +612,10 @@ static int32_t getIntegerFromAuthStr(const char* pStart, char** pNext) {
   return taosStr2Int32(buf, NULL, 10);
 }

-static void getStringFromAuthStr(const char* pStart, char* pStr, char** pNext) {
+static void getStringFromAuthStr(const char* pStart, char* pStr, uint32_t dstLen, char** pNext) {
   char* p = strchr(pStart, '*');
   if (NULL == p) {
-    tstrncpy(pStr, pStart, strlen(pStart) + 1);
+    tstrncpy(pStr, pStart, dstLen);
     *pNext = NULL;
   } else {
     strncpy(pStr, pStart, p - pStart);
@@ -628,10 +628,10 @@ static void getStringFromAuthStr(const char* pStart, char* pStr, char** pNext) {

 static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) {
   char* p = NULL;
-  getStringFromAuthStr(pStr, pUserAuth->user, &p);
+  getStringFromAuthStr(pStr, pUserAuth->user, TSDB_USER_LEN, &p);
   pUserAuth->tbName.acctId = getIntegerFromAuthStr(p, &p);
-  getStringFromAuthStr(p, pUserAuth->tbName.dbname, &p);
-  getStringFromAuthStr(p, pUserAuth->tbName.tname, &p);
+  getStringFromAuthStr(p, pUserAuth->tbName.dbname, TSDB_DB_NAME_LEN, &p);
+  getStringFromAuthStr(p, pUserAuth->tbName.tname, TSDB_TABLE_NAME_LEN, &p);
   if (pUserAuth->tbName.tname[0]) {
     pUserAuth->tbName.type = TSDB_TABLE_NAME_T;
   } else {
```
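The new `dstLen` parameter closes a latent overflow: the old call sized the copy by `strlen(pStart) + 1`, i.e. by the *source*, so an oversized auth token could write past the destination field. A small sketch of the corrected pattern, using `snprintf` as a portable stand-in for `tstrncpy` (the 8-byte buffer and the token are made up for illustration):

```c
#include <stdio.h>

// The callee now receives the destination capacity; the source length no
// longer dictates how many bytes get written.
static void copyToken(const char *src, char *dst, size_t dstLen) {
  snprintf(dst, dstLen, "%s", src);  // copies at most dstLen-1 bytes, always NUL-terminates
}

int main(void) {
  char user[8];  // plays the role of a TSDB_USER_LEN-sized field
  copyToken("a_very_long_user_name", user, sizeof(user));
  printf("%s\n", user);  // truncated to "a_very_" instead of overflowing
  return 0;
}
```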
```diff
@@ -91,11 +91,12 @@ static int32_t getSlotKey(SNode* pNode, const char* pStmtName, char** ppKey, int
     *pLen = taosHashBinary(*ppKey, strlen(*ppKey));
     return code;
   }
-  *ppKey = taosMemoryCalloc(1, strlen(pVal->literal) + 1 + TSDB_COL_NAME_LEN + 1 + extraBufLen);
+  int32_t literalLen = strlen(pVal->literal);
+  *ppKey = taosMemoryCalloc(1, literalLen + 1 + TSDB_COL_NAME_LEN + 1 + extraBufLen);
   if (!*ppKey) {
     return terrno;
   }
-  TAOS_STRNCAT(*ppKey, pVal->literal, strlen(pVal->literal));
+  TAOS_STRNCAT(*ppKey, pVal->literal, literalLen);
   TAOS_STRNCAT(*ppKey, ".", 2);
   TAOS_STRNCAT(*ppKey, ((SExprNode*)pNode)->aliasName, TSDB_COL_NAME_LEN);
   *pLen = taosHashBinary(*ppKey, strlen(*ppKey));
```
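The last hunk is a small cleanup rather than a fix: `strlen(pVal->literal)` was evaluated twice, once to size the allocation and once to bound the concatenation, and is now hoisted into `literalLen`. A sketch of the resulting shape (`buildSlotKey` and `COL_NAME_LEN` are hypothetical stand-ins for the real `getSlotKey` code and `TSDB_COL_NAME_LEN`):

```c
#include <stdlib.h>
#include <string.h>

enum { COL_NAME_LEN = 65 };  // stand-in for TSDB_COL_NAME_LEN

// Measure the literal once; reuse the length for the calloc size and the
// bounded strncat, mirroring the diff above.
static char *buildSlotKey(const char *literal, const char *aliasName, size_t extraBufLen) {
  size_t literalLen = strlen(literal);
  char  *key = calloc(1, literalLen + 1 + COL_NAME_LEN + 1 + extraBufLen);
  if (key == NULL) return NULL;
  strncat(key, literal, literalLen);
  strncat(key, ".", 2);
  strncat(key, aliasName, COL_NAME_LEN);
  return key;  // caller frees
}
```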