Merge branch '3.0' of https://github.com/taosdata/TDengine into feat/TS-4994-3.0

This commit is contained in:
Hongze Cheng 2024-12-13 13:54:45 +08:00
commit 8bf5f3f62b
287 changed files with 6846 additions and 3358 deletions

.gitignore vendored
View File

@ -99,6 +99,7 @@ tests/examples/JDBC/JDBCDemo/.classpath
tests/examples/JDBC/JDBCDemo/.project
tests/examples/JDBC/JDBCDemo/.settings/
source/libs/parser/inc/sql.*
source/os/src/timezone/
tests/script/tmqResult.txt
tests/system-test/case_to_run.txt
tests/develop-test/case_to_run.txt
@ -162,3 +163,12 @@ geos_c.h
source/libs/parser/src/sql.c
include/common/ttokenauto.h
!packaging/smokeTest/pytest_require.txt
tdengine-test-dir/
localtime.c
private.h
strftime.c
tzdir.h
tzfile.h
coverage.info
taos
taosd

View File

@ -0,0 +1,15 @@
# timezone
ExternalProject_Add(tz
GIT_REPOSITORY https://github.com/eggert/tz.git
GIT_TAG main
SOURCE_DIR "${TD_CONTRIB_DIR}/tz"
BINARY_DIR ""
CONFIGURE_COMMAND ""
#BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
GIT_SHALLOW true
GIT_PROGRESS true
BUILD_COMMAND ""
)

View File

@ -106,6 +106,10 @@ cat("${TD_SUPPORT_DIR}/zlib_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
# cJson
cat("${TD_SUPPORT_DIR}/cjson_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
if(NOT ${TD_WINDOWS})
cat("${TD_SUPPORT_DIR}/tz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(NOT ${TD_WINDOWS})
# xz
# cat("${TD_SUPPORT_DIR}/xz_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -651,6 +655,35 @@ if(${TD_LINUX} AND ${BUILD_WITH_S3})
add_subdirectory(azure-cmake EXCLUDE_FROM_ALL)
endif()
IF(TD_LINUX)
SET(TZ_OUTPUT_PATH /usr/share/zoneinfo)
ELSEIF(TD_DARWIN)
SET(TZ_OUTPUT_PATH /var/db/timezone/zoneinfo)
ENDIF()
if(NOT ${TD_WINDOWS})
MESSAGE(STATUS "timezone file path: " ${TZ_OUTPUT_PATH})
execute_process(
COMMAND make TZDIR=${TZ_OUTPUT_PATH}/ clean tzdir.h
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/tz"
)
set(TZ_SRC_DIR "${TD_SOURCE_DIR}/source/os/src/timezone")
file(REMOVE_RECURSE ${TZ_SRC_DIR})
file(MAKE_DIRECTORY ${TZ_SRC_DIR})
file(COPY ${TD_CONTRIB_DIR}/tz/private.h ${TD_CONTRIB_DIR}/tz/tzdir.h ${TD_CONTRIB_DIR}/tz/tzfile.h
${TD_CONTRIB_DIR}/tz/localtime.c ${TD_CONTRIB_DIR}/tz/strftime.c
DESTINATION ${TZ_SRC_DIR})
endif(NOT ${TD_WINDOWS})
#if(NOT ${TD_WINDOWS})
# execute_process(
# COMMAND make CFLAGS+=-fPIC CFLAGS+=-g TZDIR=${TZ_OUTPUT_PATH} clean libtz.a
# WORKING_DIRECTORY "${TD_CONTRIB_DIR}/tz"
# )
#endif(NOT ${TD_WINDOWS})
# ================================================================================================
# Build test
# ================================================================================================

View File

@ -112,14 +112,14 @@ Fill in the example data from the MQTT message body in **Message Body**.
JSON data supports JSONObject or JSONArray, and the json parser can parse the following data:
``` json
```json
{"id": 1, "message": "hello-word"}
{"id": 2, "message": "hello-word"}
```
or
``` json
```json
[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]
```

View File

@ -109,7 +109,7 @@ In addition, the [Kerberos](https://web.mit.edu/kerberos/) authentication servic
After configuration, you can use the [kcat](https://github.com/edenhill/kcat) tool to verify Kafka topic consumption:
```bash
```shell
kcat <topic> \
-b <kafka-server:port> \
-G kcat \
@ -171,14 +171,14 @@ Enter sample data from the Kafka message body in **Message Body**.
JSON data supports JSONObject or JSONArray, and the following data can be parsed using a JSON parser:
``` json
```json
{"id": 1, "message": "hello-word"}
{"id": 2, "message": "hello-word"}
```
or
``` json
```json
[{"id": 1, "message": "hello-word"},{"id": 2, "message": "hello-word"}]
```

View File

@ -83,7 +83,7 @@ Parsing is the process of parsing unstructured strings into structured data. The
JSON parsing supports JSONObject or JSONArray. The following JSON sample data can automatically parse fields: `groupid`, `voltage`, `current`, `ts`, `inuse`, `location`.
``` json
```json
{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}
{"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"}
{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}
@ -91,7 +91,7 @@ JSON parsing supports JSONObject or JSONArray. The following JSON sample data ca
Or
``` json
```json
[{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"},
{"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"},
{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}]
@ -101,7 +101,7 @@ Subsequent examples will only explain using JSONObject.
The following nested JSON data can automatically parse fields `groupid`, `data_voltage`, `data_current`, `ts`, `inuse`, `location_0_province`, `location_0_city`, `location_0_datun`, and you can also choose which fields to parse and set aliases for the parsed fields.
``` json
```json
{"groupid": 170001, "data": { "voltage": "221V", "current": 12.3 }, "ts": "2023-12-18T22:12:00", "inuse": true, "location": [{"province": "beijing", "city":"chaoyang", "street": "datun"}]}
```
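To make the flattening convention concrete, here is a minimal Python sketch (our illustration, not the actual taosX parser; the underscore separator and zero-based array indices are inferred from the field names above, and since the text lists `location_0_datun`, the real parser's naming of the street field may differ) that derives keys such as `data_voltage` and `location_0_province`:
```python
import json

def flatten(value, prefix=""):
    """Recursively flatten nested objects/arrays into underscore-joined keys."""
    flat = {}
    if isinstance(value, dict):
        for k, v in value.items():
            flat.update(flatten(v, f"{prefix}{k}_"))
    elif isinstance(value, list):
        for i, v in enumerate(value):
            flat.update(flatten(v, f"{prefix}{i}_"))
    else:
        flat[prefix[:-1]] = value  # drop the trailing separator
    return flat

sample = """{"groupid": 170001, "data": {"voltage": "221V", "current": 12.3},
"ts": "2023-12-18T22:12:00", "inuse": true,
"location": [{"province": "beijing", "city": "chaoyang", "street": "datun"}]}"""
print(flatten(json.loads(sample)))
# {'groupid': 170001, 'data_voltage': '221V', 'data_current': 12.3,
#  'ts': '2023-12-18T22:12:00', 'inuse': True,
#  'location_0_province': 'beijing', 'location_0_city': 'chaoyang',
#  'location_0_street': 'datun'}
```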
@ -114,7 +114,7 @@ The following nested JSON data can automatically parse fields `groupid`, `data_v
You can use **named capture groups** in regular expressions to extract multiple fields from any string (text) field. As shown in the figure, extract fields such as access IP, timestamp, and accessed URL from nginx logs.
``` re
```regex
(?<ip>\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b)\s-\s-\s\[(?<ts>\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2}\s\+\d{4})\]\s"(?<method>[A-Z]+)\s(?<url>[^\s"]+).*(?<status>\d{3})\s(?<length>\d+)
```
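To sanity-check the pattern, the following Python sketch applies it to a made-up log line (note that Python spells named groups `(?P<name>...)` rather than `(?<name>...)`):
```python
import re

# The same pattern as above, rewritten with Python's named-group syntax.
pattern = re.compile(
    r'(?P<ip>\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b)\s-\s-\s'
    r'\[(?P<ts>\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2}\s\+\d{4})\]\s'
    r'"(?P<method>[A-Z]+)\s(?P<url>[^\s"]+).*(?P<status>\d{3})\s(?P<length>\d+)'
)

# An invented nginx-style access log line, for illustration only.
line = '192.168.0.1 - - [18/Dec/2023:22:12:00 +0800] "GET /api/v1/meters HTTP/1.1" 200 512'
m = pattern.search(line)
if m:
    print(m.groupdict())
    # {'ip': '192.168.0.1', 'ts': '18/Dec/2023:22:12:00 +0800',
    #  'method': 'GET', 'url': '/api/v1/meters', 'status': '200', 'length': '512'}
```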
@ -133,7 +133,7 @@ Custom rhai syntax scripts for parsing input data (refer to `https://rhai.rs/boo
For example, for data reporting three-phase voltage values, which are entered into three subtables respectively, such data needs to be parsed
``` json
```json
{
"ts": "2024-06-27 18:00:00",
"voltage": "220.1,220.3,221.1",
@ -164,7 +164,7 @@ The final parsing result is shown below:
The parsed data may still not meet the data requirements of the target table. For example, the original data collected by a smart meter is as follows (in json format):
``` json
```json
{"groupid": 170001, "voltage": "221V", "current": 12.3, "ts": "2023-12-18T22:12:00", "inuse": true, "location": "beijing.chaoyang.datun"}
{"groupid": 170001, "voltage": "220V", "current": 12.2, "ts": "2023-12-18T22:12:02", "inuse": true, "location": "beijing.chaoyang.datun"}
{"groupid": 170001, "voltage": "216V", "current": 12.5, "ts": "2023-12-18T22:12:04", "inuse": false, "location": "beijing.chaoyang.datun"}

View File

@ -164,9 +164,6 @@ If you are using Maven to manage your project, simply add the following dependen
pip3 install taospy[ws]
```
</TabItem>
</Tabs>
- **Installation Verification**
<Tabs defaultValue="rest">
@ -199,8 +196,8 @@ import taosws
</TabItem>
</Tabs>
</TabItem>
<Tabs>
<TabItem label="Go" value="go">
Edit `go.mod` to add the `driver-go` dependency.

View File

@ -83,14 +83,14 @@ Next, create a supertable (STABLE) named `meters`, whose table structure include
Create Database
```bash
```shell
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
--data 'CREATE DATABASE IF NOT EXISTS power'
```
Create Table, specify the database as `power` in the URL
```bash
```shell
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql/power' \
--data 'CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))'
```
@ -167,7 +167,7 @@ NOW is an internal system function, defaulting to the current time of the client
Write data
```bash
```shell
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
--data 'INSERT INTO power.d1001 USING power.meters TAGS(2,'\''California.SanFrancisco'\'') VALUES (NOW + 1a, 10.30000, 219, 0.31000) (NOW + 2a, 12.60000, 218, 0.33000) (NOW + 3a, 12.30000, 221, 0.31000) power.d1002 USING power.meters TAGS(3, '\''California.SanFrancisco'\'') VALUES (NOW + 1a, 10.30000, 218, 0.25000)'
```
@ -247,7 +247,7 @@ Rust connector also supports using **serde** for deserializing to get structured
Query Data
```bash
```shell
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql' \
--data 'SELECT ts, current, location FROM power.meters limit 100'
```
@ -329,7 +329,7 @@ Below are code examples of setting reqId to execute SQL in various language conn
Query data, specify reqId as 3
```bash
```shell
curl --location -uroot:taosdata 'http://127.0.0.1:6041/rest/sql?req_id=3' \
--data 'SELECT ts, current, location FROM power.meters limit 1'
```

View File

@ -273,19 +273,19 @@ To better operate the above data structures, some convenience functions are prov
Create table:
```bash
```shell
create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
```
Create custom function:
```bash
```shell
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
```
Use custom function:
```bash
```shell
select max_vol(vol1, vol2, vol3, deviceid) from battery;
```
@ -334,7 +334,7 @@ When developing UDFs in Python, you need to implement the specified interface fu
The interface for scalar functions is as follows.
```Python
```python
def process(input: datablock) -> tuple[output_type]:
```
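For orientation, a minimal scalar UDF sketch is shown below. It assumes, as in the pybitand example later in this document, that the datablock object exposes `shape()` and `data(row, col)` accessors; the doubling logic is purely illustrative:
```python
# Hypothetical scalar UDF: doubles the values of its single input column.
# Assumes datablock exposes shape() -> (rows, cols) and data(row, col).
def init():
    pass  # no state needed for this example

def destroy():
    pass

def process(block):
    rows, _cols = block.shape()
    return tuple(block.data(i, 0) * 2 for i in range(rows))
```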
@ -347,7 +347,7 @@ The main parameters are as follows:
The interface for aggregate functions is as follows.
```Python
```python
def start() -> bytes:
def reduce(inputs: datablock, buf: bytes) -> bytes
def finish(buf: bytes) -> output_type:
@ -365,7 +365,7 @@ Finally, when all row data blocks have been processed, the finish function is ca
The interfaces for initialization and destruction are as follows.
```Python
```python
def init()
def destroy()
```
@ -381,7 +381,7 @@ Parameter description:
The template for developing scalar functions in Python is as follows.
```Python
```python
def init():
# initialization
def destroy():
@ -393,7 +393,7 @@ def process(input: datablock) -> tuple[output_type]:
The template for developing aggregate functions in Python is as follows.
```Python
```python
def init():
#initialization
def destroy():
@ -828,7 +828,7 @@ Through this example, we learned how to define aggregate functions and print cus
<details>
<summary>pybitand.py</summary>
```Python
```python
{{#include tests/script/sh/pybitand.py}}
```

View File

@ -15,7 +15,7 @@ TDengine is designed for various writing scenarios, and many of these scenarios
### Syntax
```SQL
```sql
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
SHOW COMPACTS [compact_id];
KILL COMPACT compact_id;
@ -41,7 +41,7 @@ KILL COMPACT compact_id;
When one or more nodes in a multi-replica cluster restart due to upgrades or other reasons, it may lead to an imbalance in the load among the various dnodes in the cluster. In extreme cases, all vgroup leaders may be located on the same dnode. To solve this problem, you can use the following commands, which were first released in version 3.0.4.0. It is recommended to use the latest version as much as possible.
```SQL
```sql
balance vgroup leader; # Rebalance all vgroup leaders
balance vgroup leader on <vgroup_id>; # Rebalance a vgroup leader
balance vgroup leader database <database_name>; # Rebalance all vgroup leaders within a database

View File

@ -121,7 +121,7 @@ The cost of using object storage services is related to the amount of data store
When the TSDB time-series data exceeds the time specified by the `s3_keeplocal` parameter, the related data files will be split into multiple file blocks, each with a default size of 512 MB (`s3_chunkpages * tsdb_pagesize`). Except for the last file block, which is retained on the local file system, the rest of the file blocks are uploaded to the object storage service.
```math
```text
Upload Count = Data File Size / (s3_chunkpages * tsdb_pagesize) - 1
```
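As a rough arithmetic sketch with the defaults above (the 2 GB data file size is an assumption chosen for illustration):
```python
# Illustrative only: estimate the S3 upload count for one data file.
s3_chunkpages = 131072                        # default, in TSDB pages
tsdb_pagesize = 4 * 1024                      # default 4 KB page, in bytes
chunk_bytes = s3_chunkpages * tsdb_pagesize   # 512 MB per file block

data_file_bytes = 2 * 1024**3                 # assume a 2 GB data file
uploads = data_file_bytes // chunk_bytes - 1  # the last block stays local
print(uploads)                                # -> 3
```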
@ -135,7 +135,7 @@ During query operations, if data in object storage needs to be accessed, TSDB do
Adjacent multiple data pages are downloaded as a single data block from object storage to reduce the number of downloads. The size of each data page is specified by the `tsdb_pagesize` parameter when creating the database, with a default of 4 KB.
```math
```text
Download Count = Number of Data Blocks Needed for Query - Number of Cached Data Blocks
```
@ -155,7 +155,7 @@ For deployment methods, please refer to the [Flexify](https://azuremarketplace.m
In the configuration file /etc/taos/taos.cfg, add parameters for S3 access:
```cfg
```text
s3EndPoint http //20.191.157.23,http://20.191.157.24,http://20.191.157.25
s3AccessKey FLIOMMNL0:uhRNdeZMLD4wo,ABCIOMMN:uhRNdeZMD4wog,DEFOMMNL049ba:uhRNdeZMLD4wogXd
s3BucketName td-test

View File

@ -12,19 +12,20 @@ TDengine is configured by default with only one root user, who has the highest p
Only the root user can perform the operation of creating users, with the syntax as follows.
```sql
create user user_name pass'password' [sysinfo {1|0}]
create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
```
The parameters are explained as follows.
- user_name: Up to 23 B long.
- password: Up to 128 B long, valid characters include letters and numbers as well as special characters other than single and double quotes, apostrophes, backslashes, and spaces, and it cannot be empty.
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, various node information such as dnode, query node (qnode), etc., as well as storage-related information, etc. The default is to view system information.
- createdb: Whether the user can create databases. 1 means they can create databases, 0 means they cannot. The default value is 0. // Supported starting from TDengine Enterprise version 3.3.2.0
The following SQL can create a user named test with the password 123456 who can view system information.
The following SQL can create a user named test with the password abc123!@# who can view system information.
```sql
create user test pass '123456' sysinfo 1
create user test pass 'abc123!@#' sysinfo 1
```
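The password rule above can be pre-checked on the client side; the following sketch is our illustration of the stated rule, not a TDengine utility:
```python
# Special characters accepted by the documented password rule.
SPECIALS = set("!@#$%^&*()-_+=[]{}:;><?|~,.")

def password_ok(pw: str) -> bool:
    """Pre-check the documented rule: 8-16 chars, >= 3 of 4 character classes."""
    if not (8 <= len(pw) <= 16):
        return False
    classes = (
        any(c.isupper() for c in pw),
        any(c.islower() for c in pw),
        any(c.isdigit() for c in pw),
        any(c in SPECIALS for c in pw),
    )
    return sum(classes) >= 3

print(password_ok("abc123!@#"))  # True: lowercase + digits + specials
print(password_ok("123456"))     # False: too short and only one class
```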
### Viewing Users
@ -51,6 +52,7 @@ alter_user_clause: {
pass 'literal'
| enable value
| sysinfo value
| createdb value
}
```
@ -59,6 +61,7 @@ The parameters are explained as follows.
- pass: Modify the user's password.
- enable: Whether to enable the user. 1 means to enable this user, 0 means to disable this user.
- sysinfo: Whether the user can view system information. 1 means they can view system information, 0 means they cannot.
- createdb: Whether the user can create databases. 1 means they can create databases, 0 means they cannot. // Supported starting from TDengine Enterprise version 3.3.2.0
The following SQL disables the user test.

View File

@ -140,7 +140,7 @@ Finally, click the "Create" button at the bottom left to save the rule.
## Write a Mock Test Program
```javascript
```js
{{#include docs/examples/other/mock.js}}
```

View File

@ -95,7 +95,7 @@ curl http://localhost:8083/connectors
If all components have started successfully, the following output will be displayed:
```txt
```text
[]
```
@ -181,7 +181,7 @@ If the above command is executed successfully, the following output will be disp
Prepare a text file with test data, content as follows:
```txt title="test-data.txt"
```text title="test-data.txt"
meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000
meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1648432611250000000
meters,location=California.LosAngeles,groupid=3 current=10.8,voltage=223,phase=0.29 1648432611249000000
@ -303,7 +303,7 @@ kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --t
Output:
```txt
```text
......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000

View File

@ -60,7 +60,7 @@ Click `Save & Test` to test, if successful, it will prompt: `TDengine Data sourc
For users using Grafana version 7.x or configuring with [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/), you can use the installation script on the Grafana server to automatically install the plugin and add the data source Provisioning configuration file.
```sh
```shell
bash -c "$(curl -fsSL \
https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
-a http://localhost:6041 \
@ -77,7 +77,7 @@ Save the script and execute `./install.sh --help` to view detailed help document
Use the [`grafana-cli` command line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to install the plugin [installation](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation).
```bash
```shell
grafana-cli plugins install tdengine-datasource
# with sudo
sudo -u grafana grafana-cli plugins install tdengine-datasource
@ -85,7 +85,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource
Alternatively, download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) to your local machine and unzip it into the Grafana plugins directory. Example command line download is as follows:
```bash
```shell
GF_VERSION=3.5.1
# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
@ -95,13 +95,13 @@ wget -O tdengine-datasource-$GF_VERSION.zip https://grafana.com/api/plugins/tden
For CentOS 7.2 operating system, unzip the plugin package into the /var/lib/grafana/plugins directory and restart Grafana.
```bash
```shell
sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
If Grafana is running in a Docker environment, you can use the following environment variable to set up automatic installation of the TDengine data source plugin:
```bash
```shell
GF_INSTALL_PLUGINS=tdengine-datasource
```
@ -120,7 +120,7 @@ Click `Save & Test` to test, if successful, it will prompt: `TDengine Data sourc
Refer to [Grafana containerized installation instructions](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). Use the following command to start a container and automatically install the TDengine plugin:
```bash
```shell
docker run -d \
-p 3000:3000 \
--name=grafana \

View File

@ -31,7 +31,7 @@ The following parameter descriptions and examples use `<content>` as a placehold
In command line mode, taosX uses DSN to represent a data source (source or destination), a typical DSN is as follows:
```bash
```shell
# url-like
<driver>[+<protocol>]://[[<username>:<password>@]<host>:<port>][/<object>][?<p1>=<v1>[&<p2>=<v2>]]
|------|------------|---|-----------|-----------|------|------|----------|-----------------------|
@ -390,7 +390,7 @@ You can view the log files or use the `journalctl` command to view the logs of `
The command to view logs under Linux using `journalctl` is as follows:
```bash
```shell
journalctl -u taosx [-f]
```
@ -572,7 +572,7 @@ uint32_t len: The binary length of this string (excluding `\0`).
**Return Value**:
``` c
```c
struct parser_resp_t {
int e; // 0 if success.
void* p; // Success if contains.
@ -589,7 +589,7 @@ When creation is successful, e = 0, p is the parser object.
Parse the input payload and return the result in JSON format [u8]. The returned JSON will be fully decoded using the default JSON parser (expanding the root array and all objects).
``` c
```c
const char* parser_mutate(
void* parser,
const uint8_t* in_ptr, uint32_t in_len,

View File

@ -26,7 +26,7 @@ The default configuration file for `Agent` is located at `/etc/taos/agent.toml`,
As shown below:
```TOML
```toml
# taosX service endpoint
#
#endpoint = "http://localhost:6055"
@ -83,7 +83,7 @@ You don't need to be confused about how to set up the configuration file. Read a
On Linux systems, the `Agent` can be started with the Systemd command:
```bash
```shell
systemctl start taosx-agent
```
@ -95,6 +95,6 @@ You can view the log files or use the `journalctl` command to view the logs of t
The command to view logs with `journalctl` on Linux is as follows:
```bash
```shell
journalctl -u taosx-agent [-f]
```

View File

@ -143,13 +143,13 @@ For details on TDengine monitoring configuration, please refer to: [TDengine Mon
After installation, please use the `systemctl` command to start the taoskeeper service process.
```bash
```shell
systemctl start taoskeeper
```
Check if the service is working properly:
```bash
```shell
systemctl status taoskeeper
```
@ -261,7 +261,7 @@ Query OK, 14 row(s) in set (0.006542s)
You can view the most recent report record of a supertable, such as:
``` shell
```shell
taos> select last_row(*) from taosd_dnodes_info;
last_row(_ts) | last_row(disk_engine) | last_row(system_net_in) | last_row(vnodes_num) | last_row(system_net_out) | last_row(uptime) | last_row(has_mnode) | last_row(io_read_disk) | last_row(error_log_count) | last_row(io_read) | last_row(cpu_cores) | last_row(has_qnode) | last_row(has_snode) | last_row(disk_total) | last_row(mem_engine) | last_row(info_log_count) | last_row(cpu_engine) | last_row(io_write_disk) | last_row(debug_log_count) | last_row(disk_used) | last_row(mem_total) | last_row(io_write) | last_row(masters) | last_row(cpu_system) | last_row(trace_log_count) | last_row(mem_free) |
======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================

View File

@ -14,7 +14,7 @@ taosExplorer does not require separate installation. Starting from TDengine vers
Before starting taosExplorer, please make sure the content in the configuration file is correct.
```TOML
```toml
# This is an automatically generated configuration file for Explorer in [TOML](https://toml.io/) format.
#
# Here is a full list of available options.
@ -148,7 +148,7 @@ Description:
Then start taosExplorer, you can directly execute taos-explorer in the command line or use the systemctl command:
```bash
```shell
systemctl start taos-explorer # Linux
sc.exe start taos-explorer # Windows
```

View File

@ -248,13 +248,13 @@ The new version of the plugin uses the Grafana unified alerting feature, the `-E
Assuming you start the TDengine database on the host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script:
```bash
```shell
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```
If you want to monitor multiple TDengine clusters, you need to set up multiple TDinsight dashboards. Setting up a non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and if using the built-in SMS alert feature, `-N` and `-L` should also be changed.
```bash
```shell
sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
```

View File

@ -10,7 +10,7 @@ The TDengine command line program (hereinafter referred to as TDengine CLI) is t
To enter the TDengine CLI, simply execute `taos` in the terminal.
```bash
```shell
taos
```
@ -81,7 +81,7 @@ There are many other parameters:
Example:
```bash
```shell
taos -h h1.taos.com -s "use db; show tables;"
```

View File

@ -28,7 +28,7 @@ taosBenchmark supports comprehensive performance testing for TDengine, and the T
Execute the following command to quickly experience taosBenchmark performing a write performance test on TDengine based on the default configuration.
```bash
```shell
taosBenchmark
```
@ -38,7 +38,7 @@ When running without parameters, taosBenchmark by default connects to the TDengi
When running taosBenchmark using command line parameters and controlling its behavior, the `-f <json file>` parameter cannot be used. All configuration parameters must be specified through the command line. Below is an example of using command line mode to test the write performance of taosBenchmark.
```bash
```shell
taosBenchmark -I stmt -n 200 -t 100
```
@ -50,7 +50,7 @@ The taosBenchmark installation package includes examples of configuration files,
Use the following command line to run taosBenchmark and control its behavior through a configuration file.
```bash
```shell
taosBenchmark -f <json file>
```

View File

@ -170,7 +170,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause
alter_table_clause: {
alter_table_options
| SET tag tag_name = new_tag_value,tag_name2=new_tag2_value...
| SET tag tag_name = new_tag_value, tag_name2=new_tag2_value ...
}
alter_table_options:
@ -194,7 +194,7 @@ alter_table_option: {
### Modify Subtable Tag Value
```sql
ALTER TABLE tb_name SET tag tag_name=new_tag_value;
ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...;
```
### Modify Table Lifespan

View File

@ -210,19 +210,19 @@ However, renaming individual columns is not supported for `first(*)`, `last(*)`,
Retrieve all subtable names and related tag information from a supertable:
```mysql
```sql
SELECT TAGS TBNAME, location FROM meters;
```
It is recommended that users query the subtable tag information of supertables using the INS_TAGS system table under INFORMATION_SCHEMA, for example, to get all subtable names and tag values of the supertable meters:
```mysql
```sql
SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tags WHERE stable_name='meters';
```
Count the number of subtables under a supertable:
```mysql
```sql
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
```
@ -385,7 +385,7 @@ SELECT CURRENT_USER();
### Syntax
```txt
```text
WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_
```
@ -403,7 +403,7 @@ The length of the regular match string cannot exceed 128 bytes. You can set and
### Syntax
```txt
```text
CASE value WHEN compare_value THEN result [WHEN compare_value THEN result ...] [ELSE result] END
CASE WHEN condition THEN result [WHEN condition THEN result ...] [ELSE result] END
```
@ -493,7 +493,7 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
## UNION ALL Clause
```txt title=Syntax
```text title=Syntax
SELECT ...
UNION ALL SELECT ...
[UNION ALL SELECT ...]

View File

@ -417,7 +417,7 @@ MOD(expr1, expr2)
**Example**:
``` sql
```sql
taos> select mod(10,3);
mod(10,3) |
============================
@ -454,7 +454,7 @@ RAND([seed])
**Example**:
``` sql
```sql
taos> select rand();
rand() |
============================

View File

@ -317,28 +317,29 @@ Configuration parameters for each dnode in the system. Users with SYSINFO attrib
Note: Users with SYSINFO property set to 0 cannot view this table.
| # | **Column Name** | **Data Type** | **Description** |
| --- | :-------------: | -------------- | ------------------------------------ |
| 1 | user_name | VARCHAR(24) | Username |
| 2 | privilege | VARCHAR(10) | Permission description |
| 3 | db_name | VARCHAR(65) | Database name |
| 4 | table_name | VARCHAR(193) | Table name |
| 5 | condition | VARCHAR(49152) | Subtable permission filter condition |
| # | **Column Name** | **Data Type** | **Description** |
|:-----|:----------------|:---------------|:-------------------------------------|
| 1 | user_name | VARCHAR(24) | Username |
| 2 | privilege | VARCHAR(10) | Permission description |
| 3 | db_name | VARCHAR(65) | Database name |
| 4 | table_name | VARCHAR(193) | Table name |
| 5 | condition | VARCHAR(49152) | Subtable permission filter condition |
## INS_DISK_USAGE
| # | **Column Name** | **Data type** | **Description**|
| --- | :----------: | ------------ | ------------------------|
| 1 | db_name | VARCHAR(32) | Database name
| 2 | vgroup_id | INT | vgroup ID
| 3 | wal | BIGINT | WAL file size, in KB
| 4 | data1 | BIGINT | Data file size on primary storage, in KB
| 5 | data2 | BIGINT | Data file size on secondary storage, in KB
| 6 | data3 | BIGINT | Data file size on tertiary storage, in KB
| 7 | cache_rdb | BIGINT | Size of last/last_row files, in KB
| 8 | table_meta | BIGINT | Size of meta files, in KB
| 9 | s3 | BIGINT | Size occupied on S3, in KB
| 10 | raw_data | BIGINT | Estimated size of raw data, in KB
| # | **Column Name** | **Data type** | **Description**|
|:----|:-----------|:-----------|:--------------------|
| 1 | db_name | VARCHAR(32) | Database name |
| 2 | vgroup_id | INT | vgroup ID |
| 3 | wal | BIGINT | WAL file size, in KB |
| 4 | data1 | BIGINT | Data file size on primary storage, in KB |
| 5 | data2 | BIGINT | Data file size on secondary storage, in KB |
| 6 | data3 | BIGINT | Data file size on tertiary storage, in KB |
| 7 | cache_rdb | BIGINT | Size of last/last_row files, in KB |
| 8 | table_meta | BIGINT | Size of meta files, in KB |
| 9 | s3 | BIGINT | Size occupied on S3, in KB |
| 10 | raw_data | BIGINT | Estimated size of raw data, in KB |
note:

View File

@ -8,19 +8,21 @@ User and permission management is a feature of TDengine Enterprise Edition. This
## Create User
```sql
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
```
The username can be up to 23 bytes long.
The password can be up to 31 bytes long. The password can include letters, numbers, and special characters except for single quotes, double quotes, backticks, backslashes, and spaces, and it cannot be an empty string.
The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
`SYSINFO` indicates whether the user can view system information. `1` means they can view, `0` means they have no permission to view. System information includes service configuration, dnode, vnode, storage, etc. The default value is `1`.
In the example below, we create a user with the password `123456` who can view system information.
`CREATEDB` indicates whether the user can create databases. `1` means they can create databases, `0` means they have no permission to create databases. The default value is `0`. // Supported starting from TDengine Enterprise version 3.3.2.0
In the example below, we create a user with the password `abc123!@#` who can view system information.
```sql
taos> create user test pass '123456' sysinfo 1;
taos> create user test pass 'abc123!@#' sysinfo 1;
Query OK, 0 of 0 rows affected (0.001254s)
```
@ -76,7 +78,7 @@ alter_user_clause: {
- PASS: Change the password, followed by the new password
- ENABLE: Enable or disable the user, `1` means enable, `0` means disable
- SYSINFO: Allow or prohibit viewing system information, `1` means allow, `0` means prohibit
- CREATEDB: Allow or prohibit creating databases, `1` means allow, `0` means prohibit
- CREATEDB: Allow or prohibit creating databases, `1` means allow, `0` means prohibit. // Supported starting from TDengine Enterprise version 3.3.2.0
The following example disables the user named `test`:

View File

@ -28,13 +28,14 @@ In this document, it specifically refers to the internal levels of the second-le
- Default compression algorithms list and applicable range for each data type
| Data Type | Available Encoding Algorithms | Default Encoding Algorithm | Available Compression Algorithms|Default Compression Algorithm| Default Compression Level|
| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:|
| tinyint/untinyint/smallint/usmallint/int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium|
|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| lz4| medium|
|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| lz4| medium|
| Data Type |Available Encoding Algorithms | Default Encoding Algorithm | Available Compression Algorithms | Default Compression Algorithm | Default Compression Level |
|:------------------------------------:|:-------------------------:|:-----------:|:--------------------:|:----:|:------:|
| int/uint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium |
| tinyint/untinyint/smallint/usmallint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium |
| bigint/ubigint/timestamp | disabled/simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium |
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
## SQL Syntax

View File

@ -688,6 +688,27 @@ The basic API is used to establish database connections and provide a runtime en
- `arg`: [Input] Setting item value.
- **Return Value**: `0`: Success, `-1`: Failure.
- `int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...)`
  - **Interface Description**: Sets per-connection options on the client side. Currently supported options are the character set (`TSDB_OPTION_CONNECTION_CHARSET`), time zone (`TSDB_OPTION_CONNECTION_TIMEZONE`), user IP (`TSDB_OPTION_CONNECTION_USER_IP`), and user application name (`TSDB_OPTION_CONNECTION_USER_APP`).
  - **Parameter Description**:
    - `taos`: [Input] Connection handle returned by `taos_connect`.
    - `option`: [Input] Option name.
    - `arg`: [Input] Option value.
  - **Return Value**: `0`: Success, non-zero: Failure.
  - **Notes**:
    - The character set and time zone default to the current operating system settings; Windows does not support connection-level time zone settings.
    - Passing NULL as `arg` resets the option.
    - This interface affects only the current connection and has no effect on other connections.
    - If the same option is set multiple times, the most recent value takes effect; repeated calls can therefore be used to modify a setting.
    - The `TSDB_OPTION_CONNECTION_CLEAR` option resets all connection options.
    - After the time zone and character set are reset, the operating system settings are used; the user IP and user application name are reset to empty.
    - All connection option values are strings; the user application name is limited to 23 characters and is truncated if longer. Illegal values for the other parameters produce an error.
    - If the time zone value cannot be used to locate a time zone file and cannot be interpreted as a direct specification, UTC is used, following the same rules as the operating system time zone (see the `tzset` function documentation for details). The current connection's time zone can be checked with the SQL statement `select timezone()`.
    - The time zone and character set take effect only on the client side and do not affect related behavior on the server side.
    - The time zone files are the operating system's and can be updated by the user. If an error occurs when setting the time zone, check that the time zone file or path (macOS: /var/db/timezone/zoneinfo, Linux: /usr/share/zoneinfo) is correct.
- `char *taos_get_client_info()`
- **Interface Description**: Gets client version information.
- **Return Value**: Returns client version information.

View File

@ -108,7 +108,7 @@ For the source code of the example programs, please refer to: [Example Programs]
The Data Source Name has a generic format, similar to [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without the type prefix (brackets indicate optional):
``` text
```text
[username[:password]@][protocol[(address)]]/[dbname][?param1=value1&...&paramN=valueN]
```
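As an illustration of this format, here is a rough Python sketch of how such a DSN decomposes (our illustration, not the connector's own parsing logic; the sample DSN and its parameter are made up):
```python
import re

# A rough regex for the generic DSN format above (illustrative only).
DSN_RE = re.compile(
    r"^(?:(?P<user>[^:@]+)(?::(?P<password>[^@]*))?@)?"      # username[:password]@
    r"(?:(?P<protocol>[^(/]+)(?:\((?P<address>[^)]*)\))?)?"  # protocol[(address)]
    r"/(?P<dbname>[^?]*)"                                    # /[dbname]
    r"(?:\?(?P<params>.*))?$"                                # ?param1=value1&...
)

m = DSN_RE.match("root:taosdata@ws(localhost:6041)/test?param1=value1")
print(m.groupdict())
# {'user': 'root', 'password': 'taosdata', 'protocol': 'ws',
#  'address': 'localhost:6041', 'dbname': 'test', 'params': 'param1=value1'}
```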

View File

@ -21,7 +21,7 @@ Below is an example using the `curl` tool in an Ubuntu environment (please confi
The following example lists all databases, please replace `h1.tdengine.com` and 6041 (default value) with the actual running TDengine service FQDN and port number:
```bash
```shell
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
-d "select name, ntables, status from information_schema.ins_databases;" \
h1.tdengine.com:6041/rest/sql
@ -100,13 +100,13 @@ The BODY of the HTTP request contains a complete SQL statement. The data table i
Use `curl` to initiate an HTTP Request with custom authentication as follows:
```bash
```shell
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]]
```
Or,
```bash
```shell
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id][&row_with_meta=true]]
```
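The same request can also be issued from Python with the requests library; a minimal sketch (host, port, and credentials are placeholders for your deployment):
```python
import requests

# Placeholders: adjust host, port, and credentials for your deployment.
url = "http://127.0.0.1:6041/rest/sql"
resp = requests.post(
    url,
    data="select name, ntables, status from information_schema.ins_databases",
    auth=("root", "taosdata"),  # equivalent to curl -u username:password
    headers={"Content-Type": "text/plain"},
)
resp.raise_for_status()
print(resp.json())  # {'code': 0, 'column_meta': [...], 'data': [...], 'rows': N}
```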
@ -279,7 +279,7 @@ Column types use the following strings:
Prepare data
```bash
```shell
create database demo
use demo
create table t(ts timestamp,c1 varbinary(20),c2 geometry(100))
@ -288,7 +288,7 @@ insert into t values(now,'\x7f8290','point(100 100)')
Execute query
```bash
```shell
curl --location 'http://<fqdn>:<port>/rest/sql' \
--header 'Content-Type: text/plain' \
--header 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' \
@ -428,7 +428,7 @@ Data Query Return Example
HTTP requests need to include an authorization code `<TOKEN>`, used for identity verification. The authorization code is usually provided by the administrator and can be simply obtained by sending an `HTTP GET` request as follows:
```bash
```shell
curl http://<fqnd>:<port>/rest/login/<username>/<password>
```
@ -440,7 +440,7 @@ Here, `fqdn` is the FQDN or IP address of the TDengine database, `port` is the p
Example of obtaining an authorization code:
```bash
```shell
curl http://192.168.0.1:6041/rest/login/root/taosdata
```
@ -457,7 +457,7 @@ Return value:
- Query all records of table d1001 in the demo database:
```bash
```shell
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
```
@ -509,7 +509,7 @@ Return value:
- Create database demo:
```bash
```shell
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04" -d "create database demo" 192.168.0.1:6041/rest/sql
```
@ -560,7 +560,7 @@ Return value:
#### TDengine 2.x response codes and message bodies
```JSON
```json
{
"status": "succ",
"head": [
@ -624,7 +624,7 @@ Return value:
#### TDengine 3.0 Response Codes and Message Body
```JSON
```json
{
"code": 0,
"column_meta": [

View File

@ -129,7 +129,7 @@ This document details the server error codes that may be encountered when using
| 0x80000350 | User already exists | Create user, duplicate creation | Confirm if the operation is correct |
| 0x80000351 | Invalid user | User does not exist | Confirm if the operation is correct |
| 0x80000352 | Invalid user format | Incorrect format | Confirm if the operation is correct |
| 0x80000353 | Invalid password format | Incorrect format | Confirm if the operation is correct |
| 0x80000353 | Invalid password format | The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. | Confirm the format of the password string |
| 0x80000354 | Can not get user from conn | Internal error | Report issue |
| 0x80000355 | Too many users | (Enterprise only) Exceeding user limit | Adjust configuration |
| 0x80000357 | Authentication failure | Incorrect password | Confirm if the operation is correct |
@ -251,6 +251,7 @@ This document details the server error codes that may be encountered when using
| 0x80000529 | Vnode is stopped | Vnode is closed | Report issue |
| 0x80000530 | Duplicate write request | Duplicate write request, internal error | Report issue |
| 0x80000531 | Vnode query is busy | Query is busy | Report issue |
| 0x80000540 | Vnode already exist but Dbid not match | Internal error | Report issue |
## tsdb
@ -503,6 +504,7 @@ This document details the server error codes that may be encountered when using
| 0x80003103 | Invalid tsma state | The vgroup of the stream computing result is inconsistent with the vgroup that created the TSMA index | Check error logs, contact development for handling |
| 0x80003104 | Invalid tsma pointer | Processing the results issued by stream computing, the message body is a null pointer. | Check error logs, contact development for handling |
| 0x80003105 | Invalid tsma parameters | Processing the results issued by stream computing, the result count is 0. | Check error logs, contact development for handling |
| 0x80003113 | Tsma optimization cannot be applied with INTERVAL AUTO offset. | Tsma optimization cannot be enabled with INTERVAL AUTO OFFSET under the current query conditions. | Use SKIP_TSMA Hint or specify a manual INTERVAL OFFSET. |
| 0x80003150 | Invalid rsma env | Rsma execution environment is abnormal. | Check error logs, contact development for handling |
| 0x80003151 | Invalid rsma state | Rsma execution state is abnormal. | Check error logs, contact development for handling |
| 0x80003152 | Rsma qtaskinfo creation error | Creating stream computing environment failed. | Check error logs, contact development for handling |

View File

@ -90,7 +90,7 @@ Batch insertion. Each insert statement can insert multiple records into one tabl
When inserting nchar type data containing Chinese characters on Windows, first ensure that the system's regional settings are set to China (this can be set in the Control Panel). At this point, the `taos` client in cmd should already be working properly; if developing a Java application in an IDE, such as Eclipse or IntelliJ, ensure that the file encoding in the IDE is set to GBK (which is the default encoding type for Java), then initialize the client configuration when creating the Connection, as follows:
```JAVA
```java
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8");
@ -145,7 +145,7 @@ Version 3.0 of TDengine includes a standalone component developed in Go called `
The Go language version requirement is 1.14 or higher. If there are Go compilation errors, often due to issues accessing Go mod in China, they can be resolved by setting Go environment variables:
```sh
```shell
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
@ -196,7 +196,7 @@ Here are the solutions:
1. Create a file /Library/LaunchDaemons/limit.maxfiles.plist, write the following content (the example changes limit and maxfiles to 100,000, modify as needed):
```plist
```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">

View File

@ -303,13 +303,12 @@ Query OK, 5 row(s) in set (0.016812s)
#### FILL Clause
The FILL clause specifies the fill mode to use when data is missing in a window interval. The fill modes are as follows:
1. No fill: NONE (the default fill mode).
2. VALUE fill: fill with a fixed value, which must be specified, e.g. FILL(VALUE, 1.23). Note that the value actually filled is determined by the type of the corresponding column: with FILL(VALUE, 1.23), if the corresponding column is of type INT, the fill value is 1.
3. PREV fill: fill with the previous non-NULL value, e.g. FILL(PREV).
4. NULL fill: fill with NULL, e.g. FILL(NULL).
5. LINEAR fill: fill by linear interpolation based on the nearest non-NULL values before and after, e.g. FILL(LINEAR).
6. NEXT fill: fill with the next non-NULL value, e.g. FILL(NEXT).
2. VALUE fill: fill with a fixed value, which must be specified, e.g. FILL(VALUE, 1.23). Note that the value actually filled is determined by the type of the corresponding column: with FILL(VALUE, 1.23), if the corresponding column is of type INT, the fill value is 1. If multiple columns in the select list need filling, each FILL column must be given its own VALUE, e.g. `SELECT _wstart, min(c1), max(c1) FROM ... FILL(VALUE, 0, 0)`. Note that a VALUE only needs to be specified for expressions in the SELECT list that contain normal columns: expressions such as `_wstart`, `_wstart+1a`, `now`, `1+1`, and the partition key (such as tbname) when PARTITION BY is used need no VALUE, while an expression such as `timediff(last(ts), _wstart)` does need one.
3. PREV fill: fill with the previous non-NULL value, e.g. FILL(PREV).
4. NULL fill: fill with NULL, e.g. FILL(NULL).
5. LINEAR fill: fill by linear interpolation based on the nearest non-NULL values before and after, e.g. FILL(LINEAR).
6. NEXT fill: fill with the next non-NULL value, e.g. FILL(NEXT).
Among the fill modes above, except for NONE (which by default fills nothing), the FILL clause is ignored if there is no data in the entire queried time range; that is, no fill data is produced and the query result is empty. This behavior is reasonable under some of the modes (PREV, NEXT, LINEAR), because in those modes no data means no fill value can be generated.

View File

@ -37,7 +37,7 @@ TDengine provides a rich set of application development interfaces. To help users quickly
The key differences are:
1. With a native connection, the client driver taosc must match the TDengine version on the server
1. With a native connection, the client driver taosc and TDengine on the server must be of the same version
2. With a REST connection, users do not need to install the client driver taosc, which brings cross-platform ease of use, but features such as data subscription and binary data types are unavailable. In addition, REST connections have the lowest performance compared with native and WebSocket connections. The REST interface is stateless. When using a REST connection, the database name of tables and supertables must be specified in SQL.
3. With a WebSocket connection, users likewise do not need to install the client driver taosc.
4. To connect to a cloud service instance, a REST connection or a WebSocket connection must be used.

View File

@ -68,19 +68,19 @@ dataDir /mnt/data6 2 0
In the configuration file /etc/taos/taos.cfg, add the parameters for S3 access:
|Parameter | Meaning |
|:-------------|:-----------------------------------------------|
|s3EndPoint | COS service domain name in the user's region; http and https are supported. The bucket's region must match the endpoint, otherwise it cannot be accessed. |
|s3AccessKey |Colon-separated user SecretId:SecretKey, e.g. AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E |
|s3BucketName | Bucket name; the part after the hyphen is the AppId with which the user registered for the COS service. The AppId is specific to COS (AWS and Alibaba Cloud have none) and must be used as part of the bucket name, separated by a hyphen. Parameter values are strings but need no quotes, e.g. test0711-1309024725 |
|s3UploadDelaySec | How long a data file must remain unchanged before it is uploaded to s3, in seconds. Minimum: 1; maximum: 2592000 (30 days); default 60 seconds |
|s3PageCacheSize |Number of s3 page cache pages, in pages. Minimum: 4; maximum: 1024*1024\*1024; default 4096|
|s3MigrateIntervalSec | Trigger period for automatic upload of local data files to S3, in seconds. Minimum: 600; maximum: 100000; default 3600 |
|s3MigrateEnabled | Whether to perform S3 migration automatically; the default is 0, meaning automatic S3 migration is disabled; it can be set to 1. |
| Parameter | Meaning |
|:---------------------|:-----------------------------------------------|
| s3EndPoint | COS service domain name in the user's region; http and https are supported. The bucket's region must match the endpoint, otherwise it cannot be accessed. |
| s3AccessKey | Colon-separated user SecretId:SecretKey, e.g. AKIDsQmwsfKxTo2A6nGVXZN0UlofKn6JRRSJ:lIdoy99ygEacU7iHfogaN2Xq0yumSm1E |
| s3BucketName | Bucket name; the part after the hyphen is the AppId with which the user registered for the COS service. The AppId is specific to COS (AWS and Alibaba Cloud have none) and must be used as part of the bucket name, separated by a hyphen. Parameter values are strings but need no quotes, e.g. test0711-1309024725 |
| s3UploadDelaySec | How long a data file must remain unchanged before it is uploaded to s3, in seconds. Minimum: 1; maximum: 2592000 (30 days); default 60 seconds |
| s3PageCacheSize | Number of S3 page cache pages, in pages. Minimum: 4; maximum: 1024*1024*1024; default 4096|
| s3MigrateIntervalSec | Trigger period for automatic upload of local data files to S3, in seconds. Minimum: 600; maximum: 100000; default 3600 |
| s3MigrateEnabled | Whether to perform S3 migration automatically; the default is 0, meaning automatic S3 migration is disabled; it can be set to 1. |
### Checking Configuration Parameter Availability
After completing the s3 configuration in taos.cfg, you can check whether the configured S3 service is available with the checks3 parameter of the taosd command:
After completing the S3 configuration in taos.cfg, you can check whether the configured S3 service is available with the checks3 parameter of the taosd command:
```
taosd --checks3
@ -106,11 +106,11 @@ s3migrate database <db_name>;
The detailed DB parameters are shown in the table below:
| # | Parameter | Default | Min | Max | Description |
| :--- | :----------- | :----- | :----- | :------ | :----------------------------------------------------------- |
| 1 | s3_keeplocal | 365 | 1 | 365000 | The number of days data is retained locally, i.e. how long a data file may stay on local disk before it can be uploaded to S3. The default unit is days; the units m (minutes), h (hours), and d (days) are supported |
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | The size threshold for uploaded objects; like the tsdb_pagesize parameter, it cannot be modified; unit: TSDB pages |
| 3 | s3_compact | 1 | 0 | 1 | Whether to run compact automatically when a TSDB file group is uploaded to S3 for the first time |
| # | Parameter | Default | Min | Max | Description |
|:--|:--------------|:-------|:------ |:------- | :----------------------------------------------------------- |
| 1 | s3_keeplocal | 365 | 1 | 365000 | The number of days data is retained locally, i.e. how long a data file may stay on local disk before it can be uploaded to S3. The default unit is days; the units m (minutes), h (hours), and d (days) are supported |
| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | The size threshold for uploaded objects; like the tsdb_pagesize parameter, it cannot be modified; unit: TSDB pages |
| 3 | s3_compact | 1 | 0 | 1 | Whether to run compact automatically when a TSDB file group is uploaded to S3 for the first time |
### Estimating Object Storage Reads and Writes
@ -168,10 +168,10 @@ s3BucketName td-test
The user interface is the same as for S3; the difference lies in the configuration of the following three parameters:
| # | Parameter | Example Value | Description |
| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- |
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | Colon-separated user accountId:accountKey |
| 3 | s3BucketName | test-container | Container name |
| # | Parameter | Example Value | Description |
|:--|:-------------|:-----------------------------------------|:----------------------------------|
| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL |
| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | Colon-separated user accountId:accountKey |
| 3 | s3BucketName | test-container | Container name |
Here fd2d01c73 is the account ID; the Microsoft Blob storage service supports only the HTTPS protocol, not HTTP.

View File

@ -12,18 +12,19 @@ TDengine is configured by default with only one root user, who has the highest privileges.
Only the root user can create users; the syntax is as follows.
```sql
create user user_name pass'password' [sysinfo {1|0}]
create user user_name pass'password' [sysinfo {1|0}] [createdb {1|0}]
```
The parameters are explained as follows.
- user_name: Up to 23 B long.
- password: Up to 128 B long; valid characters include letters and numbers as well as special characters other than single and double quotes, apostrophes, backslashes, and spaces; it cannot be empty.
- user_name: The username may be at most 23 bytes long.
- password: The password must be between 8 and 16 characters long and include at least three types of characters from the following: uppercase letters, lowercase letters, numbers, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`.
- sysinfo: Whether the user can view system information. 1 means they can view it, 0 means they cannot. System information includes server configuration information, information about the various nodes such as dnodes and query nodes (qnodes), and storage-related information. The default is to allow viewing system information.
- createdb: Whether the user can create databases. 1 means they can, 0 means they cannot. The default value is 0. // Supported starting from TDengine Enterprise version 3.3.2.0
The following SQL creates a user named test with the password 123456 who can view system information.
The following SQL creates a user named test with the password abc123!@# who can view system information.
```sql
create user test pass '123456' sysinfo 1
create user test pass 'abc123!@#' sysinfo 1
```
### Viewing Users
@ -47,6 +48,7 @@ alter_user_clause: {
pass 'literal'
| enable value
| sysinfo value
| createdb value
}
```
@ -54,6 +56,7 @@ alter_user_clause: {
- pass: Change the user's password.
- enable: Whether the user is enabled. 1 enables the user, 0 disables the user.
- sysinfo: Whether the user can view system information. 1 means they can, 0 means they cannot.
- createdb: Whether the user can create databases. 1 means they can, 0 means they cannot. // Supported starting from TDengine Enterprise version 3.3.2.0
The following SQL disables the user test.
```sql

Three binary image files added (not shown; 212 KiB, 222 KiB, and 95 KiB).

View File

@ -145,6 +145,44 @@ The TDinsight dashboard is designed to show the usage and status of TDengine-related resources,
There are also line charts broken down by finer dimensions of the categories above.
### Automatic Import of Preconfigured Alert Rules
Drawing on users' operational experience, TAOS Data has compiled 14 commonly used alert rules that monitor key cluster metrics and promptly report alerts such as metric anomalies and threshold violations.
Starting from TDengine-server version 3.3.4.3 (tdengine-datasource 3.6.3), TDengine Datasource supports automatic import of preconfigured alert rules: users can import all 14 alert rules into Grafana (version 11 or above) with one click and use them directly.
The import procedure is shown in the figure below. On the tdengine-datasource settings page, turn on the "Load TDengine Alert" switch and click the "Save & test" button; the plugin then automatically loads the alert rules above into a Grafana alert folder named after the data source plus "-alert". If they are not needed, turn off the "Load TDengine Alert" switch and click the button next to "Clear TDengine Alert" to remove all alert rules imported for this data source.
![TDengine Alert](./assets/TDengine-Alert.webp)
After importing, click "Alert rules" in the Grafana sidebar to view all current alert rules.
Users only need to configure contact points to receive alert notifications. For contact point configuration, see [Alert Configuration](https://docs.taosdata.com/third-party/visual/grafana/#%E5%91%8A%E8%AD%A6%E9%85%8D%E7%BD%AE).
![Alert-rules](./assets/Alert-rules.webp)
The 14 alert rules are configured as follows:
| Rule Name | Threshold | Behavior When No Monitoring Data | Scan Interval | Duration | SQL |
| ------ | --------- | ---------------- | ----------- |------- |----------------------|
|dnode CPU load|average > 80%|trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(cpu_system) as cup_use from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts < now partition by dnode_id having first(_ts) > 0 `|
|dnode memory usage|average > 60%|trigger alert|5 minutes|5 minutes|`select now(), dnode_id, last(mem_engine) / last(mem_total) * 100 as taosd from log.taosd_dnodes_info where _ts >= (now- 5m) and _ts <now partition by dnode_id`|
|dnode disk usage| > 80%|trigger alert|5 minutes|5 minutes|`select now(), dnode_id, data_dir_level, data_dir_name, last(used) / last(total) * 100 as used from log.taosd_dnodes_data_dirs where _ts >= (now - 5m) and _ts < now partition by dnode_id, data_dir_level, data_dir_name`|
|Cluster license expiring|< 60 days|trigger alert|1 day|0 seconds|`select now(), cluster_id, last(grants_expire_time) / 86400 as expire_time from log.taosd_cluster_info where _ts >= (now - 24h) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|Time series used reaching the licensed count|>= 90%|trigger alert|1 day|0 seconds|`select now(), cluster_id, CASE WHEN max(grants_timeseries_total) > 0.0 THEN max(grants_timeseries_used) /max(grants_timeseries_total) * 100.0 ELSE 0.0 END AS result from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1m) > 0`|
|Concurrent query requests| > 100|no alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries`|
|Longest slow-query execution time (no time window)|> 300 seconds|no alert|1 minute|0 seconds|`select now() as ts, count(*) as slow_count from performance_schema.perf_queries where exec_usec>300000000`|
|dnode offline|total != alive|trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(dnodes_total) - last(dnodes_alive) as dnode_offline from log.taosd_cluster_info where _ts >= (now -30s) and _ts < now partition by cluster_id having first(_ts) > 0`|
|vnode offline|total != alive|trigger alert|30 seconds|0 seconds|`select now(), cluster_id, last(vnodes_total) - last(vnodes_alive) as vnode_offline from log.taosd_cluster_info where _ts >= (now - 30s) and _ts < now partition by cluster_id having first(_ts) > 0 `|
|Data deletion requests|> 0|no alert|30 seconds|0 seconds|``select now(), count(`count`) as `delete_count` from log.taos_sql_req where sql_type = 'delete' and _ts >= (now -30s) and _ts < now``|
|Adapter RESTful request failures|> 5|no alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=0 and ts >= (now -30s) and ts < now``|
|Adapter WebSocket request failures|> 5|no alert|30 seconds|0 seconds|``select now(), sum(`fail`) as `Failed` from log.adapter_requests where req_type=1 and ts >= (now -30s) and ts < now``|
|Missing dnode data reports|< 3|trigger alert|180 seconds|0 seconds|`select now(), cluster_id, count(*) as dnode_report from log.taosd_cluster_info where _ts >= (now -180s) and _ts < now partition by cluster_id having timetruncate(first(_ts), 1h) > 0`|
|dnode restart|max(update_time) > last(update_time)|trigger alert|90 seconds|0 seconds|`select now(), dnode_id, max(uptime) - last(uptime) as dnode_restart from log.taosd_dnodes_info where _ts >= (now - 90s) and _ts < now partition by dnode_id`|
Users can take the alert rules above as a reference and modify or refine them to suit their own business needs.
In Grafana 7.5 and earlier, Dashboards and Alert rules were combined, while later versions separate the two features. For compatibility with Grafana 7.5 and earlier, an Alert Used Only panel has been added to the TDinsight dashboard; it is needed only on Grafana 7.5 and earlier.
![Alert Used Only](./assets/Alert-Used-Only.webp)
## Upgrading
Any of the following three methods can be used to upgrade:
- With the GUI: when a new version is available, click update on the "TDengine Datasource" plugin page.
@ -155,10 +193,11 @@ The TDinsight dashboard is designed to show the usage and status of TDengine-related resources,
Depending on the installation method, to uninstall:
- With the GUI: click "Uninstall" on the "TDengine Datasource" plugin page.
- For TDinsight installed via the `TDinsight.sh` script, use `TDinsight.sh -R` on the command line to clean up the related resources.
- For manually installed TDinsight, a complete uninstall requires cleaning up the following:
- For manually installed TDinsight, a complete uninstall requires cleaning up the following, in order:
1. The TDinsight dashboard in Grafana.
2. The data source in Grafana.
3. Delete the `tdengine-datasource` plugin from the plugin installation directory.
2. The alert rules in Grafana.
3. The data source in Grafana.
4. Delete the `tdengine-datasource` plugin from the plugin installation directory.
## Appendix

View File

@ -54,7 +54,7 @@ taos> SET MAX_BINARY_DISPLAY_WIDTH <nn>;
- -h HOST: FQDN of the server running the TDengine service to connect to; by default, connect to the local service
- -P PORT: Specify the port number used by the server
- -u USER: Username to use when connecting
- -p PASSWORD: Password to use when connecting to the server
- -p PASSWORD: Password to use when connecting to the server; special characters such as `! & ( ) < > ; |` must be escaped with the character `\`
- -?, --help: Print all command-line parameters
There are many more parameters:

View File

@ -64,7 +64,8 @@ database_option: {
- DURATION: The time span of the data stored in a data file. A unit suffix may be used, e.g. DURATION 100h or DURATION 10d; the units m (minutes), h (hours), and d (days) are supported. Without a time unit the default unit is days, so DURATION 50 means 50 days.
- MAXROWS: The maximum number of records in a file block, 4096 by default.
- MINROWS: The minimum number of records in a file block, 100 by default.
- KEEP: The number of days data files are retained. The default is 3650, the range is [1, 365000], and the value must be at least 3 times the DURATION parameter. The database automatically deletes data retained longer than KEEP to free storage space. KEEP may use a unit suffix, e.g. KEEP 100h or KEEP 10d, supporting the units m (minutes), h (hours), and d (days); it may also be written without a unit, as in KEEP 50, in which case the default unit is days. The Enterprise Edition supports [multi-level storage](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8), so multiple retention times can be set (comma-separated, at most 3, satisfying keep 0 \<= keep 1 \<= keep 2, e.g. KEEP 100h,100d,3650d); the Community Edition does not support multi-level storage (even if multiple retention times are configured, they do not take effect, and KEEP takes the longest retention time). To learn more, see [About the Primary Timestamp](https://docs.taosdata.com/reference/taos-sql/insert/)
- KEEP: The number of days data files are retained. The default is 3650, the range is [1, 365000], and the value must be at least 3 times the DURATION parameter. The database automatically deletes data retained longer than KEEP to free storage space. KEEP may use a unit suffix, e.g. KEEP 100h or KEEP 10d, supporting the units m (minutes), h (hours), and d (days); it may also be written without a unit, as in KEEP 50, in which case the default unit is days. The Enterprise Edition supports [multi-level storage](../../operation/planning/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8), so multiple retention times can be set (comma-separated, at most 3, satisfying keep 0 \<= keep 1 \<= keep 2, e.g. KEEP 100h,100d,3650d); the Community Edition does not support multi-level storage (even if multiple retention times are configured, they do not take effect, and KEEP takes the longest retention time). To learn more, see [About the Primary Timestamp](https://docs.taosdata.com/reference/taos-sql/insert/)
- KEEP_TIME_OFFSET: Effective since version 3.2.0.0. The delayed execution time for deleting or migrating data retained longer than KEEP, 0 (hours) by default. After a data file's retention time exceeds KEEP, the delete or migrate operation is not executed immediately; it waits an additional interval specified by this parameter, so that the operation can be staggered away from peak business hours.
- STT_TRIGGER: The number of files flushed to disk that triggers file merging. Fixed at 1 in the open-source edition; configurable from 1 to 16 in the Enterprise Edition. For scenarios with few tables and high-frequency writes, the default is recommended; for scenarios with many tables and low-frequency writes, a larger value is recommended.
- SINGLE_STABLE: Whether only one supertable may be created in this database, for cases where the supertable has very many columns.
@ -232,6 +233,3 @@ SHOW db_name.disk_info;
This command is essentially equivalent to `select sum(data1 + data2 + data3)/sum(raw_data), sum(data1 + data2 + data3) from information_schema.ins_disk_usage where db_name="dbname"`
@ -171,7 +171,7 @@ ALTER TABLE [db_name.]tb_name alter_table_clause
alter_table_clause: {
alter_table_options
| SET TAG tag_name = new_tag_value,tag_name2=new_tag2_value...
| SET TAG tag_name = new_tag_value, tag_name2=new_tag2_value ...
}
alter_table_options:
@ -195,7 +195,7 @@ alter_table_option: {
### Modify Subtable Tag Values
```
ALTER TABLE tb_name SET TAG tag_name=new_tag_value;
ALTER TABLE tb_name SET TAG tag_name1=new_tag_value1, tag_name2=new_tag_value2 ...;
```
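For instance, assuming a hypothetical subtable `d1001` with tags `location` and `group_id`:

```sql
ALTER TABLE d1001 SET TAG location='California.SanFrancisco', group_id=3;
```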
### Modify Table Lifespan
@ -6,7 +6,7 @@ title: "Delete Data"
Deleting data is a TDengine feature for removing records from a specified table or supertable within a specified time range, making it convenient to clean up abnormal data produced by device failures and similar causes.
**Note**: Deleting data does not immediately free the disk space the table occupies; the data is marked as deleted and no longer appears in queries, but the disk space is reclaimed only later, when the system reorganizes the data automatically or the user triggers a reorganization manually.
**Note**: Deleting data does not immediately free the disk space the table occupies; the data is marked as deleted and no longer appears in queries, but the disk space is reclaimed only later, when the system cleans up automatically (governed by the database parameter keep) or the user runs a manual data reorganization (the Enterprise Edition compact feature).
**Syntax:**
@ -1817,7 +1817,7 @@ ignore_null_values: {
}
```
**Description**: returns the recorded or interpolated value of the specified column at the specified time cross-section. The ignore_null_values parameter may be 0 or 1; 1 means NULL values are ignored; the default is 0.
**Return type**: same as the field type.
@ -1838,9 +1838,9 @@ ignore_null_values: {
- INTERP can be used together with the pseudocolumn _irowts to return the timestamp corresponding to the interpolation point (supported since version 3.0.2.0).
- INTERP can be used together with the pseudocolumn _isfilled to indicate whether the returned result is an original record or data produced by the interpolation algorithm (supported since version 3.0.3.0).
- For INTERP queries on tables with a composite primary key, if rows share the same timestamp, only the row with the smallest composite primary key participates in the calculation.
- INTERP queries support the NEAR FILL mode: when FILL is required, the data closest to the current time point is used for interpolation; when the timestamps before and after are equally close to the current cross-section, the previous row's value is used. This mode is not supported in stream computing or in window queries. Example: SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00') FILL(NEAR) (supported since version 3.3.4.9).
- INTERP can use the pseudocolumn `_irowts_origin` only in the FILL PREV/NEXT/NEAR modes. `_irowts_origin` is supported since version 3.3.4.9.
- The INTERP `RANGE` clause supports extension by a time range (since version 3.3.4.9). For example, `RANGE('2023-01-01 00:00:00', 10s)` means data within 10s before or after the time point '2023-01-01 00:00:00' is searched for interpolation; FILL PREV/NEXT/NEAR search backward from, forward from, or on both sides of the time point, respectively. If there is no data around the point, the value given by FILL is used, so in this form the FILL clause must specify a value, e.g. SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00', 10s) FILL(PREV, 1). Currently only a time point combined with a time range is supported; a time interval combined with a time range, i.e. RANGE('2023-01-01 00:00:00', '2023-02-01 00:00:00', 1h), is not. The rules for the time range are similar to EVERY: the unit cannot be year or month, the value cannot be 0, and it cannot be quoted. When this extension is used, FILL modes other than PREV/NEXT/NEAR are not supported and the EVERY clause cannot be specified (see the sketch below).
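A quick illustration of the extended RANGE form (the table `d1001` and column `current` are hypothetical; requires version 3.3.4.9 or later):

```sql
SELECT _irowts_origin, INTERP(current)
FROM d1001
RANGE('2023-01-01 00:00:00', 10s)
FILL(PREV, 1);
```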
### LAST
@ -124,7 +124,7 @@ The INTERVAL clause allows the AUTO keyword to specify the window offset; in that case, if
```sql
-- with a start-time constraint, time windows are split starting from '2018-10-03 14:38:05'
SELECT COUNT(*) FROM meters WHERE _rowts >= '2018-10-03 14:38:05' INTERVAL (1m AUTO);
SELECT COUNT(*) FROM meters WHERE _rowts >= '2018-10-03 14:38:05' INTERVAL (1m, AUTO);
-- without a start-time constraint, AUTO does not take effect and the offset remains 0
SELECT COUNT(*) FROM meters WHERE _rowts < '2018-10-03 15:00:00' INTERVAL (1m, AUTO);
@ -303,24 +303,24 @@ TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides access to data
## INS_STREAMS
| # | **Column Name** | **Data Type** | **Description** |
|:----|:-----------|:------------|:--------|
| 1 | stream_name | VARCHAR(64) | name of the stream |
| 2 | create_time | TIMESTAMP | creation time |
| 3 | sql | VARCHAR(1024) | SQL statement provided when the stream was created |
| 4 | status | VARCHAR(20) | current status of the stream |
| 5 | source_db | VARCHAR(64) | source database |
| 6 | target_db | VARCHAR(64) | target database |
| 7 | target_table | VARCHAR(192) | target table the stream writes to |
| 8 | watermark | BIGINT | watermark; see the stream processing section of the SQL manual. Note that `watermark` is a TDengine keyword and must be escaped with ` when used as a column name. |
| 9 | trigger | INT | mode for pushing computed results; see the stream processing section of the SQL manual. Note that `trigger` is a TDengine keyword and must be escaped with ` when used as a column name. |
## INS_USER_PRIVILEGES
Users whose SYSINFO attribute is 0 cannot view this table.
| # | **Column Name** | **Data Type** | **Description** |
|:----|:-----------|:------------|:--------|
| 1 | user_name | VARCHAR(24) | user name |
| 2 | privilege | VARCHAR(10) | privilege description |
| 3 | db_name | VARCHAR(65) | database name |
@ -329,18 +329,18 @@ TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides access to data
## INS_DISK_USAGE
| # | **Column Name** | **Data Type** | **Description** |
|:----|:-----------|:------------|:--------|
| 1 | db_name | VARCHAR(32) | database name |
| 2 | vgroup_id | INT | vgroup ID |
| 3 | wal | BIGINT | size of WAL files, in KB |
| 4 | data1 | BIGINT | size of data files on level-1 storage, in KB |
| 5 | data2 | BIGINT | size of data files on level-2 storage, in KB |
| 6 | data3 | BIGINT | size of data files on level-3 storage, in KB |
| 7 | cache_rdb | BIGINT | size of last/last_row files, in KB |
| 8 | table_meta | BIGINT | size of meta files, in KB |
| 9 | s3 | BIGINT | size occupied on S3, in KB |
| 10 | raw_data | BIGINT | estimated size of the raw data, in KB |
## INS_FILESETS
@ -9,19 +9,21 @@ description: This section describes basic user management features
## Create Users
```sql
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
CREATE USER user_name PASS 'password' [SYSINFO {1|0}] [CREATEDB {1|0}];
```
A user name can be at most 23 bytes long.
A password can be at most 31 bytes long. It may contain letters, digits, and special characters other than single quotes, double quotes, backticks, backslashes, and spaces, and it cannot be an empty string.
A password must be 8 to 16 characters long and contain at least three of the following categories: uppercase letters, lowercase letters, digits, and special characters. Special characters include `! @ # $ % ^ & * ( ) - _ + = [ ] { } : ; > < ? | ~ , .`
`SYSINFO` indicates whether the user can view system information. `1` means the user may view it, `0` means no permission. System information includes server configuration, dnode, vnode, and storage information. The default is `1`.
In the example below, we create a user with the password `123456` who can view system information.
`CREATEDB` indicates whether the user can create databases. `1` means allowed, `0` means not allowed. The default is `0`. // Supported since TDengine Enterprise 3.3.2.0
In the example below, we create a user with the password `abc123!@#` who can view system information.
```sql
taos> create user test pass '123456' sysinfo 1;
taos> create user test pass 'abc123!@#' sysinfo 1;
Query OK, 0 of 0 rows affected (0.001254s)
```
@ -77,7 +79,7 @@ alter_user_clause: {
- PASS: change the password, followed by the new password
- ENABLE: enable or disable the user; `1` enables, `0` disables
- SYSINFO: allow or forbid viewing system information; `1` allows, `0` forbids
- CREATEDB: allow or forbid creating databases; `1` allows, `0` forbids
- CREATEDB: allow or forbid creating databases; `1` allows, `0` forbids. // Supported since TDengine Enterprise 3.3.2.0
The following example disables the user named `test`:
@ -29,14 +29,15 @@ description: Configurable compression algorithms
- Default compression algorithms and applicable ranges for each data type
| Data Type | Available Encodings | Default Encoding | Available Compression | Default Compression | Default Compression Level |
| :-----------:|:----------:|:-------:|:-------:|:----------:|:----:|
| int/uint | simple8b| simple8b | lz4/zlib/zstd/xz| lz4 | medium|
| tinyint/untinyint/smallint/usmallint | simple8b| simple8b | lz4/zlib/zstd/xz| zlib | medium|
| bigint/ubigint/timestamp | simple8b/delta-i | delta-i |lz4/zlib/zstd/xz | lz4| medium|
|float/double | delta-d|delta-d |lz4/zlib/zstd/xz/tsz|lz4| medium|
|binary/nchar| disabled| disabled|lz4/zlib/zstd/xz| zstd| medium|
|bool| bit-packing| bit-packing| lz4/zlib/zstd/xz| zstd| medium|
| Data Type | Available Encodings | Default Encoding | Available Compression | Default Compression | Default Compression Level |
|:------------------------------------:|:-------------------------:|:-----------:|:--------------------:|:----:|:------:|
| int/uint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | lz4 | medium |
| tinyint/untinyint/smallint/usmallint | disabled/simple8b | simple8b | lz4/zlib/zstd/xz | zlib | medium |
| bigint/ubigint/timestamp | disabled/simple8b/delta-i | delta-i | lz4/zlib/zstd/xz | lz4 | medium |
| float/double | disabled/delta-d | delta-d | lz4/zlib/zstd/xz/tsz | lz4 | medium |
| binary/nchar | disabled | disabled | lz4/zlib/zstd/xz | zstd | medium |
| bool | disabled/bit-packing | bit-packing | lz4/zlib/zstd/xz | zstd | medium |
## SQL Syntax
@ -680,12 +680,31 @@ The version number of the TDengine client driver corresponds one-to-one with the version number of the TDengine server
- **Description**: cleans up the runtime environment; should be called before the application exits.
- `int taos_options(TSDB_OPTION option, const void * arg, ...)`
- **Description**: sets client options; currently supports locale (`TSDB_OPTION_LOCALE`), character set (`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), and configuration file path (`TSDB_OPTION_CONFIGDIR`) settings. The locale, character set, and time zone default to the operating system's current settings.
- **Description**: sets client options; supports locale (`TSDB_OPTION_LOCALE`), character set (`TSDB_OPTION_CHARSET`), time zone (`TSDB_OPTION_TIMEZONE`), and configuration file path (`TSDB_OPTION_CONFIGDIR`) settings. The locale, character set, and time zone default to the operating system's current settings.
- **Parameters**:
- `option`: [in] the option type.
- `arg`: [in] the option value.
- **Return value**: `0` on success, `-1` on failure.
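For example (a minimal sketch; the time zone value shown is illustrative), process-wide options are set once, before connecting:

```c
// affects the whole client process; call before taos_connect()
taos_options(TSDB_OPTION_TIMEZONE, "UTC-8");
```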
- `int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...)`
- **Description**: sets options on a client connection; currently supports character set (`TSDB_OPTION_CONNECTION_CHARSET`), time zone (`TSDB_OPTION_CONNECTION_TIMEZONE`), user IP (`TSDB_OPTION_CONNECTION_USER_IP`), and user app (`TSDB_OPTION_CONNECTION_USER_APP`) settings.
- **Parameters**:
- `taos`: [in] the connection handle returned by taos_connect.
- `option`: [in] the option type.
- `arg`: [in] the option value.
- **Return value**: `0` on success, non-zero on failure.
- **Notes**:
- The character set and time zone default to the operating system's current settings; Windows does not support connection-level time zone settings.
- Passing NULL as arg resets the option.
- This interface affects only the current connection; it does not touch other connections.
- If the interface is called repeatedly with the same option, the last value wins, which can be used to modify a setting.
- The TSDB_OPTION_CONNECTION_CLEAR option resets all connection options.
- After a reset, the time zone and character set fall back to the system settings, and user ip and user app become empty.
- All connection option values are strings. The user app value can be at most 23 characters and is truncated if longer; invalid values for the other options raise an error.
- If the time zone file cannot be found or the value cannot be interpreted according to the specification, the time zone defaults to UTC, following the same rules as the operating system (see the tzset documentation). The time zone of the current connection can be checked with select timezone().
- The time zone and character set take effect only on the client side; they have no influence on related behavior on the server side.
- The time zone files are the operating system's time zone files, which you can update yourself. If setting a time zone reports an error, check whether the time zone file or path (mac: /var/db/timezone/zoneinfo, linux: /usr/share/zoneinfo) exists and is correct.
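Putting the notes above together, a minimal sketch (connection parameters are placeholders; error handling is abbreviated):

```c
TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
if (conn != NULL) {
  // per-connection time zone; other connections are unaffected
  if (taos_options_connection(conn, TSDB_OPTION_CONNECTION_TIMEZONE,
                              "Asia/Shanghai") != 0) {
    /* handle error */
  }
  // ... queries on conn now evaluate time expressions in Asia/Shanghai ...
  (void)taos_options_connection(conn, TSDB_OPTION_CONNECTION_CLEAR, NULL);
  taos_close(conn);
}
```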
- `char *taos_get_client_info()`
- **Description**: gets the client version information.
- **Return value**: the client version information.
@ -172,7 +172,7 @@ For the WKB specification, see [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/)
**Cause**: the program cannot find the dependent native library taos.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll into the C:\Windows\System32\ directory; on Linux, create the symbolic link `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`; on macOS, create the symbolic link `ln -s /usr/local/lib/libtaos.dylib`.
**Solution**: on Windows, copy C:\TDengine\driver\taos.dll into the C:\Windows\System32\ directory; on Linux, create the symbolic link `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so`; on macOS, create the symbolic link `ln -s /usr/local/lib/libtaos.dylib /usr/lib/libtaos.dylib`.
3. java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform
@ -136,7 +136,7 @@ description: List of TDengine server error codes and detailed explanations
| 0x80000350 | User already exists | Create user: duplicate creation | Confirm whether the operation is correct |
| 0x80000351 | Invalid user | The user does not exist | Confirm whether the operation is correct |
| 0x80000352 | Invalid user format | Invalid format | Confirm whether the operation is correct |
| 0x80000353 | Invalid password format | Invalid format | Confirm whether the operation is correct |
| 0x80000353 | Invalid password format | The password must be 8 to 16 characters long and contain at least three of the following categories: uppercase letters, lowercase letters, digits, special characters | Check the format of the password string |
| 0x80000354 | Can not get user from conn | Internal error | Report an issue |
| 0x80000355 | Too many users | (Enterprise only) the number of users exceeds the limit | Adjust the configuration |
| 0x80000357 | Authentication failure | Incorrect password | Confirm whether the operation is correct |
@ -261,6 +261,7 @@ description: List of TDengine server error codes and detailed explanations
| 0x80000529 | Vnode is stopped | The vnode is already closed | Report an issue |
| 0x80000530 | Duplicate write request | Duplicate write request; internal error | Report an issue |
| 0x80000531 | Vnode query is busy | The query is busy | Report an issue |
| 0x80000540 | Vnode already exist but Dbid not match | Internal error | Report an issue |
## tsdb
@ -523,6 +524,7 @@ description: List of TDengine server error codes and detailed explanations
| 0x80003103 | Invalid tsma state | The vgroup delivering stream-computing results is inconsistent with the vgroup that created the TSMA index | Check the error logs and contact the development team |
| 0x80003104 | Invalid tsma pointer | While writing stream-computing results, the message body is a null pointer. | Check the error logs and contact the development team |
| 0x80003105 | Invalid tsma parameters | While writing stream-computing results, the result count is 0. | Check the error logs and contact the development team |
| 0x80003113 | Tsma optimization cannot be applied with INTERVAL AUTO offset. | Tsma optimization cannot be enabled with INTERVAL AUTO OFFSET under the current query conditions. | Use the SKIP_TSMA hint or specify INTERVAL OFFSET manually. |
| 0x80003150 | Invalid rsma env | Abnormal rsma execution environment. | Check the error logs and contact the development team |
| 0x80003151 | Invalid rsma state | Abnormal rsma execution state. | Check the error logs and contact the development team |
| 0x80003152 | Rsma qtaskinfo creation error | Failed to create the stream-computing environment. | Check the error logs and contact the development team |
| 0x80003152 | Rsma qtaskinfo creation error | 创建流计算环境异常。 | 检查错误日志,联系开发处理 |
@ -280,4 +280,12 @@ The data shown in the TDinsight plugin is collected via the taosKeeper and taosAdapter services
https://docs.taosdata.com/reference/components/taosd/#%E7%9B%91%E6%8E%A7%E7%9B%B8%E5%85%B3
You can disable this parameter at any time by setting telemetryReporting to 0 in taos.cfg and then restarting the database service.
The code is located at: https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c
In addition, for TDengine Enterprise, which targets extremely high security requirements, this parameter does not take effect.
### 31 What should I do about "Sync leader is unreachable" when connecting to a cluster for the first time?
This error means the first connection to the cluster succeeded, but the IP accessed first was not the mnode leader node, and an error occurred when the client tried to establish a connection with the leader. The client looks for the leader node via the EP, i.e. the specified FQDN and port number. There are two common causes:
- ports on the other nodes of the cluster are not open
- the client's hosts file is misconfigured
So first check that all ports on the server cluster (default 6030 for native connections, default 6041 for HTTP connections) are open; then check whether the client's hosts file contains the FQDN and IP information of every node in the cluster, for example with the checks sketched below.
If the problem still cannot be resolved, contact TAOS Data technical support.
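A sketch of those checks from the client host (node names are hypothetical; repeat for every dnode):

```shell
ping -c 1 node1.example.com      # does the FQDN resolve?
nc -zv node1.example.com 6030    # native port reachable?
nc -zv node1.example.com 6041    # HTTP/REST port reachable?
```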
@ -64,6 +64,15 @@ typedef enum {
TSDB_MAX_OPTIONS
} TSDB_OPTION;
typedef enum {
TSDB_OPTION_CONNECTION_CLEAR = -1, // means clear all option in this connection
TSDB_OPTION_CONNECTION_CHARSET, // charset, Same as the scope supported by the system
TSDB_OPTION_CONNECTION_TIMEZONE, // timezone, Same as the scope supported by the system
TSDB_OPTION_CONNECTION_USER_IP, // user ip
TSDB_OPTION_CONNECTION_USER_APP,      // user app, max length is 23, truncated if longer than 23
TSDB_MAX_OPTIONS_CONNECTION
} TSDB_OPTION_CONNECTION;
typedef enum {
TSDB_SML_UNKNOWN_PROTOCOL = 0,
TSDB_SML_LINE_PROTOCOL = 1,
@ -174,11 +183,12 @@ typedef struct TAOS_STMT_OPTIONS {
DLL_EXPORT void taos_cleanup(void);
DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
DLL_EXPORT int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...);
DLL_EXPORT setConfRet taos_set_config(const char *config);
DLL_EXPORT int taos_init(void);
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
DLL_EXPORT void taos_close(TAOS *taos);
DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
DLL_EXPORT void taos_close(TAOS *taos);
DLL_EXPORT const char *taos_data_type(int type);
@ -154,7 +154,7 @@ int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf, void *charsetCxt);
// SColData ================================
typedef struct {
@ -1245,7 +1245,7 @@ typedef struct {
} STsBufInfo;
typedef struct {
int32_t tz; // query client timezone
void* timezone;
char intervalUnit;
char slidingUnit;
char offsetUnit;
@ -3472,6 +3472,8 @@ typedef struct {
SAppHbReq app;
SQueryHbReqBasic* query;
SHashObj* info; // hash<Skv.key, Skv>
char userApp[TSDB_APP_NAME_LEN];
uint32_t userIp;
} SClientHbReq;
typedef struct {
@ -3893,7 +3895,7 @@ typedef struct {
int8_t igExists;
int8_t intervalUnit;
int8_t slidingUnit;
int8_t timezone;
int8_t timezone;  // int8_t is not enough; timezone is in units of seconds
int32_t dstVgId; // for stream
int64_t interval;
int64_t offset;
@ -61,22 +61,9 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
* precision == TSDB_TIME_PRECISION_MILLI, it returns timestamp in millisecond.
* precision == TSDB_TIME_PRECISION_NANO, it returns timestamp in nanosecond.
*/
static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
  int64_t factor = (precision == TSDB_TIME_PRECISION_MILLI)   ? 1000
                   : (precision == TSDB_TIME_PRECISION_MICRO) ? 1000000
                                                              : 1000000000;
  time_t t;
  (void)taosTime(&t);
  struct tm tm;
  (void)taosLocalTime(&t, &tm, NULL, 0);
  tm.tm_hour = 0;
  tm.tm_min = 0;
  tm.tm_sec = 0;
  return (int64_t)taosMktime(&tm) * factor;
}
int64_t taosGetTimestampToday(int32_t precision, timezone_t tz);
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision, timezone_t tz);
int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
int64_t taosTimeGetIntervalEnd(int64_t ts, const SInterval* pInterval);
@ -86,13 +73,12 @@ void calcIntervalAutoOffset(SInterval* interval);
int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision, bool negativeAllow);
int32_t taosParseTime(const char* timestr, int64_t* pTime, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
int32_t taosParseTime(const char* timestr, int64_t* pTime, int32_t len, int32_t timePrec, timezone_t tz);
char getPrecisionUnit(int32_t precision);
int64_t convertTimePrecision(int64_t ts, int32_t fromPrecision, int32_t toPrecision);
int32_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit, int64_t* pRes);
int32_t convertStringToTimestamp(int16_t type, char* inputData, int64_t timePrec, int64_t* timeVal);
int32_t convertStringToTimestamp(int16_t type, char* inputData, int64_t timePrec, int64_t* timeVal, timezone_t tz, void* charsetCxt);
int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision);
int32_t taosFormatUtcTime(char* buf, int32_t bufLen, int64_t ts, int32_t precision);
@ -102,8 +88,8 @@ struct STm {
int64_t fsec; // in NANOSECOND
};
int32_t taosTs2Tm(int64_t ts, int32_t precision, struct STm* tm);
int32_t taosTm2Ts(struct STm* tm, int64_t* ts, int32_t precision);
int32_t taosTs2Tm(int64_t ts, int32_t precision, struct STm* tm, timezone_t tz);
int32_t taosTm2Ts(struct STm* tm, int64_t* ts, int32_t precision, timezone_t tz);
/// @brief convert a timestamp to a formatted string
/// @param format the timestamp format, must null terminated
@ -112,7 +98,7 @@ int32_t taosTm2Ts(struct STm* tm, int64_t* ts, int32_t precision);
/// formats array; If not NULL, [formats] will be used instead of [format] to skip parse formats again.
/// @param out output buffer, should be initialized by memset
/// @notes remember to free the generated formats
int32_t taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t precision, char* out, int32_t outLen);
int32_t taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t precision, char* out, int32_t outLen, timezone_t tz);
/// @brief convert a formatted timestamp string to a timestamp
/// @param format must null terminated
/// @param [in, out] formats, see taosTs2Char
@ -120,7 +106,7 @@ int32_t taosTs2Char(const char* format, SArray** formats, int64_t ts, int32_t pr
/// @retval 0 for success, otherwise error occurred
/// @notes remember to free the generated formats even when an error occurred
int32_t taosChar2Ts(const char* format, SArray** formats, const char* tsStr, int64_t* ts, int32_t precision, char* errMsg,
int32_t errMsgLen);
int32_t errMsgLen, timezone_t tz);
int32_t TEST_ts2char(const char* format, int64_t ts, int32_t precision, char* out, int32_t outLen);
int32_t TEST_char2ts(const char* format, int64_t* ts, int32_t precision, const char* tsStr);
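To illustrate the direction of this change (a sketch, not code from this commit): the TEST_ helpers keep the old argument list, while the core conversion now takes an explicit `timezone_t` instead of relying on process-global state:

```c
// sketch only; the format pattern follows the to_char-style syntax
char    out[64] = {0};
int32_t code = TEST_ts2char("YYYY-MM-DD HH24:MI:SS", 1700000000000LL,
                            TSDB_TIME_PRECISION_MILLI, out, sizeof(out));
```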
@ -22,7 +22,7 @@
typedef struct SExplainCtx SExplainCtx;
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp, int8_t biMode);
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp, int8_t biMode, void* charsetCxt);
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs);
@ -292,8 +292,16 @@ struct SScalarParam {
void *param; // other parameter, such as meta handle from vnode, to extract table name/tag value
int32_t numOfRows;
int32_t numOfQualified; // number of qualified elements in the final results
timezone_t tz;
void *charsetCxt;
};
static inline void setTzCharset(SScalarParam* param, timezone_t tz, void* charsetCxt){
if (param == NULL) return;
param->tz = tz;
param->charsetCxt = charsetCxt;
}
#define cleanupResultRowEntry(p) p->initialized = false
#define isRowEntryCompleted(p) (p->complete)
#define isRowEntryInitialized(p) (p->initialized)
@ -129,8 +129,10 @@ typedef struct SValueNode {
double d;
char* p;
} datum;
int64_t typeData;
int8_t unit;
int64_t typeData;
int8_t unit;
timezone_t tz;
void *charsetCxt;
} SValueNode;
typedef struct SLeftValueNode {
@ -159,6 +161,8 @@ typedef struct SOperatorNode {
EOperatorType opType;
SNode* pLeft;
SNode* pRight;
timezone_t tz;
void* charsetCxt;
} SOperatorNode;
typedef struct SLogicConditionNode {
@ -190,7 +194,9 @@ typedef struct SFunctionNode {
bool hasOriginalFunc;
int32_t originalFuncId;
ETrimType trimType;
bool dual; // whether select stmt without from stmt, true for without.
bool dual; // whether select stmt without from stmt, true for without.
timezone_t tz;
void *charsetCxt;
} SFunctionNode;
typedef struct STableNode {
@ -332,6 +338,7 @@ typedef struct SIntervalWindowNode {
SNode* pSliding; // SValueNode
SNode* pFill;
STimeWindow timeRange;
void* timezone;
} SIntervalWindowNode;
typedef struct SEventWindowNode {
@ -401,6 +408,8 @@ typedef struct SCaseWhenNode {
SNode* pCase;
SNode* pElse;
SNodeList* pWhenThenList;
timezone_t tz;
void* charsetCxt;
} SCaseWhenNode;
typedef struct SWindowOffsetNode {
@ -101,6 +101,8 @@ typedef struct SParseContext {
int8_t biMode;
SArray* pSubMetaList;
setQueryFn setQueryFp;
timezone_t timezone;
void *charsetCxt;
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
@ -139,28 +141,28 @@ void qDestroyStmtDataBlock(STableDataCxt* pBlock);
STableMeta* qGetTableMetaInDataBlock(STableDataCxt* pDataBlock);
int32_t qCloneCurrentTbData(STableDataCxt* pDataBlock, SSubmitTbData** pData);
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx);
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx, void *charsetCxt);
int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery);
int32_t qBindStmtStbColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen,
STSchema** pTSchema, SBindInfo* pBindInfos);
int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
STSchema** pTSchema, SBindInfo* pBindInfos, void* charsetCxt);
int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen, void* charsetCxt);
int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen,
int32_t colIdx, int32_t rowNum);
int32_t colIdx, int32_t rowNum, void* charsetCxt);
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBuildStmtStbColFields(void* pBlock, void* boundTags, bool hasCtbName, int32_t* fieldNum,
TAOS_FIELD_STB** fields);
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen, void* charsetCxt);
int32_t qStmtBindParams2(SQuery* pQuery, TAOS_STMT2_BIND* pParams, int32_t colIdx);
int32_t qStmtBindParams2(SQuery* pQuery, TAOS_STMT2_BIND* pParams, int32_t colIdx, void* charsetCxt);
int32_t qBindStmtStbColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen,
STSchema** pTSchema, SBindInfo2* pBindInfos);
int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen);
STSchema** pTSchema, SBindInfo2* pBindInfos, void *charsetCxt);
int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen, void *charsetCxt);
int32_t qBindStmtSingleColValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen,
int32_t colIdx, int32_t rowNum);
int32_t colIdx, int32_t rowNum, void *charsetCxt);
int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen);
TAOS_STMT2_BIND* bind, char* msgBuf, int32_t msgBufLen, void *charsetCxt);
void destroyBoundColumnInfo(void* pBoundInfo);
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
@ -170,13 +172,13 @@ void qDestroyBoundColInfo(void* pInfo);
int32_t smlInitHandle(SQuery** query);
int32_t smlBuildRow(STableDataCxt* pTableCxt);
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* kv, int32_t index);
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* kv, int32_t index, void* charsetCxt);
int32_t smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta, STableDataCxt** cxt);
void clearColValArraySml(SArray* pCols);
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int32_t msgBufLen);
char* msgBuf, int32_t msgBufLen, void* charsetCxt);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* fields,
int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw);

View File

@ -46,6 +46,7 @@ typedef struct SPlanContext {
int64_t allocatorId;
bool destHasPrimaryKey;
bool sourceHasPrimaryKey;
void* timezone;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
@ -194,6 +194,7 @@ typedef struct SBoundColInfo {
int32_t numOfCols;
int32_t numOfBound;
bool hasBoundCols;
bool mixTagsCols;
} SBoundColInfo;
typedef struct STableColsData {
@ -337,7 +338,7 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam
void destroyQueryExecRes(SExecResult* pRes);
int32_t dataConverToStr(char* str, int64_t capacity, int type, void* buf, int32_t bufSize, int32_t* len);
void parseTagDatatoJson(void* p, char** jsonStr);
void parseTagDatatoJson(void* p, char** jsonStr, void *charsetCxt);
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType* pType);
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
@ -105,7 +105,6 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara
int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t timeZoneStrLen();
int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t weekdayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t dayofweekFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
@ -48,13 +48,13 @@ void atomic_store_8(int8_t volatile *ptr, int8_t val);
void atomic_store_16(int16_t volatile *ptr, int16_t val);
void atomic_store_32(int32_t volatile *ptr, int32_t val);
void atomic_store_64(int64_t volatile *ptr, int64_t val);
double atomic_store_double(double volatile *ptr, double val);
void atomic_store_double(double volatile *ptr, double val);
void atomic_store_ptr(void *ptr, void *val);
int8_t atomic_exchange_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_exchange_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_exchange_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_exchange_64(int64_t volatile *ptr, int64_t val);
double atomic_exchange_double(double volatile *ptr, int64_t val);
double atomic_exchange_double(double volatile *ptr, double val);
void *atomic_exchange_ptr(void *ptr, void *val);
int8_t atomic_val_compare_exchange_8(int8_t volatile *ptr, int8_t oldval, int8_t newval);
int16_t atomic_val_compare_exchange_16(int16_t volatile *ptr, int16_t oldval, int16_t newval);
@ -71,7 +71,7 @@ int16_t atomic_fetch_add_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_add_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_add_64(int64_t volatile *ptr, int64_t val);
double atomic_fetch_add_double(double volatile *ptr, double val);
void *atomic_fetch_add_ptr(void *ptr, void *val);
void *atomic_fetch_add_ptr(void *ptr, int64_t val);
int8_t atomic_sub_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_sub_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_sub_fetch_32(int32_t volatile *ptr, int32_t val);
@ -82,37 +82,37 @@ int16_t atomic_fetch_sub_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_sub_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_sub_64(int64_t volatile *ptr, int64_t val);
double atomic_fetch_sub_double(double volatile *ptr, double val);
void *atomic_fetch_sub_ptr(void *ptr, void *val);
void *atomic_fetch_sub_ptr(void *ptr, int64_t val);
int8_t atomic_and_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_and_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_and_fetch_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_and_fetch_64(int64_t volatile *ptr, int64_t val);
void *atomic_and_fetch_ptr(void *ptr, void *val);
void *atomic_and_fetch_ptr(void *ptr, int64_t val);
int8_t atomic_fetch_and_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_fetch_and_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_and_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_and_64(int64_t volatile *ptr, int64_t val);
void *atomic_fetch_and_ptr(void *ptr, void *val);
void *atomic_fetch_and_ptr(void *ptr, int64_t val);
int8_t atomic_or_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_or_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_or_fetch_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_or_fetch_64(int64_t volatile *ptr, int64_t val);
void *atomic_or_fetch_ptr(void *ptr, void *val);
void *atomic_or_fetch_ptr(void *ptr, int64_t val);
int8_t atomic_fetch_or_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_fetch_or_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_or_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_or_64(int64_t volatile *ptr, int64_t val);
void *atomic_fetch_or_ptr(void *ptr, void *val);
void *atomic_fetch_or_ptr(void *ptr, int64_t val);
int8_t atomic_xor_fetch_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_xor_fetch_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_xor_fetch_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_xor_fetch_64(int64_t volatile *ptr, int64_t val);
void *atomic_xor_fetch_ptr(void *ptr, void *val);
void *atomic_xor_fetch_ptr(void *ptr, int64_t val);
int8_t atomic_fetch_xor_8(int8_t volatile *ptr, int8_t val);
int16_t atomic_fetch_xor_16(int16_t volatile *ptr, int16_t val);
int32_t atomic_fetch_xor_32(int32_t volatile *ptr, int32_t val);
int64_t atomic_fetch_xor_64(int64_t volatile *ptr, int64_t val);
void *atomic_fetch_xor_ptr(void *ptr, void *val);
void *atomic_fetch_xor_ptr(void *ptr, int64_t val);
#ifdef _MSC_VER
#define tmemory_barrier(order) MemoryBarrier()
@ -25,10 +25,9 @@ extern "C" {
extern char tsOsName[];
extern char tsTimezoneStr[];
extern enum TdTimezone tsTimezone;
extern char tsCharset[];
extern void *tsCharsetCxt;
extern char tsLocale[];
extern int8_t tsDaylight;
extern bool tsEnableCoreFile;
extern int64_t tsPageSizeKB;
extern int64_t tsOpenMax;
@ -67,8 +66,7 @@ bool osDataSpaceSufficient();
bool osTempSpaceSufficient();
int32_t osSetTimezone(const char *timezone);
void osSetSystemLocale(const char *inLocale, const char *inCharSet);
void osSetProcPath(int32_t argc, char **argv);
void osSetProcPath(int32_t argc, char **argv);
#ifdef __cplusplus
}
@ -28,9 +28,9 @@ extern "C" {
#define setlocale SETLOCALE_FUNC_TAOS_FORBID
#endif
char *taosCharsetReplace(char *charsetstr);
void taosGetSystemLocale(char *outLocale, char *outCharset);
int32_t taosSetSystemLocale(const char *inLocale, const char *inCharSet);
char *taosCharsetReplace(char *charsetstr);
void taosGetSystemLocale(char *outLocale, char *outCharset);
int32_t taosSetSystemLocale(const char *inLocale);
#ifdef __cplusplus
}
@ -26,8 +26,8 @@
#define epoll_create EPOLL_CREATE_FUNC_TAOS_FORBID
#define epoll_ctl EPOLL_CTL_FUNC_TAOS_FORBID
#define epoll_wait EPOLL_WAIT_FUNC_TAOS_FORBID
#define inet_addr INET_ADDR_FUNC_TAOS_FORBID
#define inet_ntoa INET_NTOA_FUNC_TAOS_FORBID
#define inet_addr INET_ADDR_FUNC_TAOS_FORBID
#endif
#if defined(WINDOWS)
@ -55,10 +55,10 @@
#define __BIG_ENDIAN BIG_ENDIAN
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#define __PDP_ENDIAN PDP_ENDIAN
#else
#include <netinet/in.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#if defined(_TD_DARWIN_64)
#include <osEok.h>
@ -162,10 +162,10 @@ int32_t taosGetSocketName(TdSocketPtr pSocket, struct sockaddr *destAddr, int *a
int32_t taosBlockSIGPIPE();
int32_t taosGetIpv4FromFqdn(const char *fqdn, uint32_t *ip);
int32_t taosGetFqdn(char *);
void tinet_ntoa(char *ipstr, uint32_t ip);
uint32_t ip2uint(const char *const ip_addr);
void taosInetNtoa(char *ipstr, uint32_t ip);
uint32_t taosInetAddr(const char *ipstr);
int32_t taosIgnSIGPIPE();
const char *taosInetNtoa(struct in_addr ipInt, char *dstStr, int32_t len);
const char *taosInetNtop(struct in_addr ipInt, char *dstStr, int32_t len);
uint64_t taosHton64(uint64_t val);
uint64_t taosNtoh64(uint64_t val);
@ -22,12 +22,24 @@ extern "C" {
typedef wchar_t TdWchar;
typedef int32_t TdUcs4;
#if !defined(DISALLOW_NCHAR_WITHOUT_ICONV) && defined(DARWIN)
#if !defined(DISALLOW_NCHAR_WITHOUT_ICONV)// && defined(DARWIN)
#include "iconv.h"
#else
typedef void *iconv_t;
typedef void *iconv_t;
#endif
typedef enum { M2C = 0, C2M } ConvType;
typedef enum { M2C = 0, C2M, CM_NUM } ConvType;
typedef struct {
iconv_t conv;
int8_t inUse;
} SConv;
typedef struct {
SConv *gConv[CM_NUM];
int32_t convUsed[CM_NUM];
int32_t gConvMaxNum[CM_NUM];
char charset[TD_CHARSET_LEN];
} SConvInfo;
// If the error is in a third-party library, place this header file under the third-party library header file.
// When you want to use this feature, you should find or add the same function in the following section.
@ -55,36 +67,39 @@ typedef enum { M2C = 0, C2M } ConvType;
#ifdef strndup
#undef strndup
#endif
#define strndup STR_TO_F_FUNC_TAOS_FORBID
#define strndup STR_TO_F_FUNC_TAOS_FORBID
#endif
#define tstrncpy(dst, src, size) \
do { \
(void)strncpy((dst), (src), (size)); \
(dst)[(size) - 1] = 0; \
(dst)[(size)-1] = 0; \
} while (0)
int64_t tsnprintf(char *dst, int64_t size, const char *format, ...);
#define TAOS_STRCPY(_dst, _src) ((void)strcpy(_dst, _src))
#define TAOS_STRCPY(_dst, _src) ((void)strcpy(_dst, _src))
#define TAOS_STRNCPY(_dst, _src, _size) ((void)strncpy(_dst, _src, _size))
#define TAOS_STRCAT(_dst, _src) ((void)strcat(_dst, _src))
#define TAOS_STRNCAT(_dst, _src, len) ((void)strncat(_dst, _src, len))
#define TAOS_STRCAT(_dst, _src) ((void)strcat(_dst, _src))
#define TAOS_STRNCAT(_dst, _src, len) ((void)strncat(_dst, _src, len))
char *tstrdup(const char *src);
int32_t taosUcs4len(TdUcs4 *ucs4);
int32_t taosStr2int64(const char *str, int64_t *val);
int32_t taosStr2int16(const char *str, int16_t *val);
int32_t taosStr2int32(const char *str, int32_t *val);
int32_t taosStr2int16(const char *str, int16_t *val);
int32_t taosStr2int8(const char *str, int8_t *val);
int32_t taosConvInit(void);
void taosConvDestroy();
iconv_t taosAcquireConv(int32_t *idx, ConvType type);
void taosReleaseConv(int32_t idx, iconv_t conv, ConvType type);
int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs);
int32_t taosStr2Uint64(const char *str, uint64_t *val);
int32_t taosStr2Uint32(const char *str, uint32_t *val);
int32_t taosStr2Uint16(const char *str, uint16_t *val);
int32_t taosStr2Uint8(const char *str, uint8_t *val);
iconv_t taosAcquireConv(int32_t *idx, ConvType type, void* charsetCxt);
void taosReleaseConv(int32_t idx, iconv_t conv, ConvType type, void* charsetCxt);
int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, void* charsetCxt);
int32_t taosUcs4ToMbsEx(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, iconv_t conv);
bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len);
bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len, void* charsetCxt);
int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes);
int32_t tasoUcs4Copy(TdUcs4 *target_ucs4, TdUcs4 *source_ucs4, int32_t len_ucs4);
bool taosValidateEncodec(const char *encodec);
@ -112,9 +127,9 @@ float taosStr2Float(const char *str, char **pEnd);
int32_t taosHex2Ascii(const char *z, uint32_t n, void **data, uint32_t *size);
int32_t taosAscii2Hex(const char *z, uint32_t n, void **data, uint32_t *size);
char *taosStrndup(const char *s, int n);
//int32_t taosBin2Ascii(const char *z, uint32_t n, void** data, uint32_t* size);
bool isHex(const char* z, uint32_t n);
bool isValidateHex(const char* z, uint32_t n);
// int32_t taosBin2Ascii(const char *z, uint32_t n, void** data, uint32_t* size);
bool isHex(const char *z, uint32_t n);
bool isValidateHex(const char *z, uint32_t n);
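A sketch of the acquire/release pattern with the new `charsetCxt` parameter (passing NULL is assumed to fall back to the global charset, matching how the client code in this commit calls it):

```c
int32_t idx  = -1;
iconv_t conv = taosAcquireConv(&idx, C2M, NULL);  // NULL: use the global charset
if (conv != (iconv_t)-1) {
  // ... convert with taosUcs4ToMbsEx(ucs4, len, mbs, conv) ...
  taosReleaseConv(idx, conv, C2M, NULL);
}
```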
#ifdef __cplusplus
}
@ -24,6 +24,7 @@ extern "C" {
// When you want to use this feature, you should find or add the same function in the following section.
#ifndef ALLOW_FORBID_FUNC
#define strptime STRPTIME_FUNC_TAOS_FORBID
#define strftime STRFTIME_FUNC_TAOS_FORBID
#define gettimeofday GETTIMEOFDAY_FUNC_TAOS_FORBID
#define localtime LOCALTIME_FUNC_TAOS_FORBID
#define localtime_s LOCALTIMES_FUNC_TAOS_FORBID
@ -42,6 +43,7 @@ extern "C" {
#define MILLISECOND_PER_SECOND ((int64_t)1000LL)
#endif
#include "osTimezone.h"
#define MILLISECOND_PER_MINUTE (MILLISECOND_PER_SECOND * 60)
#define MILLISECOND_PER_HOUR (MILLISECOND_PER_MINUTE * 60)
#define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24)
@ -91,13 +93,17 @@ static FORCE_INLINE int64_t taosGetMonoTimestampMs() {
}
char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm);
struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf, int32_t bufSize);
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
size_t taosStrfTime(char *s, size_t maxsize, char const *format, struct tm const *t);
struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf, int32_t bufSize, timezone_t tz);
struct tm *taosGmTimeR(const time_t *timep, struct tm *result);
time_t taosTimeGm(struct tm *tmp);
int32_t taosTime(time_t *t);
time_t taosMktime(struct tm *timep);
time_t taosMktime(struct tm *timep, timezone_t tz);
int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour,
const uint32_t min, const uint32_t sec, int64_t time_zone);
//struct tm *taosLocalTimeRz(timezone_t state, const time_t *timep, struct tm *result);
//time_t taosMktimeRz(timezone_t state, struct tm *timep);
#ifdef __cplusplus
}
#endif
@ -20,43 +20,29 @@
extern "C" {
#endif
// If the error is in a third-party library, place this header file under the third-party library header file.
// When you want to use this feature, you should find or add the same function in the following section.
#ifndef ALLOW_FORBID_FUNC
#define tzset TZSET_FUNC_TAOS_FORBID
#define TdEastZone8 8*60*60
#define TZ_UNKNOWN "n/a"
extern void* pTimezoneNameMap;
#ifdef WINDOWS
typedef void *timezone_t;
#else
typedef struct state *timezone_t;
struct tm* localtime_rz(timezone_t , time_t const *, struct tm *);
time_t mktime_z(timezone_t, struct tm *);
timezone_t tzalloc(char const *);
void tzfree(timezone_t);
void getTimezoneStr(char *tz);
#endif
enum TdTimezone {
TdWestZone12 = -12,
TdWestZone11,
TdWestZone10,
TdWestZone9,
TdWestZone8,
TdWestZone7,
TdWestZone6,
TdWestZone5,
TdWestZone4,
TdWestZone3,
TdWestZone2,
TdWestZone1,
TdZeroZone,
TdEastZone1,
TdEastZone2,
TdEastZone3,
TdEastZone4,
TdEastZone5,
TdEastZone6,
TdEastZone7,
TdEastZone8,
TdEastZone9,
TdEastZone10,
TdEastZone11,
TdEastZone12
};
int32_t taosGetSystemTimezone(char *outTimezone, enum TdTimezone *tsTimezone);
int32_t taosSetSystemTimezone(const char *inTimezone, char *outTimezone, int8_t *outDaylight, enum TdTimezone *tsTimezone);
int32_t taosGetLocalTimezoneOffset();
int32_t taosGetSystemTimezone(char *outTimezone);
int32_t taosSetGlobalTimezone(const char *tz);
int32_t taosFormatTimezoneStr(time_t t, const char* tzStr, timezone_t sp, char *outTimezoneStr);
#ifdef __cplusplus
}
#endif
@ -210,6 +210,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_TSC_COMPRESS_LEVEL_ERROR TAOS_DEF_ERROR_CODE(0, 0x0234)
#define TSDB_CODE_TSC_FAIL_GENERATE_JSON TAOS_DEF_ERROR_CODE(0, 0x0235)
#define TSDB_CODE_TSC_STMT_BIND_NUMBER_ERROR TAOS_DEF_ERROR_CODE(0, 0x0236)
#define TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS TAOS_DEF_ERROR_CODE(0, 0x0237)
#define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x02FF)
// mnode-common
@ -481,7 +482,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_MNODE_STOPPED TAOS_DEF_ERROR_CODE(0, 0x042A)
#define TSDB_CODE_DNODE_INVALID_COMPACT_TASKS TAOS_DEF_ERROR_CODE(0, 0x042B)
// anode
// anode
#define TSDB_CODE_MND_ANODE_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0430)
#define TSDB_CODE_MND_ANODE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0431)
#define TSDB_CODE_MND_ANODE_TOO_LONG_URL TAOS_DEF_ERROR_CODE(0, 0x0432)
@ -565,6 +566,7 @@ int32_t taosGetErrSize();
#define TSDB_CODE_VND_ARB_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0537) // internal
#define TSDB_CODE_VND_WRITE_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0538) // internal
#define TSDB_CODE_VND_TTL_FLUSH_INCOMPLETION TAOS_DEF_ERROR_CODE(0, 0x0539) // internal
#define TSDB_CODE_VND_ALREADY_EXIST_BUT_NOT_MATCH TAOS_DEF_ERROR_CODE(0, 0x0540)
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
@ -34,7 +34,8 @@ typedef enum {
CFG_STYPE_APOLLO_URL,
CFG_STYPE_ARG_LIST,
CFG_STYPE_TAOS_OPTIONS,
CFG_STYPE_ALTER_CMD,
CFG_STYPE_ALTER_CLIENT_CMD,
CFG_STYPE_ALTER_SERVER_CMD,
} ECfgSrcType;
typedef enum {
include/util/tconv.h (new file, 41 lines)
@ -0,0 +1,41 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TCONV_H
#define TDENGINE_TCONV_H
#ifdef __cplusplus
extern "C" {
#endif
//#include "osString.h"
//
//bool taosValidateEncodec(const char *encodec);
//int32_t taosUcs4len(TdUcs4 *ucs4);
void* taosConvInit(const char* charset);
void taosConvDestroy();
//iconv_t taosAcquireConv(int32_t *idx, ConvType type, void* charsetCxt);
//void taosReleaseConv(int32_t idx, iconv_t conv, ConvType type, void* charsetCxt);
//int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, void* charsetCxt);
//int32_t taosUcs4ToMbsEx(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs, iconv_t conv);
//bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len, void* charsetCxt);
//int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes);
//int32_t tasoUcs4Copy(TdUcs4 *target_ucs4, TdUcs4 *source_ucs4, int32_t len_ucs4);
#ifdef __cplusplus
}
#endif
#endif // TDENGINE_TCONV_H
@ -295,6 +295,8 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_JSON_KEY_LEN 256
#define TSDB_AUTH_LEN 16
#define TSDB_PASSWORD_MIN_LEN 8
#define TSDB_PASSWORD_MAX_LEN 16
#define TSDB_PASSWORD_LEN 32
#define TSDB_USET_PASSWORD_LEN 129
#define TSDB_VERSION_LEN 32
@ -413,21 +413,26 @@ static FORCE_INLINE int32_t tDecodeBinary(SDecoder* pCoder, uint8_t** val, uint3
static FORCE_INLINE int32_t tDecodeCStrAndLen(SDecoder* pCoder, char** val, uint32_t* len) {
TAOS_CHECK_RETURN(tDecodeBinary(pCoder, (uint8_t**)val, len));
(*len) -= 1;
if (*len > 0) {  // note: *len may be 0
(*len) -= 1;
}
return 0;
}
static FORCE_INLINE int32_t tDecodeCStr(SDecoder* pCoder, char** val) {
uint32_t len;
uint32_t len = 0;
return tDecodeCStrAndLen(pCoder, val, &len);
}
static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
char* pStr;
uint32_t len;
char* pStr = NULL;
uint32_t len = 0;
TAOS_CHECK_RETURN(tDecodeCStrAndLen(pCoder, &pStr, &len));
TAOS_MEMCPY(val, pStr, len + 1);
if (len < pCoder->size) {
TAOS_MEMCPY(val, pStr, len + 1);
}
return 0;
}
@ -479,12 +484,14 @@ static FORCE_INLINE int32_t tDecodeBinaryAlloc32(SDecoder* pCoder, void** val, u
static FORCE_INLINE int32_t tDecodeCStrAndLenAlloc(SDecoder* pCoder, char** val, uint64_t* len) {
TAOS_CHECK_RETURN(tDecodeBinaryAlloc(pCoder, (void**)val, len));
(*len) -= 1;
if (*len > 0){
(*len) -= 1;
}
return 0;
}
static FORCE_INLINE int32_t tDecodeCStrAlloc(SDecoder* pCoder, char** val) {
uint64_t len;
uint64_t len = 0;
return tDecodeCStrAndLenAlloc(pCoder, val, &len);
}
@ -48,11 +48,6 @@ int32_t taosHexStrToByteArray(char hexstr[], char bytes[]);
int32_t tintToHex(uint64_t val, char hex[]);
int32_t titoa(uint64_t val, size_t radix, char str[]);
char *taosIpStr(uint32_t ipInt);
uint32_t ip2uint(const char *const ip_addr);
void taosIp2String(uint32_t ip, char *str);
void taosIpPort2String(uint32_t ip, uint16_t port, char *str);
void *tmemmem(const char *haystack, int hlen, const char *needle, int nlen);
int32_t parseCfgReal(const char *str, float *out);
@ -232,6 +227,11 @@ static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen,
#define TAOS_UNUSED(expr) (void)(expr)
bool taosIsBigChar(char c);
bool taosIsSmallChar(char c);
bool taosIsNumberChar(char c);
bool taosIsSpecialChar(char c);
#ifdef __cplusplus
}
#endif
@ -1,59 +1,80 @@
import subprocess
import re
# run git fetch and capture its output
def git_fetch():
result = subprocess.run(['git', 'fetch'], capture_output=True, text=True)
return result
# parse branch names
def git_prune():
# git remote prune origin
print("git remote prune origin")
result = subprocess.run(['git', 'remote', 'prune', 'origin'], capture_output=True, text=True)
return result
def parse_branch_name_type1(error_output):
    # use a regular expression to match the branch name before 'is at'
# error: cannot lock ref 'refs/remotes/origin/fix/3.0/TD-32817': is at 7af5 but expected eaba
# match the branch name before is at with a regular expression
match = re.search(r"error: cannot lock ref '(refs/remotes/origin/[^']+)': is at", error_output)
if match:
return match.group(1)
return None
# parse the branch name in the second type of error
def parse_branch_name_type2(error_output):
    # use a regular expression to match the first quoted branch name before 'exists'
# match the branch name before exists; cannot create with a regular expression
match = re.search(r"'(refs/remotes/origin/[^']+)' exists;", error_output)
if match:
return match.group(1)
return None
# run the git update-ref -d command
# parse branch name from error output of git remote prune origin
def parse_branch_name_type3(error_output):
# match the branch name before the first single quote before 'Unable to' with a regular expression
# git error: could not delete references: cannot lock ref 'refs/remotes/origin/test/3.0/TS-4893': Unable to create 'D:/workspace/main/TDinternal/community/.git/refs/remotes/origin/test/3.0/TS-4893.lock': File exists
match = re.search(r"references: cannot lock ref '(refs/remotes/origin/[^']+)': Unable to", error_output)
if match:
return match.group(1)
return None
# execute git update-ref -d <branch_name> to delete the ref
def git_update_ref(branch_name):
if branch_name:
subprocess.run(['git', 'update-ref', '-d', f'{branch_name}'], check=True)
# parse the error type and perform the corresponding repair operation
# parse error type and execute corresponding repair operation
def handle_error(error_output):
    # error type 1: the commit ID of the local ref does not match the remote
if "is at" in error_output and "but expected" in error_output:
branch_name = parse_branch_name_type1(error_output)
if branch_name:
print(f"Detected error type 1, attempting to delete ref for branch: {branch_name}")
git_update_ref(branch_name)
else:
print("Error parsing branch name for type 1.")
    # error type 2: a ref with the same name already exists locally when creating a new remote ref
elif "exists; cannot create" in error_output:
branch_name = parse_branch_name_type2(error_output)
if branch_name:
print(f"Detected error type 2, attempting to delete ref for branch: {branch_name}")
git_update_ref(branch_name)
else:
print("Error parsing branch name for type 2.")
error_types = [
("is at", "but expected", parse_branch_name_type1, "type 1"),
("exists; cannot create", None, parse_branch_name_type2, "type 2"),
("Unable to create", "File exists", parse_branch_name_type3, "type 3")
]
for error_type in error_types:
if error_type[0] in error_output and (error_type[1] is None or error_type[1] in error_output):
branch_name = error_type[2](error_output)
if branch_name:
print(f"Detected error {error_type[3]}, attempting to delete ref for branch: {branch_name}")
git_update_ref(branch_name)
else:
print(f"Error parsing branch name for {error_type[3]}.")
break
# main entry point
def main():
fetch_result = git_fetch()
    if fetch_result.returncode != 0:  # if the git fetch command failed
if fetch_result.returncode != 0:
error_output = fetch_result.stderr
handle_error(error_output)
else:
print("Git fetch successful.")
prune_result = git_prune()
print(prune_result.returncode)
if prune_result.returncode != 0:
error_output = prune_result.stderr
print(error_output)
handle_error(error_output)
else:
print("Git prune successful.")
if __name__ == "__main__":
main()
@ -52,6 +52,7 @@ else
installDir="/usr/local/taos"
fi
fi
install_main_dir=${installDir}
bin_dir="${installDir}/bin"
cfg_dir="${installDir}/cfg"
@ -153,6 +153,13 @@ typedef struct {
__taos_notify_fn_t fp;
} SWhiteListInfo;
typedef struct {
timezone_t timezone;
void *charsetCxt;
char userApp[TSDB_APP_NAME_LEN];
uint32_t userIp;
}SOptionInfo;
typedef struct STscObj {
char user[TSDB_USER_LEN];
char pass[TSDB_PASSWORD_LEN];
@ -175,6 +182,7 @@ typedef struct STscObj {
SPassInfo passInfo;
SWhiteListInfo whiteListInfo;
STscNotifyInfo userDroppedInfo;
SOptionInfo optionInfo;
} STscObj;
typedef struct STscDbg {
@ -211,6 +219,7 @@ typedef struct SReqResultInfo {
int32_t precision;
int32_t payloadLen;
char* convertJson;
void* charsetCxt;
} SReqResultInfo;
typedef struct SRequestSendRecvBody {
@ -338,6 +347,7 @@ extern int32_t clientReqRefPool;
extern int32_t clientConnRefPool;
extern int32_t timestampDeltaLimit;
extern int64_t lastClusterId;
extern SHashObj* pTimezoneMap;
__async_send_cb_fn_t getMsgRspHandle(int32_t msgType);
@ -437,6 +447,9 @@ void stopAllQueries(SRequestObj* pRequest);
void doRequestCallback(SRequestObj* pRequest, int32_t code);
void freeQueryParam(SSyncQueryParam* param);
int32_t tzInit();
void tzCleanup();
#ifdef TD_ENTERPRISE
int32_t clientParseSqlImpl(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effeciveUser,
SParseSqlRes* pRes);
@ -36,6 +36,7 @@
#include "tsched.h"
#include "ttime.h"
#include "tversion.h"
#include "tconv.h"
#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"
@ -74,6 +75,7 @@ int64_t lastClusterId = 0;
int32_t clientReqRefPool = -1;
int32_t clientConnRefPool = -1;
int32_t clientStop = -1;
SHashObj* pTimezoneMap = NULL;
int32_t timestampDeltaLimit = 900; // s
@ -559,6 +561,7 @@ int32_t createRequest(uint64_t connId, int32_t type, int64_t reqid, SRequestObj
(*pRequest)->metric.start = taosGetTimestampUs();
(*pRequest)->body.resInfo.convertUcs4 = true; // convert ucs4 by default
(*pRequest)->body.resInfo.charsetCxt = pTscObj->optionInfo.charsetCxt;
(*pRequest)->type = type;
(*pRequest)->allocatorRefId = -1;
@ -956,25 +959,31 @@ void taos_init_imp(void) {
return;
}
taosHashSetFreeFp(appInfo.pInstMap, destroyAppInst);
deltaToUtcInitOnce();
const char *logName = CUS_PROMPT "slog";
ENV_ERR_RET(taosInitLogOutput(&logName), "failed to init log output");
if (taosCreateLog(logName, 10, configDir, NULL, NULL, NULL, NULL, 1) != 0) {
(void)printf(" WARING: Create %s failed:%s. configDir=%s\n", logName, strerror(errno), configDir);
tscInitRes = -1;
tscInitRes = terrno;
return;
}
ENV_ERR_RET(taosInitCfg(configDir, NULL, NULL, NULL, NULL, 1), "failed to init cfg");
initQueryModuleMsgHandle();
ENV_ERR_RET(taosConvInit(), "failed to init conv");
if ((tsCharsetCxt = taosConvInit(tsCharset)) == NULL){
tscInitRes = terrno;
tscError("failed to init conv");
return;
}
#ifndef WINDOWS
ENV_ERR_RET(tzInit(), "failed to init timezone");
#endif
ENV_ERR_RET(monitorInit(), "failed to init monitor");
ENV_ERR_RET(rpcInit(), "failed to init rpc");
if (InitRegexCache() != 0) {
tscInitRes = -1;
tscInitRes = terrno;
(void)printf("failed to init regex cache\n");
return;
}
@ -1194,6 +1194,9 @@ int32_t hbGatherAllInfo(SAppHbMgr *pAppHbMgr, SClientHbBatchReq **pBatchReq) {
continue;
}
tstrncpy(pOneReq->userApp, pTscObj->optionInfo.userApp, sizeof(pOneReq->userApp));
pOneReq->userIp = pTscObj->optionInfo.userIp;
pOneReq = taosArrayPush((*pBatchReq)->reqs, pOneReq);
if (NULL == pOneReq) {
releaseTscObj(connKey->tscRid);
@ -300,7 +300,9 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
.svrVer = pTscObj->sVer,
.nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes),
.isStmtBind = pRequest->isStmtBind,
.setQueryFp = setQueryRequest};
.setQueryFp = setQueryRequest,
.timezone = pTscObj->optionInfo.timezone,
.charsetCxt = pTscObj->optionInfo.charsetCxt,};
cxt.mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &cxt.pCatalog);
@ -331,7 +333,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
SRetrieveTableRsp* pRsp = NULL;
int8_t biMode = atomic_load_8(&pRequest->pTscObj->biMode);
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, biMode);
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, biMode, pRequest->pTscObj->optionInfo.charsetCxt);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, pRequest->body.resInfo.convertUcs4);
}
@ -369,7 +371,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
}
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp,
atomic_load_8(&pRequest->pTscObj->biMode));
atomic_load_8(&pRequest->pTscObj->biMode), pRequest->pTscObj->optionInfo.charsetCxt);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, pRequest->body.resInfo.convertUcs4);
}
@ -507,6 +509,7 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user,
.timezone = pRequest->pTscObj->optionInfo.timezone,
.sysInfo = pRequest->pTscObj->sysInfo};
return qCreateQueryPlan(&cxt, pPlan, pNodeList);
@ -1361,6 +1364,7 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user,
.sysInfo = pRequest->pTscObj->sysInfo,
.timezone = pRequest->pTscObj->optionInfo.timezone,
.allocatorId = pRequest->allocatorRefId};
if (TSDB_CODE_SUCCESS == code) {
code = qCreateQueryPlan(&cxt, &pDag, pMnodeList);
@ -2086,7 +2090,7 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) {
static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) {
int32_t idx = -1;
iconv_t conv = taosAcquireConv(&idx, C2M);
iconv_t conv = taosAcquireConv(&idx, C2M, pResultInfo->charsetCxt);
if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR;
for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) {
@ -2096,7 +2100,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) {
if (type == TSDB_DATA_TYPE_NCHAR && colLength[i] > 0) {
char* p = taosMemoryRealloc(pResultInfo->convertBuf[i], colLength[i]);
if (p == NULL) {
taosReleaseConv(idx, conv, C2M);
taosReleaseConv(idx, conv, C2M, pResultInfo->charsetCxt);
return terrno;
}
@ -2113,7 +2117,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) {
"doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + "
"colLength[i]):%p",
len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i]));
taosReleaseConv(idx, conv, C2M);
taosReleaseConv(idx, conv, C2M, pResultInfo->charsetCxt);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
@ -2127,7 +2131,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) {
pResultInfo->row[i] = pResultInfo->pCol[i].pData;
}
}
taosReleaseConv(idx, conv, C2M);
taosReleaseConv(idx, conv, C2M, pResultInfo->charsetCxt);
return TSDB_CODE_SUCCESS;
}
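
The hunk above threads a per-result-set charset context through every taosAcquireConv/taosReleaseConv pair so that concurrent result sets can convert under different encodings. A minimal, self-contained analog using plain POSIX iconv instead of TDengine's pooled converters (the UTF-8 fallback stands in for tsCharset and is an assumption):

```c
#include <iconv.h>
#include <stddef.h>

/* Convert UCS-4LE bytes into the connection's charset, falling back to a
 * process-wide default when no per-connection context is set; this mirrors
 * the `charsetCxt != NULL ? ... : tsCharset` shape used above. */
static ptrdiff_t convertUcs4(const char *in, size_t inlen,
                             char *out, size_t outlen,
                             const char *connCharset /* may be NULL */) {
    const char *to = (connCharset != NULL) ? connCharset : "UTF-8";
    iconv_t cd = iconv_open(to, "UCS-4LE");
    if (cd == (iconv_t)-1) return -1;  /* unknown encoding pair */

    char  *pin = (char *)in, *pout = out;
    size_t rc  = iconv(cd, &pin, &inlen, &pout, &outlen);
    iconv_close(cd);                   /* always release, like taosReleaseConv */
    return (rc == (size_t)-1) ? -1 : (pout - out);
}
```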
@ -2292,7 +2296,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo) {
varDataSetLen(dst, strlen(varDataVal(dst)));
} else if (tTagIsJson(data)) {
char* jsonString = NULL;
parseTagDatatoJson(data, &jsonString);
parseTagDatatoJson(data, &jsonString, pResultInfo->charsetCxt);
if (jsonString == NULL) {
tscError("doConvertJson error: parseTagDatatoJson failed");
return terrno;
@ -2302,9 +2306,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo) {
} else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value"
*(char*)varDataVal(dst) = '\"';
int32_t length = taosUcs4ToMbs((TdUcs4*)varDataVal(jsonInnerData), varDataLen(jsonInnerData),
varDataVal(dst) + CHAR_BYTES);
varDataVal(dst) + CHAR_BYTES, pResultInfo->charsetCxt);
if (length <= 0) {
tscError("charset:%s to %s. convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset);
tscError("charset:%s to %s. convert failed.", DEFAULT_UNICODE_ENCODEC,
pResultInfo->charsetCxt != NULL ? ((SConvInfo *)(pResultInfo->charsetCxt))->charset : tsCharset);
length = 0;
}
varDataSetLen(dst, length + CHAR_BYTES * 2);

View File

@ -30,6 +30,7 @@
#include "tref.h"
#include "trpc.h"
#include "version.h"
#include "tconv.h"
#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0
@ -38,11 +39,13 @@ static int32_t sentinel = TSC_VAR_NOT_RELEASE;
static int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt, SSqlCallbackWrapper *pWrapper);
int taos_options(TSDB_OPTION option, const void *arg, ...) {
if (arg == NULL) {
return TSDB_CODE_INVALID_PARA;
}
static int32_t lock = 0;
for (int i = 1; atomic_val_compare_exchange_32(&lock, 0, 1) != 0; ++i) {
if (i % 1000 == 0) {
tscInfo("haven't acquire lock after spin %d times.", i);
(void)sched_yield();
}
}
@ -51,6 +54,167 @@ int taos_options(TSDB_OPTION option, const void *arg, ...) {
atomic_store_32(&lock, 0);
return ret;
}
#ifndef WINDOWS
static void freeTz(void *p){
timezone_t tz = *(timezone_t *)p;
tzfree(tz);
}
int32_t tzInit(){
pTimezoneMap = taosHashInit(0, MurmurHash3_32, false, HASH_ENTRY_LOCK);
if (pTimezoneMap == NULL) {
return terrno;
}
taosHashSetFreeFp(pTimezoneMap, freeTz);
pTimezoneNameMap = taosHashInit(0, taosIntHash_64, false, HASH_ENTRY_LOCK);
if (pTimezoneNameMap == NULL) {
return terrno;
}
return 0;
}
void tzCleanup(){
taosHashCleanup(pTimezoneMap);
taosHashCleanup(pTimezoneNameMap);
}
static timezone_t setConnnectionTz(const char* val){
timezone_t tz = NULL;
timezone_t *tmp = taosHashGet(pTimezoneMap, val, strlen(val));
if (tmp != NULL && *tmp != NULL){
tz = *tmp;
goto END;
}
tscDebug("set timezone to %s", val);
tz = tzalloc(val);
if (tz == NULL) {
tscWarn("%s unknown timezone %s change to UTC", __func__, val);
tz = tzalloc("UTC");
if (tz == NULL) {
tscError("%s set timezone UTC error", __func__);
terrno = TAOS_SYSTEM_ERROR(errno);
goto END;
}
}
int32_t code = taosHashPut(pTimezoneMap, val, strlen(val), &tz, sizeof(timezone_t));
if (code != 0){
tscError("%s put timezone to tz map error:%d", __func__, code);
tzfree(tz);
tz = NULL;
goto END;
}
time_t tx1 = taosGetTimestampSec();
char output[TD_TIMEZONE_LEN] = {0};
taosFormatTimezoneStr(tx1, val, tz, output);
code = taosHashPut(pTimezoneNameMap, &tz, sizeof(timezone_t), output, strlen(output) + 1);
if (code != 0){
tscError("failed to put timezone %s to map", val);
}
END:
return tz;
}
#endif
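
The tzalloc/tzfree pair above comes from the NetBSD-style reentrant API in the eggert/tz sources this merge vendors into contrib. A minimal sketch of resolving and using one handle (assumes linking against those sources; not part of ISO C):

```c
#include <stdio.h>
#include <time.h>  /* timezone_t, tzalloc, localtime_rz, tzfree via the tz library */

int main(void) {
    timezone_t tz = tzalloc("Asia/Shanghai");
    if (tz == NULL) tz = tzalloc("UTC");       /* same fallback as setConnnectionTz */
    if (tz == NULL) return 1;

    time_t    now = time(NULL);
    struct tm tmv;
    if (localtime_rz(tz, &now, &tmv) != NULL)  /* reentrant: no global TZ state */
        printf("%04d-%02d-%02d %02d:%02d\n", tmv.tm_year + 1900, tmv.tm_mon + 1,
               tmv.tm_mday, tmv.tm_hour, tmv.tm_min);

    tzfree(tz);  /* freeTz() above does the same for each cached entry */
    return 0;
}
```

Caching the handle per name, as setConnnectionTz does, avoids re-reading the zoneinfo file for every connection that selects the same timezone.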
static int32_t setConnectionOption(TAOS *taos, TSDB_OPTION_CONNECTION option, const char* val){
if (taos == NULL) {
return TSDB_CODE_INVALID_PARA;
}
#ifdef WINDOWS
if (option == TSDB_OPTION_CONNECTION_TIMEZONE){
return TSDB_CODE_NOT_SUPPORTTED_IN_WINDOWS;
}
#endif
if (option < TSDB_OPTION_CONNECTION_CLEAR || option >= TSDB_MAX_OPTIONS_CONNECTION){
return TSDB_CODE_INVALID_PARA;
}
int32_t code = taos_init();
// initialize global config
if (code != 0) {
return code;
}
STscObj *pObj = acquireTscObj(*(int64_t *)taos);
if (NULL == pObj) {
tscError("invalid parameter for %s", __func__);
return terrno;
}
if (option == TSDB_OPTION_CONNECTION_CLEAR){
val = NULL;
}
if (option == TSDB_OPTION_CONNECTION_CHARSET || option == TSDB_OPTION_CONNECTION_CLEAR) {
if (val != NULL) {
if (!taosValidateEncodec(val)) {
code = terrno;
goto END;
}
void *tmp = taosConvInit(val);
if (tmp == NULL) {
code = terrno;
goto END;
}
pObj->optionInfo.charsetCxt = tmp;
}else{
pObj->optionInfo.charsetCxt = NULL;
}
}
if (option == TSDB_OPTION_CONNECTION_TIMEZONE || option == TSDB_OPTION_CONNECTION_CLEAR) {
#ifndef WINDOWS
if (val != NULL){
if (val[0] == 0){
val = "UTC";
}
timezone_t tz = setConnnectionTz(val);
if (tz == NULL){
code = terrno;
goto END;
}
pObj->optionInfo.timezone = tz;
} else {
pObj->optionInfo.timezone = NULL;
}
#endif
}
if (option == TSDB_OPTION_CONNECTION_USER_APP || option == TSDB_OPTION_CONNECTION_CLEAR) {
if (val != NULL) {
tstrncpy(pObj->optionInfo.userApp, val, sizeof(pObj->optionInfo.userApp));
} else {
pObj->optionInfo.userApp[0] = 0;
}
}
if (option == TSDB_OPTION_CONNECTION_USER_IP || option == TSDB_OPTION_CONNECTION_CLEAR) {
if (val != NULL) {
pObj->optionInfo.userIp = taosInetAddr(val);
if (pObj->optionInfo.userIp == INADDR_NONE){
code = TSDB_CODE_INVALID_PARA;
goto END;
}
} else {
pObj->optionInfo.userIp = INADDR_NONE;
}
}
END:
releaseTscObj(*(int64_t *)taos);
return code;
}
int taos_options_connection(TAOS *taos, TSDB_OPTION_CONNECTION option, const void *arg, ...){
return setConnectionOption(taos, option, (const char *)arg);
}
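
A hedged usage sketch of the new per-connection entry point. The connection parameters are placeholders; the option enums are the ones this diff introduces, and the timezone option is rejected on Windows by the check above:

```c
#include "taos.h"
#include <stdio.h>

int main(void) {
    TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 0);
    if (conn == NULL) return 1;

    /* Per-connection timezone; an empty string is coerced to "UTC" above. */
    int code = taos_options_connection(conn, TSDB_OPTION_CONNECTION_TIMEZONE, "UTC");
    if (code != 0) printf("set timezone failed: %d\n", code);

    /* TSDB_OPTION_CONNECTION_CLEAR resets charset, timezone, userApp and userIp. */
    code = taos_options_connection(conn, TSDB_OPTION_CONNECTION_CLEAR, NULL);

    taos_close(conn);
    return 0;
}
```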
// this function may be called by user or system, or by both simultaneously.
void taos_cleanup(void) {
tscDebug("start to cleanup client environment");
@ -73,6 +237,9 @@ void taos_cleanup(void) {
tscWarn("failed to cleanup task queue");
}
#ifndef WINDOWS
tzCleanup();
#endif
tmqMgmtClose();
int32_t id = clientReqRefPool;
@ -1244,7 +1411,9 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt, SS
.allocatorId = pRequest->allocatorRefId,
.parseSqlFp = clientParseSql,
.parseSqlParam = pWrapper,
.setQueryFp = setQueryRequest};
.setQueryFp = setQueryRequest,
.timezone = pTscObj->optionInfo.timezone,
.charsetCxt = pTscObj->optionInfo.charsetCxt};
int8_t biMode = atomic_load_8(&((STscObj *)pTscObj)->biMode);
(*pCxt)->biMode = biMode;
return TSDB_CODE_SUCCESS;

View File

@ -52,6 +52,22 @@
#define TMQ_META_VERSION "1.0"
static bool tmqAddJsonObjectItem(cJSON *object, const char *string, cJSON *item){
bool ret = cJSON_AddItemToObject(object, string, item);
if (!ret){
cJSON_Delete(item);
}
return ret;
}
static bool tmqAddJsonArrayItem(cJSON *array, cJSON *item){
bool ret = cJSON_AddItemToArray(array, item);
if (!ret){
cJSON_Delete(item);
}
return ret;
}
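
These wrappers exist because cJSON_AddItemToObject and cJSON_AddItemToArray take ownership of `item` only on success; on failure the caller still owns it, and the pre-patch calls leaked it. A stand-alone illustration of the same delete-on-failure discipline against plain cJSON:

```c
#include <cJSON.h>
#include <stdio.h>

/* Build {"type":"create"} without leaking on any failure path. */
int main(void) {
    cJSON *json = cJSON_CreateObject();
    if (json == NULL) return 1;

    cJSON *type = cJSON_CreateString("create");
    if (type == NULL || !cJSON_AddItemToObject(json, "type", type)) {
        cJSON_Delete(type);  /* safe on NULL; not owned by json yet */
        cJSON_Delete(json);
        return 1;
    }

    char *s = cJSON_PrintUnformatted(json);
    if (s != NULL) { puts(s); cJSON_free(s); }
    cJSON_Delete(json);      /* frees "type" as well: ownership transferred */
    return 0;
}
```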
static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen);
static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); }
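
The RAW_NULL_CHECK, RAW_FALSE_CHECK, and RAW_RETURN_CHECK macros used throughout this file are not shown in the diff. A plausible reconstruction, assuming each records an error code and jumps to the function's local `end:` label (hypothetical; the real definitions live elsewhere in the tree):

```c
/* Hypothetical reconstructions of the checking macros used below. */
#define RAW_NULL_CHECK(p)                                   \
  do {                                                      \
    if ((p) == NULL) { code = terrno; goto end; }           \
  } while (0)

#define RAW_FALSE_CHECK(b)                                  \
  do {                                                      \
    if (!(b)) { code = TSDB_CODE_OUT_OF_MEMORY; goto end; } \
  } while (0)

#define RAW_RETURN_CHECK(expr)                              \
  do {                                                      \
    code = (expr);                                          \
    if (code != TSDB_CODE_SUCCESS) goto end;                \
  } while (0)
```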
static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t,
@ -68,41 +84,43 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche
cJSON* type = cJSON_CreateString("create");
RAW_NULL_CHECK(type);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
RAW_NULL_CHECK(tableType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
cJSON* tableName = cJSON_CreateString(name);
RAW_NULL_CHECK(tableName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
cJSON* columns = cJSON_CreateArray();
RAW_NULL_CHECK(columns);
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "columns", columns));
for (int i = 0; i < schemaRow->nCols; i++) {
cJSON* column = cJSON_CreateObject();
RAW_NULL_CHECK(column);
RAW_FALSE_CHECK(tmqAddJsonArrayItem(columns, column));
SSchema* s = schemaRow->pSchema + i;
cJSON* cname = cJSON_CreateString(s->name);
RAW_NULL_CHECK(cname);
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "name", cname));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "name", cname));
cJSON* ctype = cJSON_CreateNumber(s->type);
RAW_NULL_CHECK(ctype);
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "type", ctype));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "type", ctype));
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "length", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "length", cbytes));
} else if (s->type == TSDB_DATA_TYPE_NCHAR) {
int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "length", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "length", cbytes));
}
cJSON* isPk = cJSON_CreateBool(s->flags & COL_IS_KEY);
RAW_NULL_CHECK(isPk);
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "isPrimarykey", isPk));
RAW_FALSE_CHECK(cJSON_AddItemToArray(columns, column));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "isPrimarykey", isPk));
if (pColCmprRow == NULL) {
continue;
@ -124,44 +142,44 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche
cJSON* encodeJson = cJSON_CreateString(encode);
RAW_NULL_CHECK(encodeJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "encode", encodeJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "encode", encodeJson));
cJSON* compressJson = cJSON_CreateString(compress);
RAW_NULL_CHECK(compressJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "compress", compressJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "compress", compressJson));
cJSON* levelJson = cJSON_CreateString(level);
RAW_NULL_CHECK(levelJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(column, "level", levelJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(column, "level", levelJson));
}
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "columns", columns));
cJSON* tags = cJSON_CreateArray();
RAW_NULL_CHECK(tags);
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags));
for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
cJSON* tag = cJSON_CreateObject();
RAW_NULL_CHECK(tag);
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag));
SSchema* s = schemaTag->pSchema + i;
cJSON* tname = cJSON_CreateString(s->name);
RAW_NULL_CHECK(tname);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname));
cJSON* ttype = cJSON_CreateNumber(s->type);
RAW_NULL_CHECK(ttype);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype));
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "length", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "length", cbytes));
} else if (s->type == TSDB_DATA_TYPE_NCHAR) {
int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "length", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "length", cbytes));
}
RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag));
}
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags));
end:
*pJson = json;
@ -175,7 +193,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) {
RAW_NULL_CHECK(encodeStr);
cJSON* encodeJson = cJSON_CreateString(encodeStr);
RAW_NULL_CHECK(encodeJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "encode", encodeJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "encode", encodeJson));
return code;
}
uint8_t compress = COMPRESS_L2_TYPE_U32(para);
@ -184,7 +202,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) {
RAW_NULL_CHECK(compressStr);
cJSON* compressJson = cJSON_CreateString(compressStr);
RAW_NULL_CHECK(compressJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "compress", compressJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "compress", compressJson));
return code;
}
uint8_t level = COMPRESS_L2_TYPE_LEVEL_U32(para);
@ -193,7 +211,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) {
RAW_NULL_CHECK(levelStr);
cJSON* levelJson = cJSON_CreateString(levelStr);
RAW_NULL_CHECK(levelJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "level", levelJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "level", levelJson));
return code;
}
@ -214,19 +232,19 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
RAW_NULL_CHECK(json);
cJSON* type = cJSON_CreateString("alter");
RAW_NULL_CHECK(type);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
SName name = {0};
RAW_RETURN_CHECK(tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE));
cJSON* tableType = cJSON_CreateString("super");
RAW_NULL_CHECK(tableType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
cJSON* tableName = cJSON_CreateString(name.tname);
RAW_NULL_CHECK(tableName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
cJSON* alterType = cJSON_CreateNumber(req.alterType);
RAW_NULL_CHECK(alterType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "alterType", alterType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "alterType", alterType));
switch (req.alterType) {
case TSDB_ALTER_TABLE_ADD_TAG:
case TSDB_ALTER_TABLE_ADD_COLUMN: {
@ -234,22 +252,22 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
RAW_NULL_CHECK(field);
cJSON* colName = cJSON_CreateString(field->name);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
cJSON* colType = cJSON_CreateNumber(field->type);
RAW_NULL_CHECK(colType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
}
break;
}
@ -258,22 +276,22 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
RAW_NULL_CHECK(field);
cJSON* colName = cJSON_CreateString(field->name);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
cJSON* colType = cJSON_CreateNumber(field->type);
RAW_NULL_CHECK(colType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
}
RAW_RETURN_CHECK(setCompressOption(json, field->compress));
break;
@ -284,7 +302,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
RAW_NULL_CHECK(field);
cJSON* colName = cJSON_CreateString(field->name);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
break;
}
case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
@ -293,21 +311,21 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
RAW_NULL_CHECK(field);
cJSON* colName = cJSON_CreateString(field->name);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
cJSON* colType = cJSON_CreateNumber(field->type);
RAW_NULL_CHECK(colType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
} else if (field->type == TSDB_DATA_TYPE_NCHAR) {
int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
}
break;
}
@ -319,10 +337,10 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
RAW_NULL_CHECK(newField);
cJSON* colName = cJSON_CreateString(oldField->name);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
cJSON* colNewName = cJSON_CreateString(newField->name);
RAW_NULL_CHECK(colNewName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colNewName", colNewName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colNewName", colNewName));
break;
}
case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: {
@ -330,7 +348,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON**
RAW_NULL_CHECK(field);
cJSON* colName = cJSON_CreateString(field->name);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
RAW_RETURN_CHECK(setCompressOption(json, field->bytes));
break;
}
@ -391,51 +409,47 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
int64_t id = pCreateReq->uid;
uint8_t tagNum = pCreateReq->ctb.tagNum;
int32_t code = 0;
cJSON* tags = NULL;
SArray* pTagVals = NULL;
char* pJson = NULL;
cJSON* tableName = cJSON_CreateString(name);
RAW_NULL_CHECK(tableName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
cJSON* using = cJSON_CreateString(sname);
RAW_NULL_CHECK(using);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "using", using));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "using", using));
cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
RAW_NULL_CHECK(tagNumJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tagNum", tagNumJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tagNum", tagNumJson));
tags = cJSON_CreateArray();
cJSON* tags = cJSON_CreateArray();
RAW_NULL_CHECK(tags);
SArray* pTagVals = NULL;
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags));
RAW_RETURN_CHECK(tTagToValArray(pTag, &pTagVals));
if (tTagIsJson(pTag)) {
STag* p = (STag*)pTag;
if (p->nTag == 0) {
uError("p->nTag == 0");
goto end;
}
char* pJson = NULL;
parseTagDatatoJson(pTag, &pJson);
if (pJson == NULL) {
uError("parseTagDatatoJson failed, pJson == NULL");
goto end;
}
parseTagDatatoJson(pTag, &pJson, NULL);
RAW_NULL_CHECK(pJson);
cJSON* tag = cJSON_CreateObject();
RAW_NULL_CHECK(tag);
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag));
STagVal* pTagVal = taosArrayGet(pTagVals, 0);
RAW_NULL_CHECK(pTagVal);
char* ptname = taosArrayGet(tagName, 0);
RAW_NULL_CHECK(ptname);
cJSON* tname = cJSON_CreateString(ptname);
RAW_NULL_CHECK(tname);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname));
cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
RAW_NULL_CHECK(ttype);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype));
cJSON* tvalue = cJSON_CreateString(pJson);
RAW_NULL_CHECK(tvalue);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "value", tvalue));
RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag));
taosMemoryFree(pJson);
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "value", tvalue));
goto end;
}
@ -444,36 +458,34 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
RAW_NULL_CHECK(pTagVal);
cJSON* tag = cJSON_CreateObject();
RAW_NULL_CHECK(tag);
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, tag));
char* ptname = taosArrayGet(tagName, i);
RAW_NULL_CHECK(ptname);
cJSON* tname = cJSON_CreateString(ptname);
RAW_NULL_CHECK(tname);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "name", tname));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "name", tname));
cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
RAW_NULL_CHECK(ttype);
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "type", ttype));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "type", ttype));
cJSON* tvalue = NULL;
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
char* buf = NULL;
int64_t bufSize = 0;
if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) {
bufSize = pTagVal->nData * 2 + 2 + 3;
} else {
bufSize = pTagVal->nData + 3;
}
buf = taosMemoryCalloc(bufSize, 1);
char* buf = taosMemoryCalloc(bufSize, 1);
RAW_NULL_CHECK(buf);
if (!buf) goto end;
if (dataConverToStr(buf, bufSize, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL) != TSDB_CODE_SUCCESS) {
taosMemoryFree(buf);
goto end;
}
tvalue = cJSON_CreateString(buf);
RAW_NULL_CHECK(tvalue);
taosMemoryFree(buf);
RAW_NULL_CHECK(tvalue);
} else {
double val = 0;
GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
@ -481,12 +493,11 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
RAW_NULL_CHECK(tvalue);
}
RAW_FALSE_CHECK(cJSON_AddItemToObject(tag, "value", tvalue));
RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(tag, "value", tvalue));
}
end:
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags));
taosMemoryFree(pJson);
taosArrayDestroy(pTagVals);
}
@ -497,22 +508,23 @@ static void buildCreateCTableJson(SVCreateTbReq* pCreateReq, int32_t nReqs, cJSO
RAW_NULL_CHECK(json);
cJSON* type = cJSON_CreateString("create");
RAW_NULL_CHECK(type);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
cJSON* tableType = cJSON_CreateString("child");
RAW_NULL_CHECK(tableType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
buildChildElement(json, pCreateReq);
cJSON* createList = cJSON_CreateArray();
RAW_NULL_CHECK(createList);
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "createList", createList));
for (int i = 0; nReqs > 1 && i < nReqs; i++) {
cJSON* create = cJSON_CreateObject();
RAW_NULL_CHECK(create);
buildChildElement(create, pCreateReq + i);
RAW_FALSE_CHECK(cJSON_AddItemToArray(createList, create));
RAW_FALSE_CHECK(tmqAddJsonArrayItem(createList, create));
}
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "createList", createList));
end:
*pJson = json;
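
Note the ordering change in this hunk: the createList array is now attached to its parent before the loop fills it, so any early `goto end` leaves a tree that one delete of the root frees completely. A self-contained sketch of that attach-before-fill discipline:

```c
#include <cJSON.h>

/* Returns a serialized {"createList":[0,1,...]} or NULL. Nothing leaks on
 * any failure path because root owns the array before it is filled. */
static char *buildListJson(int n) {
    cJSON *root = cJSON_CreateObject();
    if (root == NULL) return NULL;

    cJSON *list = cJSON_CreateArray();
    if (list == NULL || !cJSON_AddItemToObject(root, "createList", list)) {
        cJSON_Delete(list);      /* safe on NULL; not yet owned by root */
        cJSON_Delete(root);
        return NULL;
    }
    for (int i = 0; i < n; i++) {
        cJSON *item = cJSON_CreateNumber(i);
        if (item == NULL || !cJSON_AddItemToArray(list, item)) {
            cJSON_Delete(item);  /* only the unattached item */
            break;               /* root still owns everything else */
        }
    }
    char *s = cJSON_PrintUnformatted(root);
    cJSON_Delete(root);          /* frees list and every attached item */
    return s;                    /* caller releases with cJSON_free() */
}
```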
@ -619,62 +631,62 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
RAW_NULL_CHECK(json);
cJSON* type = cJSON_CreateString("alter");
RAW_NULL_CHECK(type);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ||
vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL
? "child"
: "normal");
RAW_NULL_CHECK(tableType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
RAW_NULL_CHECK(tableName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
RAW_NULL_CHECK(alterType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "alterType", alterType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "alterType", alterType));
switch (vAlterTbReq.action) {
case TSDB_ALTER_TABLE_ADD_COLUMN: {
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
RAW_NULL_CHECK(colType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
} else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
}
break;
}
case TSDB_ALTER_TABLE_ADD_COLUMN_WITH_COMPRESS_OPTION: {
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
RAW_NULL_CHECK(colType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
} else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
}
RAW_RETURN_CHECK(setCompressOption(json, vAlterTbReq.compress));
break;
@ -682,43 +694,43 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
case TSDB_ALTER_TABLE_DROP_COLUMN: {
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
break;
}
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
RAW_NULL_CHECK(colType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colType", colType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colType", colType));
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY ||
vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
} else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
cJSON* cbytes = cJSON_CreateNumber(length);
RAW_NULL_CHECK(cbytes);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colLength", cbytes));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colLength", cbytes));
}
break;
}
case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName);
RAW_NULL_CHECK(colNewName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colNewName", colNewName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colNewName", colNewName));
break;
}
case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: {
cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName);
RAW_NULL_CHECK(tagName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", tagName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", tagName));
bool isNull = vAlterTbReq.isNull;
if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
@ -733,7 +745,7 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
uError("processAlterTable isJson false");
goto end;
}
parseTagDatatoJson(vAlterTbReq.pTagVal, &buf);
parseTagDatatoJson(vAlterTbReq.pTagVal, &buf, NULL);
if (buf == NULL) {
uError("parseTagDatatoJson failed, buf == NULL");
goto end;
@ -757,12 +769,12 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
cJSON* colValue = cJSON_CreateString(buf);
taosMemoryFree(buf);
RAW_NULL_CHECK(colValue);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValue", colValue));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colValue", colValue));
}
cJSON* isNullCJson = cJSON_CreateBool(isNull);
RAW_NULL_CHECK(isNullCJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colValueNull", isNullCJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colValueNull", isNullCJson));
break;
}
case TSDB_ALTER_TABLE_UPDATE_MULTI_TAG_VAL: {
@ -774,14 +786,17 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
cJSON* tags = cJSON_CreateArray();
RAW_NULL_CHECK(tags);
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tags", tags));
for (int32_t i = 0; i < nTags; i++) {
cJSON* member = cJSON_CreateObject();
RAW_NULL_CHECK(member);
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tags, member));
SMultiTagUpateVal* pTagVal = taosArrayGet(vAlterTbReq.pMultiTag, i);
cJSON* tagName = cJSON_CreateString(pTagVal->tagName);
RAW_NULL_CHECK(tagName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colName", tagName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colName", tagName));
if (pTagVal->tagType == TSDB_DATA_TYPE_JSON) {
uError("processAlterTable isJson false");
@ -789,14 +804,13 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
}
bool isNull = pTagVal->isNull;
if (!isNull) {
char* buf = NULL;
int64_t bufSize = 0;
if (pTagVal->tagType == TSDB_DATA_TYPE_VARBINARY) {
bufSize = pTagVal->nTagVal * 2 + 2 + 3;
} else {
bufSize = pTagVal->nTagVal + 3;
}
buf = taosMemoryCalloc(bufSize, 1);
char* buf = taosMemoryCalloc(bufSize, 1);
RAW_NULL_CHECK(buf);
if (dataConverToStr(buf, bufSize, pTagVal->tagType, pTagVal->pTagVal, pTagVal->nTagVal, NULL) !=
TSDB_CODE_SUCCESS) {
@ -806,21 +820,19 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
cJSON* colValue = cJSON_CreateString(buf);
taosMemoryFree(buf);
RAW_NULL_CHECK(colValue);
RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValue", colValue));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colValue", colValue));
}
cJSON* isNullCJson = cJSON_CreateBool(isNull);
RAW_NULL_CHECK(isNullCJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(member, "colValueNull", isNullCJson));
RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, member));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(member, "colValueNull", isNullCJson));
}
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags));
break;
}
case TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS: {
cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
RAW_NULL_CHECK(colName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "colName", colName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "colName", colName));
RAW_RETURN_CHECK(setCompressOption(json, vAlterTbReq.compress));
break;
}
@ -858,13 +870,13 @@ static void processDropSTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
RAW_NULL_CHECK(json);
cJSON* type = cJSON_CreateString("drop");
RAW_NULL_CHECK(type);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
cJSON* tableType = cJSON_CreateString("super");
RAW_NULL_CHECK(tableType);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableType", tableType));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableType", tableType));
cJSON* tableName = cJSON_CreateString(req.name);
RAW_NULL_CHECK(tableName);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableName", tableName));
end:
uDebug("processDropSTable return");
@ -897,10 +909,10 @@ static void processDeleteTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
RAW_NULL_CHECK(json);
cJSON* type = cJSON_CreateString("delete");
RAW_NULL_CHECK(type);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
cJSON* sqlJson = cJSON_CreateString(sql);
RAW_NULL_CHECK(sqlJson);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", sqlJson));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "sql", sqlJson));
end:
uDebug("processDeleteTable return");
@ -928,16 +940,17 @@ static void processDropTable(SMqMetaRsp* metaRsp, cJSON** pJson) {
RAW_NULL_CHECK(json);
cJSON* type = cJSON_CreateString("drop");
RAW_NULL_CHECK(type);
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "type", type));
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "type", type));
cJSON* tableNameList = cJSON_CreateArray();
RAW_NULL_CHECK(tableNameList);
RAW_FALSE_CHECK(tmqAddJsonObjectItem(json, "tableNameList", tableNameList));
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
SVDropTbReq* pDropTbReq = req.pReqs + iReq;
cJSON* tableName = cJSON_CreateString(pDropTbReq->name);
RAW_NULL_CHECK(tableName);
RAW_FALSE_CHECK(cJSON_AddItemToArray(tableNameList, tableName));
RAW_FALSE_CHECK(tmqAddJsonArrayItem(tableNameList, tableName));
}
RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableNameList", tableNameList));
end:
uDebug("processDropTable return");
@ -2183,6 +2196,8 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) {
RAW_FALSE_CHECK(cJSON_AddStringToObject(pJson, "tmq_meta_version", TMQ_META_VERSION));
cJSON* pMetaArr = cJSON_CreateArray();
RAW_NULL_CHECK(pMetaArr);
RAW_FALSE_CHECK(tmqAddJsonObjectItem(pJson, "metas", pMetaArr));
int32_t num = taosArrayGetSize(rsp.batchMetaReq);
for (int32_t i = 0; i < num; i++) {
int32_t* len = taosArrayGet(rsp.batchMetaLen, i);
@ -2198,10 +2213,9 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) {
cJSON* pItem = NULL;
processSimpleMeta(&metaRsp, &pItem);
tDeleteMqMetaRsp(&metaRsp);
RAW_FALSE_CHECK(cJSON_AddItemToArray(pMetaArr, pItem));
RAW_FALSE_CHECK(tmqAddJsonArrayItem(pMetaArr, pItem));
}
RAW_FALSE_CHECK(cJSON_AddItemToObject(pJson, "metas", pMetaArr));
tDeleteMqBatchMetaRsp(&rsp);
char* fullStr = cJSON_PrintUnformatted(pJson);
cJSON_Delete(pJson);

View File

@ -267,7 +267,7 @@ bool isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv) {
goto END;
}
// bind data
int32_t ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kv, cnt + 1);
int32_t ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kv, cnt + 1, info->taos->optionInfo.charsetCxt);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
uDebug("smlBuildCol error, retry");
goto END;
@ -411,8 +411,8 @@ int32_t smlParseEndTelnetJsonFormat(SSmlHandle *info, SSmlLineInfo *elements, SS
int32_t code = 0;
int32_t lino = 0;
uDebug("SML:0x%" PRIx64 " %s format true, ts:%" PRId64, info->id, __FUNCTION__ , kvTs->i);
SML_CHECK_CODE(smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0));
SML_CHECK_CODE(smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kv, 1));
SML_CHECK_CODE(smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0, info->taos->optionInfo.charsetCxt));
SML_CHECK_CODE(smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kv, 1, info->taos->optionInfo.charsetCxt));
SML_CHECK_CODE(smlBuildRow(info->currTableDataCtx));
END:
@ -438,7 +438,7 @@ END:
int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs) {
if (info->dataFormat) {
uDebug("SML:0x%" PRIx64 " %s format true, ts:%" PRId64, info->id, __FUNCTION__, kvTs->i);
int32_t ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0);
int32_t ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, kvTs, 0, info->taos->optionInfo.charsetCxt);
if (ret == TSDB_CODE_SUCCESS) {
ret = smlBuildRow(info->currTableDataCtx);
}
@ -1486,7 +1486,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
SML_CHECK_CODE(smlBindData(info->pQuery, info->dataFormat, tableData->tags, (*pMeta)->cols, tableData->cols,
(*pMeta)->tableMeta, tableData->childTableName, measure, measureLen, info->ttl, info->msgBuf.buf,
info->msgBuf.len));
info->msgBuf.len, info->taos->optionInfo.charsetCxt));
taosMemoryFreeClear(measure);
oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable);
}

View File

@ -1071,7 +1071,7 @@ int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
tscDebug("start to bind stmt tag values");
STMT_ERR_RET(qBindStmtTagsValue(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.tbSuid, pStmt->bInfo.stbFName,
pStmt->bInfo.sname.tname, tags, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen));
pStmt->exec.pRequest->msgBufLen, pStmt->taos->optionInfo.charsetCxt));
return TSDB_CODE_SUCCESS;
}
@ -1239,7 +1239,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
}
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_ERR_RET(qStmtBindParams(pStmt->sql.pQuery, bind, colIdx));
STMT_ERR_RET(qStmtBindParams(pStmt->sql.pQuery, bind, colIdx, pStmt->taos->optionInfo.charsetCxt));
SParseContext ctx = {.requestId = pStmt->exec.pRequest->requestId,
.acctId = pStmt->taos->acctId,
@ -1325,10 +1325,10 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
if (pStmt->sql.stbInterlaceMode) {
(*pDataBlock)->pData->flags = 0;
code = qBindStmtStbColsValue(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen, &pStmt->sql.siInfo.pTSchema, pStmt->sql.pBindInfo);
pStmt->exec.pRequest->msgBufLen, &pStmt->sql.siInfo.pTSchema, pStmt->sql.pBindInfo, pStmt->taos->optionInfo.charsetCxt);
} else {
code =
qBindStmtColsValue(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf, pStmt->exec.pRequest->msgBufLen);
qBindStmtColsValue(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf, pStmt->exec.pRequest->msgBufLen, pStmt->taos->optionInfo.charsetCxt);
}
if (code) {
@ -1353,7 +1353,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
}
code = qBindStmtSingleColValue(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen, colIdx, pStmt->bInfo.sBindRowNum);
pStmt->exec.pRequest->msgBufLen, colIdx, pStmt->bInfo.sBindRowNum, pStmt->taos->optionInfo.charsetCxt);
if (code) {
tscError("qBindStmtSingleColValue failed, error:%s", tstrerror(code));
STMT_ERR_RET(code);

View File

@ -1015,7 +1015,7 @@ int stmtSetTbTags2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* tags) {
tscDebug("start to bind stmt tag values");
STMT_ERR_RET(qBindStmtTagsValue2(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.tbSuid, pStmt->bInfo.stbFName,
pStmt->bInfo.sname.tname, tags, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen));
pStmt->exec.pRequest->msgBufLen, pStmt->taos->optionInfo.charsetCxt));
return TSDB_CODE_SUCCESS;
}
@ -1094,6 +1094,13 @@ static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIEL
}
STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.preCtbname, fieldNum, fields));
if (pStmt->bInfo.tbType == TSDB_SUPER_TABLE) {
pStmt->bInfo.needParse = true;
if (taosHashRemove(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)) != 0) {
tscError("get fileds %s remove exec blockHash fail", pStmt->bInfo.tbFName);
STMT_ERR_RET(TSDB_CODE_APP_ERROR);
}
}
return TSDB_CODE_SUCCESS;
}
@ -1324,7 +1331,7 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
}
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_ERR_RET(qStmtBindParams2(pStmt->sql.pQuery, bind, colIdx));
STMT_ERR_RET(qStmtBindParams2(pStmt->sql.pQuery, bind, colIdx, pStmt->taos->optionInfo.charsetCxt));
SParseContext ctx = {.requestId = pStmt->exec.pRequest->requestId,
.acctId = pStmt->taos->acctId,
@ -1408,10 +1415,10 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
if (pStmt->sql.stbInterlaceMode) {
(*pDataBlock)->pData->flags = 0;
code = qBindStmtStbColsValue2(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen, &pStmt->sql.siInfo.pTSchema, pStmt->sql.pBindInfo);
pStmt->exec.pRequest->msgBufLen, &pStmt->sql.siInfo.pTSchema, pStmt->sql.pBindInfo, pStmt->taos->optionInfo.charsetCxt);
} else {
code =
qBindStmtColsValue2(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf, pStmt->exec.pRequest->msgBufLen);
qBindStmtColsValue2(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf, pStmt->exec.pRequest->msgBufLen, pStmt->taos->optionInfo.charsetCxt);
}
if (code) {
@ -1436,7 +1443,7 @@ int stmtBindBatch2(TAOS_STMT2* stmt, TAOS_STMT2_BIND* bind, int32_t colIdx) {
}
code = qBindStmtSingleColValue2(*pDataBlock, pCols, bind, pStmt->exec.pRequest->msgBuf,
pStmt->exec.pRequest->msgBufLen, colIdx, pStmt->bInfo.sBindRowNum);
pStmt->exec.pRequest->msgBufLen, colIdx, pStmt->bInfo.sBindRowNum, pStmt->taos->optionInfo.charsetCxt);
if (code) {
tscError("qBindStmtSingleColValue failed, error:%s", tstrerror(code));
STMT_ERR_RET(code);

View File

@ -11,6 +11,12 @@ TARGET_LINK_LIBRARIES(
os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function
)
ADD_EXECUTABLE(connectOptionsTest connectOptionsTest.cpp)
TARGET_LINK_LIBRARIES(
connectOptionsTest
os util common transport parser catalog scheduler gtest ${TAOS_LIB_STATIC} qcom executor function
)
ADD_EXECUTABLE(tmqTest tmqTest.cpp)
TARGET_LINK_LIBRARIES(
tmqTest
@ -41,11 +47,21 @@ TARGET_INCLUDE_DIRECTORIES(
PRIVATE "${TD_SOURCE_DIR}/source/client/inc"
)
TARGET_INCLUDE_DIRECTORIES(
connectOptionsTest
PUBLIC "${TD_SOURCE_DIR}/include/client/"
PRIVATE "${TD_SOURCE_DIR}/source/client/inc"
)
IF(${TD_LINUX})
add_test(
NAME clientTest
COMMAND clientTest
)
add_test(
NAME connectOptionsTest
COMMAND connectOptionsTest
)
ENDIF ()
TARGET_INCLUDE_DIRECTORIES(
@ -80,3 +96,4 @@ add_test(
NAME userOperTest
COMMAND userOperTest
)

View File

@ -300,7 +300,13 @@ void* doConsumeData(void* param) {
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
if (argc > 1) {
numOfThreads = atoi(argv[1]);
//numOfThreads = atoi(argv[1]);
int32_t code = taosStr2int32(argv[1], &numOfThreads);
if (code != 0) {
return code;
}
}
numOfThreads = TMAX(numOfThreads, 1);
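
The test now parses argv with taosStr2int32 instead of atoi, which silently yields 0 on garbage and cannot report overflow. A portable strtol-based analog of that check (the exact behavior of taosStr2int32 is assumed from its use here):

```c
#include <errno.h>
#include <limits.h>
#include <stdlib.h>

/* Returns 0 on success; -1 on empty input, trailing junk, or overflow. */
static int str2int32(const char *s, int *out) {
    char *end = NULL;
    errno = 0;
    long v = strtol(s, &end, 10);
    if (end == s || *end != '\0' || errno == ERANGE || v < INT_MIN || v > INT_MAX)
        return -1;
    *out = (int)v;
    return 0;
}
```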
@ -1609,5 +1615,4 @@ TEST(clientCase, timezone_Test) {
taos_close(pConn);
}
}
#pragma GCC diagnostic pop

Some files were not shown because too many files have changed in this diff.