Merge remote-tracking branch 'origin/3.0' into fix/TD-19245

This commit is contained in:
Shengliang Guan 2022-09-29 19:42:09 +08:00
commit 3be4a2ed9c
163 changed files with 9321 additions and 3691 deletions

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.1.2")
SET(TD_VER_NUMBER "3.0.1.4")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG f03c09a
GIT_TAG 70f5a1c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -37,7 +37,8 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- All the data in `tag_set` will be converted to nchar type automatically.
- Each data item in `field_set` must be self-descriptive of its data type. For example, 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can range from nanoseconds (ns) to hours (h).
- You can configure smlChildTableName in taos.cfg to specify child table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created (see the sketch after this note). Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (As of version 3.0.1.3, smlDataFormat defaults to false.)
:::
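As a sketch of the child-table naming rule above (assuming `smlChildTableName=tname` has been set in taos.cfg), the following two rows both land in the automatically created table cpu1:
```txt
st,tname=cpu1,t1=4 c1=3 1626006833639000000
st,tname=cpu1,t1=5 c1=6 1626006833640000000
```
Because the table is created from the first row, its t1 tag is 4; the second row's differing t1=5 is ignored.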
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
@ -64,3 +65,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
<CLine />
</TabItem>
</Tabs>
## Query Examples
If you want to query the data of `location=California.LosAngeles,groupid=2`, here is the query SQL:
```sql
select * from meters where location="California.LosAngeles" and groupid=2
```

View File

@ -31,7 +31,7 @@ For example:
```txt
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- The default child table name is generated by rules. You can configure smlChildTableName in taos.cfg to specify child table names, for example, `smlChildTableName=tname`. You can insert `meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
## Examples
@ -79,3 +79,6 @@ taos> select tbname, * from `meters.current`;
t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.005399s)
```
## Query Examples
If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
```sql
select * from `meters.voltage` where location="California.LosAngeles" and groupid=3
```

View File

@ -48,7 +48,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
- In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type.
- Only data in array format is accepted; an array must be used even if there is only one row (see the sketch after this note).
- The default child table name is generated by rules. You can configure smlChildTableName in taos.cfg to specify child table names, for example, `smlChildTableName=tname`. You can insert `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
:::
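For reference, a minimal sketch of the required array form, wrapping a single data point in a JSON array (the field names follow the OpenTSDB JSON format used in these examples):
```json
[
  {
    "metric": "meters.current",
    "timestamp": 1648432611249,
    "value": 10.3,
    "tags": {
      "location": "California.SanFrancisco",
      "groupid": 2
    }
  }
]
```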
## Examples
@ -94,3 +94,6 @@ taos> select * from `meters.current`;
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.004076s)
```
## Query Examples
If you want to query the data of "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
```sql
select * from `meters.voltage` where location="California.LosAngeles" and groupid=1
```

View File

@ -181,6 +181,14 @@ In TDengine, the first column of all tables must be a timestamp. This column is
select _rowts, max(current) from meters;
```
**\_IROWTS**
The \_IROWTS pseudocolumn can only be used with the INTERP function. It retrieves the timestamp column corresponding to the interpolation results.
```sql
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```
## Query Objects
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
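For instance, a minimal sketch of a subquery in the `FROM` clause, assuming the `meters` supertable used in the other examples:
```sql
SELECT AVG(current) FROM (SELECT current FROM meters WHERE groupid = 2);
```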

View File

@ -867,6 +867,7 @@ INTERP(expr)
- The number of rows in the result set of `INTERP` is determined by the `EVERY` parameter. Starting from timestamp1, one interpolation is performed for every time interval specified by the `EVERY` parameter.
- Interpolation is performed based on the `FILL` parameter.
- `INTERP` can only be used to interpolate within a single timeline, so it must be used with `partition by tbname` when applied to a STable.
- The pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with the interpolation points (supported in version 3.0.1.4 and later); see the sketch below.
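A sketch combining these notes, assuming the `meters` supertable from the example above:
```sql
SELECT _irowts, INTERP(current) FROM meters
PARTITION BY tbname
RANGE('2020-01-01 10:00:00', '2020-01-01 10:30:00') EVERY(1s) FILL(LINEAR);
```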
### LAST

View File

@ -343,6 +343,7 @@ The following list shows all reserved keywords:
### \_
- \_C0
- \_IROWTS
- \_QDURATION
- \_QEND
- \_QSTART

View File

@ -11,7 +11,15 @@ TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide acce
4. Future versions of TDengine can add new columns to INFORMATION_SCHEMA tables without affecting existing business systems.
5. It is easier for users coming from other database management systems. For example, Oracle users can query data dictionary tables.
Note: SHOW statements are still supported for the convenience of existing users.
:::info
- SHOW statements are still supported for the convenience of existing users.
- Some columns in the system table may be keywords, and you need to use the escape character '\`' when querying, for example, to query the VGROUPS in the database `test`:
```sql
select `vgroups` from ins_databases where name = 'test';
```
:::
This document introduces the tables of INFORMATION_SCHEMA and their structure.
@ -102,7 +110,11 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 24 | wal_retention_period | INT | WAL retention period |
| 25 | wal_retention_size | INT | Maximum WAL size |
| 26 | wal_roll_period | INT | WAL rotation period |
| 27 | wal_segment_size | WAL file size |
| 27 | wal_segment_size | BIGINT | WAL file size |
| 28 | stt_trigger | SMALLINT | The number of flushed files that triggers file merging |
| 29 | table_prefix | SMALLINT | The length of the prefix in the table name that is ignored when distributing a table to a vnode based on its table name |
| 30 | table_suffix | SMALLINT | The length of the suffix in the table name that is ignored when distributing a table to a vnode based on its table name |
| 31 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB |
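A sketch of inspecting the newly added columns, assuming a database named `test` exists:
```sql
SELECT stt_trigger, table_prefix, table_suffix, tsdb_pagesize
FROM information_schema.ins_databases WHERE name = 'test';
```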
## INS_FUNCTIONS

View File

@ -11,12 +11,13 @@ description: "This document explains how TDengine SQL has changed in version 3.0
| 1 | VARCHAR | Added | Alias of BINARY.
| 2 | TIMESTAMP literal | Added | TIMESTAMP 'timestamp format' syntax now supported.
| 3 | _ROWTS pseudocolumn | Added | Indicates the primary key. Alias of _C0.
| 4 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions
| 5 | PERFORMANCE_SCHEMA | Added | Database for system performance information.
| 6 | Continuous queries | Deprecated | Continuous queries are no longer supported. The syntax and interfaces are deprecated.
| 7 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
| 8 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
| 9 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
| 4 | _IROWTS pseudocolumn | Added | Used to retrieve timestamps with INTERP function.
| 5 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions
| 6 | PERFORMANCE_SCHEMA | Added | Database for system performance information.
| 7 | Continuous queries | Deprecated | Continuous queries are no longer supported. The syntax and interfaces are deprecated.
| 8 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
| 9 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
| 10 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
## SQL Syntax

View File

@ -109,7 +109,7 @@ TDengine's JDBC URL specification format is:
For establishing connections, native connections differ slightly from REST connections.
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
```java

View File

@ -113,7 +113,7 @@ username:password@protocol(address)/dbname?param=value
```
### Connecting via connector
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
_taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interface by simply introducing the driver.

View File

@ -55,16 +55,6 @@ taos = "*"
</TabItem>
<TabItem value="native" label="native connection only">
In `cargo.toml`, add [taos][taos] and enable the native feature:
```toml
[dependencies]
taos = { version = "*", default-features = false, features = ["native"] }
```
</TabItem>
<TabItem value="rest" label="Websocket only">
In `cargo.toml`, add [taos][taos] and enable the ws feature:
@ -75,6 +65,18 @@ taos = { version = "*", default-features = false, features = ["ws"] }
```
</TabItem>
<TabItem value="native" label="native connection only">
In `cargo.toml`, add [taos][taos] and enable the native feature:
```toml
[dependencies]
taos = { version = "*", default-features = false, features = ["native"] }
```
</TabItem>
</Tabs>
## Establishing a connection

View File

@ -80,7 +80,7 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
### Verify
<Tabs groupId="connect" default="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
For native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. The client driver and Python connector have been installed properly if you can successfully import the `taos` module. In the Python Interactive Shell, you can type.
@ -118,7 +118,7 @@ Requirement already satisfied: taospy in c:\users\username\appdata\local\program
Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.
<Tabs>
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
Ensure that the TDengine instance is up and that the FQDN of the machines in the cluster (the FQDN defaults to hostname if you are starting a standalone version) can be resolved locally, by testing with the `ping` command.
@ -173,7 +173,7 @@ If the test is successful, it will output the server version information, e.g.
The following example code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.
<Tabs>
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection" groupId="connect">
```python
@ -219,7 +219,7 @@ All arguments to the `connect()` function are optional keyword arguments. The fo
### Basic Usage
<Tabs default="native" groupId="connect">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
##### TaosConnection class
@ -289,7 +289,7 @@ For a more detailed description of the `sql()` method, please refer to [RestClie
### Used with pandas
<Tabs default="native" groupId="connect">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
```python

View File

@ -85,7 +85,7 @@ If using ARM64 Node.js on Windows 10 ARM, you must add "Visual C++ compilers and
### Install via npm
<Tabs defaultValue="install_native">
<Tabs defaultValue="install_rest">
<TabItem value="install_native" label="Install native connector">
```bash
@ -124,7 +124,7 @@ node nodejsChecker.js host=localhost
Please choose to use one of the connectors.
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
Install and import the `@tdengine/client` package.

View File

@ -97,7 +97,7 @@ dotnet add exmaple.csproj reference src/TDengine.csproj
## Establish a Connection
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="Native Connection">
@ -173,7 +173,7 @@ ws://localhost:6041/test
#### SQL Write
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="Native Connection">
@ -204,7 +204,7 @@ ws://localhost:6041/test
#### Parameter Binding
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="Native Connection">
@ -227,7 +227,7 @@ ws://localhost:6041/test
#### Synchronous Query
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="Native Connection">

View File

@ -177,12 +177,21 @@ The parameters described in this document by the effect that they have on the sy
### maxNumOfDistinctRes
| Attribute | Description |
| -------- | -------------------------------- | --- |
| -------- | -------------------------------- |
| Applicable | Server Only |
| Meaning | The maximum number of distinct rows returned |
| Value Range | [100,000 - 100,000,000] |
| Default Value | 100,000 |
### keepColumnName
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Client only |
| Meaning | Whether the returned column name includes the function name when querying with the Last, First, or LastRow functions. |
| Value Range | 0 means including the function name, 1 means not including the function name. |
| Default Value | 0 |
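A sketch of the effect, assuming the `meters` table from the other examples; the name of the result column depends on this setting:
```sql
-- keepColumnName = 0 (default): the result column is named last(current)
-- keepColumnName = 1: the result column is named current
SELECT LAST(current) FROM meters;
```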
## Locale Parameters
### timezone

View File

@ -47,9 +47,8 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label
is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row.
For example, the following data row writes the c1 column as 3 (BIGINT), the c2 column as false (BOOL), the c3 column as "passit" (BINARY), and the c4 column as 4 (DOUBLE), with primary key timestamp 1626006833639000000, to a child table of the supertable `st` whose t1 tag is "3" (NCHAR), t2 tag is "4" (NCHAR), and t3 tag is "t3" (NCHAR).
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@ -69,7 +68,7 @@ Schemaless writes process row data according to the following principles.
Note that tag_key1 and tag_key2 are not in the original order in which the user entered the tags, but the result of sorting the tag names in ascending string order. Therefore, tag_key1 is not the first tag entered in the line protocol.
The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has.
You can configure smlChildTableName to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
3. If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
@ -78,7 +77,7 @@ You can configure smlChildTableName to specify table names, for example, `smlChi
NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat to false. Otherwise, data will be written out of order and a database error will occur.
8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (As of version 3.0.1.3, smlDataFormat defaults to false.)
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed

View File

@ -22,5 +22,4 @@ An example is as follows.
username = "root"
password = "taosdata"
data_format = "influx"
influx_max_line_bytes = 250
```

View File

@ -89,3 +89,7 @@ VALUE TIMESTAMP
```
:::note
- TDengine automatically creates unique IDs as sub-table names according to its naming rule.
:::

View File

@ -15,6 +15,7 @@ To write Telegraf data to TDengine requires the following preparations.
- The TDengine cluster is deployed and functioning properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
- Telegraf has been installed. Please refer to the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation.
- By default, Telegraf collects measurements about the running status of the current system. You can enable [input plugins](https://docs.influxdata.com/telegraf/v1.22/plugins/) to feed data in [other formats](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/) into Telegraf and then forward it to TDengine.
## Configuration steps
<Telegraf />
@ -31,11 +32,12 @@ Use TDengine CLI to verify Telegraf correctly writing data to TDengine and read
```
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
telegraf | 2022-04-20 08:47:53.488 | 22 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
log | 2022-04-20 07:19:50.260 | 9 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
Query OK, 2 row(s) in set (0.002401s)
name |
=================================
information_schema |
performance_schema |
telegraf |
Query OK, 3 rows in database (0.010568s)
taos> use telegraf;
Database changed.
@ -65,3 +67,11 @@ taos> select * from telegraf.system limit 10;
|
Query OK, 3 row(s) in set (0.013269s)
```
:::note
- For the InfluxDB-format data it receives, TDengine creates unique IDs as table names according to its naming rule.
If you need specific table names, you can configure the `smlChildTableName` parameter and insert data in the corresponding format.
For example, add `smlChildTableName=tname` to the taos.cfg file and insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000`; the table name will then be cpu1. If multiple lines have the same tname but different tag_set values, the tag_set of the first line is used to automatically create the table and the other lines are ignored. Please refer to [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
:::

View File

@ -72,3 +72,7 @@ taos> select * from collectd.memory_value limit 10;
Query OK, 10 row(s) in set (0.010348s)
```
:::note
- TDengine automatically creates unique IDs as sub-table names according to its naming rule.
:::

View File

@ -36,11 +36,12 @@ After waiting about 10 seconds, use the TDengine CLI to query TDengine to verify
```
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
icinga2 | 2022-04-20 12:11:39.697 | 13 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
Query OK, 2 row(s) in set (0.001867s)
name |
=================================
information_schema |
performance_schema |
icinga2 |
Query OK, 3 row(s) in set (0.001867s)
taos> use icinga2;
Database changed.
@ -72,3 +73,8 @@ taos> show stables;
icinga.service.users.state | 2022-04-20 12:11:39.704 | 2 | 1 | 1 |
Query OK, 22 row(s) in set (0.002317s)
```
:::note
- TDengine automatically creates unique IDs as sub-table names according to its naming rule.
:::

View File

@ -60,7 +60,6 @@ For the configuration method, add the following text to `/etc/telegraf/telegraf.
username = "<TDengine's username>"
password = "<TDengine's password>"
data_format = "influx"
influx_max_line_bytes = 250
```
Then restart telegraf:

View File

@ -6,6 +6,14 @@ description: TDengine release history, Release Notes and download links.
import Release from "/components/ReleaseV3";
## 3.0.1.4
<Release type="tdengine" version="3.0.1.4" />
## 3.0.1.3
<Release type="tdengine" version="3.0.1.3" />
## 3.0.1.2
<Release type="tdengine" version="3.0.1.2" />

View File

@ -6,6 +6,14 @@ description: taosTools release history, Release Notes, download links.
import Release from "/components/ReleaseV3";
## 2.2.4
<Release type="tools" version="2.2.4" />
## 2.2.3
<Release type="tools" version="2.2.3" />
## 2.2.2
<Release type="tools" version="2.2.2" />

View File

@ -37,7 +37,9 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- All data in tag_set is automatically converted to the nchar data type;
- Each data item in field_set must describe its own data type; for example, 1.2f32 represents a float value 1.2, and without a type suffix it is treated as double;
- timestamp supports multiple time precisions. The precision must be specified via a parameter when writing data; six precisions from hours to nanoseconds are supported.
- To improve write efficiency, it is assumed by default that the order of field_set within the same supertable is the same (the first record contains all fields, and subsequent records follow that order). If the order differs, the smlDataFormat parameter must be set to false; otherwise the data will be written in the assumed order and the data in the database will be abnormal. (As of version 3.0.1.3, smlDataFormat defaults to false.) [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
- By default, the generated child table name is a unique ID value produced by rules. To let users specify the generated table name, you can configure the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set specified by the first row when the table was auto-created is used and the other rows are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
:::
For more information, see: [InfluxDB Line Protocol official documentation](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
@ -64,3 +66,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
<CLine />
</TabItem>
</Tabs>
## Query Examples
For example, to query the data of the child tables with location=California.LosAngeles,groupid=2, you can use the following SQL:
```sql
select * from meters where location="California.LosAngeles" and groupid=2
```

View File

@ -32,6 +32,8 @@ The OpenTSDB line protocol likewise uses one line of text to represent one row of data. OpenTSDB
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- By default, the generated child table name is a unique ID value produced by rules. To let users specify the generated table name, you can configure the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname configured, inserting meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set specified by the first row when the table was auto-created is used and the other rows are ignored.
See the [OpenTSDB Telnet API documentation](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
## Sample Code
@ -79,3 +81,6 @@ taos> select tbname, * from `meters.current`;
t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.005399s)
```
## Query Examples
To query the data with location=California.LosAngeles groupid=3, you can use the following SQL:
```sql
select * from `meters.voltage` where location="California.LosAngeles" and groupid=3
```

View File

@ -48,7 +48,8 @@ The OpenTSDB JSON format protocol uses a JSON string to represent one or more rows of data
- For the JSON format protocol, TDengine does not automatically convert all tags to the nchar type; strings are converted to the nchar type, and numeric values are converted to the double type.
- TDengine only accepts JSON strings in **array format**; even a single row of data must be converted to array form.
- By default, the generated child table name is a unique ID value produced by rules. To let users specify the generated table name, you can configure the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname configured, inserting "tags": { "host": "web02","dc": "lga","tname":"cpu1"} creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set specified by the first row when the table was auto-created is used and the other rows are ignored.
:::
## Sample Code
@ -94,3 +95,7 @@ taos> select * from `meters.current`;
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.004076s)
```
## Query Examples
To query the data with "tags": {"location": "California.LosAngeles", "groupid": 1}, you can use the following SQL:
```sql
select * from `meters.voltage` where location="California.LosAngeles" and groupid=1
```

View File

@ -109,7 +109,7 @@ The JDBC URL specification format of TDengine is:
For establishing connections, native connections differ slightly from REST connections.
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="原生连接">
```java

View File

@ -114,7 +114,7 @@ username:password@protocol(address)/dbname?param=value
```
### Connecting via connector
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
_taosSql_ implements Go's `database/sql/driver` interface via cgo. You can use the [`database/sql`](https://golang.org/pkg/database/sql/) interfaces by simply importing the driver.

View File

@ -55,16 +55,6 @@ taos = "*"
</TabItem>
<TabItem value="native" label="仅原生连接">
在 `Cargo.toml` 文件中添加 [taos][taos],并启用 `native` 特性:
```toml
[dependencies]
taos = { version = "*", default-features = false, features = ["native"] }
```
</TabItem>
<TabItem value="rest" label="仅 Websocket">
在 `Cargo.toml` 文件中添加 [taos][taos],并启用 `ws` 特性。
@ -74,6 +64,17 @@ taos = { version = "*", default-features = false, features = ["native"] }
taos = { version = "*", default-features = false, features = ["ws"] }
```
</TabItem>
<TabItem value="native" label="仅原生连接">
在 `Cargo.toml` 文件中添加 [taos][taos],并启用 `native` 特性:
```toml
[dependencies]
taos = { version = "*", default-features = false, features = ["native"] }
```
</TabItem>
</Tabs>

View File

@ -80,7 +80,7 @@ pip3 install git+https://github.com/taosdata/taos-connector-python.git
### Verify
<Tabs groupId="connect" default="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
For a native connection, you need to verify that both the client driver and the Python connector itself are installed correctly. If you can successfully import the `taos` module, the client driver and the Python connector have been installed correctly. You can type the following in the Python interactive shell:
@ -118,7 +118,7 @@ Requirement already satisfied: taospy in c:\users\username\appdata\local\program
Before establishing a connection with the connector, we recommend testing the connectivity of the local TDengine CLI to the TDengine cluster.
<Tabs>
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
Make sure the TDengine cluster is up and that the FQDN of the machines in the cluster (for a standalone version, the FQDN defaults to hostname) can be resolved locally, by testing with the `ping` command:
@ -173,7 +173,7 @@ curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
The following sample code assumes that TDengine is installed locally and that the default configuration is used for both FQDN and serverPort.
<Tabs>
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection" groupId="connect">
```python
@ -219,7 +219,7 @@ curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
### Basic Usage
<Tabs default="native" groupId="connect">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
##### Using the TaosConnection class
@ -289,7 +289,7 @@ The TaosCursor class uses the native connection for write and query operations. In a multi-threaded client
### Used with pandas
<Tabs default="native" groupId="connect">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
```python

View File

@ -85,7 +85,7 @@ The REST connector supports all platforms that can run Node.js.
### Install via npm
<Tabs defaultValue="install_native">
<Tabs defaultValue="install_rest">
<TabItem value="install_native" label="安装原生连接器">
```bash
@ -124,7 +124,7 @@ node nodejsChecker.js host=localhost
Please choose to use one of the connectors.
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
Install and import the `@tdengine/client` package.

View File

@ -35,7 +35,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
## Supported features
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
@ -96,7 +96,7 @@ dotnet add exmaple.csproj reference src/TDengine.csproj
## Establish a Connection
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
@ -171,7 +171,7 @@ namespace TDengineExample
#### SQL Write
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
@ -203,7 +203,7 @@ namespace TDengineExample
#### Parameter Binding
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">
@ -227,7 +227,7 @@ namespace TDengineExample
#### Synchronous Query
<Tabs defaultValue="native">
<Tabs defaultValue="rest">
<TabItem value="native" label="native connection">

View File

@ -182,6 +182,14 @@ In TDengine, the first column of every table must be of the timestamp type and is its primary key
select _rowts, max(current) from meters;
```
**\_IROWTS**
The \_irowts pseudocolumn can only be used together with the interp function, to return the timestamp column corresponding to the interpolation results of the interp function.
```sql
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```
## Query Objects
The FROM keyword can be followed by a list of tables (supertables) or by the result of a subquery.

View File

@ -868,6 +868,7 @@ INTERP(expr)
- INTERP determines the number of result rows within the output time range according to the EVERY field, i.e. interpolation is performed at fixed intervals (the EVERY value) starting from timestamp1. If EVERY is not specified, the default window size is infinite, i.e. there is only one window starting from timestamp1.
- INTERP decides how to interpolate at each moment that qualifies for output according to the FILL field.
- INTERP can only interpolate within a single time series, so when applied to a supertable it must be used together with partition by tbname.
- INTERP can be used together with the pseudocolumn _irowts to return the timestamps corresponding to the interpolation points (supported in version 3.0.1.4 and later).
### LAST

View File

@ -344,6 +344,7 @@ description: Detailed list of TDengine reserved keywords
### \_
- \_C0
- \_IROWTS
- \_QDURATION
- \_QEND
- \_QSTART

View File

@ -12,7 +12,15 @@ TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide access to database metadata
4. TDengine can flexibly add columns to existing INFORMATION_SCHEMA tables in future releases without worrying about affecting existing business systems
5. Better interoperability with other database systems. For example, Oracle database users are familiar with querying tables in the Oracle data dictionary
Note: Because SHOW statements are already familiar to and widely used by developers, they are still retained.
:::info
- Because SHOW statements are already familiar to and widely used by developers, they are still retained.
- Some columns in the system tables may be keywords, and you need to use the escape character '\`' when querying them. For example, to query how many VGROUPs the database test has:
```sql
select `vgroups` from ins_databases where name = 'test';
```
:::
This chapter describes in detail the tables and table structures of the built-in metadata database `INFORMATION_SCHEMA`.
@ -103,7 +111,11 @@ Note: Because SHOW statements are already familiar to and widely used by developers, they
| 24 | wal_retention_period | INT | WAL retention period |
| 25 | wal_retention_size | INT | Maximum WAL size |
| 26 | wal_roll_period | INT | WAL file rotation period |
| 27 | wal_segment_size | Size of a single WAL file |
| 27 | wal_segment_size | BIGINT | Size of a single WAL file |
| 28 | stt_trigger | SMALLINT | The number of flushed files that triggers file merging |
| 29 | table_prefix | SMALLINT | The length of the prefix in the table name to ignore when the internal storage engine assigns the VNODE that stores the table's data based on the table name |
| 30 | table_suffix | SMALLINT | The length of the suffix in the table name to ignore when the internal storage engine assigns the VNODE that stores the table's data based on the table name |
| 31 | tsdb_pagesize | INT | Page size of the time-series data storage engine |
## INS_FUNCTIONS

View File

@ -11,12 +11,13 @@ description: "Description of syntax changes in TDengine 3.0"
| 1 | VARCHAR | Added | Alias of the BINARY type.
| 2 | TIMESTAMP literal | Added | Support for the TIMESTAMP 'timestamp format' syntax added.
| 3 | _ROWTS pseudocolumn | Added | Indicates the timestamp primary key. Alias of the _C0 pseudocolumn.
| 4 | INFORMATION_SCHEMA | Added | System database containing the various SCHEMA definitions.
| 5 | PERFORMANCE_SCHEMA | Added | System database containing runtime information.
| 6 | Continuous queries | Removed | Continuous queries are no longer supported; the related syntax and interfaces are removed.
| 7 | Mixed operations | Enhanced | Mixed operations (mixing scalar and vector operations) in queries are fully enhanced; all SELECT clauses fully support mixed operations that conform to the syntax and semantics.
| 8 | Tag operations | Added | In queries, tag columns can take part in all kinds of operations like ordinary columns and be used in all kinds of clauses.
| 9 | Timeline clauses and time functions for supertable queries | Enhanced | Without PARTITION BY, the data of a supertable is merged into a single timeline.
| 4 | _IROWTS pseudocolumn | Added | Used to return the timestamp column corresponding to the interpolation results of the interp function.
| 5 | INFORMATION_SCHEMA | Added | System database containing the various SCHEMA definitions.
| 6 | PERFORMANCE_SCHEMA | Added | System database containing runtime information.
| 7 | Continuous queries | Removed | Continuous queries are no longer supported; the related syntax and interfaces are removed.
| 8 | Mixed operations | Enhanced | Mixed operations (mixing scalar and vector operations) in queries are fully enhanced; all SELECT clauses fully support mixed operations that conform to the syntax and semantics.
| 9 | Tag operations | Added | In queries, tag columns can take part in all kinds of operations like ordinary columns and be used in all kinds of clauses.
| 10 | Timeline clauses and time functions for supertable queries | Enhanced | Without PARTITION BY, the data of a supertable is merged into a single timeline.
## SQL Statement Changes

View File

@ -177,12 +177,21 @@ taos --dump-config
### maxNumOfDistinctRes
| Attribute | Description |
| -------- | -------------------------------- | --- |
| -------- | -------------------------------- |
| Applicable | Server only |
| Meaning | Maximum number of distinct result rows that may be returned |
| Value Range | Default value 100,000; maximum value 100,000,000 |
| Default Value | 100,000 |
### keepColumnName
| Attribute | Description |
| -------- | -------------------------------- |
| Applicable | Client only |
| Meaning | Whether the returned column name includes the function name when querying with the Last, First, or LastRow functions. |
| Value Range | 0 means the function name is included; 1 means it is not included. |
| Default Value | 0 |
## Locale Parameters
### timezone

View File

@ -67,9 +67,12 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
:::tip
Note that tag_key1 and tag_key2 here are not in the original order in which the user entered the tags, but the result of sorting the tag names in ascending string order. So tag_key1 is not the first tag entered in the line protocol.
After sorting, the MD5 hash value "md5_val" of the string is computed. The result is then combined with the string to generate the table name "t_md5_val", where "t_" is a fixed prefix that every table automatically generated through this mapping carries.
To let users specify the generated table name, configure smlChildTableName (for example, with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1; note that if multiple rows have the same tname but different tag_set values, the tag_set specified when the table was first auto-created is used and the others are ignored)
:::tip
To let users specify the generated table name, configure the smlChildTableName parameter in taos.cfg.
For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set specified by the first row when the table was auto-created is used and the other rows are ignored.
2. If the supertable obtained by parsing the line protocol does not exist, it is created (manually creating supertables is not recommended, as data insertion may otherwise be abnormal).
3. If the child table obtained by parsing the line protocol does not exist, Schemaless creates the child table according to the child table name determined in step 1 or 2.

View File

@ -22,6 +22,5 @@
username = "root"
password = "taosdata"
data_format = "influx"
influx_max_line_bytes = 250
```

View File

@ -88,3 +88,7 @@ VALUE TIMESTAMP
```
:::note
- By default, the sub-table names generated by TDengine are unique ID values generated according to rules.
:::

View File

@ -16,6 +16,7 @@ Telegraf is a very popular open-source metrics collection software. In data collection and
- The TDengine cluster is deployed and running properly
- taosAdapter is installed and running properly; see the [taosAdapter manual](/reference/taosadapter) for details
- Telegraf is installed; see the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation
- By default, Telegraf collects the running status data of the current system. By enabling [input plugins](https://docs.influxdata.com/telegraf/v1.22/plugins/), data in [other formats](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/) can be fed to Telegraf and then written to TDengine.
## Configuration steps
<Telegraf />
@ -32,11 +33,12 @@ sudo systemctl restart telegraf
```
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
telegraf | 2022-04-20 08:47:53.488 | 22 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
log | 2022-04-20 07:19:50.260 | 9 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
Query OK, 2 row(s) in set (0.002401s)
name |
=================================
information_schema |
performance_schema |
telegraf |
Query OK, 3 rows in database (0.010568s)
taos> use telegraf;
Database changed.
@ -66,3 +68,11 @@ taos> select * from telegraf.system limit 10;
|
Query OK, 3 row(s) in set (0.013269s)
```
:::note
- For the InfluxDB-format data TDengine receives, the default generated sub-table names are unique ID values produced according to rules.
To specify the generated table names, you can configure the smlChildTableName parameter in taos.cfg. By controlling the format of the input data, you can use this TDengine feature to specify the generated table names.
For example: with smlChildTableName=tname configured, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. If multiple rows have the same tname but different tag_set values, the tag_set of the first row is used when automatically creating the table and the other rows are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
:::

View File

@ -72,3 +72,7 @@ taos> select * from collectd.memory_value limit 10;
Query OK, 10 row(s) in set (0.010348s)
```
:::note
- By default, the sub-table names generated by TDengine are unique ID values generated according to rules.
:::

View File

@ -37,11 +37,12 @@ sudo systemctl restart icinga2
```
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
icinga2 | 2022-04-20 12:11:39.697 | 13 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
Query OK, 2 row(s) in set (0.001867s)
name |
=================================
information_schema |
performance_schema |
icinga2 |
Query OK, 3 row(s) in set (0.001867s)
taos> use icinga2;
Database changed.
@ -73,3 +74,9 @@ taos> show stables;
icinga.service.users.state | 2022-04-20 12:11:39.704 | 2 | 1 | 1 |
Query OK, 22 row(s) in set (0.002317s)
```
:::note
- By default, the sub-table names generated by TDengine are unique ID values generated according to rules.
:::

View File

@ -61,7 +61,6 @@ IT operations monitoring data is usually highly time-sensitive; for example
username = "<TDengine's username>"
password = "<TDengine's password>"
data_format = "influx"
influx_max_line_bytes = 250
```
Then restart Telegraf:

View File

@ -6,6 +6,14 @@ description: TDengine release history, Release Notes and download links
import Release from "/components/ReleaseV3";
## 3.0.1.4
<Release type="tdengine" version="3.0.1.4" />
## 3.0.1.3
<Release type="tdengine" version="3.0.1.3" />
## 3.0.1.2
<Release type="tdengine" version="3.0.1.2" />

View File

@ -6,6 +6,14 @@ description: taosTools release history, Release Notes and download links
import Release from "/components/ReleaseV3";
## 2.2.4
<Release type="tools" version="2.2.4" />
## 2.2.3
<Release type="tools" version="2.2.3" />
## 2.2.2
<Release type="tools" version="2.2.2" />

View File

@ -1,6 +1,12 @@
# TDengine driver connector for Lua
It's a Lua implementation for [TDengine](https://github.com/taosdata/TDengine), an open-source big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. You may need to install Lua 5.3.
Because TDengine is built with Lua enabled in its default configuration, the built-in Lua library conflicts with an external Lua library. The following commands require TDengine to be built with Lua disabled.
To disable the built-in Lua:
```
mkdir debug && cd debug
cmake .. -DBUILD_LUA=false && cmake --build .
```
## Lua Dependencies
- Lua:

View File

@ -29,7 +29,7 @@ static int l_connect(lua_State *L){
luaL_checktype(L, 1, LUA_TTABLE);
lua_getfield(L, 1,"host");
if (lua_isstring(L,-1)){
if (lua_isstring(L, -1)){
host = lua_tostring(L, -1);
// printf("host = %s\n", host);
}
@ -58,8 +58,9 @@ static int l_connect(lua_State *L){
//printf("password = %s\n", password);
}
lua_settop(L,0);
lua_settop(L, 0);
taos_init();
lua_newtable(L);
int table_index = lua_gettop(L);
@ -125,7 +126,7 @@ static int l_query(lua_State *L){
//printf("row index:%d\n",rows);
rows++;
lua_pushnumber(L,rows);
lua_pushnumber(L, rows);
lua_newtable(L);
for (int i = 0; i < num_fields; ++i) {
@ -136,15 +137,19 @@ static int l_query(lua_State *L){
lua_pushstring(L,fields[i].name);
int32_t* length = taos_fetch_lengths(result);
switch (fields[i].type) {
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_TINYINT:
lua_pushinteger(L,*((char *)row[i]));
break;
case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_SMALLINT:
lua_pushinteger(L,*((short *)row[i]));
break;
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_INT:
lua_pushinteger(L,*((int *)row[i]));
break;
case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_BIGINT:
lua_pushinteger(L,*((int64_t *)row[i]));
break;
@ -154,6 +159,7 @@ static int l_query(lua_State *L){
case TSDB_DATA_TYPE_DOUBLE:
lua_pushnumber(L,*((double *)row[i]));
break;
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
@ -197,7 +203,7 @@ void async_query_callback(void *param, TAOS_RES *result, int code){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
lua_pushstring(L,"something is wrong");// taos_errstr(taos));
lua_pushstring(L, taos_errstr(result));
lua_setfield(L, table_index, "error");
}else{
//printf("success to async query.\n");
@ -214,9 +220,9 @@ void async_query_callback(void *param, TAOS_RES *result, int code){
static int l_async_query(lua_State *L){
int r = luaL_ref(L, LUA_REGISTRYINDEX);
TAOS * taos = (TAOS*)lua_topointer(L,1);
const char * sqlstr = lua_tostring(L,2);
// int stime = luaL_checknumber(L,3);
TAOS * taos = (TAOS*)lua_topointer(L, 1);
const char * sqlstr = lua_tostring(L, 2);
// int stime = luaL_checknumber(L, 3);
lua_newtable(L);
int table_index = lua_gettop(L);
@ -224,7 +230,7 @@ static int l_async_query(lua_State *L){
struct async_query_callback_param *p = malloc(sizeof(struct async_query_callback_param));
p->state = L;
p->callback=r;
// printf("r:%d, L:%d\n",r,L);
// printf("r:%d, L:%d\n", r, L);
taos_query_a(taos,sqlstr,async_query_callback,p);
lua_pushnumber(L, 0);
@ -267,7 +273,7 @@ static const struct luaL_Reg lib[] = {
extern int luaopen_luaconnector51(lua_State* L)
{
// luaL_register(L, "luaconnector51", lib);
lua_newtable (L);
lua_newtable(L);
luaL_setfuncs(L,lib,0);
return 1;
}

View File

@ -29,7 +29,7 @@ static int l_connect(lua_State *L){
luaL_checktype(L, 1, LUA_TTABLE);
lua_getfield(L, 1,"host");
if (lua_isstring(L,-1)){
if (lua_isstring(L, -1)){
host = lua_tostring(L, -1);
// printf("host = %s\n", host);
}
@ -58,7 +58,7 @@ static int l_connect(lua_State *L){
//printf("password = %s\n", password);
}
lua_settop(L,0);
lua_settop(L, 0);
taos_init();
@ -126,7 +126,7 @@ static int l_query(lua_State *L){
//printf("row index:%d\n",rows);
rows++;
lua_pushnumber(L,rows);
lua_pushnumber(L, rows);
lua_newtable(L);
for (int i = 0; i < num_fields; ++i) {
@ -203,7 +203,7 @@ void async_query_callback(void *param, TAOS_RES *result, int code){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
lua_pushstring(L,"something is wrong");// taos_errstr(taos));
lua_pushstring(L, taos_errstr(result));
lua_setfield(L, table_index, "error");
}else{
//printf("success to async query.\n");
@ -220,9 +220,9 @@ void async_query_callback(void *param, TAOS_RES *result, int code){
static int l_async_query(lua_State *L){
int r = luaL_ref(L, LUA_REGISTRYINDEX);
TAOS * taos = (TAOS*)lua_topointer(L,1);
const char * sqlstr = lua_tostring(L,2);
// int stime = luaL_checknumber(L,3);
TAOS * taos = (TAOS*)lua_topointer(L, 1);
const char * sqlstr = lua_tostring(L, 2);
// int stime = luaL_checknumber(L, 3);
lua_newtable(L);
int table_index = lua_gettop(L);
@ -230,7 +230,7 @@ static int l_async_query(lua_State *L){
struct async_query_callback_param *p = malloc(sizeof(struct async_query_callback_param));
p->state = L;
p->callback=r;
// printf("r:%d, L:%d\n",r,L);
// printf("r:%d, L:%d\n", r, L);
taos_query_a(taos,sqlstr,async_query_callback,p);
lua_pushnumber(L, 0);

View File

@ -176,8 +176,14 @@ end
driver.query_a(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.005', 100),('2019-09-01 00:00:00.006', 101),('2019-09-01 00:00:00.007', 102)", async_query_callback)
res = driver.query(conn, "create stream stream_avg_degree into avg_degree as select avg(degree) from thermometer interval(5s) sliding(1s)")
if res.code ~=0 then
print("create stream--- failed:"..res.error)
return
else
print("create stream--- pass")
end
print("From now on we start continous insert in an definite loop, pls wait for about 10 seconds and check stream table for result.")
print("From now on we start continous insertion in an definite loop, please wait for about 10 seconds and check stream table avg_degree for result.")
local loop_index = 0
while loop_index < 10 do
local t = os.time()*1000
@ -193,5 +199,5 @@ while loop_index < 10 do
os.execute("sleep " .. 1)
loop_index = loop_index + 1
end
driver.query(conn,"DROP STREAM IF EXISTS avg_therm_s")
driver.query(conn,"DROP STREAM IF EXISTS stream_avg_degree")
driver.close(conn)

View File

@ -177,6 +177,7 @@ typedef struct SSDataBlock {
enum {
FETCH_TYPE__DATA = 1,
FETCH_TYPE__META,
FETCH_TYPE__SEP,
FETCH_TYPE__NONE,
};

View File

@ -98,6 +98,7 @@ extern int32_t tsQueryRsmaTolerance;
extern bool tsQueryPlannerTrace;
extern int32_t tsQueryNodeChunkSize;
extern bool tsQueryUseNodeAllocator;
extern bool tsKeepColumnName;
// client
extern int32_t tsMinSlidingTime;

View File

@ -226,110 +226,112 @@
#define TK_WSTART 208
#define TK_WEND 209
#define TK_WDURATION 210
#define TK_CAST 211
#define TK_NOW 212
#define TK_TODAY 213
#define TK_TIMEZONE 214
#define TK_CLIENT_VERSION 215
#define TK_SERVER_VERSION 216
#define TK_SERVER_STATUS 217
#define TK_CURRENT_USER 218
#define TK_COUNT 219
#define TK_LAST_ROW 220
#define TK_CASE 221
#define TK_END 222
#define TK_WHEN 223
#define TK_THEN 224
#define TK_ELSE 225
#define TK_BETWEEN 226
#define TK_IS 227
#define TK_NK_LT 228
#define TK_NK_GT 229
#define TK_NK_LE 230
#define TK_NK_GE 231
#define TK_NK_NE 232
#define TK_MATCH 233
#define TK_NMATCH 234
#define TK_CONTAINS 235
#define TK_IN 236
#define TK_JOIN 237
#define TK_INNER 238
#define TK_SELECT 239
#define TK_DISTINCT 240
#define TK_WHERE 241
#define TK_PARTITION 242
#define TK_BY 243
#define TK_SESSION 244
#define TK_STATE_WINDOW 245
#define TK_SLIDING 246
#define TK_FILL 247
#define TK_VALUE 248
#define TK_NONE 249
#define TK_PREV 250
#define TK_LINEAR 251
#define TK_NEXT 252
#define TK_HAVING 253
#define TK_RANGE 254
#define TK_EVERY 255
#define TK_ORDER 256
#define TK_SLIMIT 257
#define TK_SOFFSET 258
#define TK_LIMIT 259
#define TK_OFFSET 260
#define TK_ASC 261
#define TK_NULLS 262
#define TK_ABORT 263
#define TK_AFTER 264
#define TK_ATTACH 265
#define TK_BEFORE 266
#define TK_BEGIN 267
#define TK_BITAND 268
#define TK_BITNOT 269
#define TK_BITOR 270
#define TK_BLOCKS 271
#define TK_CHANGE 272
#define TK_COMMA 273
#define TK_COMPACT 274
#define TK_CONCAT 275
#define TK_CONFLICT 276
#define TK_COPY 277
#define TK_DEFERRED 278
#define TK_DELIMITERS 279
#define TK_DETACH 280
#define TK_DIVIDE 281
#define TK_DOT 282
#define TK_EACH 283
#define TK_FAIL 284
#define TK_FILE 285
#define TK_FOR 286
#define TK_GLOB 287
#define TK_ID 288
#define TK_IMMEDIATE 289
#define TK_IMPORT 290
#define TK_INITIALLY 291
#define TK_INSTEAD 292
#define TK_ISNULL 293
#define TK_KEY 294
#define TK_NK_BITNOT 295
#define TK_NK_SEMI 296
#define TK_NOTNULL 297
#define TK_OF 298
#define TK_PLUS 299
#define TK_PRIVILEGE 300
#define TK_RAISE 301
#define TK_REPLACE 302
#define TK_RESTRICT 303
#define TK_ROW 304
#define TK_SEMI 305
#define TK_STAR 306
#define TK_STATEMENT 307
#define TK_STRING 308
#define TK_TIMES 309
#define TK_UPDATE 310
#define TK_VALUES 311
#define TK_VARIABLE 312
#define TK_VIEW 313
#define TK_WAL 314
#define TK_IROWTS 211
#define TK_QTAGS 212
#define TK_CAST 213
#define TK_NOW 214
#define TK_TODAY 215
#define TK_TIMEZONE 216
#define TK_CLIENT_VERSION 217
#define TK_SERVER_VERSION 218
#define TK_SERVER_STATUS 219
#define TK_CURRENT_USER 220
#define TK_COUNT 221
#define TK_LAST_ROW 222
#define TK_CASE 223
#define TK_END 224
#define TK_WHEN 225
#define TK_THEN 226
#define TK_ELSE 227
#define TK_BETWEEN 228
#define TK_IS 229
#define TK_NK_LT 230
#define TK_NK_GT 231
#define TK_NK_LE 232
#define TK_NK_GE 233
#define TK_NK_NE 234
#define TK_MATCH 235
#define TK_NMATCH 236
#define TK_CONTAINS 237
#define TK_IN 238
#define TK_JOIN 239
#define TK_INNER 240
#define TK_SELECT 241
#define TK_DISTINCT 242
#define TK_WHERE 243
#define TK_PARTITION 244
#define TK_BY 245
#define TK_SESSION 246
#define TK_STATE_WINDOW 247
#define TK_SLIDING 248
#define TK_FILL 249
#define TK_VALUE 250
#define TK_NONE 251
#define TK_PREV 252
#define TK_LINEAR 253
#define TK_NEXT 254
#define TK_HAVING 255
#define TK_RANGE 256
#define TK_EVERY 257
#define TK_ORDER 258
#define TK_SLIMIT 259
#define TK_SOFFSET 260
#define TK_LIMIT 261
#define TK_OFFSET 262
#define TK_ASC 263
#define TK_NULLS 264
#define TK_ABORT 265
#define TK_AFTER 266
#define TK_ATTACH 267
#define TK_BEFORE 268
#define TK_BEGIN 269
#define TK_BITAND 270
#define TK_BITNOT 271
#define TK_BITOR 272
#define TK_BLOCKS 273
#define TK_CHANGE 274
#define TK_COMMA 275
#define TK_COMPACT 276
#define TK_CONCAT 277
#define TK_CONFLICT 278
#define TK_COPY 279
#define TK_DEFERRED 280
#define TK_DELIMITERS 281
#define TK_DETACH 282
#define TK_DIVIDE 283
#define TK_DOT 284
#define TK_EACH 285
#define TK_FAIL 286
#define TK_FILE 287
#define TK_FOR 288
#define TK_GLOB 289
#define TK_ID 290
#define TK_IMMEDIATE 291
#define TK_IMPORT 292
#define TK_INITIALLY 293
#define TK_INSTEAD 294
#define TK_ISNULL 295
#define TK_KEY 296
#define TK_NK_BITNOT 297
#define TK_NK_SEMI 298
#define TK_NOTNULL 299
#define TK_OF 300
#define TK_PLUS 301
#define TK_PRIVILEGE 302
#define TK_RAISE 303
#define TK_REPLACE 304
#define TK_RESTRICT 305
#define TK_ROW 306
#define TK_SEMI 307
#define TK_STAR 308
#define TK_STATEMENT 309
#define TK_STRING 310
#define TK_TIMES 311
#define TK_UPDATE 312
#define TK_VALUES 313
#define TK_VARIABLE 314
#define TK_VIEW 315
#define TK_WAL 316
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301

View File

@ -29,13 +29,13 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
typedef int32_t (*localFetchFp)(void *, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
typedef int32_t (*localFetchFp)(void*, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
typedef struct {
void *handle;
void* handle;
bool localExec;
localFetchFp fp;
SArray *explainRes;
SArray* explainRes;
} SLocalFetch;
typedef struct {
@ -51,9 +51,9 @@ typedef struct {
bool initTqReader;
int32_t numOfVgroups;
void* sContext; // SSnapContext*
void* sContext; // SSnapContext*
void* pStateBackend;
void* pStateBackend;
} SReadHandle;
// in queue mode, data streams are separated by msg
@ -136,7 +136,8 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
* @param handle
* @return
*/
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, SLocalFetch *pLocal);
int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bool* hasMore, SLocalFetch *pLocal);
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);
/**
@ -195,6 +196,8 @@ int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq);
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo);

View File

@ -119,9 +119,11 @@ typedef enum EFunctionType {
FUNCTION_TYPE_WSTART,
FUNCTION_TYPE_WEND,
FUNCTION_TYPE_WDURATION,
FUNCTION_TYPE_IROWTS,
FUNCTION_TYPE_TAGS,
// internal function
FUNCTION_TYPE_SELECT_VALUE,
FUNCTION_TYPE_SELECT_VALUE = 3750,
FUNCTION_TYPE_BLOCK_DIST, // block distribution aggregate function
FUNCTION_TYPE_BLOCK_DIST_INFO, // block distribution pseudo column function
FUNCTION_TYPE_TO_COLUMN,
@ -212,6 +214,7 @@ bool fmIsClientPseudoColumnFunc(int32_t funcId);
bool fmIsMultiRowsFunc(int32_t funcId);
bool fmIsKeepOrderFunc(int32_t funcId);
bool fmIsCumulativeFunc(int32_t funcId);
bool fmIsInterpPseudoColumnFunc(int32_t funcId);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);

View File

@ -27,9 +27,10 @@ extern "C" {
#define LIST_LENGTH(l) (NULL != (l) ? (l)->length : 0)
#define FOREACH(node, list) \
for (SListCell *cell = (NULL != (list) ? (list)->pHead : NULL), *pNext; \
(NULL != cell ? (node = cell->pNode, pNext = cell->pNext, true) : (node = NULL, pNext = NULL, false)); cell = pNext)
#define FOREACH(node, list) \
for (SListCell* cell = (NULL != (list) ? (list)->pHead : NULL), *pNext; \
(NULL != cell ? (node = cell->pNode, pNext = cell->pNext, true) : (node = NULL, pNext = NULL, false)); \
cell = pNext)
#define REPLACE_NODE(newNode) cell->pNode = (SNode*)(newNode)
@ -192,6 +193,7 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
@ -239,6 +241,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,

View File

@ -464,6 +464,8 @@ typedef struct SFillPhysiNode {
EOrder inputTsOrder;
} SFillPhysiNode;
typedef SFillPhysiNode SStreamFillPhysiNode;
typedef struct SMultiTableIntervalPhysiNode {
SIntervalPhysiNode interval;
SNodeList* pPartitionKeys;

View File

@ -31,6 +31,7 @@ typedef struct {
TDB* db;
TTB* pStateDb;
TTB* pFuncStateDb;
TTB* pFillStateDb; // todo refactor
TXN txn;
} SStreamState;
@ -51,15 +52,22 @@ int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
void streamFreeVal(void* val);
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey* key);
void streamStateFreeCur(SStreamStateCur* pCur);
int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);

View File

@ -565,6 +565,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_GET_META_ERROR TAOS_DEF_ERROR_CODE(0, 0x2662)
#define TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS TAOS_DEF_ERROR_CODE(0, 0x2663)
#define TSDB_CODE_PAR_NOT_SUPPORT_JOIN TAOS_DEF_ERROR_CODE(0, 0x2664)
#define TSDB_CODE_PAR_INVALID_TAGS_PC TAOS_DEF_ERROR_CODE(0, 0x2665)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner

View File

@ -1,22 +0,0 @@
[Unit]
Description=Nginx For TDengine Service
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
PIDFile=/usr/local/nginxd/logs/nginx.pid
ExecStart=/usr/local/nginxd/sbin/nginx
ExecStop=/usr/local/nginxd/sbin/nginx -s stop
TimeoutStopSec=1000000s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
StandardOutput=null
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target

View File

@ -38,8 +38,6 @@ temp_version=""
fin_result=""
service_config_dir="/etc/systemd/system"
nginx_port=6060
nginx_dir="/usr/local/nginxd"
# Color setting
RED='\033[0;31m'
@ -132,10 +130,7 @@ function check_main_path() {
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i
done
check_file ${install_main_dir} "share/admin"
fi
echo -e "Check main path:\033[32mOK\033[0m!"
}
@ -150,9 +145,6 @@ function check_bin_path() {
for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
if [ "$verMode" == "cluster" ]; then
check_file ${nginx_dir}/sbin nginx
fi
echo -e "Check bin path:\033[32mOK\033[0m!"
}

View File

@ -219,12 +219,12 @@ fi
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "arm64" ]] || [[ "$cpuType" == "arm32" ]] || [[ "$cpuType" == "mips64" ]]; then
if [ "$verMode" != "cluster" ]; then
# community-version compile
cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
cmake ../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
else
if [[ "$dbName" != "taos" ]]; then
replace_enterprise_$dbName
fi
cmake ../../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"

View File

@ -50,8 +50,7 @@ install_main_dir=${installDir}
bin_dir="${installDir}/bin"
service_config_dir="/etc/systemd/system"
nginx_port=6060
nginx_dir="/usr/local/nginxd"
web_port=6041
# Color setting
RED='\033[0;31m'
@ -182,7 +181,7 @@ function install_main_path() {
${csudo}mkdir -p ${install_main_dir}/include
# ${csudo}mkdir -p ${install_main_dir}/init.d
if [ "$verMode" == "cluster" ]; then
${csudo}mkdir -p ${nginx_dir}
${csudo}mkdir -p ${install_main_dir}/share
fi
if [[ -e ${script_dir}/email ]]; then
@ -218,12 +217,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
if [ "$verMode" == "cluster" ]; then
${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/*
${csudo}mkdir -p ${nginx_dir}/logs
${csudo}chmod 777 ${nginx_dir}/sbin/nginx
fi
}
function install_lib() {
@ -574,6 +567,13 @@ function install_examples() {
fi
}
function install_web() {
if [ -d "${script_dir}/share" ]; then
${csudo}cp -rf ${script_dir}/share/* ${install_main_dir}/share
fi
}
function clean_service_on_sysvinit() {
if pidof ${serverName} &>/dev/null; then
${csudo}service ${serverName} stop || :
@ -654,16 +654,6 @@ function clean_service_on_systemd() {
fi
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config}
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/nginxd.service"
if systemctl is-active --quiet nginxd; then
echo "Nginx for ${productName} is running, stopping it..."
${csudo}systemctl stop nginxd &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable nginxd &>/dev/null || echo &>/dev/null
${csudo}rm -f ${nginx_service_config}
fi
}
function install_service_on_systemd() {
@ -677,19 +667,6 @@ function install_service_on_systemd() {
${csudo}systemctl enable ${serverName}
${csudo}systemctl daemon-reload
if [ "$verMode" == "cluster" ]; then
[ -f ${script_dir}/cfg/nginxd.service ] &&
${csudo}cp ${script_dir}/cfg/nginxd.service \
${service_config_dir}/ || :
${csudo}systemctl daemon-reload
if ! ${csudo}systemctl enable nginxd &>/dev/null; then
${csudo}systemctl daemon-reexec
${csudo}systemctl enable nginxd
fi
${csudo}systemctl start nginxd
fi
}
function install_adapter_service() {
@ -793,19 +770,6 @@ function updateProduct() {
sleep 1
fi
if [ "$verMode" == "cluster" ]; then
if pidof nginx &>/dev/null; then
if ((${service_mod} == 0)); then
${csudo}systemctl stop nginxd || :
elif ((${service_mod} == 1)); then
${csudo}service nginxd stop || :
else
kill_process nginx
fi
sleep 1
fi
fi
install_main_path
install_log
@ -817,6 +781,7 @@ function updateProduct() {
fi
install_examples
install_web
if [ -z $1 ]; then
install_bin
install_service
@ -825,18 +790,6 @@ function updateProduct() {
install_adapter_config
openresty_work=false
if [ "$verMode" == "cluster" ]; then
# Check if openresty is installed
# Check if nginx is installed successfully
if type curl &>/dev/null; then
if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then
echo -e "\033[44;32;1mNginx for ${productName} is updated successfully!${NC}"
openresty_work=true
else
echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m"
fi
fi
fi
echo
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
@ -857,7 +810,7 @@ function updateProduct() {
fi
if [ ${openresty_work} = 'true' ]; then
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}"
else
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell${NC}"
fi
@ -906,6 +859,7 @@ function installProduct() {
install_connector
fi
install_examples
install_web
if [ -z $1 ]; then # install service and client
# For installing new
@ -915,17 +869,6 @@ function installProduct() {
install_adapter_config
openresty_work=false
if [ "$verMode" == "cluster" ]; then
# Check if nginx is installed successfully
if type curl &>/dev/null; then
if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then
echo -e "\033[44;32;1mNginx for ${productName} is installed successfully!${NC}"
openresty_work=true
else
echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m"
fi
fi
fi
install_config

View File

@ -151,6 +151,7 @@ function install_main_path() {
${csudo}mkdir -p ${install_main_dir}/driver
${csudo}mkdir -p ${install_main_dir}/examples
${csudo}mkdir -p ${install_main_dir}/include
${csudo}mkdir -p ${install_main_dir}/share
# ${csudo}mkdir -p ${install_main_dir}/init.d
else
${csudo}rm -rf ${install_main_dir} || ${csudo}rm -rf ${install_main_2_dir} || :
@ -161,6 +162,7 @@ function install_main_path() {
${csudo}mkdir -p ${install_main_dir}/driver || ${csudo}mkdir -p ${install_main_2_dir}/driver
${csudo}mkdir -p ${install_main_dir}/examples || ${csudo}mkdir -p ${install_main_2_dir}/examples
${csudo}mkdir -p ${install_main_dir}/include || ${csudo}mkdir -p ${install_main_2_dir}/include
${csudo}mkdir -p ${install_main_dir}/share || ${csudo}mkdir -p ${install_main_2_dir}/share
fi
}
@ -469,6 +471,16 @@ function install_examples() {
fi
}
function install_web() {
if [ -d "${binary_dir}/build/share" ]; then
if [ "$osType" != "Darwin" ]; then
${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_dir}/share || :
else
${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_dir}/share || ${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_2_dir}/share || :
fi
fi
}
function clean_service_on_sysvinit() {
if pidof ${serverName} &>/dev/null; then
${csudo}service ${serverName} stop || :
@ -596,6 +608,7 @@ function update_TDengine() {
install_lib
# install_connector
install_examples
install_web
install_bin
install_service

View File

@ -107,7 +107,7 @@ else
fi
install_files="${script_dir}/install.sh"
nginx_dir="${top_dir}/../enterprise/src/plugins/web"
web_dir="${top_dir}/../enterprise/src/plugins/web"
init_file_deb=${script_dir}/../deb/taosd
init_file_rpm=${script_dir}/../rpm/taosd
@ -132,10 +132,6 @@ if [ -f "${cfg_dir}/${serverName}.service" ]; then
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
fi
if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
fi
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
@ -222,16 +218,6 @@ if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh
mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
rm -rf ${install_dir}/nginxd/png
if [ "$cpuType" == "aarch64" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
elif [ "$cpuType" == "aarch32" ]; then
cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
fi
rm -rf ${install_dir}/nginxd/sbin/arm
fi
cd ${install_dir}
@ -288,6 +274,13 @@ if [[ $dbName == "taos" ]]; then
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
# Add web files
if [ -d "${web_dir}/admin" ]; then
mkdir -p ${install_dir}/share/
cp ${web_dir}/admin ${install_dir}/share/ -r
cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png
fi
fi
# Copy driver

View File

@ -27,13 +27,11 @@ local_bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
install_nginxd_dir="/usr/local/nginxd"
service_config_dir="/etc/systemd/system"
taos_service_name=${serverName}
taosadapter_service_name="taosadapter"
tarbitrator_service_name="tarbitratord"
nginx_service_name="nginxd"
csudo=""
if command -v sudo >/dev/null; then
csudo="sudo "
@ -153,18 +151,6 @@ function clean_service_on_systemd() {
fi
${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config}
if [ "$verMode" == "cluster" ]; then
nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
if [ -d ${install_nginxd_dir} ]; then
if systemctl is-active --quiet ${nginx_service_name}; then
echo "Nginx for ${productName} is running, stopping it..."
${csudo}systemctl stop ${nginx_service_name} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${nginx_service_name} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${nginx_service_config}
fi
fi
}
function clean_service_on_sysvinit() {
@ -239,7 +225,6 @@ clean_config
${csudo}rm -rf ${data_link_dir} || :
${csudo}rm -rf ${install_main_dir}
${csudo}rm -rf ${install_nginxd_dir}
if [[ -e /etc/os-release ]]; then
osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
else

View File

@ -19,6 +19,7 @@
#include "functionMgt.h"
#include "os.h"
#include "query.h"
#include "qworker.h"
#include "scheduler.h"
#include "tcache.h"
#include "tglobal.h"
@ -27,7 +28,6 @@
#include "trpc.h"
#include "tsched.h"
#include "ttime.h"
#include "qworker.h"
#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0
@ -70,11 +70,10 @@ static void deregisterRequest(SRequestObj *pRequest) {
int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);
int64_t nowUs = taosGetTimestampUs();
int64_t duration = nowUs - pRequest->metric.start;
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
" ms, current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%.2f ms, "
"current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
@ -85,11 +84,12 @@ static void deregisterRequest(SRequestObj *pRequest) {
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
"us, planner:%" PRId64 "us, exec:%" PRId64 "us",
"us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%"PRIx64,
duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
pRequest->metric.planEnd - pRequest->metric.semanticEnd,
pRequest->metric.resultReady - pRequest->metric.planEnd);
pRequest->metric.resultReady - pRequest->metric.planEnd, pRequest->requestId);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
}

View File

@ -483,7 +483,7 @@ void setResPrecision(SReqResultInfo* pResInfo, int32_t precision) {
int32_t buildVnodePolicyNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SArray* pDbVgList) {
SArray* nodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
char *policy = (tsQueryPolicy == QUERY_POLICY_VNODE) ? "vnode" : "client";
char* policy = (tsQueryPolicy == QUERY_POLICY_VNODE) ? "vnode" : "client";
int32_t dbNum = taosArrayGetSize(pDbVgList);
for (int32_t i = 0; i < dbNum; ++i) {
@ -815,7 +815,7 @@ int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) {
int32_t handleQueryExecRsp(SRequestObj* pRequest) {
if (NULL == pRequest->body.resInfo.execRes.res) {
return TSDB_CODE_SUCCESS;
return pRequest->code;
}
SCatalog* pCatalog = NULL;
@ -868,44 +868,43 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
return code;
}
// todo: refactor the error code management
void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SRequestObj* pRequest = (SRequestObj*)param;
STscObj* pTscObj = pRequest->pTscObj;
pRequest->code = code;
pRequest->metric.resultReady = taosGetTimestampUs();
if (pResult) {
destroyQueryExecRes(&pRequest->body.resInfo.execRes);
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
}
if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_DELETE == pRequest->type ||
TDMT_VND_CREATE_TABLE == pRequest->type) {
int32_t type = pRequest->type;
if (TDMT_VND_SUBMIT == type || TDMT_VND_DELETE == type || TDMT_VND_CREATE_TABLE == type) {
if (pResult) {
pRequest->body.resInfo.numOfRows = pResult->numOfRows;
if (TDMT_VND_SUBMIT == pRequest->type) {
STscObj* pTscObj = pRequest->pTscObj;
// record the insert rows
if (TDMT_VND_SUBMIT == type) {
SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows);
}
}
schedulerFreeJob(&pRequest->body.queryJob, 0);
pRequest->metric.execEnd = taosGetTimestampUs();
}
taosMemoryFree(pResult);
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
pRequest->requestId);
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
tstrerror(code), pRequest->requestId);
STscObj* pTscObj = pRequest->pTscObj;
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, tstrerror(code), pRequest->retry, pRequest->requestId);
pRequest->prevCode = code;
schedulerFreeJob(&pRequest->body.queryJob, 0);
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
doAsyncQuery(pRequest, true);
return;
}
@ -915,7 +914,11 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
removeMeta(pTscObj, pRequest->targetTableList);
}
handleQueryExecRsp(pRequest);
pRequest->metric.execEnd = taosGetTimestampUs();
int32_t code1 = handleQueryExecRsp(pRequest);
if (pRequest->code == TSDB_CODE_SUCCESS && pRequest->code != code1) {
pRequest->code = code1;
}
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@ -1056,7 +1059,6 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
}
pRequest->metric.planEnd = taosGetTimestampUs();
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);

View File

@ -20,13 +20,13 @@
#include "functionMgt.h"
#include "os.h"
#include "query.h"
#include "qworker.h"
#include "scheduler.h"
#include "tglobal.h"
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
#include "version.h"
#include "qworker.h"
#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0
@ -700,6 +700,7 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
SQuery *pQuery = pRequest->pQuery;
pRequest->metric.ctgEnd = taosGetTimestampUs();
qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);
if (code == TSDB_CODE_SUCCESS) {
code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery);
@ -723,13 +724,16 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
destorySqlParseWrapper(pWrapper);
tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, reqId:0x%" PRIx64, pRequest->self,
pRequest->requestId);
double el = (pRequest->metric.semanticEnd - pRequest->metric.ctgEnd)/1000.0;
tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, elapsed time:%.2f ms, reqId:0x%" PRIx64,
pRequest->self, el, pRequest->requestId);
launchAsyncQuery(pRequest, pQuery, pResultMeta);
} else {
destorySqlParseWrapper(pWrapper);
qDestroyQuery(pRequest->pQuery);
pRequest->pQuery = NULL;
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
@ -813,7 +817,6 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
pRequest->metric.syntaxEnd = taosGetTimestampUs();
if (!updateMetaForce) {
STscObj *pTscObj = pRequest->pTscObj;
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
if (NULL == pRequest->pQuery->pRoot) {
atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
@ -860,6 +863,7 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
SRequestObj *pRequest = (SRequestObj *)param;
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
pRequest->metric.resultReady = taosGetTimestampUs();
tscDebug("0x%" PRIx64 " enter scheduler fetch cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
tstrerror(code), pRequest->requestId);

View File

@ -515,7 +515,7 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)msg;
topic = pMetaRspObj->topic;
vgId = pMetaRspObj->vgId;
} else if(TD_RES_TMQ_METADATA(msg)) {
} else if (TD_RES_TMQ_METADATA(msg)) {
SMqTaosxRspObj* pRspObj = (SMqTaosxRspObj*)msg;
topic = pRspObj->topic;
vgId = pRspObj->vgId;
@ -715,7 +715,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
int32_t epoch = tmq->epoch;
SMqHbReq* pReq = taosMemoryMalloc(sizeof(SMqHbReq));
if (pReq == NULL) goto OVER;
pReq->consumerId = consumerId;
pReq->consumerId = htobe64(consumerId);
pReq->epoch = epoch;
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
@ -1603,6 +1603,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
return NULL;
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_RSP) {
SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
tscDebug("consumer %ld actual process poll rsp", tmq->consumerId);
/*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
if (pollRspWrapper->dataRsp.head.epoch == consumerEpoch) {
@ -1661,9 +1662,9 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
// build rsp
void* pRsp = NULL;
if(pollRspWrapper->taosxRsp.createTableNum == 0){
if (pollRspWrapper->taosxRsp.createTableNum == 0) {
pRsp = tmqBuildRspFromWrapper(pollRspWrapper);
}else{
} else {
pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper);
}
taosFreeQitem(pollRspWrapper);
@ -1718,7 +1719,10 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
while (1) {
tmqHandleAllDelayedTask(tmq);
if (tmqPollImpl(tmq, timeout) < 0) return NULL;
if (tmqPollImpl(tmq, timeout) < 0) {
tscDebug("return since poll err");
/*return NULL;*/
}
rspObj = tmqHandleAllRsp(tmq, timeout, false);
if (rspObj) {
@ -1850,12 +1854,12 @@ const char* tmq_get_table_name(TAOS_RES* res) {
return (const char*)taosArrayGetP(pRspObj->rsp.blockTbName, pRspObj->resIter);
} else if (TD_RES_TMQ_METADATA(res)) {
SMqTaosxRspObj* pRspObj = (SMqTaosxRspObj*)res;
if (!pRspObj->rsp.withTbName || pRspObj->rsp.blockTbName == NULL || pRspObj->resIter < 0 ||
pRspObj->resIter >= pRspObj->rsp.blockNum) {
return NULL;
}
return (const char*)taosArrayGetP(pRspObj->rsp.blockTbName, pRspObj->resIter);
if (!pRspObj->rsp.withTbName || pRspObj->rsp.blockTbName == NULL || pRspObj->resIter < 0 ||
pRspObj->resIter >= pRspObj->rsp.blockNum) {
return NULL;
}
return (const char*)taosArrayGetP(pRspObj->rsp.blockTbName, pRspObj->resIter);
}
return NULL;
}

View File

@ -85,8 +85,7 @@ uint16_t tsTelemPort = 80;
char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value.
// If set to empty system will generate table name using MD5 hash.
bool tsSmlDataFormat =
true; // true means that the name and order of cols in each line are the same(only for influx protocol)
bool tsSmlDataFormat = false;  // true means that the name and order of cols in each line are the same (only for influx protocol)
// query
int32_t tsQueryPolicy = 1;
@ -95,6 +94,7 @@ int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from w
bool tsQueryPlannerTrace = false;
int32_t tsQueryNodeChunkSize = 32 * 1024;
bool tsQueryUseNodeAllocator = true;
bool tsKeepColumnName = false;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@ -292,6 +292,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "queryPlannerTrace", tsQueryPlannerTrace, true) != 0) return -1;
if (cfgAddInt32(pCfg, "queryNodeChunkSize", tsQueryNodeChunkSize, 1024, 128 * 1024, true) != 0) return -1;
if (cfgAddBool(pCfg, "queryUseNodeAllocator", tsQueryUseNodeAllocator, true) != 0) return -1;
if (cfgAddBool(pCfg, "keepColumnName", tsKeepColumnName, true) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
@ -654,6 +655,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;
tsQueryUseNodeAllocator = cfgGetItem(pCfg, "queryUseNodeAllocator")->bval;
tsKeepColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
return 0;
}
@ -847,6 +849,9 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
break;
}
case 'k': {
if (strcasecmp("keepColumnName", name) == 0) {
tsKeepColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
}
break;
}
case 'l': {
@ -923,9 +928,9 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
}
case 'u': {
if (strcasecmp("multiProcess", name) == 0) {
#if !defined(WINDOWS) && !defined(DARWIN)
#if !defined(WINDOWS) && !defined(DARWIN)
tsMultiProcess = cfgGetItem(pCfg, "multiProcess")->bval;
#endif
#endif
} else if (strcasecmp("udfDebugFlag", name) == 0) {
udfDebugFlag = cfgGetItem(pCfg, "udfDebugFlag")->i32;
}

View File

@ -272,6 +272,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
if (pConsumer == NULL) {
mError("consumer %ld not exist", consumerId);
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
return -1;
}

View File

@ -379,6 +379,8 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
topicObj.ast = strdup(pCreate->ast);
topicObj.astLen = strlen(pCreate->ast) + 1;
qDebugL("ast %s", topicObj.ast);
SNode *pAst = NULL;
if (nodesStringToNode(pCreate->ast, &pAst) != 0) {
taosMemoryFree(topicObj.ast);

View File

@ -217,7 +217,7 @@ int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqSeekVer(STqReader *pReader, int64_t ver);
int32_t tqNextBlock(STqReader *pReader, SFetchRet *ret);
int32_t tqReaderSetDataMsg(STqReader *pReader, SSubmitReq *pMsg, int64_t ver);
int32_t tqReaderSetDataMsg(STqReader *pReader, const SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock(STqReader *pReader);
bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReader *pReader);

View File

@ -113,10 +113,20 @@ typedef struct {
} STqHandle;
typedef struct {
SMqDataRsp dataRsp;
SMqRspHead rspHead;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
SRpcHandleInfo pInfo;
} STqPushEntry;
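// Role of STqPushEntry, inferred from tqProcessPollReq/tqPushMsg/tqPushDataRsp below:
// a poll request that found no new data is parked here instead of being answered
// immediately; pInfo keeps the rpc handle to reply on, rspHead identifies the
// consumer and epoch, and dataRsp is the pre-initialized response that a later
// submit appends its result blocks into.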
struct STQ {
SVnode* pVnode;
char* path;
SHashObj* pPushMgr; // consumerId -> STqHandle*
SVnode* pVnode;
char* path;
SRWLatch pushLock;
SHashObj* pPushMgr; // consumerId -> STqPushEntry
SHashObj* pHandle; // subKey -> STqHandle
SHashObj* pCheckInfo; // topic -> SAlterCheckInfo
@ -146,7 +156,9 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
// tqExec
int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp);
int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols);
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp);
int32_t tqPushDataRsp(STQ* pTq, STqPushEntry* pPushEntry);
// tqMeta
int32_t tqMetaOpen(STQ* pTq);

View File

@ -32,6 +32,12 @@ extern "C" {
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSD ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
#define TSDB_CHECK_CODE(CODE, LINO, LABEL) \
if (CODE) { \
LINO = __LINE__; \
goto LABEL; \
}
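A sketch of the call pattern the new macro enables, with hypothetical function and step names: record the failing line, jump to a shared exit label, and report once.
static int32_t tsdbDoDemo(STsdb *pTsdb) {
  int32_t code = 0;
  int32_t lino = 0;

  code = tsdbDemoStep(pTsdb);  // hypothetical step returning 0 or an error code
  TSDB_CHECK_CODE(code, lino, _exit);

_exit:
  if (code) {
    tsdbError("failed at line %d since %s", lino, tstrerror(code));
  }
  return code;
}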
typedef struct TSDBROW TSDBROW;
typedef struct TABLEID TABLEID;
typedef struct TSDBKEY TSDBKEY;
@ -150,7 +156,7 @@ int32_t tCmprBlockL(void const *lhs, void const *rhs);
int32_t tBlockDataCreate(SBlockData *pBlockData);
void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear);
int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema);
int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema, int16_t *aCid, int32_t nCid);
int32_t tBlockDataInitEx(SBlockData *pBlockData, SBlockData *pBlockDataFrom);
void tBlockDataReset(SBlockData *pBlockData);
int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid);
@ -272,6 +278,7 @@ int32_t tsdbReadSttBlk(SDataFReader *pReader, int32_t iStt, SArray *aSttBlk);
int32_t tsdbReadBlockSma(SDataFReader *pReader, SDataBlk *pBlock, SArray *aColumnDataAgg);
int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pBlock, SBlockData *pBlockData);
int32_t tsdbReadSttBlock(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData);
int32_t tsdbReadSttBlockEx(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData);
// SDelFWriter
int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb);
int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync);
@ -633,6 +640,9 @@ typedef struct SSttBlockLoadInfo {
int32_t currentLoadBlockIndex;
int32_t loadBlocks;
double elapsedTime;
STSchema *pSchema;
int16_t *colIds;
int32_t numOfCols;
} SSttBlockLoadInfo;
typedef struct SMergeTree {
@ -652,13 +662,14 @@ typedef struct {
} SSkmInfo;
int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
STimeWindow *pTimeWindow, SVersionRange *pVerRange, void *pLoadInfo, const char *idStr);
STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
bool destroyLoadInfo, const char *idStr);
void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter);
bool tMergeTreeNext(SMergeTree *pMTree);
TSDBROW tMergeTreeGetRow(SMergeTree *pMTree);
void tMergeTreeClose(SMergeTree *pMTree);
SSttBlockLoadInfo *tCreateLastBlockLoadInfo();
SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols);
void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el);
void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
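With the reworked signatures, the caller supplies the schema and column list up front and may hand ownership of the load info to the merge tree through the new destroyLoadInfo flag; a one-shot sketch matching the getNextRowFromFSLast call site later in this commit (window, version range, and id string are illustrative).
SSttBlockLoadInfo *pLoadInfo = tCreateLastBlockLoadInfo(pTSchema, NULL, 0);
SMergeTree mTree = {0};
int32_t code = tMergeTreeOpen(&mTree, 1 /* backward */, pFReader, suid, uid,
                              &(STimeWindow){.skey = TSKEY_MIN, .ekey = TSKEY_MAX},
                              &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX},
                              pLoadInfo, true /* tree frees pLoadInfo on close */, NULL);
if (code == 0) {
  while (tMergeTreeNext(&mTree)) {
    TSDBROW row = tMergeTreeGetRow(&mTree);  // consume merged rows in order
    (void)row;
  }
  tMergeTreeClose(&mTree);
}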

View File

@ -99,7 +99,6 @@ void vnodeSyncStart(SVnode* pVnode);
void vnodeSyncClose(SVnode* pVnode);
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg);
bool vnodeIsLeader(SVnode* pVnode);
bool vnodeIsReadyForRead(SVnode* pVnode);
bool vnodeIsRoleLeader(SVnode* pVnode);
#ifdef __cplusplus

View File

@ -872,7 +872,7 @@ SArray *metaGetSmaIdsByTable(SMeta *pMeta, tb_uid_t uid) {
pSmaIdxKey = (SSmaIdxKey *)pCur->pKey;
if (taosArrayPush(pUids, &pSmaIdxKey->smaUid) < 0) {
if (!taosArrayPush(pUids, &pSmaIdxKey->smaUid)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
metaCloseSmaCursor(pCur);
taosArrayDestroy(pUids);
@ -915,7 +915,7 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) {
}
}
if (taosArrayPush(pUids, &uid) < 0) {
if (!taosArrayPush(pUids, &uid)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
metaCloseSmaCursor(pCur);
taosArrayDestroy(pUids);

View File

@ -213,7 +213,7 @@ static int32_t tdUpdateQTaskInfoFiles(SSma *pSma, SRSmaStat *pStat) {
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), committed, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
if (taosCheckExistFile(qTaskInfoFullName)) {
SQTaskFile qFile = {.nRef = 1, .padding = 0, .version = committed, .size = 0};
if (taosArrayPush(pFS->aQTaskInf, &qFile) < 0) {
if (!taosArrayPush(pFS->aQTaskInf, &qFile)) {
taosWUnLockLatch(RSMA_FS_LOCK(pStat));
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;

View File

@ -386,7 +386,7 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) {
}
break;
default:
smaError("vgId:%d, undefined smaType:%", SMA_VID(pSma), smaType);
smaError("vgId:%d, undefined smaType:%" PRIi8, SMA_VID(pSma), smaType);
return TSDB_CODE_FAILED;
}

View File

@ -54,7 +54,7 @@ int32_t tdRSmaFSOpen(SSma *pSma, int64_t version) {
if ((terrno = tdRSmaFSUpsertQTaskFile(RSMA_FS(pStat), &qTaskFile)) < 0) {
goto _end;
}
smaInfo("vgId:%d, open fs, version:%" PRIi64 ", ref:%" PRIi64, TD_VID(pVnode), qTaskFile.version, qTaskFile.nRef);
smaInfo("vgId:%d, open fs, version:%" PRIi64 ", ref:%d", TD_VID(pVnode), qTaskFile.version, qTaskFile.nRef);
}
_end:

View File

@ -196,7 +196,8 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids,
if (!suid || !tbUids) {
terrno = TSDB_CODE_INVALID_PTR;
smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), suid ? *suid : -1,
terrstr());
return TSDB_CODE_FAILED;
}
@ -566,6 +567,7 @@ static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid)
}
if (!taosArrayPush(pUidArray, uid)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosArrayDestroy(pUidArray);
return TSDB_CODE_FAILED;
}
if (taosHashPut(pStore->uidHash, &suid, sizeof(suid), &pUidArray, sizeof(pUidArray)) < 0) {
@ -690,7 +692,8 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
while (1) {
uint64_t ts;
int32_t code = qExecTaskOpt(taskInfo, pResList, &ts, NULL);
bool hasMore = false;
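// hasMore is the new out-parameter of qExecTaskOpt; judging by the name it reports
// whether the task still has blocks pending beyond this batch (assumption; unused here).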
int32_t code = qExecTaskOpt(taskInfo, pResList, &ts, &hasMore, NULL);
if (code < 0) {
if (code == TSDB_CODE_QRY_IN_EXEC) {
break;
@ -1677,13 +1680,13 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
SRSmaInfoItem *pItem = NULL;
if (!(pRSmaRef = taosHashGet(smaMgmt.refHash, &param, POINTER_BYTES))) {
smaDebug("rsma fetch task not start since rsma info item:%p not exist in refHash:%p, rsetId:%" PRIi64, param,
*(int64_t *)&param, smaMgmt.refHash, smaMgmt.rsetId);
smaDebug("rsma fetch task not start since rsma info item:%p not exist in refHash:%p, rsetId:%d", param,
smaMgmt.refHash, smaMgmt.rsetId);
return;
}
if (!(pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, pRSmaRef->refId))) {
smaDebug("rsma fetch task not start since rsma stat already destroyed, rsetId:%" PRIi64 " refId:%d)",
smaDebug("rsma fetch task not start since rsma stat already destroyed, rsetId:%d refId:%" PRIi64 ")",
smaMgmt.rsetId, pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
taosHashRemove(smaMgmt.refHash, &param, POINTER_BYTES);
return;
@ -1692,7 +1695,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
pSma = pStat->pSma;
if (!(pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, pRSmaRef->suid))) {
smaDebug("rsma fetch task not start since rsma info not exist, rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId,
smaDebug("rsma fetch task not start since rsma info not exist, rsetId:%d refId:%" PRIi64 ")", smaMgmt.rsetId,
pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
taosHashRemove(smaMgmt.refHash, &param, POINTER_BYTES);
@ -1700,7 +1703,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
}
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
smaDebug("rsma fetch task not start since rsma info already deleted, rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId,
smaDebug("rsma fetch task not start since rsma info already deleted, rsetId:%d refId:%" PRIi64 ")", smaMgmt.rsetId,
pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
tdReleaseRSmaInfo(pSma, pRSmaInfo);
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
@ -1716,7 +1719,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
case TASK_TRIGGER_STAT_PAUSED:
case TASK_TRIGGER_STAT_CANCELLED: {
smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8
", rsetId rsetId:%" PRIi64 " refId:%d",
", rsetId:%d refId:%" PRIi64,
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaRef->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
@ -1844,7 +1847,7 @@ static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SA
void *msg = NULL;
taosGetQitem(qall, (void **)&msg);
if (msg) {
if (taosArrayPush(pSubmitArr, &msg) < 0) {
if (!taosArrayPush(pSubmitArr, &msg)) {
tdFreeRSmaSubmitItems(pSubmitArr);
goto _err;
}

View File

@ -70,6 +70,8 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapRead
return TSDB_CODE_SUCCESS;
_err:
if (pReader) rsmaSnapReaderClose(&pReader);
*ppReader = NULL;
smaError("vgId:%d, vnode snapshot rsma reader open failed since %s", TD_VID(pVnode), tstrerror(code));
return TSDB_CODE_FAILED;
}
@ -101,8 +103,8 @@ static int32_t rsmaQTaskInfSnapReaderOpen(SRSmaSnapReader* pReader, int64_t vers
if (!taosCheckExistFile(qTaskInfoFullName)) {
tdRSmaFSUnRef(pSma, pStat, version);
smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo version %" PRIi64 " not need as %s not exists",
TD_VID(pVnode), qTaskInfoFullName);
smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo version %" PRIi64 " not need as %s not exist",
TD_VID(pVnode), version, qTaskInfoFullName);
return TSDB_CODE_SUCCESS;
}
@ -336,6 +338,7 @@ int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWrit
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), 0, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
TdFilePtr qTaskF = taosCreateFile(qTaskInfoFullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (!qTaskF) {
taosMemoryFree(qWriter);
code = TAOS_SYSTEM_ERROR(errno);
smaError("vgId:%d, rsma snapshot writer open %s failed since %s", TD_VID(pSma->pVnode), qTaskInfoFullName,
tstrerror(code));
@ -356,6 +359,7 @@ int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWrit
_err:
smaError("vgId:%d, rsma snapshot writer open failed since %s", TD_VID(pSma->pVnode), tstrerror(code));
if (pWriter) rsmaSnapWriterClose(&pWriter, 0);
*ppWriter = NULL;
return code;
}
@ -449,11 +453,11 @@ static int32_t rsmaSnapWriteQTaskInfo(SRSmaSnapWriter* pWriter, uint8_t* pData,
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
smaInfo("vgId:%d, vnode snapshot rsma write qtaskinfo %s succeed", SMA_VID(pWriter->pSma), qWriter->fname);
} else {
smaInfo("vgId:%d, vnode snapshot rsma write qtaskinfo is not needed", SMA_VID(pWriter->pSma));
}
smaInfo("vgId:%d, vnode snapshot rsma write qtaskinfo %s succeed", SMA_VID(pWriter->pSma), qWriter->fname);
_exit:
return code;

View File

@ -290,19 +290,19 @@ int32_t tdRemoveTFile(STFile *pTFile) {
void *tdAcquireSmaRef(int32_t rsetId, int64_t refId) {
void *pResult = taosAcquireRef(rsetId, refId);
if (!pResult) {
smaWarn("rsma acquire ref for rsetId:%" PRIi64 " refId:%d failed since %s", rsetId, refId, terrstr());
smaWarn("rsma acquire ref for rsetId:%d refId:%" PRIi64 " failed since %s", rsetId, refId, terrstr());
} else {
smaDebug("rsma acquire ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId);
smaDebug("rsma acquire ref for rsetId:%d refId:%" PRIi64 " success", rsetId, refId);
}
return pResult;
}
int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId) {
if (taosReleaseRef(rsetId, refId) < 0) {
smaWarn("rsma release ref for rsetId:%" PRIi64 " refId:%d failed since %s", rsetId, refId, terrstr());
smaWarn("rsma release ref for rsetId:%d refId:%" PRIi64 " failed since %s", rsetId, refId, terrstr());
return TSDB_CODE_FAILED;
}
smaDebug("rsma release ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId);
smaDebug("rsma release ref for rsetId:%d refId:%" PRIi64 " success", rsetId, refId);
return TSDB_CODE_SUCCESS;
}

View File

@ -65,6 +65,11 @@ static void destroySTqHandle(void* data) {
}
}
static void tqPushEntryFree(void* data) {
STqPushEntry* p = *(void**)data;
taosMemoryFree(p);
}
STQ* tqOpen(const char* path, SVnode* pVnode) {
STQ* pTq = taosMemoryCalloc(1, sizeof(STQ));
if (pTq == NULL) {
@ -78,7 +83,9 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
taosHashSetFreeFp(pTq->pHandle, destroySTqHandle);
pTq->pPushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
taosInitRWLatch(&pTq->pushLock);
pTq->pPushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
taosHashSetFreeFp(pTq->pPushMgr, tqPushEntryFree);
pTq->pCheckInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
@ -153,6 +160,65 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq,
return 0;
}
int32_t tqPushDataRsp(STQ* pTq, STqPushEntry* pPushEntry) {
SMqDataRsp* pRsp = &pPushEntry->dataRsp;
ASSERT(taosArrayGetSize(pRsp->blockData) == pRsp->blockNum);
ASSERT(taosArrayGetSize(pRsp->blockDataLen) == pRsp->blockNum);
ASSERT(!pRsp->withSchema);
ASSERT(taosArrayGetSize(pRsp->blockSchema) == 0);
if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
if (pRsp->blockNum > 0) {
ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version);
} else {
ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version);
}
}
int32_t len = 0;
int32_t code = 0;
tEncodeSize(tEncodeSMqDataRsp, pRsp, len, code);
if (code < 0) {
return -1;
}
int32_t tlen = sizeof(SMqRspHead) + len;
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
return -1;
}
memcpy(buf, &pPushEntry->rspHead, sizeof(SMqRspHead));
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
SEncoder encoder = {0};
tEncoderInit(&encoder, abuf, len);
tEncodeSMqDataRsp(&encoder, pRsp);
tEncoderClear(&encoder);
SRpcMsg rsp = {
.info = pPushEntry->pInfo,
.pCont = buf,
.contLen = tlen,
.code = 0,
};
tmsgSendRsp(&rsp);
char buf1[80] = {0};
char buf2[80] = {0};
tFormatOffset(buf1, 80, &pRsp->reqOffset);
tFormatOffset(buf2, 80, &pRsp->rspOffset);
tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) push rsp, block num: %d, reqOffset:%s, rspOffset:%s",
TD_VID(pTq->pVnode), pPushEntry->rspHead.consumerId, pRsp->head.epoch, pRsp->blockNum, buf1, buf2);
return 0;
}
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp) {
ASSERT(taosArrayGetSize(pRsp->blockData) == pRsp->blockNum);
ASSERT(taosArrayGetSize(pRsp->blockDataLen) == pRsp->blockNum);
@ -354,6 +420,8 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su
return -1;
}
pRsp->withTbName = 0;
#if 0
pRsp->withTbName = pReq->withTbName;
if (pRsp->withTbName) {
pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
@ -362,6 +430,7 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su
return -1;
}
}
#endif
if (subType == TOPIC_SUB_TYPE__COLUMN) {
pRsp->withSchema = false;
@ -477,11 +546,33 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
// lock
taosWLockLatch(&pTq->pushLock);
tqScanData(pTq, pHandle, &dataRsp, &fetchOffsetNew);
#if 1
if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG &&
dataRsp.reqOffset.version == dataRsp.rspOffset.version) {
STqPushEntry* pPushEntry = taosMemoryCalloc(1, sizeof(STqPushEntry));
if (pPushEntry != NULL) {
pPushEntry->pInfo = pMsg->info;
memcpy(pPushEntry->subKey, pHandle->subKey, TSDB_SUBSCRIBE_KEY_LEN);
dataRsp.withTbName = 0;
memcpy(&pPushEntry->dataRsp, &dataRsp, sizeof(SMqDataRsp));
pPushEntry->rspHead.consumerId = consumerId;
pPushEntry->rspHead.epoch = reqEpoch;
pPushEntry->rspHead.mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
taosHashPut(pTq->pPushMgr, pHandle->subKey, strlen(pHandle->subKey) + 1, &pPushEntry, sizeof(void*));
tqDebug("tmq poll: consumer %ld, subkey %s, vg %d save handle to push mgr", consumerId, pHandle->subKey,
TD_VID(pTq->pVnode));
// unlock
taosWUnLockLatch(&pTq->pushLock);
return 0;
}
}
taosWUnLockLatch(&pTq->pushLock);
#endif
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
@ -614,10 +705,22 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg;
int32_t code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
ASSERT(code == 0);
taosWLockLatch(&pTq->pushLock);
int32_t code = taosHashRemove(pTq->pPushMgr, pReq->subKey, strlen(pReq->subKey));
if (code != 0) {
tqDebug("vgId:%d, tq remove push handle %s", pTq->pVnode->config.vgId, pReq->subKey);
}
taosWUnLockLatch(&pTq->pushLock);
tqOffsetDelete(pTq->pOffsetStore, pReq->subKey);
code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
if (code != 0) {
tqError("cannot process tq delete req %s, since no such handle", pReq->subKey);
}
code = tqOffsetDelete(pTq->pOffsetStore, pReq->subKey);
if (code != 0) {
tqError("cannot process tq delete req %s, since no such offset", pReq->subKey);
}
if (tqMetaDeleteHandle(pTq, pReq->subKey) < 0) {
ASSERT(0);
@ -756,7 +859,9 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
atomic_add_fetch_32(&pHandle->epoch, 1);
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
// TODO
ASSERT(0);
}
// close handle
}
return 0;

View File

@ -15,7 +15,7 @@
#include "tq.h"
static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols) {
int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols) {
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
void* buf = taosMemoryCalloc(1, dataStrLen);
if (buf == NULL) return -1;
@ -243,7 +243,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp
}
if (pHandle->fetchMeta) {
SSubmitBlk* pBlk = pReader->pBlock;
int32_t schemaLen = htonl(pBlk->schemaLen);
int32_t schemaLen = htonl(pBlk->schemaLen);
if (schemaLen > 0) {
if (pRsp->createTableNum == 0) {
pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
@ -278,7 +278,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp
}
if (pHandle->fetchMeta) {
SSubmitBlk* pBlk = pReader->pBlock;
int32_t schemaLen = htonl(pBlk->schemaLen);
int32_t schemaLen = htonl(pBlk->schemaLen);
if (schemaLen > 0) {
if (pRsp->createTableNum == 0) {
pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));

View File

@ -213,6 +213,97 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
#endif
int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
tqDebug("vgId:%d tq push msg ver %ld, type: %s", pTq->pVnode->config.vgId, ver, TMSG_INFO(msgType));
if (msgType == TDMT_VND_SUBMIT) {
// lock push mgr to avoid potential msg lost
taosWLockLatch(&pTq->pushLock);
tqDebug("vgId:%d push handle num %d", pTq->pVnode->config.vgId, taosHashGetSize(pTq->pPushMgr));
if (taosHashGetSize(pTq->pPushMgr) != 0) {
SArray* cachedKeys = taosArrayInit(0, sizeof(void*));
SArray* cachedKeyLens = taosArrayInit(0, sizeof(size_t));
void* data = taosMemoryMalloc(msgLen);
if (data == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("failed to copy data for stream since out of memory");
taosArrayDestroy(cachedKeys);
taosArrayDestroy(cachedKeyLens);
taosWUnLockLatch(&pTq->pushLock);  // do not return with pushLock held
return -1;
}
memcpy(data, msg, msgLen);
SSubmitReq* pReq = (SSubmitReq*)data;
pReq->version = ver;
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->pPushMgr, pIter);
if (pIter == NULL) break;
STqPushEntry* pPushEntry = *(STqPushEntry**)pIter;
STqHandle* pHandle = taosHashGet(pTq->pHandle, pPushEntry->subKey, strlen(pPushEntry->subKey));
if (pHandle == NULL) {
tqDebug("vgId:%d cannot find handle %s", pTq->pVnode->config.vgId, pPushEntry->subKey);
continue;
}
if (pPushEntry->dataRsp.reqOffset.version > ver) {
tqDebug("vgId:%d push entry req version %ld, while push version %ld, skip", pTq->pVnode->config.vgId,
pPushEntry->dataRsp.reqOffset.version, ver);
continue;
}
STqExecHandle* pExec = &pHandle->execHandle;
qTaskInfo_t task = pExec->task;
SMqDataRsp* pRsp = &pPushEntry->dataRsp;
// prepare scan mem data
qStreamScanMemData(task, pReq);
// exec
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
if (pDataBlock == NULL) {
break;
}
tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
pRsp->blockNum++;
}
tqDebug("vgId:%d tq handle push, subkey: %s, block num: %d", pTq->pVnode->config.vgId, pPushEntry->subKey,
pRsp->blockNum);
if (pRsp->blockNum > 0) {
// set offset
tqOffsetResetToLog(&pRsp->rspOffset, ver);
// remove from hash
size_t kLen;
void* key = taosHashGetKey(pIter, &kLen);
void* keyCopy = taosMemoryMalloc(kLen);
memcpy(keyCopy, key, kLen);
taosArrayPush(cachedKeys, &keyCopy);
taosArrayPush(cachedKeyLens, &kLen);
tqPushDataRsp(pTq, pPushEntry);
}
}
// delete entry
for (int32_t i = 0; i < taosArrayGetSize(cachedKeys); i++) {
void* key = taosArrayGetP(cachedKeys, i);
size_t kLen = *(size_t*)taosArrayGet(cachedKeyLens, i);
if (taosHashRemove(pTq->pPushMgr, key, kLen) != 0) {
ASSERT(0);
}
}
taosArrayDestroyP(cachedKeys, (FDelete)taosMemoryFree);
taosArrayDestroy(cachedKeyLens);
}
// unlock
taosWUnLockLatch(&pTq->pushLock);
}
if (vnodeIsRoleLeader(pTq->pVnode)) {
if (msgType == TDMT_VND_SUBMIT) {
if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;

View File

@ -15,21 +15,20 @@
#include "tq.h"
bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){
if(pHandle->execHandle.subType != TOPIC_SUB_TYPE__TABLE){
bool isValValidForTable(STqHandle* pHandle, SWalCont* pHead) {
if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__TABLE) {
return true;
}
int16_t msgType = pHead->msgType;
char* body = pHead->body;
int32_t bodyLen = pHead->bodyLen;
int16_t msgType = pHead->msgType;
char* body = pHead->body;
int32_t bodyLen = pHead->bodyLen;
int64_t tbSuid = pHandle->execHandle.execTb.suid;
int64_t realTbSuid = 0;
SDecoder coder;
void* data = POINTER_SHIFT(body, sizeof(SMsgHead));
int32_t len = bodyLen - sizeof(SMsgHead);
int64_t tbSuid = pHandle->execHandle.execTb.suid;
int64_t realTbSuid = 0;
SDecoder coder;
void* data = POINTER_SHIFT(body, sizeof(SMsgHead));
int32_t len = bodyLen - sizeof(SMsgHead);
tDecoderInit(&coder, data, len);
if (msgType == TDMT_VND_CREATE_STB || msgType == TDMT_VND_ALTER_STB) {
@ -43,38 +42,38 @@ bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){
if (tDecodeSVDropStbReq(&coder, &req) < 0) {
goto end;
}
realTbSuid = req.suid;
realTbSuid = req.suid;
} else if (msgType == TDMT_VND_CREATE_TABLE) {
SVCreateTbBatchReq req = {0};
if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
goto end;
}
int32_t needRebuild = 0;
int32_t needRebuild = 0;
SVCreateTbReq* pCreateReq = NULL;
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){
if (pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid) {
needRebuild++;
}
}
if(needRebuild == 0){
if (needRebuild == 0) {
// do nothing
}else if(needRebuild == req.nReqs){
} else if (needRebuild == req.nReqs) {
realTbSuid = tbSuid;
}else{
} else {
realTbSuid = tbSuid;
SVCreateTbBatchReq reqNew = {0};
reqNew.pArray = taosArrayInit(req.nReqs, sizeof(struct SVCreateTbReq));
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){
if (pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid) {
reqNew.nReqs++;
taosArrayPush(reqNew.pArray, pCreateReq);
}
}
int tlen;
int tlen;
int32_t ret = 0;
tEncodeSize(tEncodeSVCreateTbBatchReq, &reqNew, tlen, ret);
void* buf = taosMemoryMalloc(tlen);
@ -107,7 +106,7 @@ bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){
}
}
} else if (msgType == TDMT_VND_ALTER_TABLE) {
SVAlterTbReq req = {0};
SVAlterTbReq req = {0};
if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
goto end;
@ -129,32 +128,32 @@ bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){
goto end;
}
int32_t needRebuild = 0;
int32_t needRebuild = 0;
SVDropTbReq* pDropReq = NULL;
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pDropReq = req.pReqs + iReq;
if(pDropReq->suid == tbSuid){
if (pDropReq->suid == tbSuid) {
needRebuild++;
}
}
if(needRebuild == 0){
if (needRebuild == 0) {
// do nothing
}else if(needRebuild == req.nReqs){
} else if (needRebuild == req.nReqs) {
realTbSuid = tbSuid;
}else{
} else {
realTbSuid = tbSuid;
SVDropTbBatchReq reqNew = {0};
reqNew.pArray = taosArrayInit(req.nReqs, sizeof(SVDropTbReq));
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pDropReq = req.pReqs + iReq;
if(pDropReq->suid == tbSuid){
if (pDropReq->suid == tbSuid) {
reqNew.nReqs++;
taosArrayPush(reqNew.pArray, pDropReq);
}
}
int tlen;
int tlen;
int32_t ret = 0;
tEncodeSize(tEncodeSVDropTbBatchReq, &reqNew, tlen, ret);
void* buf = taosMemoryMalloc(tlen);
@ -177,11 +176,11 @@ bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){
goto end;
}
realTbSuid = req.suid;
} else{
} else {
ASSERT(0);
}
end:
end:
tDecoderClear(&coder);
return tbSuid == realTbSuid;
}
@ -224,7 +223,7 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
code = -1;
goto END;
}
if(isValValidForTable(pHandle, pHead)){
if (isValValidForTable(pHandle, pHead)) {
*fetchOffset = offset;
code = 0;
goto END;
@ -241,7 +240,7 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
offset++;
}
}
END:
END:
taosThreadMutexUnlock(&pHandle->pWalReader->mutex);
return code;
}
@ -315,14 +314,18 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) {
return -1;
}
void* body = pReader->pWalReader->pHead->head.body;
#if 0
if (pReader->pWalReader->pHead->head.msgType != TDMT_VND_SUBMIT) {
// TODO do filter
ret->fetchType = FETCH_TYPE__META;
ret->meta = pReader->pWalReader->pHead->head.body;
return 0;
} else {
tqReaderSetDataMsg(pReader, body, pReader->pWalReader->pHead->head.version);
#endif
tqReaderSetDataMsg(pReader, body, pReader->pWalReader->pHead->head.version);
#if 0
}
#endif
}
while (tqNextDataBlock(pReader)) {
@ -334,6 +337,7 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) {
continue;
}
ret->fetchType = FETCH_TYPE__DATA;
tqDebug("return data rows %d", ret->data.info.rows);
return 0;
}
@ -341,14 +345,14 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) {
ret->offset.type = TMQ_OFFSET__LOG;
ret->offset.version = pReader->ver;
ASSERT(pReader->ver >= 0);
ret->fetchType = FETCH_TYPE__NONE;
ret->fetchType = FETCH_TYPE__SEP;
tqDebug("return offset %" PRId64 ", processed finish", ret->offset.version);
return 0;
}
}
}
int32_t tqReaderSetDataMsg(STqReader* pReader, SSubmitReq* pMsg, int64_t ver) {
int32_t tqReaderSetDataMsg(STqReader* pReader, const SSubmitReq* pMsg, int64_t ver) {
pReader->pMsg = pMsg;
if (tInitSubmitMsgIter(pMsg, &pReader->msgIter) < 0) return -1;

View File

@ -420,6 +420,7 @@ typedef enum {
typedef struct {
SFSLASTNEXTROWSTATES state; // [input]
STsdb *pTsdb; // [input]
STSchema *pTSchema; // [input]
tb_uid_t suid;
tb_uid_t uid;
int32_t nFileSet;
@ -455,9 +456,10 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet);
if (code) goto _err;
SSttBlockLoadInfo* pLoadInfo = tCreateLastBlockLoadInfo(state->pTSchema, NULL, 0);
tMergeTreeOpen(&state->mergeTree, 1, state->pDataFReader, state->suid, state->uid,
&(STimeWindow){.skey = TSKEY_MIN, .ekey = TSKEY_MAX},
&(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, NULL, NULL);
&(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, pLoadInfo,true, NULL);
bool hasVal = tMergeTreeNext(&state->mergeTree);
if (!hasVal) {
state->state = SFSLASTNEXTROW_FILESET;
@ -612,7 +614,8 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetDataBlk);
/* code = tsdbReadBlockData(state->pDataFReader, &state->blockIdx, &block, &state->blockData, NULL, NULL); */
tBlockDataReset(state->pBlockData);
code = tBlockDataInit(state->pBlockData, state->suid, state->uid, state->pTSchema);
TABLEID tid = {.suid = state->suid, .uid = state->uid};
code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, NULL, 0);
if (code) goto _err;
code = tsdbReadDataBlock(state->pDataFReader, &block, state->pBlockData);
@ -891,6 +894,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->fsLastState.state = (SFSLASTNEXTROWSTATES)SFSNEXTROW_FS;
pIter->fsLastState.pTsdb = pTsdb;
pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
pIter->fsLastState.pTSchema = pTSchema;
pIter->fsLastState.suid = suid;
pIter->fsLastState.uid = uid;
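
With the pTSchema input threaded through above, getNextRowFromFSLast can allocate its own load info via tCreateLastBlockLoadInfo and hand it to tMergeTreeOpen with destroyLoadInfo = true, so the merge tree frees it when the scan ends. The load info itself is one cache slot per stt file, all sharing the caller's schema and optional projection; a simplified sketch with invented names:

```c
#include <stdint.h>
#include <stdlib.h>

#define STT_FILES_PER_SET 16 /* stands in for TSDB_DEFAULT_STT_FILE */

/* Simplified per-stt-file cache: schema plus an optional column subset. */
typedef struct {
  const void*    schema;    /* shared, not owned */
  const int16_t* colIds;    /* optional projection; NULL means all columns */
  int32_t        numOfCols;
} LoadInfo;

/* Mirrors the shape of tCreateLastBlockLoadInfo: one cache slot per stt
 * file, all referencing the caller's schema and projection. */
LoadInfo* create_load_info(const void* schema, const int16_t* colIds, int32_t n) {
  LoadInfo* info = calloc(STT_FILES_PER_SET, sizeof(LoadInfo));
  if (info == NULL) return NULL;
  for (int i = 0; i < STT_FILES_PER_SET; ++i) {
    info[i].schema = schema;
    info[i].colIds = colIds;
    info[i].numOfCols = n;
  }
  return info;
}
```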

View File

@ -437,7 +437,7 @@ static int32_t tsdbOpenCommitIter(SCommitter *pCommitter) {
pIter->iSttBlk = 0;
SSttBlk *pSttBlk = (SSttBlk *)taosArrayGet(pIter->aSttBlk, 0);
code = tsdbReadSttBlock(pCommitter->dReader.pReader, iStt, pSttBlk, &pIter->bData);
code = tsdbReadSttBlockEx(pCommitter->dReader.pReader, iStt, pSttBlk, &pIter->bData);
if (code) goto _err;
pIter->iRow = 0;
@ -1049,7 +1049,7 @@ static int32_t tsdbNextCommitRow(SCommitter *pCommitter) {
if (pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk)) {
SSttBlk *pSttBlk = (SSttBlk *)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk);
code = tsdbReadSttBlock(pCommitter->dReader.pReader, pIter->iStt, pSttBlk, &pIter->bData);
code = tsdbReadSttBlockEx(pCommitter->dReader.pReader, pIter->iStt, pSttBlk, &pIter->bData);
if (code) goto _exit;
pIter->iRow = 0;
@ -1305,7 +1305,8 @@ static int32_t tsdbInitLastBlockIfNeed(SCommitter *pCommitter, TABLEID id) {
if (!pBDatal->suid && !pBDatal->uid) {
ASSERT(pCommitter->skmTable.suid == id.suid);
ASSERT(pCommitter->skmTable.uid == id.uid);
code = tBlockDataInit(pBDatal, id.suid, id.suid ? 0 : id.uid, pCommitter->skmTable.pTSchema);
TABLEID tid = {.suid = id.suid, .uid = id.suid ? 0 : id.uid};
code = tBlockDataInit(pBDatal, &tid, pCommitter->skmTable.pTSchema, NULL, 0);
if (code) goto _exit;
}
@ -1428,9 +1429,9 @@ static int32_t tsdbCommitFileDataImpl(SCommitter *pCommitter) {
// impl
code = tsdbUpdateTableSchema(pCommitter->pTsdb->pVnode->pMeta, id.suid, id.uid, &pCommitter->skmTable);
if (code) goto _err;
code = tBlockDataInit(&pCommitter->dReader.bData, id.suid, id.uid, pCommitter->skmTable.pTSchema);
code = tBlockDataInit(&pCommitter->dReader.bData, &id, pCommitter->skmTable.pTSchema, NULL, 0);
if (code) goto _err;
code = tBlockDataInit(&pCommitter->dWriter.bData, id.suid, id.uid, pCommitter->skmTable.pTSchema);
code = tBlockDataInit(&pCommitter->dWriter.bData, &id, pCommitter->skmTable.pTSchema, NULL, 0);
if (code) goto _err;
/* merge with data in .data file */
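
Both commit-path call sites now build a TABLEID instead of passing suid and uid separately, and they keep the pre-existing convention that a buffer shared by all child tables of one super table stores suid with uid = 0, while a plain table stores suid = 0 with its own uid. A small sketch of that convention (TABLEID is real; the helper is hypothetical):

```c
#include <stdint.h>

typedef struct {
  int64_t suid; /* super-table id, 0 for plain tables */
  int64_t uid;  /* table id, 0 when the buffer is shared per super table */
} TableId; /* stand-in for TDengine's TABLEID */

/* Child tables of a super table collapse onto {suid, 0};
 * plain tables keep {0, uid}. */
static TableId shared_buffer_id(int64_t suid, int64_t uid) {
  TableId id = {.suid = suid, .uid = suid ? 0 : uid};
  return id;
}
```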

View File

@ -28,11 +28,10 @@ struct SLDataIter {
uint64_t uid;
STimeWindow timeWindow;
SVersionRange verRange;
SSttBlockLoadInfo* pBlockLoadInfo;
};
SSttBlockLoadInfo* tCreateLastBlockLoadInfo() {
SSttBlockLoadInfo* tCreateLastBlockLoadInfo(STSchema* pSchema, int16_t* colList, int32_t numOfCols) {
SSttBlockLoadInfo* pLoadInfo = taosMemoryCalloc(TSDB_DEFAULT_STT_FILE, sizeof(SSttBlockLoadInfo));
if (pLoadInfo == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -55,6 +54,9 @@ SSttBlockLoadInfo* tCreateLastBlockLoadInfo() {
}
pLoadInfo[i].aSttBlk = taosArrayInit(4, sizeof(SSttBlk));
pLoadInfo[i].pSchema = pSchema;
pLoadInfo[i].colIds = colList;
pLoadInfo[i].numOfCols = numOfCols;
}
return pLoadInfo;
@ -111,7 +113,19 @@ static SBlockData* loadLastBlock(SLDataIter *pIter, const char* idStr) {
pInfo->currentLoadBlockIndex ^= 1;
if (pIter->pSttBlk != NULL) { // current block not loaded yet
int64_t st = taosGetTimestampUs();
code = tsdbReadSttBlock(pIter->pReader, pIter->iStt, pIter->pSttBlk, &pInfo->blockData[pInfo->currentLoadBlockIndex]);
SBlockData* pBlock = &pInfo->blockData[pInfo->currentLoadBlockIndex];
TABLEID id = {0};
if (pIter->pSttBlk->suid != 0) {
id.suid = pIter->pSttBlk->suid;
} else {
id.uid = pIter->uid;
}
tBlockDataInit(pBlock, &id, pInfo->pSchema, pInfo->colIds, pInfo->numOfCols);
code = tsdbReadSttBlock(pIter->pReader, pIter->iStt, pIter->pSttBlk, pBlock);
double el = (taosGetTimestampUs() - st) / 1000.0;
pInfo->elapsedTime += el;
pInfo->loadBlocks += 1;
@ -460,7 +474,8 @@ static FORCE_INLINE int32_t tLDataIterCmprFn(const void *p1, const void *p2) {
}
int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
STimeWindow *pTimeWindow, SVersionRange *pVerRange, void* pBlockLoadInfo, const char* idStr) {
STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
bool destroyLoadInfo, const char *idStr) {
pMTree->backward = backward;
pMTree->pIter = NULL;
pMTree->pIterList = taosArrayInit(4, POINTER_BYTES);
@ -473,21 +488,12 @@ int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFRead
tRBTreeCreate(&pMTree->rbt, tLDataIterCmprFn);
int32_t code = TSDB_CODE_SUCCESS;
SSttBlockLoadInfo* pLoadInfo = NULL;
if (pBlockLoadInfo == NULL) {
if (pMTree->pLoadInfo == NULL) {
pMTree->destroyLoadInfo = true;
pMTree->pLoadInfo = tCreateLastBlockLoadInfo();
}
pLoadInfo = pMTree->pLoadInfo;
} else {
pLoadInfo = pBlockLoadInfo;
}
pMTree->pLoadInfo = pBlockLoadInfo;
pMTree->destroyLoadInfo = destroyLoadInfo;
for (int32_t i = 0; i < pFReader->pSet->nSttF; ++i) { // open all last file
struct SLDataIter* pIter = NULL;
code = tLDataIterOpen(&pIter, pFReader, i, pMTree->backward, suid, uid, pTimeWindow, pVerRange, &pLoadInfo[i]);
code = tLDataIterOpen(&pIter, pFReader, i, pMTree->backward, suid, uid, pTimeWindow, pVerRange, &pMTree->pLoadInfo[i]);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
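
tMergeTreeOpen used to allocate a load-info array lazily when the caller passed NULL; after this hunk the caller always supplies the array together with an explicit destroyLoadInfo flag saying who frees it. The two usage patterns visible in this diff, sketched with hypothetical wrappers:

```c
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
  void* loadInfo;        /* per-stt-file caches */
  bool  destroyLoadInfo; /* true: the tree frees loadInfo on close */
} MergeTree;

/* Pattern 1 (one-shot scan, e.g. the cache path): the tree owns the buffer. */
void open_owning(MergeTree* t, void* freshInfo) {
  t->loadInfo = freshInfo;
  t->destroyLoadInfo = true;
}

/* Pattern 2 (long-lived reader, e.g. the last-block reader): the caller
 * keeps the buffer across opens and frees it itself. */
void open_borrowing(MergeTree* t, void* callerInfo) {
  t->loadInfo = callerInfo;
  t->destroyLoadInfo = false;
}

void close_tree(MergeTree* t) {
  if (t->destroyLoadInfo) {
    free(t->loadInfo); /* only the owning pattern frees here */
    t->loadInfo = NULL;
  }
}
```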

View File

@ -85,6 +85,8 @@ _err:
int tsdbClose(STsdb **pTsdb) {
if (*pTsdb) {
taosThreadRwlockDestroy(&(*pTsdb)->rwLock);
tsdbMemTableDestroy((*pTsdb)->mem);
(*pTsdb)->mem = NULL;
tsdbFSClose(*pTsdb);
tsdbCloseCache(*pTsdb);
taosMemoryFreeClear(*pTsdb);

View File

@ -79,6 +79,7 @@ typedef struct SBlockLoadSuppInfo {
SColumnDataAgg tsColAgg;
SColumnDataAgg** plist;
int16_t* colIds; // column ids for loading file block data
int32_t numOfCols;
char** buildBuf; // build string tmp buffer, todo remove it later after all string format being updated.
} SBlockLoadSuppInfo;
@ -203,6 +204,7 @@ static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
size_t numOfCols = blockDataGetNumOfCols(pBlock);
pSupInfo->numOfCols = numOfCols;
pSupInfo->colIds = taosMemoryMalloc(numOfCols * sizeof(int16_t));
pSupInfo->buildBuf = taosMemoryCalloc(numOfCols, POINTER_BYTES);
if (pSupInfo->buildBuf == NULL || pSupInfo->colIds == NULL) {
@ -352,7 +354,8 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb
tMergeTreeClose(&pLReader->mergeTree);
if (pLReader->pInfo == NULL) {
pLReader->pInfo = tCreateLastBlockLoadInfo();
// here we ignore the first column, which is always the primary timestamp column
pLReader->pInfo = tCreateLastBlockLoadInfo(pReader->pSchema, &pReader->suppInfo.colIds[1], pReader->suppInfo.numOfCols - 1);
if (pLReader->pInfo == NULL) {
tsdbDebug("init fileset iterator failed, code:%s %s", tstrerror(terrno), pReader->idStr);
return terrno;
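
The call above passes &colIds[1] and numOfCols - 1 because slot 0 of the reader's column list is always the primary timestamp, which every block carries implicitly. A tiny sketch of that slicing (hypothetical helper, same idea as the tCreateLastBlockLoadInfo call):

```c
#include <stdint.h>

/* colIds[0] holds the primary timestamp column id; only the remaining ids
 * form the projection handed to the last-block loader. */
static void slice_projection(const int16_t* colIds, int32_t numOfCols,
                             const int16_t** outIds, int32_t* outCount) {
  *outIds = &colIds[1];
  *outCount = numOfCols - 1;
}
```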
@ -483,7 +486,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
pReader->suid = pCond->suid;
pReader->order = pCond->order;
pReader->capacity = 4096;
pReader->capacity = capacity;
pReader->idStr = (idstr != NULL) ? strdup(idstr) : NULL;
pReader->verRange = getQueryVerRange(pVnode, pCond, level);
pReader->type = pCond->type;
@ -838,14 +841,18 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
bool asc = ASCENDING_TRAVERSE(pReader->order);
int32_t step = asc ? 1 : -1;
if (asc && pReader->window.skey <= pBlock->minKey.ts) {
pDumpInfo->rowIndex = 0;
} else if (!asc && pReader->window.ekey >= pBlock->maxKey.ts) {
pDumpInfo->rowIndex = pBlock->nRow - 1;
} else {
int32_t pos = asc ? pBlock->nRow - 1 : 0;
int32_t order = (pReader->order == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
pDumpInfo->rowIndex = doBinarySearchKey(pBlockData->aTSKEY, pBlock->nRow, pos, pReader->window.skey, order);
if ((pDumpInfo->rowIndex == 0 && asc) || (pDumpInfo->rowIndex == pBlock->nRow - 1 && (!asc))) {
if (asc && pReader->window.skey <= pBlock->minKey.ts) {
//pDumpInfo->rowIndex = 0;
} else if (!asc && pReader->window.ekey >= pBlock->maxKey.ts) {
//pDumpInfo->rowIndex = pBlock->nRow - 1;
} else {
int32_t pos = asc ? pBlock->nRow - 1 : 0;
int32_t order = (pReader->order == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
pDumpInfo->rowIndex = doBinarySearchKey(pBlockData->aTSKEY, pBlock->nRow, pos, pReader->window.skey, order);
}
}
// time window check
@ -929,8 +936,8 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
pDumpInfo->rowIndex += step * remain;
if (pDumpInfo->rowIndex >= 0 && pDumpInfo->rowIndex < pBlock->nRow) {
int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex];
setBlockAllDumped(pDumpInfo, ts, pReader->order);
// int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex];
// setBlockAllDumped(pDumpInfo, ts, pReader->order);
} else {
int64_t k = asc ? pBlock->maxKey.ts : pBlock->minKey.ts;
setBlockAllDumped(pDumpInfo, k, pReader->order);
@ -948,15 +955,22 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
return TSDB_CODE_SUCCESS;
}
static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData) {
static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData, uint64_t uid) {
int64_t st = taosGetTimestampUs();
tBlockDataReset(pBlockData);
TABLEID tid = {.suid = pReader->suid, .uid = uid};
int32_t code = tBlockDataInit(pBlockData, &tid, pReader->pSchema, &pReader->suppInfo.colIds[1], pReader->suppInfo.numOfCols-1);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
ASSERT(pBlockInfo != NULL);
SDataBlk* pBlock = getCurrentBlock(pBlockIter);
int32_t code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
", rows:%d, code:%s %s",
@ -1741,7 +1755,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
}
int64_t key = hasDataInFileBlock(pBlockData, pDumpInfo)? pBlockData->aTSKEY[pDumpInfo->rowIndex]:INT64_MIN;
int64_t key = hasDataInFileBlock(pBlockData, pDumpInfo) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN;
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
@ -1995,7 +2009,7 @@ static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScan
int32_t code =
tMergeTreeOpen(&pLBlockReader->mergeTree, (pLBlockReader->order == TSDB_ORDER_DESC), pReader->pFileReader,
pReader->suid, pScanInfo->uid, &w, &pLBlockReader->verRange, pLBlockReader->pInfo, pReader->idStr);
pReader->suid, pScanInfo->uid, &w, &pLBlockReader->verRange, pLBlockReader->pInfo, false, pReader->idStr);
if (code != TSDB_CODE_SUCCESS) {
return false;
}
@ -2009,12 +2023,12 @@ static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) {
}
static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) { return pLastBlockReader->mergeTree.pIter != NULL; }
bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
if (pBlockData->nRow > 0) {
ASSERT(pBlockData->nRow == pDumpInfo->totalRows);
bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
if (pBlockData->nRow > 0) {
ASSERT(pBlockData->nRow == pDumpInfo->totalRows);
}
return pBlockData->nRow > 0 && (!pDumpInfo->allDumped);
return pBlockData->nRow > 0 && (!pDumpInfo->allDumped);
}
int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
@ -2451,13 +2465,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
ASSERT(pBlockIter->numOfBlocks == 0);
code = buildComposedDataBlock(pReader);
} else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) {
tBlockDataReset(&pStatus->fileBlockData);
code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pScanInfo->uid, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData);
code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -2931,13 +2939,7 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn
setFileBlockActiveInBlockIter(pBlockIter, neighborIndex, step);
// 3. load the neighbor block, and set it to be the currently accessed file data block
tBlockDataReset(&pStatus->fileBlockData);
int32_t code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pFBlock->uid, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData);
int32_t code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pFBlock->uid);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -3404,10 +3406,14 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
// we need only one row
pPrevReader->capacity = 1;
pPrevReader->status.pTableMap = pReader->status.pTableMap;
pPrevReader->pSchema = pReader->pSchema;
pPrevReader->pMemSchema = pReader->pMemSchema;
pPrevReader->pReadSnap = pReader->pReadSnap;
pNextReader->capacity = 1;
pNextReader->status.pTableMap = pReader->status.pTableMap;
pNextReader->pSchema = pReader->pSchema;
pNextReader->pMemSchema = pReader->pMemSchema;
pNextReader->pReadSnap = pReader->pReadSnap;
code = doOpenReaderImpl(pPrevReader);
@ -3441,11 +3447,19 @@ void tsdbReaderClose(STsdbReader* pReader) {
{
if (pReader->innerReader[0] != NULL) {
pReader->innerReader[0]->status.pTableMap = NULL;
pReader->innerReader[0]->pReadSnap = NULL;
STsdbReader* p = pReader->innerReader[0];
pReader->innerReader[1]->status.pTableMap = NULL;
pReader->innerReader[1]->pReadSnap = NULL;
p->status.pTableMap = NULL;
p->pReadSnap = NULL;
p->pSchema = NULL;
p->pMemSchema = NULL;
p = pReader->innerReader[1];
p->status.pTableMap = NULL;
p->pReadSnap = NULL;
p->pSchema = NULL;
p->pMemSchema = NULL;
tsdbReaderClose(pReader->innerReader[0]);
tsdbReaderClose(pReader->innerReader[1]);
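
The rewritten close path makes the ownership rule explicit: the inner pre/post readers only borrow pTableMap, pReadSnap, pSchema, and pMemSchema from the main reader, so each borrowed pointer is cleared before the recursive tsdbReaderClose call, which would otherwise free the shared state twice. A generic sketch of this detach-before-close pattern, with simplified types:

```c
#include <stdlib.h>

typedef struct Reader {
  struct Reader* inner[2]; /* optional pre/post readers */
  void*          schema;   /* owned by the outermost reader only */
} Reader;

static void reader_close(Reader* r) {
  if (r == NULL) return;
  for (int i = 0; i < 2; ++i) {
    Reader* p = r->inner[i];
    if (p != NULL) {
      p->schema = NULL; /* detach the borrowed pointer first ... */
      reader_close(p);  /* ... so the recursive close cannot free it */
    }
  }
  free(r->schema); /* freed exactly once, by the owner */
  free(r);
}
```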
@ -3683,14 +3697,7 @@ static SArray* doRetrieveDataBlock(STsdbReader* pReader) {
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pStatus->blockIter);
STableBlockScanInfo* pBlockScanInfo = taosHashGet(pStatus->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
tBlockDataReset(&pStatus->fileBlockData);
int32_t code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pBlockScanInfo->uid, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
return NULL;
}
code = doLoadFileBlockData(pReader, &pStatus->blockIter, &pStatus->fileBlockData);
int32_t code = doLoadFileBlockData(pReader, &pStatus->blockIter, &pStatus->fileBlockData, pBlockScanInfo->uid);
if (code != TSDB_CODE_SUCCESS) {
tBlockDataDestroy(&pStatus->fileBlockData, 1);
terrno = code;

View File

@ -926,12 +926,13 @@ _err:
return code;
}
static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo, SBlockData *pBlockData) {
static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo, SBlockData *pBlockData,
int32_t iStt) {
int32_t code = 0;
tBlockDataClear(pBlockData);
STsdbFD *pFD = pReader->pDataFD;
STsdbFD *pFD = (iStt < 0) ? pReader->pDataFD : pReader->aSttFD[iStt];
// uid + version + tskey
code = tRealloc(&pReader->aBuf[0], pBlkInfo->szKey);
@ -1070,9 +1071,12 @@ _err:
int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pDataBlk, SBlockData *pBlockData) {
int32_t code = 0;
code = tsdbReadBlockDataImpl(pReader, &pDataBlk->aSubBlock[0], pBlockData);
code = tsdbReadBlockDataImpl(pReader, &pDataBlk->aSubBlock[0], pBlockData, -1);
if (code) goto _err;
ASSERT(pDataBlk->nSubBlock == 1);
#if 0
if (pDataBlk->nSubBlock > 1) {
SBlockData bData1;
SBlockData bData2;
@ -1113,6 +1117,7 @@ int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pDataBlk, SBlockData
tBlockDataDestroy(&bData1, 1);
tBlockDataDestroy(&bData2, 1);
}
#endif
return code;
@ -1123,23 +1128,38 @@ _err:
int32_t tsdbReadSttBlock(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData) {
int32_t code = 0;
int32_t lino = 0;
code = tsdbReadBlockDataImpl(pReader, &pSttBlk->bInfo, pBlockData, iStt);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
if (code) {
tsdbError("vgId:%d %s failed at %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code));
}
return code;
}
int32_t tsdbReadSttBlockEx(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData) {
int32_t code = 0;
int32_t lino = 0;
// alloc
code = tRealloc(&pReader->aBuf[0], pSttBlk->bInfo.szBlock);
if (code) goto _err;
TSDB_CHECK_CODE(code, lino, _exit);
// read
code = tsdbReadFile(pReader->aSttFD[iStt], pSttBlk->bInfo.offset, pReader->aBuf[0], pSttBlk->bInfo.szBlock);
if (code) goto _err;
TSDB_CHECK_CODE(code, lino, _exit);
// decmpr
code = tDecmprBlockData(pReader->aBuf[0], pSttBlk->bInfo.szBlock, pBlockData, &pReader->aBuf[1]);
if (code) goto _err;
TSDB_CHECK_CODE(code, lino, _exit);
return code;
_err:
tsdbError("vgId:%d tsdb read stt block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
_exit:
if (code) {
tsdbError("vgId:%d %s failed at %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code));
}
return code;
}
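
After this hunk there are two ways to materialize an stt block: tsdbReadSttBlock decodes into an SBlockData the caller has already shaped with tBlockDataInit (which is what enables column pruning), while the new tsdbReadSttBlockEx does a single file read plus a wholesale decompress, the variant the commit and snapshot paths switch to in the hunks below. A rough sketch of the split, with hypothetical stand-ins for the tsdb internals:

```c
typedef struct { int nRow; /* plus decoded columns */ } BlockData; /* stand-in */

/* Hypothetical decode primitives; the real ones are tsdb internals. */
static int decode_into(BlockData* dst, const void* raw, int size) {
  (void)raw; (void)size;
  dst->nRow = 0; /* would fill only the columns dst was shaped with */
  return 0;
}
static int decompress_whole(BlockData* dst, const void* raw, int size) {
  (void)raw; (void)size;
  dst->nRow = 0; /* would rebuild every column of the block */
  return 0;
}

/* Mirrors tsdbReadSttBlock: caller pre-shapes dst via tBlockDataInit with a
 * column list, so the decode honors that projection. */
int read_stt_pruned(BlockData* dst, const void* raw, int size) {
  return decode_into(dst, raw, size);
}

/* Mirrors tsdbReadSttBlockEx: one read plus a full decompress, no
 * pre-shaping required. */
int read_stt_ex(BlockData* dst, const void* raw, int size) {
  return decompress_whole(dst, raw, size);
}
```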

View File

@ -140,7 +140,7 @@ static int32_t tsdbSnapReadOpenFile(STsdbSnapReader* pReader) {
if (pSttBlk->minVer > pReader->ever) continue;
if (pSttBlk->maxVer < pReader->sver) continue;
code = tsdbReadSttBlock(pReader->pDataFReader, iStt, pSttBlk, &pIter->bData);
code = tsdbReadSttBlockEx(pReader->pDataFReader, iStt, pSttBlk, &pIter->bData);
if (code) goto _err;
for (pIter->iRow = 0; pIter->iRow < pIter->bData.nRow; pIter->iRow++) {
@ -223,7 +223,7 @@ static int32_t tsdbSnapNextRow(STsdbSnapReader* pReader) {
if (pSttBlk->minVer > pReader->ever || pSttBlk->maxVer < pReader->sver) continue;
code = tsdbReadSttBlock(pReader->pDataFReader, pIter->iStt, pSttBlk, &pIter->bData);
code = tsdbReadSttBlockEx(pReader->pDataFReader, pIter->iStt, pSttBlk, &pIter->bData);
if (code) goto _err;
pIter->iRow = -1;
@ -319,7 +319,7 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
code = tsdbUpdateTableSchema(pTsdb->pVnode->pMeta, id.suid, id.uid, &pReader->skmTable);
if (code) goto _err;
code = tBlockDataInit(pBlockData, id.suid, id.uid, pReader->skmTable.pTSchema);
code = tBlockDataInit(pBlockData, &id, pReader->skmTable.pTSchema, NULL, 0);
if (code) goto _err;
while (pRowInfo->suid == id.suid && pRowInfo->uid == id.uid) {
@ -715,7 +715,7 @@ static int32_t tsdbSnapWriteTableDataStart(STsdbSnapWriter* pWriter, TABLEID* pI
if (code) goto _err;
tMapDataReset(&pWriter->dWriter.mDataBlk);
code = tBlockDataInit(&pWriter->dWriter.bData, pId->suid, pId->uid, pWriter->skmTable.pTSchema);
code = tBlockDataInit(&pWriter->dWriter.bData, pId, pWriter->skmTable.pTSchema, NULL, 0);
if (code) goto _err;
return code;
@ -1000,7 +1000,8 @@ static int32_t tsdbSnapWriteToSttFile(STsdbSnapWriter* pWriter, int32_t iRow) {
code = tsdbUpdateTableSchema(pWriter->pTsdb->pVnode->pMeta, pWriter->id.suid, pWriter->id.uid, &pWriter->skmTable);
if (code) goto _err;
code = tBlockDataInit(pBData, pWriter->id.suid, pWriter->id.suid ? 0 : pWriter->id.uid, pWriter->skmTable.pTSchema);
TABLEID tid = {.suid = pWriter->id.suid, .uid = pWriter->id.suid ? 0 : pWriter->id.uid};
code = tBlockDataInit(pBData, &tid, pWriter->skmTable.pTSchema, NULL, 0);
if (code) goto _err;
}

View File

@ -948,24 +948,47 @@ void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear) {
pBlockData->aColData = NULL;
}
int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema) {
int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema, int16_t *aCid, int32_t nCid) {
int32_t code = 0;
ASSERT(suid || uid);
ASSERT(pId->suid || pId->uid);
pBlockData->suid = suid;
pBlockData->uid = uid;
pBlockData->suid = pId->suid;
pBlockData->uid = pId->uid;
pBlockData->nRow = 0;
taosArrayClear(pBlockData->aIdx);
for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) {
if (aCid) {
int32_t iColumn = 1;
STColumn *pTColumn = &pTSchema->columns[iColumn];
for (int32_t iCid = 0; iCid < nCid; iCid++) {
while (pTColumn && pTColumn->colId < aCid[iCid]) {
iColumn++;
pTColumn = (iColumn < pTSchema->numOfCols) ? &pTSchema->columns[iColumn] : NULL;
}
SColData *pColData;
code = tBlockDataAddColData(pBlockData, iColumn - 1, &pColData);
if (code) goto _exit;
if (pTColumn == NULL) {
break;
} else if (pTColumn->colId == aCid[iCid]) {
SColData *pColData;
code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
if (code) goto _exit;
tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0);
tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0);
iColumn++;
pTColumn = (iColumn < pTSchema->numOfCols) ? &pTSchema->columns[iColumn] : NULL;
}
}
} else {
for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) {
STColumn *pTColumn = &pTSchema->columns[iColumn];
SColData *pColData;
code = tBlockDataAddColData(pBlockData, iColumn - 1, &pColData);
if (code) goto _exit;
tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0);
}
}
_exit:
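
The new aCid branch walks the schema columns and the requested column ids in lockstep, assuming, as the loop does, that both lists are sorted by column id; the intersection then falls out of a single linear merge, and requested ids missing from the schema are silently dropped. A standalone sketch of the same two-pointer merge:

```c
#include <stdint.h>
#include <stdio.h>

/* Emits the ids present in both sorted lists, in order -- the same shape as
 * the aCid branch above, where `schema` plays pTSchema and `want` plays aCid. */
static int intersect_sorted(const int16_t* schema, int nSchema,
                            const int16_t* want, int nWant, int16_t* out) {
  int i = 0, n = 0;
  for (int j = 0; j < nWant; ++j) {
    while (i < nSchema && schema[i] < want[j]) i++;   /* skip unrequested cols */
    if (i == nSchema) break;                          /* schema exhausted */
    if (schema[i] == want[j]) out[n++] = schema[i++]; /* matched: keep it */
    /* ids in `want` missing from the schema fall through and are dropped */
  }
  return n;
}

int main(void) {
  int16_t schema[] = {2, 3, 5, 7};
  int16_t want[] = {3, 4, 7};
  int16_t out[4];
  int n = intersect_sorted(schema, 4, want, 3, out);
  for (int k = 0; k < n; ++k) printf("%d ", out[k]); /* prints: 3 7 */
  return 0;
}
```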

Some files were not shown because too many files have changed in this diff.