add/delete tag idx
commit 72b43d240d
|
@ -2,7 +2,7 @@
|
|||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG 4776778
|
||||
GIT_TAG 94d6895
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -30,8 +30,10 @@ database_option: {
|
|||
| WAL_LEVEL {1 | 2}
|
||||
| VGROUPS value
|
||||
| SINGLE_STABLE {0 | 1}
|
||||
| STT_TRIGGER value
|
||||
| TABLE_PREFIX value
|
||||
| TABLE_SUFFIX value
|
||||
| TSDB_PAGESIZE value
|
||||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_ROLL_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
|
@ -69,8 +71,10 @@ database_option: {
|
|||
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
|
||||
- 0: The database can contain multiple supertables.
|
||||
- 1: The database can contain only one supertable.
|
||||
- STT_TRIGGER: specifies the number of flushed files that triggers a file merge. The default is 8, and the range is 1 to 16. For scenarios with few tables and high write frequency, the default or a smaller value is recommended; for scenarios with many tables and low write frequency, a larger value is recommended.
|
||||
- TABLE_PREFIX: the length of the prefix in a table name that is ignored when distributing tables to vnodes based on table name.
|
||||
- TABLE_SUFFIX: the length of the suffix in a table name that is ignored when distributing tables to vnodes based on table name.
|
||||
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
|
||||
- WAL_RETENTION_PERIOD: specifies how long WAL files are retained, in seconds; this parameter is used for data subscription. For a single-replica database the default is 0, meaning each WAL file is deleted as soon as its contents are written to disk; -1 means WAL files are never deleted. For a multi-replica database the default is 4 days.
|
||||
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files to retain, in KB; this parameter is used for data subscription. For a single-replica database the default is 0, meaning each WAL file is deleted as soon as its contents are written to disk; -1 means WAL files are never deleted. For a multi-replica database the default is -1.
|
||||
- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated, in seconds. After this period elapses, a new WAL file is created. For a single-replica database the default is 0, meaning a new WAL file is created only when the previous one has been written to disk. For a multi-replica database the default is 1 day. An example `CREATE DATABASE` statement combining several of these options is shown below.
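The following is a minimal, hypothetical sketch of how these options can be combined; the database name `power` and all option values are illustrative assumptions, not part of the original reference.

```sql
-- Hypothetical example: create a database with explicit storage and WAL options
CREATE DATABASE power
  VGROUPS 4                     -- number of vgroups
  SINGLE_STABLE 0               -- allow multiple supertables
  STT_TRIGGER 8                 -- number of flushed files that triggers a merge
  TABLE_PREFIX 2                -- ignore a 2-character prefix when mapping table names to vnodes
  TABLE_SUFFIX 2                -- ignore a 2-character suffix as well
  TSDB_PAGESIZE 8               -- 8 KB storage engine page size
  WAL_RETENTION_PERIOD 86400;   -- retain WAL files for one day for data subscription
```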
|
||||
|
@ -112,6 +116,10 @@ alter_database_options:
|
|||
alter_database_option: {
|
||||
CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
|
||||
| CACHESIZE value
|
||||
| BUFFER value
|
||||
| PAGES value
|
||||
| REPLICA value
|
||||
| STT_TRIGGER value
|
||||
| WAL_LEVEL value
|
||||
| WAL_FSYNC_PERIOD value
|
||||
| KEEP value
|
||||
|
@ -154,3 +162,19 @@ TRIM DATABASE db_name;
|
|||
```
|
||||
|
||||
The preceding SQL statement deletes data that has expired and orders the remaining data in accordance with the storage configuration.
|
||||
|
||||
## Redistribute Vgroup
|
||||
|
||||
```sql
|
||||
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
|
||||
```
|
||||
|
||||
Adjusts the distribution of vnodes in the specified vgroup according to the given list of dnodes. Because the maximum number of replicas is 3, at most three dnodes can be specified.
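For example, the following sketch (the vgroup number 5 and dnodes 1–3 are assumed values) places the vnodes of one vgroup on three specific dnodes:

```sql
REDISTRIBUTE VGROUP 5 DNODE 1 DNODE 2 DNODE 3;
```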
|
||||
|
||||
## Balance Vgroup
|
||||
|
||||
```sql
|
||||
BALANCE VGROUP
|
||||
```
|
||||
|
||||
Automatically adjusts the distribution of vnodes in all vgroups of the cluster, which is equivalent to load balancing the data of the cluster at the vnode level.
|
||||
|
|
|
@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
|||
|
||||
## JOIN
|
||||
|
||||
TDengine supports natural joins between supertables, between standard tables, and between subqueries. The difference between natural joins and inner joins is that natural joins require that the fields being joined in the supertables or standard tables must have the same name. Data or tag columns must be joined with the equivalent column in another table.
|
||||
TDengine supports `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as this requirement is met, `INNER JOIN` can be performed freely between normal tables, subtables, supertables, and subqueries, and there is no limit on the number of tables.
|
||||
|
||||
For standard tables, only the timestamp (primary key) can be used in join operations. For example:
|
||||
For standard tables:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2
|
|||
WHERE t1.ts = t2.ts
|
||||
```
|
||||
|
||||
For supertables, tags as well as timestamps can be used in join operations. For example:
|
||||
For supertables:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
@ -368,21 +368,16 @@ FROM temp_stable t1, temp_stable t2
|
|||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
For subtables and supertables:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
FROM temp_ctable t1, temp_stable t2
|
||||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
Similarly, join operations can be performed on the result sets of multiple subqueries.
|
||||
|
||||
:::note
|
||||
|
||||
The following restrictions apply to JOIN statements:
|
||||
|
||||
- The number of tables or supertables in a single join operation cannot exceed 10.
|
||||
- `FILL` cannot be used in a JOIN statement.
|
||||
- Arithmetic operations cannot be performed on the result sets of a join operation.
|
||||
- `GROUP BY` cannot be applied to only some of the tables that participate in a join operation.
|
||||
- `OR` cannot be used between the filter conditions of different tables in a join operation.
|
||||
- Join operations can be performed only on tag columns or the primary timestamp column. You cannot perform a join operation on data columns.
|
||||
|
||||
:::
|
||||
|
||||
## Nested Query
|
||||
|
||||
A nested query is also called a subquery. In a single SQL statement, the result of the inner query can be used as the data source of the outer query, as in the example below.
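A brief sketch of such a nested query, assuming a hypothetical supertable `meters` with a `voltage` column: the inner query returns the last voltage of each subtable, and the outer query averages those values.

```sql
SELECT AVG(last_voltage)
FROM (
  SELECT LAST(voltage) AS last_voltage
  FROM meters
  PARTITION BY tbname
) t;
```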
|
||||
|
|
|
@ -877,7 +877,7 @@ INTERP(expr)
|
|||
- Interpolation is performed based on `FILL` parameter.
|
||||
- `INTERP` can interpolate within only a single timeline, so it must be used with `PARTITION BY tbname` when applied to a supertable.
|
||||
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.1.4).
|
||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.2.1).
|
||||
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported since version 3.0.2.3). An example query is shown below.
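A minimal sketch of `INTERP` on a supertable, assuming a hypothetical supertable `meters` with a `current` column and an arbitrary time range; `_irowts` and `_isfilled` are the pseudocolumns described above.

```sql
SELECT _irowts, INTERP(current), _isfilled
FROM meters
PARTITION BY tbname                                     -- required on a supertable: one timeline per subtable
RANGE('2023-01-01 00:00:00', '2023-01-01 00:10:00')     -- interpolation time range (assumed values)
EVERY(1m)                                               -- one interpolated point per minute
FILL(LINEAR);                                           -- linear interpolation between data points
```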
|
||||
|
||||
### LAST
|
||||
|
||||
|
|
|
@ -54,7 +54,6 @@ The following data types can be used in the schema for standard tables.
|
|||
| 27 | GRANT | Added | Grants permissions to a user.
|
||||
| 28 | KILL TRANSACTION | Added | Terminates an mnode transaction.
|
||||
| 29 | KILL STREAM | Deprecated | Terminates a continuous query. The continuous query feature has been replaced with the stream processing feature.
|
||||
| 30 | MERGE VGROUP | Added | Merges vgroups.
|
||||
| 31 | REVOKE | Added | Revokes permissions from a user.
|
||||
| 32 | SELECT | Modified | <ul><li>SELECT does not use the implicit results column. Output columns must be specified in the SELECT clause. </li><li>DISTINCT support is enhanced. In previous versions, DISTINCT only worked on the tag column and could not be used with JOIN or GROUP BY. </li><li>JOIN support is enhanced. The following are now supported after JOIN: a WHERE clause with OR, operations on multiple tables, and GROUP BY on multiple tables. </li><li>Subqueries after FROM are enhanced. Levels of nesting are no longer restricted. Subqueries can be used with UNION ALL. Other syntax restrictions are eliminated. </li><li>All scalar functions can be used after WHERE. </li><li>GROUP BY is enhanced. You can group by any scalar expression or combination thereof. </li><li>SESSION can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>STATE_WINDOW can be used on supertables. When PARTITION BY is not used, data in supertables is merged into a single timeline. </li><li>ORDER BY is enhanced. It is no longer required to use ORDER BY and GROUP BY together. There is no longer a restriction on the number of order expressions. NULLS FIRST and NULLS LAST syntax has been added. Any expression that conforms to the ORDER BY semantics can be used. </li><li>Added PARTITION BY syntax. PARTITION BY replaces GROUP BY tags. </li></ul>
|
||||
| 33 | SHOW ACCOUNTS | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
|
||||
|
@ -76,8 +75,9 @@ The following data types can be used in the schema for standard tables.
|
|||
| 49 | SHOW TRANSACTIONS | Added | Shows all running transactions in the system.
|
||||
| 50 | SHOW DNODE VARIABLES | Added | Shows the configuration of the specified dnode.
|
||||
| 51 | SHOW VNODES | Not supported | Shows information about vnodes in the system. Not supported.
|
||||
| 52 | SPLIT VGROUP | Added | Splits a vgroup into two vgroups.
|
||||
| 53 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
|
||||
| 52 | TRIM DATABASE | Added | Deletes data that has expired and orders the remaining data in accordance with the storage configuration.
|
||||
| 53 | REDISTRIBUTE VGROUP | Added | Adjusts the distribution of vnodes in a vgroup.
|
||||
| 54 | BALANCE VGROUP | Added | Automatically adjusts the distribution of vnodes in all vgroups.
|
||||
|
||||
## SQL Functions
|
||||
|
||||
|
|
|
@ -252,14 +252,14 @@ ws://localhost:6041/test
|
|||
|
||||
|Sample program |Sample program description |
|
||||
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
||||
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
|
||||
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | Parameter binding with TDengine Connector |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | Schemaless writes with TDengine Connector |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
|
||||
| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
|
||||
| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
|
||||
|
||||
## Important update records
|
||||
|
||||
|
|
|
@ -92,7 +92,7 @@ taosBenchmark -f <json file>
|
|||
|
||||
</details>
|
||||
|
||||
## Command-line argument in detailed
|
||||
## Command-line arguments in detail
|
||||
|
||||
- **-f/--file <json file\>** :
|
||||
Specify the configuration file to use. This file includes all parameters, and users should not use this parameter together with other command-line parameters. There is no default value.
|
||||
|
@ -198,7 +198,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
|||
- **-R/--disorder-range <timeRange\>** :
|
||||
Specify the timestamp range for out-of-order data. The resulting out-of-order timestamp is the in-order timestamp minus a random value within this range. Valid only if the percentage of out-of-order data specified by `-O/--disorder` is greater than 0.
|
||||
|
||||
- **-F/--prepare_rand <Num\>** :
|
||||
- **-F/--prepared_rand <Num\>** :
|
||||
Specify the number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
||||
|
||||
- **-a/--replica <replicaNum\>** :
|
||||
|
@ -216,7 +216,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
|
|||
- **-? /--help** :
|
||||
Show help information and exit. Users should not use it with other parameters.
|
||||
|
||||
## Configuration file parameters in detailed
|
||||
## Configuration file parameters in detail
|
||||
|
||||
### General configuration parameters
|
||||
|
||||
|
@ -380,7 +380,7 @@ The configuration parameters for specifying super table tag columns and data col
|
|||
- **num_of_records_per_req** :
|
||||
The number of rows written to TDengine per request. The default value is 30000. If it is set too large, the TDengine client driver returns an error message, in which case you need to lower this parameter to meet the write requirements.
|
||||
|
||||
- **prepare_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
||||
- **prepared_rand**: The number of unique values in the generated random data. A value of 1 means that all data are equal. The default value is 10000.
|
||||
|
||||
### Query scenario configuration parameters
|
||||
|
||||
|
|
|
@ -142,6 +142,15 @@ The parameters described in this document by the effect that they have on the sy
|
|||
| Meaning | Switch for allowing TDengine to collect and report service usage information |
|
||||
| Value Range | 0: Not allowed; 1: Allowed |
|
||||
| Default Value | 1 |
|
||||
### crashReporting
|
||||
|
||||
| Attribute | Description |
|
||||
| -------- | -------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Switch for allowing TDengine to collect and report crash-related information |
|
||||
| Value Range | 0: Not allowed; 1: Allowed |
|
||||
| Default Value | 1 |
|
||||
|
||||
|
||||
## Query Parameters
|
||||
|
||||
|
|
|
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.2.3
|
||||
|
||||
<Release type="tdengine" version="3.0.2.3" />
|
||||
|
||||
## 3.0.2.2
|
||||
|
||||
<Release type="tdengine" version="3.0.2.2" />
|
||||
|
|
|
@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.4.1
|
||||
|
||||
<Release type="tools" version="2.4.1" />
|
||||
|
||||
## 2.4.0
|
||||
|
||||
<Release type="tools" version="2.4.0" />
|
||||
|
|
|
@ -253,14 +253,14 @@ namespace TDengineExample
|
|||
|
||||
|示例程序 | 示例程序描述 |
|
||||
|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
|
||||
| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
|
||||
| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
|
||||
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/Query/Query.cs) | 使用 TDengine.Connector 实现的建表、插入、查询示例 |
|
||||
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/JSONTag) | 使用 TDengine.Connector 实现的写入和查询 JSON tag 类型数据的示例 |
|
||||
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/NET6Examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
|
||||
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
|
||||
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
|
||||
| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/NET6Examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
|
||||
| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
|
||||
| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/FrameWork45/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
|
||||
|
||||
## 重要更新记录
|
||||
|
||||
|
|
|
@ -30,8 +30,10 @@ database_option: {
|
|||
| WAL_LEVEL {1 | 2}
|
||||
| VGROUPS value
|
||||
| SINGLE_STABLE {0 | 1}
|
||||
| STT_TRIGGER value
|
||||
| TABLE_PREFIX value
|
||||
| TABLE_SUFFIX value
|
||||
| TSDB_PAGESIZE value
|
||||
| WAL_RETENTION_PERIOD value
|
||||
| WAL_ROLL_PERIOD value
|
||||
| WAL_RETENTION_SIZE value
|
||||
|
@ -69,8 +71,10 @@ database_option: {
|
|||
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
|
||||
- 0:表示可以创建多张超级表。
|
||||
- 1:表示只可以创建一张超级表。
|
||||
- STT_TRIGGER:表示落盘文件触发文件合并的个数。默认为 1,范围 1 到 16。对于少表高频场景,此参数建议使用默认配置,或较小的值;而对于多表低频场景,此参数建议配置较大的值。
|
||||
- TABLE_PREFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。
|
||||
- TABLE_SUFFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。
|
||||
- TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。
|
||||
- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。单副本默认为 0,即落盘后立即删除。-1 表示不删除。多副本默认为 4 天。
|
||||
- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。单副本默认为 0,即落盘后立即删除。多副本默认为-1,表示不删除。
|
||||
- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。单副本默认为 0,即仅在落盘时创建新文件。多副本默认为 1 天。
|
||||
|
@ -112,6 +116,10 @@ alter_database_options:
|
|||
alter_database_option: {
|
||||
CACHEMODEL {'none' | 'last_row' | 'last_value' | 'both'}
|
||||
| CACHESIZE value
|
||||
| BUFFER value
|
||||
| PAGES value
|
||||
| REPLICA value
|
||||
| STT_TRIGGER value
|
||||
| WAL_LEVEL value
|
||||
| WAL_FSYNC_PERIOD value
|
||||
| KEEP value
|
||||
|
@ -154,3 +162,19 @@ TRIM DATABASE db_name;
|
|||
```
|
||||
|
||||
删除过期数据,并根据多级存储的配置归整数据。
|
||||
|
||||
## 调整VGROUP中VNODE的分布
|
||||
|
||||
```sql
|
||||
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
|
||||
```
|
||||
|
||||
按照给定的dnode列表,调整vgroup中的vnode分布。因为副本数目最大为3,所以最多输入3个dnode。
|
||||
|
||||
## 自动调整VGROUP中VNODE的分布
|
||||
|
||||
```sql
|
||||
BALANCE VGROUP
|
||||
```
|
||||
|
||||
自动调整集群所有vgroup中的vnode分布,相当于在vnode级别对集群进行数据的负载均衡操作。
|
|
@ -350,9 +350,9 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
|
|||
|
||||
## JOIN 子句
|
||||
|
||||
TDengine 支持“普通表与普通表之间”、“超级表与超级表之间”、“子查询与子查询之间” 进行自然连接。自然连接与内连接的主要区别是,自然连接要求参与连接的字段在不同的表/超级表中必须是同名字段。也即,TDengine 在连接关系的表达中,要求必须使用同名数据列/标签列的相等关系。
|
||||
TDengine 支持基于时间戳主键的内连接,即 JOIN 条件必须包含时间戳主键。只要满足基于时间戳主键这个要求,普通表、子表、超级表和子查询之间可以随意的进行内连接,且对表个数没有限制。
|
||||
|
||||
在普通表与普通表之间的 JOIN 操作中,只能使用主键时间戳之间的相等关系。例如:
|
||||
普通表与普通表之间的 JOIN 操作:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
@ -360,7 +360,7 @@ FROM temp_tb_1 t1, pressure_tb_1 t2
|
|||
WHERE t1.ts = t2.ts
|
||||
```
|
||||
|
||||
在超级表与超级表之间的 JOIN 操作中,除了主键时间戳一致的条件外,还要求引入能实现一一对应的标签列的相等关系。例如:
|
||||
超级表与超级表之间的 JOIN 操作:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
|
@ -368,21 +368,16 @@ FROM temp_stable t1, temp_stable t2
|
|||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
子表与超级表之间的 JOIN 操作:
|
||||
|
||||
```sql
|
||||
SELECT *
|
||||
FROM temp_ctable t1, temp_stable t2
|
||||
WHERE t1.ts = t2.ts AND t1.deviceid = t2.deviceid AND t1.status=0;
|
||||
```
|
||||
|
||||
类似地,也可以对多个子查询的查询结果进行 JOIN 操作。
|
||||
|
||||
:::note
|
||||
|
||||
JOIN 语句存在如下限制要求:
|
||||
|
||||
- 参与一条语句中 JOIN 操作的表/超级表最多可以有 10 个。
|
||||
- 在包含 JOIN 操作的查询语句中不支持 FILL。
|
||||
- 暂不支持参与 JOIN 操作的表之间聚合后的四则运算。
|
||||
- 不支持只对其中一部分表做 GROUP BY。
|
||||
- JOIN 查询的不同表的过滤条件之间不能为 OR。
|
||||
- JOIN 查询要求连接条件不能是普通列,只能针对标签和主时间字段列(第一列)。
|
||||
|
||||
:::
|
||||
|
||||
## 嵌套查询
|
||||
|
||||
“嵌套查询”又称为“子查询”,也即在一条 SQL 语句中,“内层查询”的计算结果可以作为“外层查询”的计算对象来使用。
|
||||
|
|
|
@ -880,7 +880,7 @@ INTERP(expr)
|
|||
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。
|
||||
- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。
|
||||
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.1.4版本以后支持)。
|
||||
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.2.1版本以后支持)。
|
||||
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.2.3版本以后支持)。
|
||||
|
||||
### LAST
|
||||
|
||||
|
|
|
@ -54,7 +54,6 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 27 | GRANT | 新增 | 授予用户权限。
|
||||
| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。
|
||||
| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。
|
||||
| 30 | MERGE VGROUP | 新增 | 合并VGROUP。
|
||||
| 31 | REVOKE | 新增 | 回收用户权限。
|
||||
| 32 | SELECT | 调整 | <ul><li>SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。</li><li>DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。</li><li>JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。</li><li>FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。</li><li>WHERE后可以使用任意的标量表达式。</li><li>GROUP BY功能增强。支持任意标量表达式及其组合的分组。</li><li>SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。</li><li>ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。</li><li>新增PARTITION BY语法。替代原来的GROUP BY tags。</li></ul>
|
||||
| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
|
||||
|
@ -76,8 +75,9 @@ description: "TDengine 3.0 版本的语法变更说明"
|
|||
| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。
|
||||
| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。
|
||||
| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。
|
||||
| 52 | SPLIT VGROUP | 新增 | 拆分VGROUP。
|
||||
| 53 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
|
||||
| 52 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
|
||||
| 53 | REDISTRIBUTE VGROUP | 新增 | 调整VGROUP中VNODE的分布。
|
||||
| 54 | BALANCE VGROUP | 新增 | 自动调整VGROUP中VNODE的分布。
|
||||
|
||||
## SQL 函数变更
|
||||
|
||||
|
|
|
@ -134,6 +134,24 @@ taos --dump-config
|
|||
| 取值范围 | 1-200000 |
|
||||
| 缺省值 | 30 |
|
||||
|
||||
### telemetryReporting
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 |是否上传 telemetry |
|
||||
| 取值范围 | 0:不上传;1:上传 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### crashReporting
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 |是否上传 crash 信息 |
|
||||
| 取值范围 | 0:不上传;1:上传 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
## 查询相关
|
||||
|
||||
### queryPolicy
|
||||
|
@ -698,7 +716,7 @@ charset 的有效值是 UTF-8。
|
|||
| 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
|
||||
| 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
|
||||
| 4 | vnodeBak | 是 | 否 | 3.0 行为未知 |
|
||||
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 |
|
||||
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 (暂不支持) |
|
||||
| 6 | balanceInterval | 是 | 否 | 随着 balance 参数失效 |
|
||||
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
||||
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
||||
|
|
|
@ -243,3 +243,8 @@ sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist
|
|||
```
|
||||
launchctl limit maxfiles
|
||||
```
|
||||
### 19 建库时提示Out of dnode
|
||||
该提示表示创建 db 所需的 vnode 数量不够了,所需的 vnode 数不能超过 dnode 中 vnode 的上限。系统默认一个 dnode 中可用的 vnode 数为 CPU 核数的两倍,也可以通过配置文件中的参数 supportVnodes 控制。
|
||||
正常调大 taos.cfg 中这个 supportVnodes 参数即可。
|
||||
|
||||
|
||||
|
|
|
@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.2.3
|
||||
|
||||
<Release type="tdengine" version="3.0.2.3" />
|
||||
|
||||
## 3.0.2.2
|
||||
|
||||
<Release type="tdengine" version="3.0.2.2" />
|
||||
|
|
|
@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.4.1
|
||||
|
||||
<Release type="tools" version="2.4.1" />
|
||||
|
||||
## 2.4.0
|
||||
|
||||
<Release type="tools" version="2.4.0" />
|
||||
|
|
|
@ -220,6 +220,7 @@ DLL_EXPORT const void *taos_get_raw_block(TAOS_RES *res);
|
|||
|
||||
DLL_EXPORT int taos_get_db_route_info(TAOS *taos, const char *db, TAOS_DB_ROUTE_INFO *dbInfo);
|
||||
DLL_EXPORT int taos_get_table_vgId(TAOS *taos, const char *db, const char *table, int *vgId);
|
||||
DLL_EXPORT int taos_get_tables_vgId(TAOS *taos, const char *db, const char *table[], int tableNum, int *vgId);
|
||||
|
||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
||||
|
||||
|
|
|
@ -162,6 +162,7 @@ typedef enum EStreamType {
|
|||
STREAM_PULL_DATA,
|
||||
STREAM_PULL_OVER,
|
||||
STREAM_FILL_OVER,
|
||||
STREAM_CREATE_CHILD_TABLE,
|
||||
} EStreamType;
|
||||
|
||||
#pragma pack(push, 1)
|
||||
|
@ -205,8 +206,6 @@ typedef struct SDataBlockInfo {
|
|||
TSKEY watermark; // used for stream
|
||||
|
||||
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
|
||||
int32_t tagLen;
|
||||
void* pTag; // used for stream partition
|
||||
} SDataBlockInfo;
|
||||
|
||||
typedef struct SSDataBlock {
|
||||
|
@ -379,6 +378,11 @@ typedef struct SSortExecInfo {
|
|||
#define CALCULATE_END_TS_COLUMN_INDEX 5
|
||||
#define TABLE_NAME_COLUMN_INDEX 6
|
||||
|
||||
// stream create table block column
|
||||
#define UD_TABLE_NAME_COLUMN_INDEX 0
|
||||
#define UD_GROUPID_COLUMN_INDEX 1
|
||||
#define UD_TAG_COLUMN_INDEX 2
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -69,6 +69,9 @@ extern int32_t tsElectInterval;
|
|||
extern int32_t tsHeartbeatInterval;
|
||||
extern int32_t tsHeartbeatTimeout;
|
||||
|
||||
// vnode
|
||||
extern int64_t tsVndCommitMaxIntervalMs;
|
||||
|
||||
// monitor
|
||||
extern bool tsEnableMonitor;
|
||||
extern int32_t tsMonitorInterval;
|
||||
|
@ -82,6 +85,10 @@ extern bool tsEnableTelem;
|
|||
extern int32_t tsTelemInterval;
|
||||
extern char tsTelemServer[];
|
||||
extern uint16_t tsTelemPort;
|
||||
extern bool tsEnableCrashReport;
|
||||
extern char* tsTelemUri;
|
||||
extern char* tsClientCrashReportUri;
|
||||
extern char* tsSvrCrashReportUri;
|
||||
|
||||
// query buffer management
|
||||
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
|
||||
|
|
|
@ -78,7 +78,7 @@ typedef struct {
|
|||
|
||||
// output
|
||||
char* ctbShortName; // must have size of TSDB_TABLE_NAME_LEN;
|
||||
uint64_t uid; // child table uid, may be useful
|
||||
// uint64_t uid; // child table uid, may be useful
|
||||
} RandTableName;
|
||||
|
||||
void buildChildTableName(RandTableName* rName);
|
||||
|
|
|
@ -343,8 +343,8 @@ typedef struct tDataTypeDescriptor {
|
|||
extern tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX];
|
||||
bool isValidDataType(int32_t type);
|
||||
|
||||
int32_t operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
|
||||
void assignVal(char *val, const char *src, int32_t len, int32_t type);
|
||||
void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type);
|
||||
void *getDataMin(int32_t type, void* value);
|
||||
void *getDataMax(int32_t type, void* value);
|
||||
|
||||
|
|
|
@ -210,6 +210,9 @@ int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STabl
|
|||
|
||||
int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
|
||||
|
||||
int32_t catalogGetTablesHashVgId(SCatalog* pCtg, SRequestConnInfo* pConn, int32_t acctId, const char* pDb, const char* pTableName[],
|
||||
int32_t tableNum, int32_t *vgId);
|
||||
|
||||
int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists);
|
||||
|
||||
int32_t catalogGetCachedTableVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, STableMeta** pTableMeta);
|
||||
|
|
|
@ -154,6 +154,8 @@ void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo);
|
|||
*/
|
||||
int32_t qAsyncKillTask(qTaskInfo_t tinfo, int32_t rspCode);
|
||||
|
||||
bool qTaskIsExecuting(qTaskInfo_t qinfo);
|
||||
|
||||
/**
|
||||
* destroy query info structure
|
||||
* @param qHandle
|
||||
|
@ -216,6 +218,7 @@ int32_t qStreamSourceRecoverStep2(qTaskInfo_t tinfo, int64_t ver);
|
|||
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
|
||||
int32_t qStreamRestoreParam(qTaskInfo_t tinfo);
|
||||
bool qStreamRecoverScanFinished(qTaskInfo_t tinfo);
|
||||
void qStreamCloseTsdbReader(void* task);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -49,10 +49,12 @@ extern "C" {
|
|||
#define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500
|
||||
#define SYNC_SNAP_RESEND_MS 1000 * 60
|
||||
|
||||
#define SYNC_VND_COMMIT_MIN_MS 1000
|
||||
|
||||
#define SYNC_MAX_BATCH_SIZE 1
|
||||
#define SYNC_INDEX_BEGIN 0
|
||||
#define SYNC_INDEX_INVALID -1
|
||||
#define SYNC_TERM_INVALID -1 // 0xFFFFFFFFFFFFFFFF
|
||||
#define SYNC_TERM_INVALID -1
|
||||
|
||||
typedef enum {
|
||||
SYNC_STRATEGY_NO_SNAPSHOT = 0,
|
||||
|
@ -232,6 +234,7 @@ int64_t syncOpen(SSyncInfo* pSyncInfo);
|
|||
int32_t syncStart(int64_t rid);
|
||||
void syncStop(int64_t rid);
|
||||
void syncPreStop(int64_t rid);
|
||||
void syncPostStop(int64_t rid);
|
||||
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
|
||||
int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg);
|
||||
int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg);
|
||||
|
|
|
@ -24,7 +24,7 @@ extern "C" {
|
|||
|
||||
typedef enum { HTTP_GZIP, HTTP_FLAT } EHttpCompFlag;
|
||||
|
||||
int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag);
|
||||
int32_t taosSendHttpReport(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -116,6 +116,7 @@ extern "C" {
|
|||
#include "osTimer.h"
|
||||
#include "osTimezone.h"
|
||||
#include "taoserror.h"
|
||||
#include "tlog.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -46,27 +46,73 @@ void taosSetTerminalMode();
|
|||
int32_t taosGetOldTerminalMode();
|
||||
void taosResetTerminalMode();
|
||||
|
||||
#define STACKSIZE 100
|
||||
|
||||
#if !defined(WINDOWS)
|
||||
#define taosPrintTrace(flags, level, dflag) \
|
||||
#define taosLogTraceToBuf(buf, bufSize, ignoreNum) { \
|
||||
void* array[STACKSIZE]; \
|
||||
int32_t size = backtrace(array, STACKSIZE); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
int32_t offset = 0; \
|
||||
if (strings != NULL) { \
|
||||
offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%d, %s\n", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
}
|
||||
|
||||
#define taosPrintTrace(flags, level, dflag, ignoreNum) \
|
||||
{ \
|
||||
void* array[100]; \
|
||||
int32_t size = backtrace(array, 100); \
|
||||
void* array[STACKSIZE]; \
|
||||
int32_t size = backtrace(array, STACKSIZE); \
|
||||
char** strings = backtrace_symbols(array, size); \
|
||||
if (strings != NULL) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", size); \
|
||||
for (int32_t i = 0; i < size; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "frame:%d, %s", i, strings[i]); \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", (ignoreNum > 0) ? size - ignoreNum : size); \
|
||||
for (int32_t i = (ignoreNum > 0) ? ignoreNum : 0; i < size; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "frame:%d, %s", (ignoreNum > 0) ? i - ignoreNum : i, strings[i]); \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
taosMemoryFree(strings); \
|
||||
}
|
||||
#else
|
||||
|
||||
#include <windows.h>
|
||||
#include <dbghelp.h>
|
||||
|
||||
#define STACKSIZE 64
|
||||
#define taosPrintTrace(flags, level, dflag) \
|
||||
#define taosLogTraceToBuf(buf, bufSize, ignoreNum) { \
|
||||
unsigned int i; \
|
||||
void* stack[STACKSIZE]; \
|
||||
unsigned short frames; \
|
||||
SYMBOL_INFO* symbol; \
|
||||
HANDLE process; \
|
||||
int32_t offset = 0; \
|
||||
\
|
||||
process = GetCurrentProcess(); \
|
||||
\
|
||||
SymInitialize(process, NULL, TRUE); \
|
||||
\
|
||||
frames = CaptureStackBackTrace(0, STACKSIZE, stack, NULL); \
|
||||
symbol = (SYMBOL_INFO*)calloc(sizeof(SYMBOL_INFO) + 256 * sizeof(char), 1); \
|
||||
if (symbol != NULL) { \
|
||||
symbol->MaxNameLen = 255; \
|
||||
symbol->SizeOfStruct = sizeof(SYMBOL_INFO); \
|
||||
\
|
||||
if (frames > 0) { \
|
||||
offset = snprintf(buf, bufSize - 1, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \
|
||||
for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \
|
||||
SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \
|
||||
offset += snprintf(buf + offset, bufSize - 1 - offset, "frame:%i, %s - 0x%0X\n", (ignoreNum > 0) ? i - ignoreNum : i, symbol->Name, symbol->Address); \
|
||||
} \
|
||||
} \
|
||||
free(symbol); \
|
||||
} \
|
||||
}
|
||||
|
||||
#define taosPrintTrace(flags, level, dflag, ignoreNum) \
|
||||
{ \
|
||||
unsigned int i; \
|
||||
void* stack[STACKSIZE]; \
|
||||
|
@ -85,10 +131,10 @@ void taosResetTerminalMode();
|
|||
symbol->SizeOfStruct = sizeof(SYMBOL_INFO); \
|
||||
\
|
||||
if (frames > 0) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames", frames); \
|
||||
for (i = 0; i < frames; i++) { \
|
||||
taosPrintLog(flags, level, dflag, "obtained %d stack frames\n", (ignoreNum > 0) ? frames - ignoreNum : frames); \
|
||||
for (i = (ignoreNum > 0) ? ignoreNum : 0; i < frames; i++) { \
|
||||
SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol); \
|
||||
taosPrintLog(flags, level, dflag, "frame:%i: %s - 0x%0X", frames - i - 1, symbol->Name, symbol->Address); \
|
||||
taosPrintLog(flags, level, dflag, "frame:%i, %s - 0x%0X\n", (ignoreNum > 0) ? i - ignoreNum : i, symbol->Name, symbol->Address); \
|
||||
} \
|
||||
} \
|
||||
free(symbol); \
|
||||
|
|
|
@ -159,6 +159,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_TSC_NO_EXEC_NODE TAOS_DEF_ERROR_CODE(0, 0X022E)
|
||||
#define TSDB_CODE_TSC_NOT_STABLE_ERROR TAOS_DEF_ERROR_CODE(0, 0X022F)
|
||||
#define TSDB_CODE_TSC_STMT_CACHE_ERROR TAOS_DEF_ERROR_CODE(0, 0X0230)
|
||||
#define TSDB_CODE_TSC_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0X0231)
|
||||
|
||||
// mnode-common
|
||||
// #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) // 2.x
|
||||
|
@ -414,6 +415,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_VND_NO_AVAIL_BUFPOOL TAOS_DEF_ERROR_CODE(0, 0x0528)
|
||||
#define TSDB_CODE_VND_STOPPED TAOS_DEF_ERROR_CODE(0, 0x0529)
|
||||
#define TSDB_CODE_VND_DUP_REQUEST TAOS_DEF_ERROR_CODE(0, 0x0530)
|
||||
#define TSDB_CODE_VND_QUERY_BUSY TAOS_DEF_ERROR_CODE(0, 0x0531)
|
||||
|
||||
// tsdb
|
||||
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
||||
|
@ -700,6 +702,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_SML_INVALID_DATA TAOS_DEF_ERROR_CODE(0, 0x3002)
|
||||
#define TSDB_CODE_SML_INVALID_DB_CONF TAOS_DEF_ERROR_CODE(0, 0x3003)
|
||||
#define TSDB_CODE_SML_NOT_SAME_TYPE TAOS_DEF_ERROR_CODE(0, 0x3004)
|
||||
#define TSDB_CODE_SML_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x3005)
|
||||
|
||||
//tsma
|
||||
#define TSDB_CODE_TSMA_INIT_FAILED TAOS_DEF_ERROR_CODE(0, 0x3100)
|
||||
|
|
|
@ -99,6 +99,11 @@ bool taosAssertRelease(bool condition);
|
|||
#endif
|
||||
#endif
|
||||
|
||||
void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo);
|
||||
void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd);
|
||||
void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile);
|
||||
int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t startTime);
|
||||
|
||||
// clang-format off
|
||||
#define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", DEBUG_FATAL, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
#define uError(...) { if (uDebugFlag & DEBUG_ERROR) { taosPrintLog("UTL ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }}
|
||||
|
|
|
@ -43,6 +43,9 @@
|
|||
# Switch for allowing TDengine to collect and report service usage information
|
||||
# telemetryReporting 1
|
||||
|
||||
# Switch for allowing TDengine to collect and report crash information
|
||||
# crashReporting 1
|
||||
|
||||
# The maximum number of vnodes supported by this dnode
|
||||
# supportVnodes 0
|
||||
|
||||
|
|
|
@ -1,319 +0,0 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Generate the deb package for ubuntu, or rpm package for centos, or tar.gz package for other linux os
|
||||
|
||||
set -e
|
||||
# set -x
|
||||
|
||||
# release.sh -v [cluster | edge]
|
||||
# -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64...]
|
||||
# -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
# -V [stable | beta]
|
||||
# -l [full | lite]
|
||||
# -s [static | dynamic]
|
||||
# -d [taos | ...]
|
||||
# -n [2.0.0.3]
|
||||
# -m [2.0.0.0]
|
||||
# -H [ false | true]
|
||||
|
||||
# set parameters by default value
|
||||
verMode=edge # [cluster, edge, cloud]
|
||||
verType=stable # [stable, beta]
|
||||
cpuType=x64 # [aarch32 | aarch64 | x64 | x86 | mips64 loongarch64...]
|
||||
osType=Linux # [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...]
|
||||
pagMode=full # [full | lite]
|
||||
soMode=dynamic # [static | dynamic]
|
||||
dbName=taos # [taos | ...]
|
||||
allocator=glibc # [glibc | jemalloc]
|
||||
verNumber=""
|
||||
verNumberComp="3.0.0.0"
|
||||
httpdBuild=false
|
||||
|
||||
while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do
|
||||
case $arg in
|
||||
v)
|
||||
#echo "verMode=$OPTARG"
|
||||
verMode=$(echo $OPTARG)
|
||||
;;
|
||||
V)
|
||||
#echo "verType=$OPTARG"
|
||||
verType=$(echo $OPTARG)
|
||||
;;
|
||||
c)
|
||||
#echo "cpuType=$OPTARG"
|
||||
cpuType=$(echo $OPTARG)
|
||||
;;
|
||||
l)
|
||||
#echo "pagMode=$OPTARG"
|
||||
pagMode=$(echo $OPTARG)
|
||||
;;
|
||||
s)
|
||||
#echo "soMode=$OPTARG"
|
||||
soMode=$(echo $OPTARG)
|
||||
;;
|
||||
d)
|
||||
#echo "dbName=$OPTARG"
|
||||
dbName=$(echo $OPTARG)
|
||||
;;
|
||||
a)
|
||||
#echo "allocator=$OPTARG"
|
||||
allocator=$(echo $OPTARG)
|
||||
;;
|
||||
n)
|
||||
#echo "verNumber=$OPTARG"
|
||||
verNumber=$(echo $OPTARG)
|
||||
;;
|
||||
m)
|
||||
#echo "verNumberComp=$OPTARG"
|
||||
verNumberComp=$(echo $OPTARG)
|
||||
;;
|
||||
o)
|
||||
#echo "osType=$OPTARG"
|
||||
osType=$(echo $OPTARG)
|
||||
;;
|
||||
H)
|
||||
#echo "httpdBuild=$OPTARG"
|
||||
httpdBuild=$(echo $OPTARG)
|
||||
;;
|
||||
h)
|
||||
echo "Usage: $(basename $0) -v [cluster | edge] "
|
||||
echo " -c [aarch32 | aarch64 | x64 | x86 | mips64 | loongarch64 ...] "
|
||||
echo " -o [Linux | Kylin | Alpine | Raspberrypi | Darwin | Windows | Ningsi60 | Ningsi80 |...] "
|
||||
echo " -V [stable | beta] "
|
||||
echo " -l [full | lite] "
|
||||
echo " -a [glibc | jemalloc] "
|
||||
echo " -s [static | dynamic] "
|
||||
echo " -d [taos | ...] "
|
||||
echo " -n [version number] "
|
||||
echo " -m [compatible version number] "
|
||||
echo " -H [false | true] "
|
||||
exit 0
|
||||
;;
|
||||
?) #unknow option
|
||||
echo "unkonw argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
osType=$(uname)
|
||||
|
||||
echo "verMode=${verMode} verType=${verType} cpuType=${cpuType} osType=${osType} pagMode=${pagMode} soMode=${soMode} dbName=${dbName} allocator=${allocator} verNumber=${verNumber} verNumberComp=${verNumberComp} httpdBuild=${httpdBuild}"
|
||||
|
||||
curr_dir=$(pwd)
|
||||
|
||||
if [ "$osType" == "Darwin" ]; then
|
||||
script_dir=$(dirname $0)
|
||||
cd ${script_dir}
|
||||
script_dir="$(pwd)"
|
||||
top_dir=${script_dir}/..
|
||||
else
|
||||
script_dir="$(dirname $(readlink -f $0))"
|
||||
top_dir="$(readlink -f ${script_dir}/..)"
|
||||
fi
|
||||
|
||||
csudo=""
|
||||
#if command -v sudo > /dev/null; then
|
||||
# csudo="sudo "
|
||||
#fi
|
||||
|
||||
function is_valid_version() {
|
||||
[ -z $1 ] && return 1 || :
|
||||
|
||||
rx='^([0-9]+\.){3}(\*|[0-9]+)$'
|
||||
if [[ $1 =~ $rx ]]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
function vercomp() {
|
||||
if [[ $1 == $2 ]]; then
|
||||
echo 0
|
||||
exit 0
|
||||
fi
|
||||
|
||||
local IFS=.
|
||||
local i ver1=($1) ver2=($2)
|
||||
|
||||
# fill empty fields in ver1 with zeros
|
||||
for ((i = ${#ver1[@]}; i < ${#ver2[@]}; i++)); do
|
||||
ver1[i]=0
|
||||
done
|
||||
|
||||
for ((i = 0; i < ${#ver1[@]}; i++)); do
|
||||
if [[ -z ${ver2[i]} ]]; then
|
||||
# fill empty fields in ver2 with zeros
|
||||
ver2[i]=0
|
||||
fi
|
||||
if ((10#${ver1[i]} > 10#${ver2[i]})); then
|
||||
echo 1
|
||||
exit 0
|
||||
fi
|
||||
if ((10#${ver1[i]} < 10#${ver2[i]})); then
|
||||
echo 2
|
||||
exit 0
|
||||
fi
|
||||
done
|
||||
echo 0
|
||||
}
|
||||
|
||||
# 1. check version information
|
||||
if ( (! is_valid_version $verNumber) || (! is_valid_version $verNumberComp) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
|
||||
echo "please enter correct version"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "=======================new version number: ${verNumber}, compatible version: ${verNumberComp}======================================"
|
||||
|
||||
build_time=$(date +"%F %R")
|
||||
|
||||
# get commint id from git
|
||||
gitinfo=$(git rev-parse --verify HEAD)
|
||||
|
||||
if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then
|
||||
enterprise_dir="${top_dir}/../enterprise"
|
||||
cd ${enterprise_dir}
|
||||
gitinfoOfInternal=$(git rev-parse --verify HEAD)
|
||||
else
|
||||
gitinfoOfInternal=NULL
|
||||
fi
|
||||
|
||||
cd "${curr_dir}"
|
||||
|
||||
# 2. cmake executable file
|
||||
compile_dir="${top_dir}/debug"
|
||||
if [ -d ${compile_dir} ]; then
|
||||
rm -rf ${compile_dir}
|
||||
fi
|
||||
|
||||
mkdir -p ${compile_dir}
|
||||
cd ${compile_dir}
|
||||
|
||||
if [[ "$allocator" == "jemalloc" ]]; then
|
||||
allocator_macro="-DJEMALLOC_ENABLED=true"
|
||||
else
|
||||
allocator_macro=""
|
||||
fi
|
||||
|
||||
if [[ "$dbName" != "taos" ]]; then
|
||||
source ${enterprise_dir}/packaging/oem/sed_$dbName.sh
|
||||
replace_community_$dbName
|
||||
fi
|
||||
|
||||
if [[ "$httpdBuild" == "true" ]]; then
|
||||
BUILD_HTTP=true
|
||||
else
|
||||
BUILD_HTTP=false
|
||||
fi
|
||||
|
||||
if [[ "$verMode" == "cluster" ]] || [[ "$verMode" == "cloud" ]]; then
|
||||
BUILD_HTTP=internal
|
||||
fi
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
BUILD_TOOLS=true
|
||||
else
|
||||
BUILD_TOOLS=false
|
||||
fi
|
||||
|
||||
# check support cpu type
|
||||
if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "arm64" ]] || [[ "$cpuType" == "arm32" ]] || [[ "$cpuType" == "mips64" ]] || [[ "$cpuType" == "loongarch64" ]] ; then
|
||||
if [ "$verMode" == "edge" ]; then
|
||||
# community-version compile
|
||||
cmake ../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
elif [ "$verMode" == "cloud" ]; then
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
elif [ "$verMode" == "cluster" ]; then
|
||||
if [[ "$dbName" != "taos" ]]; then
|
||||
replace_enterprise_$dbName
|
||||
fi
|
||||
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
|
||||
fi
|
||||
else
|
||||
echo "input cpuType=${cpuType} error!!!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ostype=`uname`
|
||||
if [ "${ostype}" == "Darwin" ]; then
|
||||
CORES=$(sysctl -n hw.ncpu)
|
||||
else
|
||||
CORES=$(grep -c ^processor /proc/cpuinfo)
|
||||
fi
|
||||
|
||||
if [[ "$allocator" == "jemalloc" ]]; then
|
||||
# jemalloc need compile first, so disable parallel build
|
||||
make -j ${CORES} && ${csudo}make install
|
||||
else
|
||||
make -j ${CORES} && ${csudo}make install
|
||||
fi
|
||||
|
||||
cd ${curr_dir}
|
||||
|
||||
# 3. Call the corresponding script for packaging
|
||||
if [ "$osType" != "Darwin" ]; then
|
||||
if [[ "$verMode" != "cluster" ]] && [[ "$verMode" != "cloud" ]] && [[ "$pagMode" == "full" ]] && [[ "$cpuType" == "x64" ]] && [[ "$dbName" == "taos" ]]; then
|
||||
ret='0'
|
||||
command -v dpkg >/dev/null 2>&1 || { ret='1'; }
|
||||
if [ "$ret" -eq 0 ]; then
|
||||
echo "====do deb package for the ubuntu system===="
|
||||
output_dir="${top_dir}/debs"
|
||||
if [ -d ${output_dir} ]; then
|
||||
rm -rf ${output_dir}
|
||||
fi
|
||||
mkdir -p ${output_dir}
|
||||
cd ${script_dir}/deb
|
||||
${csudo}./makedeb.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/deb ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/deb
|
||||
taos_tools_ver=$(git tag |grep -v taos | sort | tail -1)
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
${csudo}./make-taos-tools-deb.sh ${top_dir} \
|
||||
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "==========dpkg command not exist, so not release deb package!!!"
|
||||
fi
|
||||
ret='0'
|
||||
command -v rpmbuild >/dev/null 2>&1 || { ret='1'; }
|
||||
if [ "$ret" -eq 0 ]; then
|
||||
echo "====do rpm package for the centos system===="
|
||||
output_dir="${top_dir}/rpms"
|
||||
if [ -d ${output_dir} ]; then
|
||||
rm -rf ${output_dir}
|
||||
fi
|
||||
mkdir -p ${output_dir}
|
||||
cd ${script_dir}/rpm
|
||||
${csudo}./makerpm.sh ${compile_dir} ${output_dir} ${verNumber} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
|
||||
if [[ "$pagMode" == "full" ]]; then
|
||||
if [ -d ${top_dir}/tools/taos-tools/packaging/rpm ]; then
|
||||
cd ${top_dir}/tools/taos-tools/packaging/rpm
|
||||
taos_tools_ver=$(git tag |grep -v taos | sort | tail -1)
|
||||
[ -z "$taos_tools_ver" ] && taos_tools_ver="0.1.0"
|
||||
|
||||
${csudo}./make-taos-tools-rpm.sh ${top_dir} \
|
||||
${compile_dir} ${output_dir} ${taos_tools_ver} ${cpuType} ${osType} ${verMode} ${verType}
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "==========rpmbuild command not exist, so not release rpm package!!!"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "====do tar.gz package for all systems===="
|
||||
cd ${script_dir}/tools
|
||||
|
||||
${csudo}./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
|
||||
${csudo}./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
|
||||
else
|
||||
cd ${script_dir}/tools
|
||||
./makepkg.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${verNumberComp} ${dbName}
|
||||
./makeclient.sh ${compile_dir} ${verNumber} "${build_time}" ${cpuType} ${osType} ${verMode} ${verType} ${pagMode} ${dbName}
|
||||
fi
|
|
@ -481,10 +481,10 @@ function install_adapter_config() {
|
|||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${adapterName}.toml ] && ${csudo}cp ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}
|
||||
[ -f ${cfg_install_dir}/${adapterName}.toml ] && ${csudo}chmod 644 ${cfg_install_dir}/${adapterName}.toml
|
||||
fi
|
||||
|
||||
else
|
||||
[ -f ${script_dir}/cfg/${adapterName}.toml ] &&
|
||||
${csudo}cp -f ${script_dir}/cfg/${adapterName}.toml ${cfg_install_dir}/${adapterName}.toml.new
|
||||
fi
|
||||
|
||||
[ -f ${cfg_install_dir}/${adapterName}.toml ] &&
|
||||
${csudo}ln -s ${cfg_install_dir}/${adapterName}.toml ${install_main_dir}/cfg/${adapterName}.toml
|
||||
|
@ -499,9 +499,10 @@ function install_config() {
|
|||
${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${script_dir}/cfg/${configFile} ] && ${csudo}cp ${script_dir}/cfg/${configFile} ${cfg_install_dir}
|
||||
${csudo}chmod 644 ${cfg_install_dir}/*
|
||||
else
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new
|
||||
fi
|
||||
|
||||
${csudo}cp -f ${script_dir}/cfg/${configFile} ${cfg_install_dir}/${configFile}.new
|
||||
${csudo}ln -s ${cfg_install_dir}/${configFile} ${install_main_dir}/cfg
|
||||
|
||||
[ ! -z $1 ] && return 0 || : # only install client
|
||||
|
|
|
@ -109,6 +109,13 @@ function kill_taosadapter() {
|
|||
fi
|
||||
}
|
||||
|
||||
function kill_taoskeeper() {
|
||||
pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
|
||||
if [ -n "$pid" ]; then
|
||||
${csudo}kill -9 $pid || :
|
||||
fi
|
||||
}
|
||||
|
||||
function kill_taosd() {
|
||||
# ${csudo}pkill -f taosd || :
|
||||
pid=$(ps -ef | grep "taosd" | grep -v "grep" | awk '{print $2}')
|
||||
|
@ -161,6 +168,7 @@ function install_bin() {
|
|||
${csudo}rm -f ${bin_link_dir}/udfd || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosadapter || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
|
||||
${csudo}rm -f ${bin_link_dir}/taoskeeper || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdemo || :
|
||||
${csudo}rm -f ${bin_link_dir}/taosdump || :
|
||||
${csudo}rm -f ${bin_link_dir}/rmtaos || :
|
||||
|
@ -179,6 +187,7 @@ function install_bin() {
|
|||
[ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
|
||||
[ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
|
||||
[ -x ${bin_dir}/remove.sh ] && ${csudo}ln -s ${bin_dir}/remove.sh ${bin_link_dir}/rmtaos || :
|
||||
[ -x ${bin_dir}/taoskeeper ] && ${csudo}ln -sf ${bin_dir}/taoskeeper ${bin_link_dir}/taoskeeper || :
|
||||
}
|
||||
|
||||
function add_newHostname_to_hosts() {
|
||||
|
@ -351,6 +360,22 @@ function install_taosadapter_config() {
|
|||
${csudo}ln -s ${cfg_install_dir}/taosadapter.toml ${cfg_dir}
|
||||
}
|
||||
|
||||
function install_taoskeeper_config() {
|
||||
if [ ! -f "${cfg_install_dir}/keeper.toml" ]; then
|
||||
[ ! -d %{cfg_install_dir} ] &&
|
||||
${csudo}${csudo}mkdir -p ${cfg_install_dir}
|
||||
[ -f ${cfg_dir}/keeper.toml ] && ${csudo}cp ${cfg_dir}/keeper.toml ${cfg_install_dir}
|
||||
[ -f ${cfg_install_dir}/keeper.toml ] &&
|
||||
${csudo}chmod 644 ${cfg_install_dir}/keeper.toml
|
||||
fi
|
||||
|
||||
[ -f ${cfg_dir}/keeper.toml ] &&
|
||||
${csudo}mv ${cfg_dir}/keeper.toml ${cfg_dir}/keeper.toml.new
|
||||
|
||||
[ -f ${cfg_install_dir}/keeper.toml ] &&
|
||||
${csudo}ln -s ${cfg_install_dir}/keeper.toml ${cfg_dir}
|
||||
}
|
||||
|
||||
function install_config() {
|
||||
if [ ! -f "${cfg_install_dir}/taos.cfg" ]; then
|
||||
${csudo}${csudo}mkdir -p ${cfg_install_dir}
|
||||
|
@ -583,6 +608,7 @@ function install_TDengine() {
|
|||
install_bin
|
||||
install_config
|
||||
install_taosadapter_config
|
||||
install_taoskeeper_config
|
||||
install_taosadapter_service
|
||||
install_service
|
||||
install_app
|
||||
|
|
|
@ -312,6 +312,8 @@ extern SAppInfo appInfo;
|
|||
extern int32_t clientReqRefPool;
|
||||
extern int32_t clientConnRefPool;
|
||||
extern int32_t timestampDeltaLimit;
|
||||
extern int64_t lastClusterId;
|
||||
|
||||
|
||||
__async_send_cb_fn_t getMsgRspHandle(int32_t msgType);
|
||||
|
||||
|
@ -339,6 +341,7 @@ void resetConnectDB(STscObj* pTscObj);
|
|||
int taos_options_imp(TSDB_OPTION option, const char* str);
|
||||
|
||||
void* openTransporter(const char* user, const char* auth, int32_t numOfThreads);
|
||||
void tscStopCrashReport();
|
||||
|
||||
typedef struct AsyncArg {
|
||||
SRpcMsg msg;
|
||||
|
|
|
@@ -164,6 +164,7 @@ typedef struct {
bool dataFormat; // true means that the name and order of keys in each line are the same(only for influx protocol)
bool isRawLine;
int32_t ttl;
int32_t uid; // used for automatic create child table

NodeList *childTables;
NodeList *superTables;
@@ -182,6 +183,7 @@ typedef struct {
int8_t offset[OTD_JSON_FIELDS_NUM];
SSmlLineInfo *lines; // element is SSmlLineInfo
bool parseJsonByLib;
SArray *tagJsonArray;

//
SArray *preLineTagKV;
@@ -230,6 +232,8 @@ int64_t smlParseOpenTsdbTime(SSmlHandle *info, const char *data, int32
int32_t smlClearForRerun(SSmlHandle *info);
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
uint8_t smlGetTimestampLen(int64_t num);
void clearColValArray(SArray* pCols);
void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag);

int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);

@@ -28,13 +28,16 @@
#include "trpc.h"
#include "tsched.h"
#include "ttime.h"
#include "thttp.h"

#define TSC_VAR_NOT_RELEASE 1
#define TSC_VAR_RELEASED 0

SAppInfo appInfo;
int64_t lastClusterId = 0;
int32_t clientReqRefPool = -1;
int32_t clientConnRefPool = -1;
int32_t clientStop = 0;

int32_t timestampDeltaLimit = 900; // s

@@ -62,7 +65,10 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {

static void deregisterRequest(SRequestObj *pRequest) {
const static int64_t SLOW_QUERY_INTERVAL = 3000000L; // todo configurable
assert(pRequest != NULL);
if(pRequest == NULL){
tscError("pRequest == NULL");
return;
}

STscObj *pTscObj = pRequest->pTscObj;
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
@@ -390,6 +396,146 @@ void destroyRequest(SRequestObj *pRequest) {
removeRequest(pRequest->self);
}

void taosClientCrash(int signum, void *sigInfo, void *context) {
taosIgnSignal(SIGTERM);
taosIgnSignal(SIGHUP);
taosIgnSignal(SIGINT);
taosIgnSignal(SIGBREAK);

#if !defined(WINDOWS)
taosIgnSignal(SIGBUS);
#endif
taosIgnSignal(SIGABRT);
taosIgnSignal(SIGFPE);
taosIgnSignal(SIGSEGV);

char *pMsg = NULL;
const char *flags = "UTL FATAL ";
ELogLevel level = DEBUG_FATAL;
int32_t dflag = 255;
int64_t msgLen= -1;

if (tsEnableCrashReport) {
if (taosGenCrashJsonMsg(signum, &pMsg, lastClusterId, appInfo.startTime)) {
taosPrintLog(flags, level, dflag, "failed to generate crash json msg");
goto _return;
} else {
msgLen = strlen(pMsg);
}
}

_return:

taosLogCrashInfo("taos", pMsg, msgLen, signum, sigInfo);

exit(signum);
}

void crashReportThreadFuncUnexpectedStopped(void) { atomic_store_32(&clientStop, -1); }

static void *tscCrashReportThreadFp(void *param) {
setThreadName("client-crashReport");
char filepath[PATH_MAX] = {0};
snprintf(filepath, sizeof(filepath), "%s%s.taosCrashLog", tsLogDir, TD_DIRSEP);
char *pMsg = NULL;
int64_t msgLen = 0;
TdFilePtr pFile = NULL;
bool truncateFile = false;
int32_t sleepTime = 200;
int32_t reportPeriodNum = 3600 * 1000 / sleepTime;
int32_t loopTimes = reportPeriodNum;

#ifdef WINDOWS
if (taosCheckCurrentInDll()) {
atexit(crashReportThreadFuncUnexpectedStopped);
}
#endif

while (1) {
if (clientStop) break;
if (loopTimes++ < reportPeriodNum) {
taosMsleep(sleepTime);
continue;
}

taosReadCrashInfo(filepath, &pMsg, &msgLen, &pFile);
if (pMsg && msgLen > 0) {
if (taosSendHttpReport(tsTelemServer, tsClientCrashReportUri, tsTelemPort, pMsg, msgLen, HTTP_FLAT) != 0) {
tscError("failed to send crash report");
if (pFile) {
taosReleaseCrashLogFile(pFile, false);
continue;
}
} else {
tscInfo("succeed to send crash report");
truncateFile = true;
}
} else {
tscDebug("no crash info");
}

taosMemoryFree(pMsg);

if (pMsg && msgLen > 0) {
pMsg = NULL;
continue;
}

if (pFile) {
taosReleaseCrashLogFile(pFile, truncateFile);
truncateFile = false;
}

taosMsleep(sleepTime);
loopTimes = 0;
}

clientStop = -1;
return NULL;
}

int32_t tscCrashReportInit() {
if (!tsEnableCrashReport) {
return 0;
}

TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);
taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
TdThread crashReportThread;
if (taosThreadCreate(&crashReportThread, &thAttr, tscCrashReportThreadFp, NULL) != 0) {
tscError("failed to create crashReport thread since %s", strerror(errno));
return -1;
}

taosThreadAttrDestroy(&thAttr);
return 0;
}

void tscStopCrashReport() {
if (!tsEnableCrashReport) {
return;
}

if (atomic_val_compare_exchange_32(&clientStop, 0, 1)) {
tscDebug("hb thread already stopped");
return;
}

while (atomic_load_32(&clientStop) > 0) {
taosMsleep(100);
}
}

static void tscSetSignalHandle() {
#if !defined(WINDOWS)
taosSetSignal(SIGBUS, taosClientCrash);
#endif
taosSetSignal(SIGABRT, taosClientCrash);
taosSetSignal(SIGFPE, taosClientCrash);
taosSetSignal(SIGSEGV, taosClientCrash);
}

void taos_init_imp(void) {
// In the APIs of other program language, taos_cleanup is not available yet.
// So, to make sure taos_cleanup will be invoked to clean up the allocated resource to suppress the valgrind warning.
@@ -397,6 +543,10 @@ void taos_init_imp(void) {
errno = TSDB_CODE_SUCCESS;
taosSeedRand(taosGetTimestampSec());

appInfo.pid = taosGetPId();
appInfo.startTime = taosGetTimestampMs();
appInfo.pInstMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);

deltaToUtcInitOnce();

if (taosCreateLog("taoslog", 10, configDir, NULL, NULL, NULL, NULL, 1) != 0) {
@@ -409,10 +559,13 @@ void taos_init_imp(void) {
return;
}

tscSetSignalHandle();

initQueryModuleMsgHandle();

if (taosConvInit() != 0) {
ASSERTS(0, "failed to init conv");
tscError("failed to init conv");
return;
}

rpcInit();
@@ -438,9 +591,8 @@ void taos_init_imp(void) {
taosGetAppName(appInfo.appName, NULL);
taosThreadMutexInit(&appInfo.mutex, NULL);

appInfo.pid = taosGetPId();
appInfo.startTime = taosGetTimestampMs();
appInfo.pInstMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
tscCrashReportInit();

tscDebug("client is initialized successfully");
}

@@ -376,7 +376,6 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
desc.subPlanNum = 0;
}
desc.subPlanNum = taosArrayGetSize(desc.subDesc);
ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc));
} else {
desc.subDesc = NULL;
}
@@ -813,7 +812,10 @@ static void hbStopThread() {
}

SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) {
hbMgrInit();
if(hbMgrInit() != 0){
terrno = TSDB_CODE_TSC_INTERNAL_ERROR;
return NULL;
}
SAppHbMgr *pAppHbMgr = taosMemoryMalloc(sizeof(SAppHbMgr));
if (pAppHbMgr == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -899,16 +901,28 @@ int hbMgrInit() {
TdThreadMutexAttr attr = {0};

int ret = taosThreadMutexAttrInit(&attr);
assert(ret == 0);
if(ret != 0){
uError("hbMgrInit:taosThreadMutexAttrInit error")
return ret;
}

ret = taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE);
assert(ret == 0);
if(ret != 0){
uError("hbMgrInit:taosThreadMutexAttrSetType error")
return ret;
}

ret = taosThreadMutexInit(&clientHbMgr.lock, &attr);
assert(ret == 0);
if(ret != 0){
uError("hbMgrInit:taosThreadMutexInit error")
return ret;
}

ret = taosThreadMutexAttrDestroy(&attr);
assert(ret == 0);
if(ret != 0){
uError("hbMgrInit:taosThreadMutexAttrDestroy error")
return ret;
}

// init handle funcs
hbMgrInitHandle();

@@ -452,7 +452,10 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
}

void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols) {
ASSERT(pSchema != NULL && numOfCols > 0);
if(pResInfo == NULL || pSchema == NULL || numOfCols <= 0){
tscError("invalid paras, pResInfo == NULL || pSchema == NULL || numOfCols <= 0");
return;
}

pResInfo->numOfCols = numOfCols;
if (pResInfo->fields != NULL) {
@@ -463,7 +466,10 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
}
pResInfo->fields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
pResInfo->userFields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
ASSERT(numOfCols == pResInfo->numOfCols);
if(numOfCols != pResInfo->numOfCols){
tscError("numOfCols:%d != pResInfo->numOfCols:%d", numOfCols, pResInfo->numOfCols);
return;
}

for (int32_t i = 0; i < pResInfo->numOfCols; ++i) {
pResInfo->fields[i].bytes = pSchema[i].bytes;
@@ -1339,7 +1345,10 @@ int32_t doProcessMsgFromServer(void* param) {
SEpSet* pEpSet = arg->pEpset;

SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle;
assert(pMsg->info.ahandle != NULL);
if(pMsg->info.ahandle == NULL){
tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
STscObj* pTscObj = NULL;

STraceId* trace = &pMsg->info.traceId;
@@ -1352,8 +1361,10 @@ int32_t doProcessMsgFromServer(void* param) {
if (pSendInfo->requestObjRefId != 0) {
SRequestObj* pRequest = (SRequestObj*)taosAcquireRef(clientReqRefPool, pSendInfo->requestObjRefId);
if (pRequest) {
assert(pRequest->self == pSendInfo->requestObjRefId);

if(pRequest->self != pSendInfo->requestObjRefId){
tscError("doProcessMsgFromServer pRequest->self:%"PRId64" != pSendInfo->requestObjRefId:%"PRId64, pRequest->self, pSendInfo->requestObjRefId);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
pRequest->metric.rsp = taosGetTimestampUs();
pTscObj = pRequest->pTscObj;
/*
@@ -1495,7 +1506,9 @@ void doSetOneRowPtr(SReqResultInfo* pResultInfo) {
}

void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
assert(pRequest != NULL);
if(pRequest == NULL){
return NULL;
}

SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) {
@@ -1549,7 +1562,9 @@ static void syncFetchFn(void* param, TAOS_RES* res, int32_t numOfRows) {
}

void* doAsyncFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4) {
assert(pRequest != NULL);
if(pRequest == NULL){
return NULL;
}

SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
if (pResultInfo->pData == NULL || pResultInfo->current >= pResultInfo->numOfRows) {
@@ -1613,8 +1628,10 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
char* pStart = pCol->offset[j] + pCol->pData;

int32_t len = taosUcs4ToMbs((TdUcs4*)varDataVal(pStart), varDataLen(pStart), varDataVal(p));
ASSERT(len <= bytes);
ASSERT((p + len) < (pResultInfo->convertBuf[i] + colLength[i]));
if(len > bytes || (p + len) >= (pResultInfo->convertBuf[i] + colLength[i])){
tscError("doConvertUCS4 error, invalid data. len:%d, bytes:%d, (p + len):%p, (pResultInfo->convertBuf[i] + colLength[i]):%p", len, bytes, (p + len), (pResultInfo->convertBuf[i] + colLength[i]));
return TSDB_CODE_TSC_INTERNAL_ERROR;
}

varDataSetLen(p, len);
pCol->offset[j] = (p - pResultInfo->convertBuf[i]);
@@ -1631,9 +1648,6 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
}

int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
ASSERT(numOfCols == cols);

return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) * 3 + sizeof(uint64_t) +
numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}
@@ -1643,6 +1657,12 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i

// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
// length |
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
if(ASSERT(numOfCols == cols)){
tscError("estimateJsonLen error: numOfCols:%d != cols:%d", numOfCols, cols);
return -1;
}

int32_t len = getVersion1BlockMetaSize(p, numOfCols);
int32_t* colLength = (int32_t*)(p + len);
len += sizeof(int32_t) * numOfCols;
@@ -1676,7 +1696,8 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
} else if (jsonInnerType == TSDB_DATA_TYPE_BOOL) {
len += (VARSTR_HEADER_SIZE + 5);
} else {
ASSERT(0);
tscError("estimateJsonLen error: invalid type:%d", jsonInnerType);
return -1;
}
}
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
@@ -1710,12 +1731,21 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int

char* p = (char*)pResultInfo->pData;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
if(dataLen <= 0){
return TSDB_CODE_TSC_INTERNAL_ERROR;
}

pResultInfo->convertJson = taosMemoryCalloc(1, dataLen);
if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
char* p1 = pResultInfo->convertJson;

int32_t totalLen = 0;
int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
if(ASSERT(numOfCols == cols)){
tscError("doConvertJson error: numOfCols:%d != cols:%d", numOfCols, cols);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}

int32_t len = getVersion1BlockMetaSize(p, numOfCols);
memcpy(p1, p, len);

@@ -1736,8 +1766,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t colLen = htonl(colLength[i]);
int32_t colLen1 = htonl(colLength1[i]);
ASSERT(colLen < dataLen);

if(ASSERT(colLen < dataLen)){
tscError("doConvertJson error: colLen:%d >= dataLen:%d", colLen, dataLen);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
int32_t* offset = (int32_t*)pStart;
int32_t* offset1 = (int32_t*)pStart1;
@@ -1782,7 +1814,8 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
sprintf(varDataVal(dst), "%s", (*((char*)jsonInnerData) == 1) ? "true" : "false");
varDataSetLen(dst, strlen(varDataVal(dst)));
} else {
ASSERT(0);
tscError("doConvertJson error: invalid type:%d", jsonInnerType);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}

offset1[j] = len;
@@ -1820,7 +1853,10 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int

int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows,
bool convertUcs4) {
assert(numOfCols > 0 && pFields != NULL && pResultInfo != NULL);
if(ASSERT(numOfCols > 0 && pFields != NULL && pResultInfo != NULL)){
tscError("setResultDataPtr paras error");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
if (numOfRows == 0) {
return TSDB_CODE_SUCCESS;
}
@@ -1849,7 +1885,10 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
int32_t cols = *(int32_t*)p;
p += sizeof(int32_t);

ASSERT(rows == numOfRows && cols == numOfCols);
if(ASSERT(rows == numOfRows && cols == numOfCols)){
tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, numOfCols);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}

int32_t hasColumnSeg = *(int32_t*)p;
p += sizeof(int32_t);
@@ -1876,7 +1915,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
colLength[i] = htonl(colLength[i]);
if (colLength[i] >= dataLen) {
tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen);
ASSERT(0);
return TSDB_CODE_TSC_INTERNAL_ERROR;
}

if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
@@ -1914,7 +1953,11 @@ char* getDbOfConnection(STscObj* pObj) {
}

void setConnectionDB(STscObj* pTscObj, const char* db) {
assert(db != NULL && pTscObj != NULL);
if(db == NULL || pTscObj == NULL){
tscError("setConnectionDB para is NULL");
return;
}

taosThreadMutexLock(&pTscObj->mutex);
tstrncpy(pTscObj->db, db, tListLen(pTscObj->db));
taosThreadMutexUnlock(&pTscObj->mutex);
@@ -1932,7 +1975,10 @@ void resetConnectDB(STscObj* pTscObj) {

int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4,
bool freeAfterUse) {
assert(pResultInfo != NULL && pRsp != NULL);
if(pResultInfo == NULL || pRsp == NULL){
tscError("setQueryResultFromRsp paras is null");
return TSDB_CODE_TSC_INTERNAL_ERROR;
}

if (freeAfterUse) taosMemoryFreeClear(pResultInfo->pRspMsg);

@@ -574,7 +574,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNI
|
|||
TAOS_RES *tres = (TAOS_RES *)res;
|
||||
|
||||
int32_t numOfFields = taos_num_fields(tres);
|
||||
assert(numOfFields > 0);
|
||||
if(numOfFields <= 0){
|
||||
jniError("jobj:%p, conn:%p, query interrupted. taos_num_fields error code:%d, msg:%s", jobj, tscon, numOfFields,
|
||||
taos_errstr(tres));
|
||||
return JNI_RESULT_SET_NULL;
|
||||
}
|
||||
|
||||
void *data;
|
||||
int32_t numOfRows;
|
||||
|
|
|
@ -55,6 +55,8 @@ void taos_cleanup(void) {
|
|||
return;
|
||||
}
|
||||
|
||||
tscStopCrashReport();
|
||||
|
||||
int32_t id = clientReqRefPool;
|
||||
clientReqRefPool = -1;
|
||||
taosCloseRef(id);
|
||||
|
@ -291,7 +293,6 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
|
|||
tscError("invalid result passed to taos_fetch_row");
|
||||
return NULL;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
|
||||
|
@ -355,9 +356,13 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
|||
case TSDB_DATA_TYPE_NCHAR: {
|
||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||
if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
|
||||
assert(charLen <= fields[i].bytes && charLen >= 0);
|
||||
if(ASSERT(charLen <= fields[i].bytes && charLen >= 0)){
|
||||
tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
|
||||
}
|
||||
} else {
|
||||
assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0);
|
||||
if(ASSERT(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0)){
|
||||
tscError("taos_print_row error. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(str + len, row[i], charLen);
|
||||
|
@ -577,7 +582,7 @@ int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows) {
|
|||
(*numOfRows) = pResultInfo->numOfRows;
|
||||
return 0;
|
||||
} else {
|
||||
ASSERT(0);
|
||||
tscError("taos_fetch_block_s invalid res type");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
@ -802,7 +807,7 @@ static void doAsyncQueryFromParse(SMetaData *pResultMeta, void *param, int32_t c
|
|||
tstrerror(code));
|
||||
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
//pWrapper->pCatalogReq->forceUpdate = false;
|
||||
// pWrapper->pCatalogReq->forceUpdate = false;
|
||||
code = qContinueParseSql(pWrapper->pParseCtx, pWrapper->pCatalogReq, pResultMeta, pQuery);
|
||||
}
|
||||
|
||||
|
@ -831,8 +836,8 @@ void continueInsertFromCsv(SSqlCallbackWrapper *pWrapper, SRequestObj *pRequest)
|
|||
tstrerror(code), pWrapper->pRequest->requestId);
|
||||
destorySqlCallbackWrapper(pWrapper);
|
||||
terrno = code;
|
||||
pWrapper->pRequest->code = code;
|
||||
pWrapper->pRequest->body.queryFp(pWrapper->pRequest->body.param, pWrapper->pRequest, code);
|
||||
pRequest->code = code;
|
||||
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1000,8 +1005,14 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
|
|||
}
|
||||
|
||||
void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
||||
ASSERT(res != NULL && fp != NULL);
|
||||
ASSERT(TD_RES_QUERY(res));
|
||||
if(ASSERT(res != NULL && fp != NULL)){
|
||||
tscError("taos_fetch_rows_a invalid paras");
|
||||
return;
|
||||
}
|
||||
if(ASSERT(TD_RES_QUERY(res))){
|
||||
tscError("taos_fetch_rows_a res is NULL");
|
||||
return;
|
||||
}
|
||||
|
||||
SRequestObj *pRequest = res;
|
||||
pRequest->body.fetchFp = fp;
|
||||
|
@ -1044,9 +1055,14 @@ void taos_fetch_rows_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
|||
}
|
||||
|
||||
void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
||||
ASSERT(res != NULL && fp != NULL);
|
||||
ASSERT(TD_RES_QUERY(res));
|
||||
|
||||
if(ASSERT(res != NULL && fp != NULL)){
|
||||
tscError("taos_fetch_rows_a invalid paras");
|
||||
return;
|
||||
}
|
||||
if(ASSERT(TD_RES_QUERY(res))){
|
||||
tscError("taos_fetch_rows_a res is NULL");
|
||||
return;
|
||||
}
|
||||
SRequestObj *pRequest = res;
|
||||
SReqResultInfo *pResultInfo = &pRequest->body.resInfo;
|
||||
|
||||
|
@ -1058,8 +1074,14 @@ void taos_fetch_raw_block_a(TAOS_RES *res, __taos_async_fn_t fp, void *param) {
|
|||
}
|
||||
|
||||
const void *taos_get_raw_block(TAOS_RES *res) {
|
||||
ASSERT(res != NULL);
|
||||
ASSERT(TD_RES_QUERY(res));
|
||||
if(ASSERT(res != NULL)){
|
||||
tscError("taos_fetch_rows_a invalid paras");
|
||||
return NULL;
|
||||
}
|
||||
if(ASSERT(TD_RES_QUERY(res))){
|
||||
tscError("taos_fetch_rows_a res is NULL");
|
||||
return NULL;
|
||||
}
|
||||
SRequestObj *pRequest = res;
|
||||
|
||||
return pRequest->body.resInfo.pData;
|
||||
|
@ -1167,6 +1189,54 @@ _return:
|
|||
return code;
|
||||
}
|
||||
|
||||
int taos_get_tables_vgId(TAOS *taos, const char *db, const char *table[], int tableNum, int *vgId) {
|
||||
if (NULL == taos) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
if (NULL == db || NULL == table || NULL == vgId || tableNum <= 0) {
|
||||
tscError("invalid input param, db:%p, table:%p, vgId:%p, tbNum:%d", db, table, vgId, tableNum);
|
||||
terrno = TSDB_CODE_TSC_INVALID_INPUT;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
int64_t connId = *(int64_t *)taos;
|
||||
SRequestObj *pRequest = NULL;
|
||||
char *sql = "taos_get_table_vgId";
|
||||
int32_t code = buildRequest(connId, sql, strlen(sql), NULL, false, &pRequest, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
pRequest->syncQuery = true;
|
||||
|
||||
STscObj *pTscObj = pRequest->pTscObj;
|
||||
SCatalog *pCtg = NULL;
|
||||
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCtg);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _return;
|
||||
}
|
||||
|
||||
SRequestConnInfo conn = {
|
||||
.pTrans = pTscObj->pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self};
|
||||
|
||||
conn.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
|
||||
|
||||
code = catalogGetTablesHashVgId(pCtg, &conn, pTscObj->acctId, db, table, tableNum, vgId);
|
||||
if (code) {
|
||||
goto _return;
|
||||
}
|
||||
|
||||
_return:
|
||||
|
||||
terrno = code;
|
||||
|
||||
destroyRequest(pRequest);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
||||
int taos_load_table_info(TAOS *taos, const char *tableNameList) {
|
||||
if (NULL == taos) {
|
||||
terrno = TSDB_CODE_TSC_DISCONNECTED;
|
||||
|
@ -1331,8 +1401,7 @@ int taos_stmt_get_col_fields(TAOS_STMT *stmt, int *fieldNum, TAOS_FIELD_E **fiel
|
|||
}
|
||||
|
||||
// let stmt to reclaim TAOS_FIELD_E that was allocated by `taos_stmt_get_tag_fields`/`taos_stmt_get_col_fields`
|
||||
void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields)
|
||||
{
|
||||
void taos_stmt_reclaim_fields(TAOS_STMT *stmt, TAOS_FIELD_E *fields) {
|
||||
(void)stmt;
|
||||
if (!fields) return;
|
||||
taosMemoryFree(fields);
|
||||
|
|
|
@ -119,6 +119,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
|
||||
// update the appInstInfo
|
||||
pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
|
||||
lastClusterId = connectRsp.clusterId;
|
||||
|
||||
pTscObj->connType = connectRsp.connType;
|
||||
|
||||
|
@ -149,7 +150,6 @@ SMsgSendInfo* buildMsgInfoImpl(SRequestObj* pRequest) {
|
|||
pMsgSendInfo->msgType = pRequest->type;
|
||||
pMsgSendInfo->target.type = TARGET_TYPE_MNODE;
|
||||
|
||||
assert(pRequest != NULL);
|
||||
pMsgSendInfo->msgInfo = pRequest->body.requestMsg;
|
||||
pMsgSendInfo->fp = getMsgRspHandle(pRequest->type);
|
||||
return pMsgSendInfo;
|
||||
|
@ -273,7 +273,9 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
|
|||
}
|
||||
|
||||
int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
|
||||
assert(pMsg != NULL && param != NULL);
|
||||
if(pMsg == NULL || param == NULL){
|
||||
return TSDB_CODE_TSC_INVALID_INPUT;
|
||||
}
|
||||
SRequestObj* pRequest = param;
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -454,7 +456,10 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
|
|||
(*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);
|
||||
|
||||
int32_t len = blockEncode(pBlock, (*pRsp)->data, SHOW_VARIABLES_RESULT_COLS);
|
||||
ASSERT(len == rspSize - sizeof(SRetrieveTableRsp));
|
||||
if(len != rspSize - sizeof(SRetrieveTableRsp)){
|
||||
uError("buildShowVariablesRsp error, len:%d != rspSize - sizeof(SRetrieveTableRsp):%" PRIu64, len, (uint64_t) (rspSize - sizeof(SRetrieveTableRsp)));
|
||||
return TSDB_CODE_TSC_INVALID_INPUT;
|
||||
}
|
||||
|
||||
blockDataDestroy(pBlock);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
|
|
@ -373,7 +373,10 @@ static char* processCreateTable(SMqMetaRsp* metaRsp) {
|
|||
}
|
||||
|
||||
static char* processAutoCreateTable(STaosxRsp* rsp) {
|
||||
ASSERT(rsp->createTableNum != 0);
|
||||
if(rsp->createTableNum <= 0){
|
||||
uError("WriteRaw:processAutoCreateTable rsp->createTableNum <= 0");
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
SDecoder* decoder = taosMemoryCalloc(rsp->createTableNum, sizeof(SDecoder));
|
||||
SVCreateTbReq* pCreateReq = taosMemoryCalloc(rsp->createTableNum, sizeof(SVCreateTbReq));
|
||||
|
@ -389,7 +392,10 @@ static char* processAutoCreateTable(STaosxRsp* rsp) {
|
|||
goto _exit;
|
||||
}
|
||||
|
||||
ASSERT(pCreateReq[iReq].type == TSDB_CHILD_TABLE);
|
||||
if(pCreateReq[iReq].type != TSDB_CHILD_TABLE){
|
||||
uError("WriteRaw:processAutoCreateTable pCreateReq[iReq].type != TSDB_CHILD_TABLE");
|
||||
goto _exit;
|
||||
}
|
||||
}
|
||||
string = buildCreateCTableJson(pCreateReq, rsp->createTableNum);
|
||||
|
||||
|
@ -494,7 +500,10 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
|||
char* buf = NULL;
|
||||
|
||||
if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
|
||||
ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true);
|
||||
if(!tTagIsJson(vAlterTbReq.pTagVal)){
|
||||
uError("processAlterTable isJson false");
|
||||
goto _exit;
|
||||
}
|
||||
buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
|
||||
} else {
|
||||
buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
|
||||
|
@ -1610,7 +1619,11 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
goto end;
|
||||
}
|
||||
|
||||
ASSERT(pCreateReq.type == TSDB_CHILD_TABLE);
|
||||
if(pCreateReq.type != TSDB_CHILD_TABLE){
|
||||
uError("WriteRaw:pCreateReq.type != TSDB_CHILD_TABLE. table name: %s", tbName);
|
||||
code = TSDB_CODE_TSC_INVALID_VALUE;
|
||||
goto end;
|
||||
}
|
||||
if (strcmp(tbName, pCreateReq.name) == 0) {
|
||||
strcpy(pName.tname, pCreateReq.ctb.stbName);
|
||||
tDecoderClear(&decoderTmp);
|
||||
|
|
|
@ -186,13 +186,10 @@ int32_t smlSetCTableName(SSmlTableInfo *oneTable) {
|
|||
|
||||
if (strlen(oneTable->childTableName) == 0) {
|
||||
SArray *dst = taosArrayDup(oneTable->tags, NULL);
|
||||
RandTableName rName = {dst, oneTable->sTableName, (uint8_t)oneTable->sTableNameLen, oneTable->childTableName, 0};
|
||||
RandTableName rName = {dst, oneTable->sTableName, (uint8_t)oneTable->sTableNameLen, oneTable->childTableName};
|
||||
|
||||
buildChildTableName(&rName);
|
||||
taosArrayDestroy(dst);
|
||||
oneTable->uid = rName.uid;
|
||||
} else {
|
||||
oneTable->uid = *(uint64_t *)(oneTable->childTableName);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -523,7 +520,10 @@ STableMeta *smlGetMeta(SSmlHandle *info, const void *measure, int32_t measureLen
|
|||
memset(pName.tname, 0, TSDB_TABLE_NAME_LEN);
|
||||
memcpy(pName.tname, measure, measureLen);
|
||||
|
||||
catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
|
||||
int32_t code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
|
||||
if(code != TSDB_CODE_SUCCESS){
|
||||
return NULL;
|
||||
}
|
||||
return pTableMeta;
|
||||
}
|
||||
|
||||
|
@ -996,7 +996,10 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols
|
|||
}
|
||||
} else {
|
||||
size_t tmp = taosArrayGetSize(metaArray);
|
||||
ASSERT(tmp <= INT16_MAX);
|
||||
if(tmp > INT16_MAX){
|
||||
uError("too many cols or tags");
|
||||
return -1;
|
||||
}
|
||||
int16_t size = tmp;
|
||||
int ret = taosHashPut(metaHash, kv->key, kv->keyLen, &size, SHORT_BYTES);
|
||||
if (ret == 0) {
|
||||
|
@ -1008,7 +1011,7 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag) {
|
||||
void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag) {
|
||||
for (size_t i = 0; i < taosArrayGetSize(tag->cols); i++) {
|
||||
SHashObj *kvHash = (SHashObj *)taosArrayGetP(tag->cols, i);
|
||||
taosHashCleanup(kvHash);
|
||||
|
@ -1024,6 +1027,16 @@ static void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag) {
|
|||
taosMemoryFree(tag);
|
||||
}
|
||||
|
||||
void clearColValArray(SArray* pCols) {
|
||||
int32_t num = taosArrayGetSize(pCols);
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SColVal* pCol = taosArrayGet(pCols, i);
|
||||
if (TSDB_DATA_TYPE_NCHAR == pCol->type) {
|
||||
taosMemoryFreeClear(pCol->value.pData);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void smlDestroyInfo(SSmlHandle *info) {
|
||||
if (!info) return;
|
||||
qDestroyQuery(info->pQuery);
|
||||
|
@ -1053,6 +1066,12 @@ void smlDestroyInfo(SSmlHandle *info) {
|
|||
// destroy info->pVgHash
|
||||
taosHashCleanup(info->pVgHash);
|
||||
|
||||
for(int i = 0; i< taosArrayGetSize(info->tagJsonArray); i++){
|
||||
cJSON *tags = (cJSON *)taosArrayGetP(info->tagJsonArray, i);
|
||||
cJSON_Delete(tags);
|
||||
}
|
||||
taosArrayDestroy(info->tagJsonArray);
|
||||
|
||||
taosArrayDestroy(info->preLineTagKV);
|
||||
taosArrayDestroy(info->maxTagKVs);
|
||||
taosArrayDestroy(info->preLineColKV);
|
||||
|
@ -1067,6 +1086,7 @@ void smlDestroyInfo(SSmlHandle *info) {
|
|||
taosMemoryFree(info->lines);
|
||||
}
|
||||
|
||||
cJSON_Delete(info->root);
|
||||
taosMemoryFreeClear(info);
|
||||
}
|
||||
|
||||
|
@ -1090,6 +1110,7 @@ SSmlHandle *smlBuildSmlInfo(TAOS *taos) {
|
|||
info->pQuery = smlInitHandle();
|
||||
info->dataFormat = true;
|
||||
|
||||
info->tagJsonArray = taosArrayInit(8, POINTER_BYTES);
|
||||
info->preLineTagKV = taosArrayInit(8, sizeof(SSmlKv));
|
||||
info->maxTagKVs = taosArrayInit(8, sizeof(SSmlKv));
|
||||
info->preLineColKV = taosArrayInit(8, sizeof(SSmlKv));
|
||||
|
@ -1211,7 +1232,10 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
|
||||
SSmlSTableMeta *pMeta =
|
||||
(SSmlSTableMeta *)nodeListGet(info->superTables, tableData->sTableName, tableData->sTableNameLen, NULL);
|
||||
ASSERT(NULL != pMeta);
|
||||
if(unlikely(NULL == pMeta || NULL == pMeta->tableMeta)){
|
||||
uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName);
|
||||
return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
// use tablemeta of stable to save vgid and uid of child table
|
||||
pMeta->tableMeta->vgId = vg.vgId;
|
||||
|
@ -1347,9 +1371,8 @@ static int32_t smlParseLine(SSmlHandle *info, char *lines[], char *rawLine, char
|
|||
} else {
|
||||
code = smlParseTelnetString(info, (char *)tmp, (char *)tmp + len, info->lines + i);
|
||||
}
|
||||
|
||||
} else {
|
||||
ASSERT(0);
|
||||
code = TSDB_CODE_SML_INVALID_PROTOCOL_TYPE;
|
||||
}
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("SML:0x%" PRIx64 " smlParseLine failed. line %d : %s", info->id, i, tmp);
|
||||
|
|
|
@ -143,7 +143,10 @@ while(*(start)){\
|
|||
// if(unlikely(kv.length > preKV->length)){
|
||||
// preKV->length = kv.length;
|
||||
// SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
|
||||
// ASSERT(tableMeta != NULL);
|
||||
// if(unlikely(NULL == tableMeta)){
|
||||
// uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
|
||||
// return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
// }
|
||||
//
|
||||
// SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
|
||||
// oldKV->length = kv.length;
|
||||
|
@ -335,6 +338,9 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset){
|
|||
(*start)++;
|
||||
}
|
||||
}
|
||||
if(*(*start) == '\0'){
|
||||
break;
|
||||
}
|
||||
if(*(*start) == '}'){
|
||||
(*start)++;
|
||||
break;
|
||||
|
@ -655,14 +661,14 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo
|
|||
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
|
||||
|
||||
if(unlikely(sMeta == NULL)){
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
if(pTableMeta == NULL){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
nodeListSet(&info->superTables, elements->measure, elements->measureLen, sMeta, NULL);
|
||||
}
|
||||
info->currSTableMeta = sMeta->tableMeta;
|
||||
|
@ -720,7 +726,10 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo
|
|||
if(unlikely(kv.length > maxKV->length)){
|
||||
maxKV->length = kv.length;
|
||||
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
|
||||
ASSERT(tableMeta != NULL);
|
||||
if(unlikely(NULL == tableMeta)){
|
||||
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
|
||||
return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
|
||||
oldKV->length = kv.length;
|
||||
|
@ -772,11 +781,13 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo
|
|||
tinfo->tags = taosArrayDup(preLineKV, NULL);
|
||||
|
||||
smlSetCTableName(tinfo);
|
||||
tinfo->uid = info->uid++;
|
||||
if (info->dataFormat) {
|
||||
info->currSTableMeta->uid = tinfo->uid;
|
||||
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
|
||||
if (tinfo->tableDataCtx == NULL) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
|
||||
smlDestroyTableInfo(info, tinfo);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
}
|
||||
|
@ -923,9 +934,6 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
|
|||
cJSON *tsJson = NULL;
|
||||
cJSON *valueJson = NULL;
|
||||
cJSON *tagsJson = NULL;
|
||||
char* rootStr = cJSON_PrintUnformatted(root);
|
||||
uError("rootStr:%s", rootStr);
|
||||
taosMemoryFree(rootStr);
|
||||
|
||||
int32_t size = cJSON_GetArraySize(root);
|
||||
// outmost json fields has to be exactly 4
|
||||
|
@ -956,6 +964,7 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
|
|||
}
|
||||
|
||||
// Parse tags
|
||||
bool needFree = info->dataFormat;
|
||||
elements->tags = cJSON_PrintUnformatted(tagsJson);
|
||||
elements->tagsLen = strlen(elements->tags);
|
||||
if(is_same_child_table_telnet(elements, &info->preLine) != 0) {
|
||||
|
@ -968,7 +977,7 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
|
|||
}
|
||||
}
|
||||
|
||||
if(info->dataFormat){
|
||||
if(needFree){
|
||||
taosMemoryFree(elements->tags);
|
||||
elements->tags = NULL;
|
||||
}
|
||||
|
@ -994,6 +1003,7 @@ static int32_t smlParseJSONStringExt(SSmlHandle *info, cJSON *root, SSmlLineInfo
|
|||
if(ret == TSDB_CODE_SUCCESS){
|
||||
ret = smlBuildRow(info->currTableDataCtx);
|
||||
}
|
||||
clearColValArray(info->currTableDataCtx->pValues);
|
||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||
return ret;
|
||||
|
@ -1095,6 +1105,11 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *
|
|||
|
||||
if(unlikely(**start == '\0' && elements->measure == NULL)) return TSDB_CODE_SUCCESS;
|
||||
|
||||
if (unlikely(IS_INVALID_TABLE_LEN(elements->measureLen))) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "measure is empty or too large than 192", NULL);
|
||||
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
|
||||
}
|
||||
|
||||
SSmlKv kv = {.key = VALUE, .keyLen = VALUE_LEN, .value = elements->cols, .length = (size_t)elements->colsLen};
|
||||
if (elements->colsLen == 0 || smlParseValue(&kv, &info->msgBuf) != TSDB_CODE_SUCCESS) {
|
||||
uError("SML:cols invalidate:%s", elements->cols);
|
||||
|
@ -1112,8 +1127,8 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *
|
|||
return TSDB_CODE_TSC_INVALID_JSON;
|
||||
}
|
||||
|
||||
taosArrayPush(info->tagJsonArray, &tagsJson);
|
||||
ret = smlParseTagsFromJSON(info, tagsJson, elements);
|
||||
cJSON_free(tagsJson);
|
||||
if (unlikely(ret)) {
|
||||
uError("OTD:0x%" PRIx64 " Unable to parse tags from JSON payload", info->id);
|
||||
return ret;
|
||||
|
@ -1141,6 +1156,7 @@ static int32_t smlParseJSONString(SSmlHandle *info, char **start, SSmlLineInfo *
|
|||
if(ret == TSDB_CODE_SUCCESS){
|
||||
ret = smlBuildRow(info->currTableDataCtx);
|
||||
}
|
||||
clearColValArray(info->currTableDataCtx->pValues);
|
||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||
return ret;
|
||||
|
|
|
@ -151,14 +151,14 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
|
|||
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
|
||||
|
||||
if(unlikely(sMeta == NULL)){
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
if(pTableMeta == NULL){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
nodeListSet(&info->superTables, currElement->measure, currElement->measureLen, sMeta, NULL);
|
||||
}
|
||||
info->currSTableMeta = sMeta->tableMeta;
|
||||
|
@ -259,7 +259,10 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
|
|||
if(unlikely(kv.length > maxKV->length)){
|
||||
maxKV->length = kv.length;
|
||||
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
|
||||
ASSERT(tableMeta != NULL);
|
||||
if(unlikely(NULL == tableMeta)){
|
||||
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
|
||||
return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
|
||||
oldKV->length = kv.length;
|
||||
|
@ -319,6 +322,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd,
|
|||
tinfo->tags = taosArrayDup(preLineKV, NULL);
|
||||
|
||||
smlSetCTableName(tinfo);
|
||||
tinfo->uid = info->uid++;
|
||||
if(info->dataFormat) {
|
||||
info->currSTableMeta->uid = tinfo->uid;
|
||||
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
|
||||
|
@ -353,14 +357,14 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
|
|||
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
|
||||
|
||||
if(unlikely(sMeta == NULL)){
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
STableMeta * pTableMeta = smlGetMeta(info, currElement->measure, currElement->measureLen);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
if(pTableMeta == NULL){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
nodeListSet(&info->superTables, currElement->measure, currElement->measureLen, sMeta, NULL);
|
||||
}
|
||||
info->currSTableMeta = sMeta->tableMeta;
|
||||
|
@ -484,7 +488,10 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd,
|
|||
if(unlikely(IS_VAR_DATA_TYPE(kv.type) && kv.length > maxKV->length)){
|
||||
maxKV->length = kv.length;
|
||||
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, currElement->measure, currElement->measureLen, NULL);
|
||||
ASSERT(tableMeta != NULL);
|
||||
if(unlikely(NULL == tableMeta)){
|
||||
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
|
||||
return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->cols, cnt);
|
||||
oldKV->length = kv.length;
|
||||
|
@ -646,6 +653,7 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
|
|||
if(info->dataFormat){
|
||||
smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0);
|
||||
smlBuildRow(info->currTableDataCtx);
|
||||
clearColValArray(info->currTableDataCtx->pValues);
|
||||
}else{
|
||||
taosArraySet(elements->colArray, 0, &kv);
|
||||
}
|
||||
|
|
|
@ -86,14 +86,14 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
|
|||
SSmlSTableMeta *sMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
|
||||
|
||||
if(unlikely(sMeta == NULL)){
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
STableMeta * pTableMeta = smlGetMeta(info, elements->measure, elements->measureLen);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
if(pTableMeta == NULL){
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
sMeta = smlBuildSTableMeta(info->dataFormat);
|
||||
sMeta->tableMeta = pTableMeta;
|
||||
nodeListSet(&info->superTables, elements->measure, elements->measureLen, sMeta, NULL);
|
||||
}
|
||||
info->currSTableMeta = sMeta->tableMeta;
|
||||
|
@ -184,7 +184,10 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
|
|||
if(unlikely(kv.length > maxKV->length)){
|
||||
maxKV->length = kv.length;
|
||||
SSmlSTableMeta *tableMeta = (SSmlSTableMeta *)nodeListGet(info->superTables, elements->measure, elements->measureLen, NULL);
|
||||
ASSERT(tableMeta != NULL);
|
||||
if(unlikely(NULL == tableMeta)){
|
||||
uError("SML:0x%" PRIx64 " NULL == tableMeta", info->id);
|
||||
return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
SSmlKv *oldKV = (SSmlKv *)taosArrayGet(tableMeta->tags, cnt);
|
||||
oldKV->length = kv.length;
|
||||
|
@ -235,11 +238,13 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
|
|||
tinfo->tags = taosArrayDup(preLineKV, NULL);
|
||||
|
||||
smlSetCTableName(tinfo);
|
||||
tinfo->uid = info->uid++;
|
||||
if (info->dataFormat) {
|
||||
info->currSTableMeta->uid = tinfo->uid;
|
||||
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
|
||||
if (tinfo->tableDataCtx == NULL) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
|
||||
smlDestroyTableInfo(info, tinfo);
|
||||
return TSDB_CODE_SML_INVALID_DATA;
|
||||
}
|
||||
}
|
||||
|
@ -324,6 +329,7 @@ int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
|
|||
if(ret == TSDB_CODE_SUCCESS){
|
||||
ret = smlBuildRow(info->currTableDataCtx);
|
||||
}
|
||||
clearColValArray(info->currTableDataCtx->pValues);
|
||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||
return ret;
|
||||
|
|
|
@ -296,7 +296,6 @@ int32_t stmtCleanExecInfo(STscStmt* pStmt, bool keepTable, bool deepClean) {
|
|||
STableMeta* pMeta = qGetTableMetaInDataBlock(pBlocks);
|
||||
|
||||
if (keepTable && pBlocks == pStmt->exec.pCurrBlock) {
|
||||
ASSERT(NULL == pBlocks->pData);
|
||||
TSWAP(pBlocks->pData, pStmt->exec.pCurrTbData);
|
||||
STMT_ERR_RET(qResetStmtDataBlock(pBlocks, false));
|
||||
|
||||
|
@ -394,7 +393,10 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
|
|||
|
||||
if (NULL == pStmt->sql.pTableCache || taosHashGetSize(pStmt->sql.pTableCache) <= 0) {
|
||||
if (pStmt->bInfo.inExecCache) {
|
||||
ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1);
|
||||
if(ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1)){
|
||||
tscError("stmtGetFromCache error");
|
||||
return TSDB_CODE_TSC_STMT_CACHE_ERROR;
|
||||
}
|
||||
pStmt->bInfo.needParse = false;
|
||||
tscDebug("reuse stmt block for tb %s in execBlock", pStmt->bInfo.tbFName);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
|
|
@ -418,7 +418,10 @@ int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
|
|||
|
||||
static void tmqCommitRspCountDown(SMqCommitCbParamSet* pParamSet) {
|
||||
int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1);
|
||||
ASSERT(waitingRspNum >= 0);
|
||||
if(ASSERT(waitingRspNum >= 0)){
|
||||
tscError("tmqCommitRspCountDown error:%d", waitingRspNum);
|
||||
return;
|
||||
}
|
||||
if (waitingRspNum == 0) {
|
||||
tmqCommitDone(pParamSet);
|
||||
}
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
#define MALLOC_ALIGN_BYTES 32
|
||||
|
||||
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
|
||||
ASSERT(pColumnInfoData != NULL);
|
||||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
return pColumnInfoData->varmeta.length;
|
||||
} else {
|
||||
|
@ -66,8 +65,6 @@ int32_t getJsonValueLen(const char* data) {
|
|||
}
|
||||
|
||||
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull) {
|
||||
ASSERT(pColumnInfoData != NULL);
|
||||
|
||||
if (isNull) {
|
||||
// There is a placehold for each NULL value of binary or nchar type.
|
||||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
|
@ -112,7 +109,7 @@ int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, con
|
|||
uint32_t len = pColumnInfoData->varmeta.length;
|
||||
pColumnInfoData->varmeta.offset[currentRow] = len;
|
||||
|
||||
memcpy(pColumnInfoData->pData + len, pData, dataLen);
|
||||
memmove(pColumnInfoData->pData + len, pData, dataLen);
|
||||
pColumnInfoData->varmeta.length += dataLen;
|
||||
} else {
|
||||
memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * currentRow, pData, pColumnInfoData->info.bytes);
|
||||
|
@ -178,8 +175,6 @@ static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t curren
|
|||
|
||||
int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
|
||||
uint32_t numOfRows) {
|
||||
ASSERT(pData != NULL && pColumnInfoData != NULL);
|
||||
|
||||
int32_t len = pColumnInfoData->info.bytes;
|
||||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
len = varDataTLen(pData);
|
||||
|
@ -237,7 +232,10 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c
|
|||
|
||||
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
|
||||
const SColumnInfoData* pSource, int32_t numOfRow2) {
|
||||
ASSERT(pColumnInfoData != NULL && pSource != NULL && pColumnInfoData->info.type == pSource->info.type);
|
||||
if (pColumnInfoData->info.type != pSource->info.type) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
if (numOfRow2 == 0) {
|
||||
return numOfRow1;
|
||||
}
|
||||
|
@ -317,13 +315,13 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
|
|||
|
||||
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
|
||||
const SDataBlockInfo* pBlockInfo) {
|
||||
ASSERT(pColumnInfoData != NULL && pSource != NULL && pColumnInfoData->info.type == pSource->info.type);
|
||||
if (numOfRows <= 0) {
|
||||
return numOfRows;
|
||||
if (pColumnInfoData->info.type != pSource->info.type ||
|
||||
(pBlockInfo != NULL && pBlockInfo->capacity < numOfRows)) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
if (pBlockInfo != NULL) {
|
||||
ASSERT(pBlockInfo->capacity >= numOfRows);
|
||||
if (numOfRows <= 0) {
|
||||
return numOfRows;
|
||||
}
|
||||
|
||||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
|
@ -359,14 +357,14 @@ size_t blockDataGetNumOfCols(const SSDataBlock* pBlock) { return taosArrayGetSiz
|
|||
size_t blockDataGetNumOfRows(const SSDataBlock* pBlock) { return pBlock->info.rows; }
|
||||
|
||||
int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex) {
|
||||
if (pDataBlock->info.rows > 0) {
|
||||
// ASSERT(pDataBlock->info.dataLoad == 1);
|
||||
}
|
||||
|
||||
if (pDataBlock == NULL || pDataBlock->info.rows <= 0 || pDataBlock->info.dataLoad == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (pDataBlock->info.rows > 0) {
|
||||
// ASSERT(pDataBlock->info.dataLoad == 1);
|
||||
}
|
||||
|
||||
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
if (numOfCols <= 0) {
|
||||
return -1;
|
||||
|
@ -389,7 +387,6 @@ int32_t blockDataUpdateTsWindow(SSDataBlock* pDataBlock, int32_t tsColumnIndex)
|
|||
}
|
||||
|
||||
int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc) {
|
||||
assert(pSrc != NULL && pDest != NULL);
|
||||
int32_t capacity = pDest->info.capacity;
|
||||
|
||||
size_t numOfCols = taosArrayGetSize(pDest->pDataBlock);
|
||||
|
@ -407,8 +404,6 @@ int32_t blockDataMerge(SSDataBlock* pDest, const SSDataBlock* pSrc) {
|
|||
}
|
||||
|
||||
size_t blockDataGetSize(const SSDataBlock* pBlock) {
|
||||
assert(pBlock != NULL);
|
||||
|
||||
size_t total = 0;
|
||||
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
|
@ -423,8 +418,6 @@ size_t blockDataGetSize(const SSDataBlock* pBlock) {
|
|||
// Actual data rows pluses the corresponding meta data must fit in one memory buffer of the given page size.
|
||||
int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startIndex, int32_t* stopIndex,
|
||||
int32_t pageSize) {
|
||||
ASSERT(pBlock != NULL && stopIndex != NULL);
|
||||
|
||||
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
int32_t numOfRows = pBlock->info.rows;
|
||||
|
||||
|
@ -438,7 +431,9 @@ int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startInd
|
|||
if (!hasVarCol) {
|
||||
size_t rowSize = blockDataGetRowSize(pBlock);
|
||||
int32_t capacity = payloadSize / (rowSize + numOfCols * bitmapChar / 8.0);
|
||||
ASSERT(capacity > 0);
|
||||
if (capacity <= 0) {
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
*stopIndex = startIndex + capacity - 1;
|
||||
if (*stopIndex >= numOfRows) {
|
||||
|
@ -470,7 +465,9 @@ int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startInd
|
|||
|
||||
if (size > pageSize) { // pageSize must be able to hold one row
*stopIndex = j - 1;
ASSERT(*stopIndex >= startIndex);
if (*stopIndex < startIndex) {
return TSDB_CODE_FAILED;
}

return TSDB_CODE_SUCCESS;
}

@ -541,8 +538,6 @@ SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int3
* @return
*/
int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
ASSERT(pBlock != NULL);

// write the number of rows
*(uint32_t*)buf = pBlock->info.rows;

@ -613,7 +608,9 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
}

pCol->varmeta.length = colLength;
ASSERT(pCol->varmeta.length <= pCol->varmeta.allocLen);
if (pCol->varmeta.length > pCol->varmeta.allocLen) {
return TSDB_CODE_FAILED;
}
}

memcpy(pCol->pData, pStart, colLength);

@ -660,7 +657,9 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity)
}

pCol->varmeta.length = colLength;
ASSERT(pCol->varmeta.length <= pCol->varmeta.allocLen);
if (pCol->varmeta.length > pCol->varmeta.allocLen) {
return TSDB_CODE_FAILED;
}
}

if (!colDataIsNNull_s(pCol, 0, pBlock->info.rows)) {

@ -674,7 +673,6 @@ int32_t blockDataFromBuf1(SSDataBlock* pBlock, const char* buf, size_t capacity)
}

size_t blockDataGetRowSize(SSDataBlock* pBlock) {
ASSERT(pBlock != NULL);
if (pBlock->info.rowSize == 0) {
size_t rowSize = 0;

@ -703,7 +701,6 @@ size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
}

double blockDataGetSerialRowSize(const SSDataBlock* pBlock) {
ASSERT(pBlock != NULL);
double rowSize = 0;

size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);

@ -906,7 +903,6 @@ static int32_t* createTupleIndex(size_t rows) {
static void destroyTupleIndex(int32_t* index) { taosMemoryFreeClear(index); }

int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
ASSERT(pDataBlock != NULL && pOrderInfo != NULL);
if (pDataBlock->info.rows <= 1) {
return TSDB_CODE_SUCCESS;
}

@ -1150,8 +1146,7 @@ void blockDataCleanup(SSDataBlock* pDataBlock) {

void blockDataEmpty(SSDataBlock* pDataBlock) {
SDataBlockInfo* pInfo = &pDataBlock->info;
ASSERT(pInfo->rows <= pDataBlock->info.capacity);
if (pInfo->capacity == 0) {
if (pInfo->capacity == 0 || pInfo->rows > pDataBlock->info.capacity) {
return;
}

@ -1168,12 +1163,9 @@ void blockDataEmpty(SSDataBlock* pDataBlock) {
}

// todo temporarily disable it

static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* pBlockInfo, uint32_t numOfRows,
bool clearPayload) {
ASSERT(numOfRows > 0);

if (numOfRows <= pBlockInfo->capacity) {
if (numOfRows <= 0 || numOfRows <= pBlockInfo->capacity) {
return TSDB_CODE_SUCCESS;
}

@ -1200,7 +1192,9 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo*
int32_t oldLen = BitmapLen(existedRows);
pColumn->nullbitmap = tmp;
memset(&pColumn->nullbitmap[oldLen], 0, BitmapLen(numOfRows) - oldLen);
ASSERT(pColumn->info.bytes);
if (pColumn->info.bytes == 0) {
return TSDB_CODE_FAILED;
}

// make sure the allocated memory is MALLOC_ALIGN_BYTES aligned
tmp = taosMemoryMallocAlign(MALLOC_ALIGN_BYTES, numOfRows * pColumn->info.bytes);

@ -1218,7 +1212,9 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo*

// todo remove it soon
#if defined LINUX
ASSERT((((uint64_t)pColumn->pData) & (MALLOC_ALIGN_BYTES - 1)) == 0x0);
if ((((uint64_t)pColumn->pData) & (MALLOC_ALIGN_BYTES - 1)) != 0x0) {
return TSDB_CODE_FAILED;
}
#endif

if (clearPayload) {

@ -1297,7 +1293,6 @@ void blockDataFreeRes(SSDataBlock* pBlock) {
taosArrayDestroy(pBlock->pDataBlock);
pBlock->pDataBlock = NULL;
taosMemoryFreeClear(pBlock->pBlockAgg);
taosMemoryFree(pBlock->info.pTag);
memset(&pBlock->info, 0, sizeof(SDataBlockInfo));
}

@ -1312,8 +1307,6 @@ void* blockDataDestroy(SSDataBlock* pBlock) {
}

int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
ASSERT(src != NULL);

dst->info = src->info;
dst->info.rows = 0;
dst->info.capacity = 0;

@ -1348,8 +1341,6 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
}

int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
ASSERT(src != NULL && dst != NULL);

blockDataCleanup(dst);
int32_t code = blockDataEnsureCapacity(dst, src->info.rows);
if (code != TSDB_CODE_SUCCESS) {

@ -1505,7 +1496,6 @@ SSDataBlock* createDataBlock() {
}

int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData) {
ASSERT(pBlock != NULL && pColInfoData != NULL);
if (pBlock->pDataBlock == NULL) {
pBlock->pDataBlock = taosArrayInit(4, sizeof(SColumnInfoData));
if (pBlock->pDataBlock == NULL) {

@ -1540,7 +1530,6 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId)
}

SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index) {
ASSERT(pBlock != NULL);
if (index >= taosArrayGetSize(pBlock->pDataBlock)) {
return NULL;
}

@ -1971,10 +1960,10 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
int32_t len = 0;
len += snprintf(dumpBuf + len, size - len,
"===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64
"|rows:%d|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "\n",
"|rows:%d|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n",
flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId,
pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version,
pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey);
pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName);
if (len >= size - 1) return dumpBuf;

for (int32_t j = 0; j < rows; j++) {

@ -2164,7 +2153,6 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_MEDIUMBLOB:
uError("the column type %" PRIi16 " is defined but not implemented yet", pColInfoData->info.type);
ASSERT(0);
break;
default:
if (pColInfoData->info.type < TSDB_DATA_TYPE_MAX && pColInfoData->info.type > TSDB_DATA_TYPE_NULL) {

@ -2198,7 +2186,6 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
}
} else {
uError("the column type %" PRIi16 " is undefined\n", pColInfoData->info.type);
ASSERT(0);
}
break;
}

@ -2405,7 +2392,10 @@ _end:
}

char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
ASSERT(stbFullName[0] != 0);
if (stbFullName[0] == 0) {
return NULL;
}

SArray* tags = taosArrayInit(0, sizeof(SSmlKv));
if (tags == NULL) {
return NULL;

@ -2435,7 +2425,9 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {

taosArrayDestroy(tags);

ASSERT(rname.ctbShortName && rname.ctbShortName[0]);
if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) {
return NULL;
}
return rname.ctbShortName;
}
@ -903,9 +903,7 @@ static int32_t tRowAppendTupleToColData(SRow *pRow, STSchema *pTSchema, SColData
int32_t iTColumn = 1;
STColumn *pTColumn = &pTSchema->columns[iTColumn];

uint8_t *pb = NULL;
uint8_t *pf;
uint8_t *pv;
uint8_t *pb = NULL, *pf = NULL, *pv = NULL;

switch (pRow->flag) {
case HAS_VALUE:
@ -60,6 +60,9 @@ int32_t tsElectInterval = 25 * 1000;
int32_t tsHeartbeatInterval = 1000;
int32_t tsHeartbeatTimeout = 20 * 1000;

// vnode
int64_t tsVndCommitMaxIntervalMs = 60 * 1000;

// monitor
bool tsEnableMonitor = true;
int32_t tsMonitorInterval = 30;

@ -73,6 +76,11 @@ bool tsEnableTelem = true;
int32_t tsTelemInterval = 43200;
char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com";
uint16_t tsTelemPort = 80;
char* tsTelemUri = "/report";

bool tsEnableCrashReport = true;
char* tsClientCrashReportUri = "/ccrashreport";
char* tsSvrCrashReportUri = "/dcrashreport";

// schemaless
char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";

@ -202,7 +210,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg) {
int32_t taosSetTfsCfg(SConfig *pCfg);
#endif

struct SConfig *taosGetCfg() { return tsCfg; }
struct SConfig *taosGetCfg() {
return tsCfg;
}

static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
char *apolloUrl) {

@ -314,6 +324,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "maxMemUsedByInsert", tsMaxMemUsedByInsert, 1, INT32_MAX, true) != 0) return -1;
if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1;

tsNumOfTaskQueueThreads = tsNumOfCores / 2;
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);

@ -377,7 +388,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "queryRspPolicy", tsQueryRspPolicy, 0, 1, 0) != 0) return -1;

tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS);
if (cfgAddInt32(pCfg, "numOfRpcThreads", tsNumOfRpcThreads, 1, 1024, 0) != 0) return -1;

tsNumOfCommitThreads = tsNumOfCores / 2;

@ -427,6 +438,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, 0) != 0) return -1;

if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, 0) != 0) return -1;

if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1;
if (cfgAddString(pCfg, "monitorFqdn", tsMonitorFqdn, 0) != 0) return -1;

@ -434,6 +447,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "monitorMaxLogs", tsMonitorMaxLogs, 1, 1000000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "monitorComp", tsMonitorComp, 0) != 0) return -1;

if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, 0) != 0) return -1;
if (cfgAddBool(pCfg, "telemetryReporting", tsEnableTelem, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "telemetryInterval", tsTelemInterval, 1, 200000, 0) != 0) return -1;
if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1;

@ -665,6 +679,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsQueryUseNodeAllocator = cfgGetItem(pCfg, "queryUseNodeAllocator")->bval;
tsKeepColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
tsUseAdapter = cfgGetItem(pCfg, "useAdapter")->bval;
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;

tsMaxRetryWaitTime = cfgGetItem(pCfg, "maxRetryWaitTime")->i32;
return 0;

@ -715,7 +730,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfSnodeWriteThreads = cfgGetItem(pCfg, "numOfSnodeUniqueThreads")->i32;
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;

tsSIMDBuiltins = (bool) cfgGetItem(pCfg, "SIMD-builtins")->bval;
tsSIMDBuiltins = (bool)cfgGetItem(pCfg, "SIMD-builtins")->bval;

tsEnableMonitor = cfgGetItem(pCfg, "monitor")->bval;
tsMonitorInterval = cfgGetItem(pCfg, "monitorInterval")->i32;

@ -726,6 +741,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsQueryRspPolicy = cfgGetItem(pCfg, "queryRspPolicy")->i32;

tsEnableTelem = cfgGetItem(pCfg, "telemetryReporting")->bval;
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
tsTelemInterval = cfgGetItem(pCfg, "telemetryInterval")->i32;
tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN);
tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32;

@ -743,6 +759,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsHeartbeatInterval = cfgGetItem(pCfg, "syncHeartbeatInterval")->i32;
tsHeartbeatTimeout = cfgGetItem(pCfg, "syncHeartbeatTimeout")->i32;

tsVndCommitMaxIntervalMs = cfgGetItem(pCfg, "vndCommitMaxInterval")->i64;

tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs));
tstrncpy(tsUdfdLdLibPath, cfgGetItem(pCfg, "udfdLdLibPath")->str, sizeof(tsUdfdLdLibPath));

@ -795,6 +813,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32;
} else if (strcasecmp("cDebugFlag", name) == 0) {
cDebugFlag = cfgGetItem(pCfg, "cDebugFlag")->i32;
} else if (strcasecmp("crashReporting", name) == 0) {
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
}
break;
}
@ -90,16 +90,12 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
SName* toName(int32_t acctId, const char* pDbName, const char* pTableName, SName* pName) {
pName->type = TSDB_TABLE_NAME_T;
pName->acctId = acctId;
memset(pName->dbname, 0, TSDB_DB_NAME_LEN);
strncpy(pName->dbname, pDbName, TSDB_DB_NAME_LEN - 1);
memset(pName->tname, 0, TSDB_TABLE_NAME_LEN);
strncpy(pName->tname, pTableName, TSDB_TABLE_NAME_LEN - 1);
snprintf(pName->dbname, sizeof(pName->dbname), "%s", pDbName);
snprintf(pName->tname, sizeof(pName->tname), "%s", pTableName);
return pName;
}

int32_t tNameExtractFullName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);

// invalid full name format, abort
if (!tNameIsValid(name)) {
return -1;

@ -109,7 +105,7 @@ int32_t tNameExtractFullName(const SName* name, char* dst) {

size_t tnameLen = strlen(name->tname);
if (tnameLen > 0) {
/*assert(name->type == TSDB_TABLE_NAME_T);*/
/*ASSERT(name->type == TSDB_TABLE_NAME_T);*/
dst[len] = TS_PATH_DELIMITER[0];

memcpy(dst + len + 1, name->tname, tnameLen);

@ -120,25 +116,21 @@ int32_t tNameExtractFullName(const SName* name, char* dst) {
}

int32_t tNameLen(const SName* name) {
assert(name != NULL);

char tmp[12] = {0};
int32_t len = sprintf(tmp, "%d", name->acctId);
int32_t len1 = (int32_t)strlen(name->dbname);
int32_t len2 = (int32_t)strlen(name->tname);

if (name->type == TSDB_DB_NAME_T) {
assert(len2 == 0);
ASSERT(len2 == 0);
return len + len1 + TSDB_NAME_DELIMITER_LEN;
} else {
assert(len2 > 0);
ASSERT(len2 > 0);
return len + len1 + len2 + TSDB_NAME_DELIMITER_LEN * 2;
}
}

bool tNameIsValid(const SName* name) {
assert(name != NULL);

if (!VALID_NAME_TYPE(name->type)) {
return false;
}

@ -151,15 +143,12 @@ bool tNameIsValid(const SName* name) {
}

SName* tNameDup(const SName* name) {
assert(name != NULL);

SName* p = taosMemoryMalloc(sizeof(SName));
memcpy(p, name, sizeof(SName));
return p;
}

int32_t tNameGetDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
strncpy(dst, name->dbname, tListLen(name->dbname));
return 0;
}

@ -167,28 +156,24 @@ int32_t tNameGetDbName(const SName* name, char* dst) {
const char* tNameGetDbNameP(const SName* name) { return &name->dbname[0]; }

int32_t tNameGetFullDbName(const SName* name, char* dst) {
assert(name != NULL && dst != NULL);
snprintf(dst, TSDB_DB_FNAME_LEN, "%d.%s", name->acctId, name->dbname);
return 0;
}

bool tNameIsEmpty(const SName* name) {
assert(name != NULL);
return name->type == 0 || name->acctId == 0;
}

const char* tNameGetTableName(const SName* name) {
assert(name != NULL && name->type == TSDB_TABLE_NAME_T);
ASSERT(name != NULL && name->type == TSDB_TABLE_NAME_T);
return &name->tname[0];
}

void tNameAssign(SName* dst, const SName* src) { memcpy(dst, src, sizeof(SName)); }

int32_t tNameSetDbName(SName* dst, int32_t acct, const char* dbName, size_t nameLen) {
assert(dst != NULL && dbName != NULL && nameLen > 0);

// too long account id or too long db name
if (nameLen >= tListLen(dst->dbname)) {
if (nameLen <= 0 || nameLen >= tListLen(dst->dbname)) {
return -1;
}

@ -199,8 +184,6 @@ int32_t tNameSetDbName(SName* dst, int32_t acct, const char* dbName, size_t name
}

int32_t tNameAddTbName(SName* dst, const char* tbName, size_t nameLen) {
assert(dst != NULL && tbName != NULL && nameLen > 0);

// too long account id or too long db name
if (nameLen >= tListLen(dst->tname) || nameLen <= 0) {
return -1;

@ -212,7 +195,6 @@ int32_t tNameAddTbName(SName* dst, const char* tbName, size_t nameLen) {
}

int32_t tNameSetAcctId(SName* dst, int32_t acctId) {
assert(dst != NULL);
dst->acctId = acctId;
return 0;
}

@ -247,7 +229,9 @@ bool tNameTbNameEqual(SName* left, SName* right) {
}

int32_t tNameFromString(SName* dst, const char* str, uint32_t type) {
assert(dst != NULL && str != NULL && strlen(str) > 0);
if (strlen(str) == 0) {
return -1;
}

char* p = NULL;
if ((type & T_NAME_ACCT) == T_NAME_ACCT) {

@ -316,7 +300,7 @@ static int compareKv(const void* p1, const void* p2) {
void buildChildTableName(RandTableName* rName) {
SStringBuilder sb = {0};
taosStringBuilderAppendStringLen(&sb, rName->stbFullName, rName->stbFullNameLen);
if(sb.buf == NULL) return;
if (sb.buf == NULL) return;
taosArraySort(rName->tags, compareKv);
for (int j = 0; j < taosArrayGetSize(rName->tags); ++j) {
taosStringBuilderAppendChar(&sb, ',');

@ -344,5 +328,5 @@ void buildChildTableName(RandTableName* rName) {
strcat(rName->ctbShortName, temp);
}
taosStringBuilderDestroy(&sb);
rName->uid = *(uint64_t*)(context.digest);
// rName->uid = *(uint64_t*)(context.digest);
}
@ -76,7 +76,6 @@ void tdSCellValPrint(SCellVal *pVal, int8_t colType) {
return;
}
if (!pVal->val) {
ASSERT(0);
printf("BadVal ");
return;
}

@ -342,7 +341,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
}

if (iColumn == 0) {
ASSERT(pColVal->cid == pTColumn->colId);
ASSERT(pColVal && pColVal->cid == pTColumn->colId);
ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP);
ASSERT(pTColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID);
} else {

@ -490,7 +489,6 @@ bool tdSTSRowGetVal(STSRowIter *pIter, col_id_t colId, col_type_t colType, SCell

int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) {
if (!pBitmap || colIdx < 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -512,7 +510,6 @@ int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pVa
*pValType = ((*pDestByte) & 0x03);
break;
default:
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -521,7 +518,6 @@ int32_t tdGetBitmapValTypeII(const void *pBitmap, int16_t colIdx, TDRowValT *pVa

int32_t tdGetBitmapValTypeI(const void *pBitmap, int16_t colIdx, TDRowValT *pValType) {
if (!pBitmap || colIdx < 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -555,7 +551,6 @@ int32_t tdGetBitmapValTypeI(const void *pBitmap, int16_t colIdx, TDRowValT *pVal
*pValType = ((*pDestByte) & 0x01);
break;
default:
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -564,7 +559,6 @@ int32_t tdGetBitmapValTypeI(const void *pBitmap, int16_t colIdx, TDRowValT *pVal

int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType) {
if (!pBitmap || colIdx < 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -607,7 +601,6 @@ int32_t tdSetBitmapValTypeI(void *pBitmap, int16_t colIdx, TDRowValT valType) {
// *pDestByte |= (valType);
break;
default:
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -630,7 +623,6 @@ int32_t tdGetKvRowValOfCol(SCellVal *output, STSRow *pRow, void *pBitmap, int32_
output->val = POINTER_SHIFT(pRow, offset);
}
#else
ASSERT(0);
if (offset < 0) {
terrno = TSDB_CODE_INVALID_PARA;
output->valType = TD_VTYPE_NONE;

@ -680,7 +672,6 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
return terrno;
}
#else
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
#endif

@ -707,8 +698,8 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
if (!pBuilder->hasNone) pBuilder->hasNone = true;
return TSDB_CODE_SUCCESS;
default:
ASSERT(0);
break;
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

if (TD_IS_TP_ROW(pRow)) {

@ -722,7 +713,6 @@ int32_t tdAppendColValToRow(SRowBuilder *pBuilder, col_id_t colId, int8_t colTyp
int32_t tdAppendColValToKvRow(SRowBuilder *pBuilder, TDRowValT valType, const void *val, bool isCopyVarData,
int8_t colType, int16_t colIdx, int32_t offset, col_id_t colId) {
if ((offset < (int32_t)sizeof(SKvRowIdx)) || (colIdx < 1)) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -810,7 +800,6 @@ int32_t tdSRowSetExtendedInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBou
pBuilder->nCols = nCols;
pBuilder->nBoundCols = nBoundCols;
if (pBuilder->flen <= 0 || pBuilder->nCols <= 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -832,7 +821,6 @@ int32_t tdSRowSetExtendedInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBou
int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) {
pBuilder->pBuf = (STSRow *)pBuf;
if (!pBuilder->pBuf) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -869,7 +857,6 @@ int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) {
TD_ROW_SET_NCOLS(pBuilder->pBuf, pBuilder->nBoundCols);
break;
default:
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -880,7 +867,6 @@ int32_t tdSRowResetBuf(SRowBuilder *pBuilder, void *pBuf) {
int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf) {
pBuilder->pBuf = (STSRow *)pBuf;
if (!pBuilder->pBuf) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -900,7 +886,6 @@ int32_t tdSRowGetBuf(SRowBuilder *pBuilder, void *pBuf) {
#endif
break;
default:
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -920,7 +905,6 @@ int32_t tdSRowSetTpInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t flen) {
pBuilder->flen = flen;
pBuilder->nCols = nCols;
if (pBuilder->flen <= 0 || pBuilder->nCols <= 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -939,7 +923,6 @@ int32_t tdSRowSetInfo(SRowBuilder *pBuilder, int32_t nCols, int32_t nBoundCols,
pBuilder->nCols = nCols;
pBuilder->nBoundCols = nBoundCols;
if (pBuilder->flen <= 0 || pBuilder->nCols <= 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -968,7 +951,6 @@ int32_t tdGetBitmapValType(const void *pBitmap, int16_t colIdx, TDRowValT *pValT
tdGetBitmapValTypeI(pBitmap, colIdx, pValType);
break;
default:
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return TSDB_CODE_FAILED;
}

@ -987,7 +969,6 @@ bool tdIsBitmapValTypeNorm(const void *pBitmap, int16_t idx, int8_t bitmapMode)

int32_t tdSetBitmapValTypeII(void *pBitmap, int16_t colIdx, TDRowValT valType) {
if (!pBitmap || colIdx < 0) {
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -1014,7 +995,6 @@ int32_t tdSetBitmapValTypeII(void *pBitmap, int16_t colIdx, TDRowValT valType) {
// *pDestByte |= (valType);
break;
default:
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}

@ -1031,7 +1011,6 @@ int32_t tdSetBitmapValType(void *pBitmap, int16_t colIdx, TDRowValT valType, int
tdSetBitmapValTypeI(pBitmap, colIdx, valType);
break;
default:
ASSERT(0);
terrno = TSDB_CODE_INVALID_PARA;
return TSDB_CODE_FAILED;
}
@ -168,12 +168,13 @@ int64_t parseFraction(char* str, char** end, int32_t timePrec) {
i = MICRO_SEC_FRACTION_LEN;
}
times = MICRO_SEC_FRACTION_LEN - i;
} else {
assert(timePrec == TSDB_TIME_PRECISION_NANO);
} else if (timePrec == TSDB_TIME_PRECISION_NANO) {
if (i >= NANO_SEC_FRACTION_LEN) {
i = NANO_SEC_FRACTION_LEN;
}
times = NANO_SEC_FRACTION_LEN - i;
} else {
return -1;
}

fraction = strnatoi(str, i) * factor[times];

@ -510,8 +511,11 @@ int64_t convertTimePrecision(int64_t utime, int32_t fromPrecision, int32_t toPre
// !!!!notice: double lose precison if time is too large, for example: 1626006833631000000*1.0 = double =
// 1626006833631000064
int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit) {
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI || fromPrecision == TSDB_TIME_PRECISION_MICRO ||
fromPrecision == TSDB_TIME_PRECISION_NANO);
if (fromPrecision != TSDB_TIME_PRECISION_MILLI && fromPrecision != TSDB_TIME_PRECISION_MICRO &&
fromPrecision != TSDB_TIME_PRECISION_NANO) {
return -1;
}

int64_t factors[3] = {NANOSECOND_PER_MSEC, NANOSECOND_PER_USEC, 1};
double tmp = time;
switch (toUnit) {

@ -761,8 +765,7 @@ int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char
}

int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) {
if (pInterval->sliding == 0) {
assert(pInterval->interval == 0);
if (pInterval->sliding == 0 && pInterval->interval == 0) {
return t;
}

@ -931,7 +934,7 @@ void taosFormatUtcTime(char* buf, int32_t bufLen, int64_t t, int32_t precision)

default:
fractionLen = 0;
assert(false);
ASSERT(false);
}

taosLocalTime(", &ptm);
@ -17,6 +17,7 @@
#include "ttszip.h"
#include "taoserror.h"
#include "tcompression.h"
#include "tlog.h"

static int32_t getDataStartOffset();
static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo);

@ -202,14 +203,14 @@ void* tsBufDestroy(STSBuf* pTSBuf) {
static STSGroupBlockInfoEx* tsBufGetLastGroupInfo(STSBuf* pTSBuf) {
int32_t last = pTSBuf->numOfGroups - 1;

assert(last >= 0);
ASSERT(last >= 0);
return &pTSBuf->pData[last];
}

static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) {
if (pTSBuf->numOfAlloc <= pTSBuf->numOfGroups) {
uint32_t newSize = (uint32_t)(pTSBuf->numOfAlloc * 1.5);
assert((int32_t)newSize > pTSBuf->numOfAlloc);
ASSERT((int32_t)newSize > pTSBuf->numOfAlloc);

STSGroupBlockInfoEx* tmp =
(STSGroupBlockInfoEx*)taosMemoryRealloc(pTSBuf->pData, sizeof(STSGroupBlockInfoEx) * newSize);

@ -233,7 +234,7 @@ static STSGroupBlockInfoEx* addOneGroupInfo(STSBuf* pTSBuf, int32_t id) {
STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[pTSBuf->numOfGroups].info;
pBlockInfo->id = id;
pBlockInfo->offset = pTSBuf->fileSize;
assert(pBlockInfo->offset >= getDataStartOffset());
ASSERT(pBlockInfo->offset >= getDataStartOffset());

// update vnode info in file
TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups, pBlockInfo);

@ -282,7 +283,7 @@ static void writeDataToDisk(STSBuf* pTSBuf) {
pTsData->allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize);

int64_t r = taosLSeekFile(pTSBuf->pFile, pTSBuf->fileSize, SEEK_SET);
assert(r == 0);
ASSERT(r == 0);

/*
* format for output data:

@ -316,7 +317,7 @@ static void writeDataToDisk(STSBuf* pTSBuf) {
taosWriteFile(pTSBuf->pFile, &pBlock->compLen, sizeof(pBlock->compLen));

metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &trueLen, sizeof(pBlock->tag.nLen));
assert(metaLen == getTagAreaLength(&pBlock->tag));
ASSERT(metaLen == getTagAreaLength(&pBlock->tag));

int32_t blockSize = metaLen + sizeof(pBlock->numOfElem) + sizeof(pBlock->compLen) * 2 + pBlock->compLen;
pTSBuf->fileSize += blockSize;

@ -379,7 +380,7 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) {
size_t sz = 0;
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) {
char* tp = taosMemoryRealloc(pBlock->tag.pz, pBlock->tag.nLen + 1);
assert(tp != NULL);
ASSERT(tp != NULL);

memset(tp, 0, pBlock->tag.nLen + 1);
pBlock->tag.pz = tp;

@ -410,14 +411,14 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) {

// read the comp length at the length of comp block
sz = taosReadFile(pTSBuf->pFile, &pBlock->padding, sizeof(pBlock->padding));
assert(pBlock->padding == pBlock->compLen);
ASSERT(pBlock->padding == pBlock->compLen);

int32_t n = 0;
sz = taosReadFile(pTSBuf->pFile, &n, sizeof(pBlock->tag.nLen));
if (pBlock->tag.nType == TSDB_DATA_TYPE_NULL) {
assert(n == 0);
ASSERT(n == 0);
} else {
assert(n == pBlock->tag.nLen);
ASSERT(n == pBlock->tag.nLen);
}

UNUSED(sz);

@ -477,7 +478,7 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t id, SVariant* tag, const char* pData, i
pBlockInfo = tsBufGetLastGroupInfo(pTSBuf);
}

assert(pBlockInfo->info.id == id);
ASSERT(pBlockInfo->info.id == id);

if ((taosVariantCompare(&pTSBuf->block.tag, tag) != 0) && ptsData->len > 0) {
// new arrived data with different tags value, save current value into disk first

@ -596,7 +597,7 @@ static int32_t tsBufFindBlockByTag(STSBuf* pTSBuf, STSGroupBlockInfo* pBlockInfo
static void tsBufGetBlock(STSBuf* pTSBuf, int32_t groupIndex, int32_t blockIndex) {
STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[groupIndex].info;
if (pBlockInfo->numOfBlocks <= blockIndex) {
assert(false);
ASSERT(false);
}

STSCursor* pCur = &pTSBuf->cur;

@ -613,7 +614,7 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t groupIndex, int32_t blockIndex
}
} else {
if (tsBufFindBlock(pTSBuf, pBlockInfo, blockIndex) == -1) {
assert(false);
ASSERT(false);
}
}

@ -633,7 +634,7 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t groupIndex, int32_t blockIndex
tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf,
pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize);

assert((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len));
ASSERT((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len));

pCur->vgroupIndex = groupIndex;
pCur->blockIndex = blockIndex;

@ -668,7 +669,9 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) {
return -1;
}

assert(pHeader->tsOrder == TSDB_ORDER_ASC || pHeader->tsOrder == TSDB_ORDER_DESC);
if (pHeader->tsOrder != TSDB_ORDER_ASC && pHeader->tsOrder != TSDB_ORDER_DESC) {
return -1;
}

int32_t r = taosLSeekFile(pTSBuf->pFile, 0, SEEK_SET);
if (r != 0) {

@ -705,7 +708,7 @@ bool tsBufNextPos(STSBuf* pTSBuf) {
}

} else { // get the last timestamp record in the last block of the last vnode
assert(pTSBuf->numOfGroups > 0);
ASSERT(pTSBuf->numOfGroups > 0);

int32_t groupIndex = pTSBuf->numOfGroups - 1;
pCur->vgroupIndex = groupIndex;

@ -729,7 +732,7 @@ bool tsBufNextPos(STSBuf* pTSBuf) {
int32_t step = pCur->order == TSDB_ORDER_ASC ? 1 : -1;

while (1) {
assert(pTSBuf->tsData.len == pTSBuf->block.numOfElem * TSDB_KEYSIZE);
ASSERT(pTSBuf->tsData.len == pTSBuf->block.numOfElem * TSDB_KEYSIZE);

if ((pCur->order == TSDB_ORDER_ASC && pCur->tsIndex >= pTSBuf->block.numOfElem - 1) ||
(pCur->order == TSDB_ORDER_DESC && pCur->tsIndex <= 0)) {

@ -810,7 +813,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) {
}

// src can only have one vnode index
assert(pSrcBuf->numOfGroups == 1);
ASSERT(pSrcBuf->numOfGroups == 1);

// there are data in buffer, flush to disk first
tsBufFlush(pDestBuf);

@ -853,7 +856,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) {
}

int32_t r = taosLSeekFile(pDestBuf->pFile, 0, SEEK_END);
assert(r == 0);
ASSERT(r == 0);

int64_t offset = getDataStartOffset();
int32_t size = (int32_t)pSrcBuf->fileSize - (int32_t)offset;

@ -881,7 +884,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf) {
}
pDestBuf->fileSize = (uint32_t)file_size;

assert(pDestBuf->fileSize == oldSize + size);
ASSERT(pDestBuf->fileSize == oldSize + size);

return 0;
}

@ -913,7 +916,10 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_
pTSBuf->fileSize += len;

pTSBuf->tsOrder = order;
assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
if (order != TSDB_ORDER_ASC && order != TSDB_ORDER_DESC) {
tsBufDestroy(pTSBuf);
return NULL;
}

STSBufFileHeader header = {
.magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder};

@ -1095,7 +1101,7 @@ void tsBufGetGroupIdList(STSBuf* pTSBuf, int32_t* num, int32_t** id) {
}

int32_t dumpFileBlockByGroupId(STSBuf* pTSBuf, int32_t groupIndex, void* buf, int32_t* len, int32_t* numOfBlocks) {
assert(groupIndex >= 0 && groupIndex < pTSBuf->numOfGroups);
ASSERT(groupIndex >= 0 && groupIndex < pTSBuf->numOfGroups);
STSGroupBlockInfo* pBlockInfo = &pTSBuf->pData[groupIndex].info;

*len = 0;
@ -140,7 +140,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
}
}

void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type) {
int32_t operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type) {
if (optr == OP_TYPE_ADD) {
switch (type) {
case TSDB_DATA_TYPE_TINYINT:

@ -177,11 +177,12 @@ void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type) {
SET_DOUBLE_VAL(dst, GET_DOUBLE_VAL(s1) + GET_DOUBLE_VAL(s2));
break;
default: {
assert(0);
break;
return -1;
}
}
} else {
assert(0);
return -1;
}

return 0;
}
@ -168,7 +168,7 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
pSrc->nType == TSDB_DATA_TYPE_JSON) {
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
char *p = taosMemoryRealloc(pDst->pz, len);
assert(p);
ASSERT(p);

memset(p, 0, len);
pDst->pz = p;

@ -192,7 +192,7 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
size_t num = taosArrayGetSize(pSrc->arr);
pDst->arr = taosArrayInit(num, sizeof(int64_t));
pDst->nLen = pSrc->nLen;
assert(pSrc->nLen == num);
ASSERT(pSrc->nLen == num);
for (size_t i = 0; i < num; i++) {
int64_t *p = taosArrayGet(pSrc->arr, i);
taosArrayPush(pDst->arr, p);
@ -44,6 +44,7 @@ static struct {
char apolloUrl[PATH_MAX];
const char **envCmd;
SArray *pArgs; // SConfigPair
int64_t startTime;
} global = {0};

static void dmSetDebugFlag(int32_t signum, void *sigInfo, void *context) { taosSetAllDebugFlag(143, true); }

@ -67,23 +68,67 @@ static void dmStopDnode(int signum, void *sigInfo, void *context) {
dmStop();
}

void dmLogCrash(int signum, void *sigInfo, void *context) {
taosIgnSignal(SIGTERM);
taosIgnSignal(SIGHUP);
taosIgnSignal(SIGINT);
taosIgnSignal(SIGBREAK);

#ifndef WINDOWS
taosIgnSignal(SIGBUS);
#endif
taosIgnSignal(SIGABRT);
taosIgnSignal(SIGFPE);
taosIgnSignal(SIGSEGV);

char *pMsg = NULL;
const char *flags = "UTL FATAL ";
ELogLevel level = DEBUG_FATAL;
int32_t dflag = 255;
int64_t msgLen= -1;

if (tsEnableCrashReport) {
if (taosGenCrashJsonMsg(signum, &pMsg, dmGetClusterId(), global.startTime)) {
taosPrintLog(flags, level, dflag, "failed to generate crash json msg");
goto _return;
} else {
msgLen = strlen(pMsg);
}
}

_return:

taosLogCrashInfo("taosd", pMsg, msgLen, signum, sigInfo);

exit(signum);
}

static void dmSetSignalHandle() {
taosSetSignal(SIGUSR1, dmSetDebugFlag);
taosSetSignal(SIGUSR2, dmSetAssert);
taosSetSignal(SIGTERM, dmStopDnode);
taosSetSignal(SIGHUP, dmStopDnode);
taosSetSignal(SIGINT, dmStopDnode);
taosSetSignal(SIGABRT, dmStopDnode);
taosSetSignal(SIGBREAK, dmStopDnode);
#ifndef WINDOWS
taosSetSignal(SIGTSTP, dmStopDnode);
taosSetSignal(SIGQUIT, dmStopDnode);
#endif

#ifndef WINDOWS
taosSetSignal(SIGBUS, dmLogCrash);
#endif
taosSetSignal(SIGABRT, dmLogCrash);
taosSetSignal(SIGFPE, dmLogCrash);
taosSetSignal(SIGSEGV, dmLogCrash);
}

static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
global.startTime = taosGetTimestampMs();

int32_t cmdEnvIndex = 0;
if (argc < 2) return 0;

global.envCmd = taosMemoryMalloc((argc - 1) * sizeof(char *));
memset(global.envCmd, 0, (argc - 1) * sizeof(char *));
for (int32_t i = 1; i < argc; ++i) {
@ -29,6 +29,7 @@ typedef struct SDnodeMgmt {
const char *name;
TdThread statusThread;
TdThread monitorThread;
TdThread crashReportThread;
SSingleWorker mgmtWorker;
ProcessCreateNodeFp processCreateNodeFp;
ProcessDropNodeFp processDropNodeFp;

@ -55,6 +56,8 @@ int32_t dmStartStatusThread(SDnodeMgmt *pMgmt);
void dmStopStatusThread(SDnodeMgmt *pMgmt);
int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt);
void dmStopMonitorThread(SDnodeMgmt *pMgmt);
int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt);
void dmStopCrashReportThread(SDnodeMgmt *pMgmt);
int32_t dmStartWorker(SDnodeMgmt *pMgmt);
void dmStopWorker(SDnodeMgmt *pMgmt);
@ -23,6 +23,9 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) {
if (dmStartMonitorThread(pMgmt) != 0) {
return -1;
}
if (dmStartCrashReportThread(pMgmt) != 0) {
return -1;
}
return 0;
}

@ -30,6 +33,7 @@ static void dmStopMgmt(SDnodeMgmt *pMgmt) {
pMgmt->pData->stopped = true;
dmStopMonitorThread(pMgmt);
dmStopStatusThread(pMgmt);
dmStopCrashReportThread(pMgmt);
}

static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
@ -15,6 +15,7 @@

#define _DEFAULT_SOURCE
#include "dmInt.h"
#include "thttp.h"

static void *dmStatusThreadFp(void *param) {
SDnodeMgmt *pMgmt = param;

@ -63,6 +64,63 @@ static void *dmMonitorThreadFp(void *param) {
return NULL;
}

static void *dmCrashReportThreadFp(void *param) {
SDnodeMgmt *pMgmt = param;
int64_t lastTime = taosGetTimestampMs();
setThreadName("dnode-crashReport");
char filepath[PATH_MAX] = {0};
snprintf(filepath, sizeof(filepath), "%s%s.taosdCrashLog", tsLogDir, TD_DIRSEP);
char *pMsg = NULL;
int64_t msgLen = 0;
TdFilePtr pFile = NULL;
bool truncateFile = false;
int32_t sleepTime = 200;
int32_t reportPeriodNum = 3600 * 1000 / sleepTime;
int32_t loopTimes = reportPeriodNum;

while (1) {
if (pMgmt->pData->dropped || pMgmt->pData->stopped) break;
if (loopTimes++ < reportPeriodNum) {
taosMsleep(sleepTime);
continue;
}

taosReadCrashInfo(filepath, &pMsg, &msgLen, &pFile);
if (pMsg && msgLen > 0) {
if (taosSendHttpReport(tsTelemServer, tsSvrCrashReportUri, tsTelemPort, pMsg, msgLen, HTTP_FLAT) != 0) {
dError("failed to send crash report");
if (pFile) {
taosReleaseCrashLogFile(pFile, false);
continue;
}
} else {
dInfo("succeed to send crash report");
truncateFile = true;
}
} else {
dDebug("no crash info");
}

taosMemoryFree(pMsg);

if (pMsg && msgLen > 0) {
pMsg = NULL;
continue;
}

if (pFile) {
taosReleaseCrashLogFile(pFile, truncateFile);
truncateFile = false;
}

taosMsleep(sleepTime);
loopTimes = 0;
}

return NULL;
}

int32_t dmStartStatusThread(SDnodeMgmt *pMgmt) {
TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);

@ -105,6 +163,36 @@ void dmStopMonitorThread(SDnodeMgmt *pMgmt) {
}
}

int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt) {
if (!tsEnableCrashReport) {
return 0;
}

TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);
taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
if (taosThreadCreate(&pMgmt->crashReportThread, &thAttr, dmCrashReportThreadFp, pMgmt) != 0) {
dError("failed to create crashReport thread since %s", strerror(errno));
return -1;
}

taosThreadAttrDestroy(&thAttr);
tmsgReportStartup("dnode-crashReport", "initialized");
return 0;
}

void dmStopCrashReportThread(SDnodeMgmt *pMgmt) {
if (!tsEnableCrashReport) {
return;
}

if (taosCheckPthreadValid(pMgmt->crashReportThread)) {
taosThreadJoin(pMgmt->crashReportThread, NULL);
taosThreadClear(&pMgmt->crashReportThread);
}
}

static void dmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
SDnodeMgmt *pMgmt = pInfo->ahandle;
int32_t code = -1;
@ -15,6 +15,7 @@

#define _DEFAULT_SOURCE
#include "mmInt.h"
#include "tjson.h"

int32_t mmReadFile(const char *path, SMnodeOpt *pOption) {
int32_t code = TSDB_CODE_INVALID_JSON_FORMAT;

@ -130,56 +131,70 @@ _OVER:
return code;
}

static int32_t mmEncodeOption(SJson *pJson, const SMnodeOpt *pOption) {
if (pOption->deploy && pOption->numOfReplicas > 0) {
if (tjsonAddDoubleToObject(pJson, "selfIndex", pOption->selfIndex) < 0) return -1;

SJson *replicas = tjsonCreateArray();
if (replicas == NULL) return -1;
if (tjsonAddItemToObject(pJson, "replicas", replicas) < 0) return -1;

for (int32_t i = 0; i < pOption->numOfReplicas; ++i) {
SJson *replica = tjsonCreateObject();
if (replica == NULL) return -1;

const SReplica *pReplica = pOption->replicas + i;
if (tjsonAddDoubleToObject(replica, "id", pReplica->id) < 0) return -1;
if (tjsonAddStringToObject(replica, "fqdn", pReplica->fqdn) < 0) return -1;
if (tjsonAddDoubleToObject(replica, "port", pReplica->port) < 0) return -1;
if (tjsonAddItemToArray(replicas, replica) < 0) return -1;
}
}

if (tjsonAddDoubleToObject(pJson, "deployed", pOption->deploy) < 0) return -1;

return 0;
}

int32_t mmWriteFile(const char *path, const SMnodeOpt *pOption) {
int32_t code = -1;
char *buffer = NULL;
SJson *pJson = NULL;
TdFilePtr pFile = NULL;
char file[PATH_MAX] = {0};
char realfile[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%smnode.json.bak", path, TD_DIRSEP);
snprintf(realfile, sizeof(realfile), "%s%smnode.json", path, TD_DIRSEP);

TdFilePtr pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to write %s since %s", file, terrstr());
return -1;
}
terrno = TSDB_CODE_OUT_OF_MEMORY;
pJson = tjsonCreateObject();
if (pJson == NULL) goto _OVER;
if (mmEncodeOption(pJson, pOption) != 0) goto _OVER;
buffer = tjsonToString(pJson);
if (buffer == NULL) goto _OVER;
terrno = 0;

int32_t len = 0;
int32_t maxLen = 4096;
char *content = taosMemoryCalloc(1, maxLen + 1);
pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) goto _OVER;

len += snprintf(content + len, maxLen - len, "{\n");
if (pOption->deploy && pOption->numOfReplicas > 0) {
len += snprintf(content + len, maxLen - len, " \"selfIndex\": %d,\n", pOption->selfIndex);
len += snprintf(content + len, maxLen - len, " \"replicas\": [{\n");
int32_t len = strlen(buffer);
if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER;
if (taosFsyncFile(pFile) < 0) goto _OVER;

for (int32_t i = 0; i < pOption->numOfReplicas; ++i) {
const SReplica *pReplica = pOption->replicas + i;
if (pReplica != NULL && pReplica->id > 0) {
len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pReplica->id);
len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pReplica->fqdn);
len += snprintf(content + len, maxLen - len, " \"port\": %u\n", pReplica->port);
}
if (i < pOption->numOfReplicas - 1) {
len += snprintf(content + len, maxLen - len, " },{\n");
} else {
len += snprintf(content + len, maxLen - len, " }],\n");
}
}
}
len += snprintf(content + len, maxLen - len, " \"deployed\": %d\n", pOption->deploy);
len += snprintf(content + len, maxLen - len, "}\n");

taosWriteFile(pFile, content, len);
taosFsyncFile(pFile);
taosCloseFile(&pFile);
taosMemoryFree(content);
if (taosRenameFile(file, realfile) != 0) goto _OVER;

if (taosRenameFile(file, realfile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to rename %s since %s", file, terrstr());
return -1;
code = 0;
dInfo("succeed to write mnode file:%s, deloyed:%d", realfile, pOption->deploy);

_OVER:
if (pJson != NULL) tjsonDelete(pJson);
if (buffer != NULL) taosMemoryFree(buffer);
if (pFile != NULL) taosCloseFile(&pFile);

if (code != 0) {
if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to write mnode file:%s since %s, deloyed:%d", realfile, terrstr(), pOption->deploy);
}

dDebug("succeed to write %s, deployed:%d", realfile, pOption->deploy);
return 0;
return code;
}
@ -15,6 +15,7 @@

#define _DEFAULT_SOURCE
#include "vmInt.h"
#include "tjson.h"

#define MAX_CONTENT_LEN 2 * 1024 * 1024

@ -144,65 +145,66 @@ _OVER:
return code;
}

int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
int32_t code = 0;
char file[PATH_MAX] = {0};
char realfile[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP);
snprintf(realfile, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP);
static int32_t vmEncodeVnodeList(SJson *pJson, SVnodeObj **ppVnodes, int32_t numOfVnodes) {
SJson *vnodes = tjsonCreateArray();
if (vnodes == NULL) return -1;
if (tjsonAddItemToObject(pJson, "vnodes", vnodes) < 0) return -1;

TdFilePtr pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to write %s since %s", file, terrstr());
return -1;
}

int32_t numOfVnodes = 0;
SVnodeObj **ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
if (ppVnodes == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = -1;
dError("failed to write %s while get vnodelist", file);
goto _OVER;
}

int32_t len = 0;
int32_t maxLen = MAX_CONTENT_LEN;
char *content = taosMemoryCalloc(1, maxLen + 1);
if (content == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
code = -1;
dError("failed to write %s while malloc content", file);
goto _OVER;
}

len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"vnodes\": [\n");
for (int32_t i = 0; i < numOfVnodes; ++i) {
SVnodeObj *pVnode = ppVnodes[i];
if (pVnode == NULL) continue;

len += snprintf(content + len, maxLen - len, " {\n");
len += snprintf(content + len, maxLen - len, " \"vgId\": %d,\n", pVnode->vgId);
len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pVnode->dropped);
len += snprintf(content + len, maxLen - len, " \"vgVersion\": %d\n", pVnode->vgVersion);
if (i < numOfVnodes - 1) {
len += snprintf(content + len, maxLen - len, " },\n");
} else {
len += snprintf(content + len, maxLen - len, " }\n");
SJson *vnode = tjsonCreateObject();
if (vnode == NULL) return -1;
if (tjsonAddDoubleToObject(vnode, "vgId", pVnode->vgId) < 0) return -1;
if (tjsonAddDoubleToObject(vnode, "dropped", pVnode->dropped) < 0) return -1;
if (tjsonAddDoubleToObject(vnode, "vgVersion", pVnode->vgVersion) < 0) return -1;
if (tjsonAddItemToArray(vnodes, vnode) < 0) return -1;
}
}
len += snprintf(content + len, maxLen - len, " ]\n");
len += snprintf(content + len, maxLen - len, "}\n");

return 0;
}

int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
int32_t code = -1;
char *buffer = NULL;
SJson *pJson = NULL;
TdFilePtr pFile = NULL;
SVnodeObj **ppVnodes = NULL;
char file[PATH_MAX] = {0};
char realfile[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP);
snprintf(realfile, sizeof(realfile), "%s%svnodes.json", pMgmt->path, TD_DIRSEP);

int32_t numOfVnodes = 0;
ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
if (ppVnodes == NULL) goto _OVER;

terrno = TSDB_CODE_OUT_OF_MEMORY;
pJson = tjsonCreateObject();
if (pJson == NULL) goto _OVER;
if (vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes) != 0) goto _OVER;
buffer = tjsonToString(pJson);
if (buffer == NULL) goto _OVER;
terrno = 0;

_OVER:
taosWriteFile(pFile, content, len);
taosFsyncFile(pFile);
taosCloseFile(&pFile);
taosMemoryFree(content);
pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) goto _OVER;

int32_t len = strlen(buffer);
if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER;
if (taosFsyncFile(pFile) < 0) goto _OVER;

taosCloseFile(&pFile);
if (taosRenameFile(file, realfile) != 0) goto _OVER;

code = 0;
dInfo("succeed to write vnodes file:%s, vnodes:%d", realfile, numOfVnodes);

_OVER:
if (pJson != NULL) tjsonDelete(pJson);
if (buffer != NULL) taosMemoryFree(buffer);
if (pFile != NULL) taosCloseFile(&pFile);
if (ppVnodes != NULL) {
for (int32_t i = 0; i < numOfVnodes; ++i) {
SVnodeObj *pVnode = ppVnodes[i];

@ -213,14 +215,9 @@ _OVER:
taosMemoryFree(ppVnodes);
}

if (code != 0) return -1;

dInfo("succeed to write %s, numOfVnodes:%d", realfile, numOfVnodes);
code = taosRenameFile(file, realfile);

if (code != 0) {
dError("failed to rename %s to %s", file, realfile);
if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to write vnodes file:%s since %s, vnodes:%d", realfile, terrstr(), numOfVnodes);
}

return code;
}
@ -79,6 +79,8 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
char path[TSDB_FILENAME_LEN] = {0};

vnodeProposeCommitOnNeed(pVnode->pImpl);

taosThreadRwlockWrlock(&pMgmt->lock);
taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
taosThreadRwlockUnlock(&pMgmt->lock);

@ -118,6 +120,9 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {

dInfo("vgId:%d, all vnode queues is empty", pVnode->vgId);

dInfo("vgId:%d, post close", pVnode->vgId);
vnodePostClose(pVnode->pImpl);

vmFreeQueue(pMgmt, pVnode);
vnodeClose(pVnode->pImpl);
pVnode->pImpl = NULL;
@ -134,6 +134,13 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf
}
}

static void vmSendResponse(SRpcMsg *pMsg) {
if (pMsg->info.handle) {
SRpcMsg rsp = {.info = pMsg->info, .code = terrno};
rpcSendResponse(&rsp);
}
}

static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) {
const STraceId *trace = &pMsg->info.traceId;
if (pMsg->contLen < sizeof(SMsgHead)) {

@ -152,7 +159,9 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
if (pVnode == NULL) {
dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg,
terrstr(), TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
return terrno != 0 ? terrno : -1;
terrno = (terrno != 0) ? terrno : -1;
vmSendResponse(pMsg);
return terrno;
}

switch (qtype) {
@ -85,6 +85,7 @@ typedef struct SDnode {
// dmEnv.c
SDnode *dmInstance();
void dmReportStartup(const char *pName, const char *pDesc);
int64_t dmGetClusterId();

// dmMgmt.c
int32_t dmInitDnode(SDnode *pDnode);
@ -268,3 +268,8 @@ void dmReportStartup(const char *pName, const char *pDesc) {
tstrncpy(pStartup->desc, pDesc, TSDB_STEP_DESC_LEN);
dDebug("step:%s, %s", pStartup->name, pStartup->desc);
}

int64_t dmGetClusterId() {
return global.data.clusterId;
}
@ -109,8 +109,8 @@ static int32_t dmStartNodes(SDnode *pDnode) {
}
}

dInfo("TDengine initialized successfully");
dmReportStartup("TDengine", "initialized successfully");
dInfo("The daemon initialized successfully");
dmReportStartup("The daemon", "initialized successfully");
return 0;
}

@ -142,7 +142,7 @@ int32_t dmRunDnode(SDnode *pDnode) {

while (1) {
if (pDnode->stop) {
dInfo("TDengine is about to stop");
dInfo("The daemon is about to stop");
dmSetStatus(pDnode, DND_STAT_STOPPED);
dmStopNodes(pDnode);
dmCloseNodes(pDnode);
@ -15,6 +15,7 @@
|
|||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "dmUtil.h"
|
||||
#include "tjson.h"
|
||||
#include "tmisce.h"
|
||||
|
||||
static void dmPrintEps(SDnodeData *pData);
|
||||
|
@@ -181,81 +182,73 @@ _OVER:
return code;
}

static int32_t dmEncodeEps(SJson *pJson, SDnodeData *pData) {
if (tjsonAddDoubleToObject(pJson, "dnodeId", pData->dnodeId) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "dnodeVer", pData->dnodeVer) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "clusterId", pData->clusterId) < 0) return -1;
if (tjsonAddDoubleToObject(pJson, "dropped", pData->dropped) < 0) return -1;

SJson *dnodes = tjsonCreateArray();
if (dnodes == NULL) return -1;
if (tjsonAddItemToObject(pJson, "dnodes", dnodes) < 0) return -1;

int32_t numOfEps = (int32_t)taosArrayGetSize(pData->dnodeEps);
for (int32_t i = 0; i < numOfEps; ++i) {
SDnodeEp *pDnodeEp = taosArrayGet(pData->dnodeEps, i);
SJson *dnode = tjsonCreateObject();
if (dnode == NULL) return -1;

if (tjsonAddDoubleToObject(dnode, "id", pDnodeEp->id) < 0) return -1;
if (tjsonAddStringToObject(dnode, "fqdn", pDnodeEp->ep.fqdn) < 0) return -1;
if (tjsonAddDoubleToObject(dnode, "port", pDnodeEp->ep.port) < 0) return -1;
if (tjsonAddDoubleToObject(dnode, "isMnode", pDnodeEp->isMnode) < 0) return -1;
if (tjsonAddItemToArray(dnodes, dnode) < 0) return -1;
}

return 0;
}

int32_t dmWriteEps(SDnodeData *pData) {
int32_t code = -1;
char *content = NULL;
char *buffer = NULL;
SJson *pJson = NULL;
TdFilePtr pFile = NULL;

char file[PATH_MAX] = {0};
char realfile[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%sdnode%sdnode.json.bak", tsDataDir, TD_DIRSEP, TD_DIRSEP);
snprintf(realfile, sizeof(realfile), "%s%sdnode%sdnode.json", tsDataDir, TD_DIRSEP, TD_DIRSEP);

terrno = TSDB_CODE_OUT_OF_MEMORY;
pJson = tjsonCreateObject();
if (pJson == NULL) goto _OVER;
if (dmEncodeEps(pJson, pData) != 0) goto _OVER;
buffer = tjsonToString(pJson);
if (buffer == NULL) goto _OVER;
terrno = 0;

pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
dError("failed to open %s since %s", file, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _OVER;
}
if (pFile == NULL) goto _OVER;

int32_t len = 0;
int32_t maxLen = 256 * 1024;
content = taosMemoryCalloc(1, maxLen + 1);

len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", pData->dnodeId);
len += snprintf(content + len, maxLen - len, " \"dnodeVer\": \"%" PRId64 "\",\n", pData->dnodeVer);
len += snprintf(content + len, maxLen - len, " \"clusterId\": \"%" PRId64 "\",\n", pData->clusterId);
len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pData->dropped);
len += snprintf(content + len, maxLen - len, " \"dnodes\": [{\n");

int32_t numOfEps = (int32_t)taosArrayGetSize(pData->dnodeEps);
for (int32_t i = 0; i < numOfEps; ++i) {
SDnodeEp *pDnodeEp = taosArrayGet(pData->dnodeEps, i);
len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", pDnodeEp->id);
len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\",\n", pDnodeEp->ep.fqdn);
len += snprintf(content + len, maxLen - len, " \"port\": %u,\n", pDnodeEp->ep.port);
len += snprintf(content + len, maxLen - len, " \"isMnode\": %d\n", pDnodeEp->isMnode);
if (i < numOfEps - 1) {
len += snprintf(content + len, maxLen - len, " },{\n");
} else {
len += snprintf(content + len, maxLen - len, " }]\n");
}
}
len += snprintf(content + len, maxLen - len, "}\n");

if (taosWriteFile(pFile, content, len) != len) {
dError("failed to write %s since %s", file, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _OVER;
}

if (taosFsyncFile(pFile) < 0) {
dError("failed to fsync %s since %s", file, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
goto _OVER;
}
int32_t len = strlen(buffer);
if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER;
if (taosFsyncFile(pFile) < 0) goto _OVER;

taosCloseFile(&pFile);
taosMemoryFreeClear(content);

if (taosRenameFile(file, realfile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to rename %s since %s", file, terrstr());
goto _OVER;
}
if (taosRenameFile(file, realfile) != 0) goto _OVER;

code = 0;
pData->updateTime = taosGetTimestampMs();
dInfo("succeed to write %s, dnodeVer:%" PRId64, realfile, pData->dnodeVer);
dInfo("succeed to write dnode file:%s, dnodeVer:%" PRId64, realfile, pData->dnodeVer);

_OVER:
if (content != NULL) taosMemoryFreeClear(content);
if (pJson != NULL) tjsonDelete(pJson);
if (buffer != NULL) taosMemoryFree(buffer);
if (pFile != NULL) taosCloseFile(&pFile);
if (code != 0) {
dError("failed to write file %s since %s", realfile, terrstr());
}

if (code != 0) {
if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno);
dInfo("succeed to write dnode file:%s since %s, dnodeVer:%" PRId64, realfile, terrstr(), pData->dnodeVer);
}
return code;
}
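The dmWriteEps rewrite above drops the hand-rolled snprintf JSON and instead encodes a JSON object, serializes it once, writes it to a temporary file, fsyncs, and renames it over the real file. Below is a minimal sketch of that encode-then-atomically-replace pattern, with cJSON standing in for the internal tjson wrapper; the field names, paths, and the cJSON include path are assumptions for illustration, not the actual dnode.json layout.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cjson/cJSON.h>   // assumes the cJSON development package is installed

static int writeJsonAtomically(const char *tmpPath, const char *realPath) {
  int    code = -1;
  char  *buffer = NULL;
  FILE  *fp = NULL;
  cJSON *root = cJSON_CreateObject();
  if (root == NULL) goto _over;

  // encode step: build the object instead of hand-formatting the string with snprintf
  cJSON_AddNumberToObject(root, "dnodeId", 1);
  cJSON_AddStringToObject(root, "dnodeVer", "1");
  cJSON *dnodes = cJSON_CreateArray();
  if (dnodes == NULL) goto _over;
  cJSON_AddItemToObject(root, "dnodes", dnodes);

  buffer = cJSON_PrintUnformatted(root);
  if (buffer == NULL) goto _over;

  // write and fsync the temporary file before making it visible
  fp = fopen(tmpPath, "w");
  if (fp == NULL) goto _over;
  size_t len = strlen(buffer);
  if (fwrite(buffer, 1, len, fp) != len) goto _over;
  if (fflush(fp) != 0 || fsync(fileno(fp)) != 0) goto _over;
  fclose(fp);
  fp = NULL;

  // atomic replace: a crash leaves either the old file or the new one, never a torn mix
  if (rename(tmpPath, realPath) != 0) goto _over;
  code = 0;

_over:
  if (fp != NULL) fclose(fp);
  cJSON_Delete(root);   // NULL-safe
  free(buffer);         // NULL-safe
  return code;
}
```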
@@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "dmUtil.h"
#include "tjson.h"

#define MAXLEN 1024
@@ -63,56 +64,51 @@ _OVER:
return code;
}

static int32_t dmEncodeFile(SJson *pJson, bool deployed) {
if (tjsonAddDoubleToObject(pJson, "deployed", deployed) < 0) return -1;
return 0;
}

int32_t dmWriteFile(const char *path, const char *name, bool deployed) {
int32_t code = -1;
int32_t len = 0;
char content[MAXLEN + 1] = {0};
char *buffer = NULL;
SJson *pJson = NULL;
TdFilePtr pFile = NULL;
char file[PATH_MAX] = {0};
char realfile[PATH_MAX] = {0};
TdFilePtr pFile = NULL;

snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name);
snprintf(realfile, sizeof(realfile), "%s%s%s.json", path, TD_DIRSEP, name);

terrno = TSDB_CODE_OUT_OF_MEMORY;
pJson = tjsonCreateObject();
if (pJson == NULL) goto _OVER;
if (dmEncodeFile(pJson, deployed) != 0) goto _OVER;
buffer = tjsonToString(pJson);
if (buffer == NULL) goto _OVER;
terrno = 0;

pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (pFile == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to write %s since %s", file, terrstr());
goto _OVER;
}
if (pFile == NULL) goto _OVER;

len += snprintf(content + len, MAXLEN - len, "{\n");
len += snprintf(content + len, MAXLEN - len, " \"deployed\": %d\n", deployed);
len += snprintf(content + len, MAXLEN - len, "}\n");

if (taosWriteFile(pFile, content, len) != len) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to write file:%s since %s", file, terrstr());
goto _OVER;
}

if (taosFsyncFile(pFile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to fsync file:%s since %s", file, terrstr());
goto _OVER;
}
int32_t len = strlen(buffer);
if (taosWriteFile(pFile, buffer, len) <= 0) goto _OVER;
if (taosFsyncFile(pFile) < 0) goto _OVER;

taosCloseFile(&pFile);
if (taosRenameFile(file, realfile) != 0) goto _OVER;

if (taosRenameFile(file, realfile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to rename %s since %s", file, terrstr());
return -1;
}

dInfo("succeed to write %s, deployed:%d", realfile, deployed);
code = 0;
dInfo("succeed to write file:%s, deloyed:%d", realfile, deployed);

_OVER:
if (pFile != NULL) {
taosCloseFile(&pFile);
}
if (pJson != NULL) tjsonDelete(pJson);
if (buffer != NULL) taosMemoryFree(buffer);
if (pFile != NULL) taosCloseFile(&pFile);

if (code != 0) {
if (terrno == 0) terrno = TAOS_SYSTEM_ERROR(errno);
dError("failed to write file:%s since %s, deloyed:%d", realfile, terrstr(), deployed);
}
return code;
}
@ -34,6 +34,8 @@ SHashObj *mndDupDbHash(SHashObj *pOld);
|
|||
SHashObj *mndDupTopicHash(SHashObj *pOld);
|
||||
int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_t numOfUses, void **ppRsp,
|
||||
int32_t *pRspLen);
|
||||
int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db);
|
||||
int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -1051,17 +1051,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
|
|||
if (mndDropStreamByDb(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropSmasByDb(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
|
||||
SUserObj *pUser = mndAcquireUser(pMnode, pDb->createUser);
|
||||
if (pUser != NULL) {
|
||||
pUser->authVersion++;
|
||||
SSdbRaw *pCommitRaw = mndUserActionEncode(pUser);
|
||||
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
|
||||
mError("trans:%d, failed to append redo log since %s", pTrans->id, terrstr());
|
||||
goto _OVER;
|
||||
}
|
||||
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
|
||||
}
|
||||
if (mndUserRemoveDb(pMnode, pTrans, pDb->name) != 0) goto _OVER;
|
||||
|
||||
int32_t rspLen = 0;
|
||||
void *pRsp = NULL;
|
||||
|
|
|
@ -167,6 +167,10 @@ void tFreeStreamObj(SStreamObj *pStream) {
|
|||
taosArrayDestroy(pLevel);
|
||||
}
|
||||
taosArrayDestroy(pStream->tasks);
|
||||
// tagSchema.pSchema
|
||||
if (pStream->tagSchema.nCols > 0) {
|
||||
taosMemoryFree(pStream->tagSchema.pSchema);
|
||||
}
|
||||
}
|
||||
|
||||
SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
|
||||
|
|
|
@ -1050,7 +1050,7 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
|
|||
status = "offline";
|
||||
}
|
||||
|
||||
char b1[9] = {0};
|
||||
char b1[16] = {0};
|
||||
STR_TO_VARSTR(b1, status);
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, b1, false);
|
||||
|
|
|
@ -785,9 +785,9 @@ static void mndReloadSyncConfig(SMnode *pMnode) {
|
|||
|
||||
int32_t code = syncReconfig(pMnode->syncMgmt.sync, &cfg);
|
||||
if (code != 0) {
|
||||
mError("vgId:1, failed to reconfig mnode sync since %s", terrstr());
|
||||
mError("vgId:1, mnode sync reconfig failed since %s", terrstr());
|
||||
} else {
|
||||
mInfo("vgId:1, reconfig mnode sync success");
|
||||
mInfo("vgId:1, mnode sync reconfig success");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -465,7 +465,6 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
|
|||
SMCreateStbReq createReq = {0};
|
||||
tstrncpy(createReq.name, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
|
||||
createReq.numOfColumns = pStream->outputSchema.nCols;
|
||||
createReq.numOfTags = 1; // group id
|
||||
createReq.pColumns = taosArrayInit(createReq.numOfColumns, sizeof(SField));
|
||||
// build fields
|
||||
taosArraySetSize(createReq.pColumns, createReq.numOfColumns);
|
||||
|
@ -476,14 +475,29 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
|
|||
pField->type = pStream->outputSchema.pSchema[i].type;
|
||||
pField->bytes = pStream->outputSchema.pSchema[i].bytes;
|
||||
}
|
||||
|
||||
if (pStream->tagSchema.nCols == 0) {
|
||||
createReq.numOfTags = 1;
|
||||
createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField));
|
||||
taosArraySetSize(createReq.pTags, 1);
|
||||
taosArraySetSize(createReq.pTags, createReq.numOfTags);
|
||||
// build tags
|
||||
SField *pField = taosArrayGet(createReq.pTags, 0);
|
||||
strcpy(pField->name, "group_id");
|
||||
pField->type = TSDB_DATA_TYPE_UBIGINT;
|
||||
pField->flags = 0;
|
||||
pField->bytes = 8;
|
||||
} else {
|
||||
createReq.numOfTags = pStream->tagSchema.nCols;
|
||||
createReq.pTags = taosArrayInit(createReq.numOfTags, sizeof(SField));
|
||||
taosArraySetSize(createReq.pTags, createReq.numOfTags);
|
||||
for (int32_t i = 0; i < createReq.numOfTags; i++) {
|
||||
SField *pField = taosArrayGet(createReq.pTags, i);
|
||||
pField->bytes = pStream->tagSchema.pSchema[i].bytes;
|
||||
pField->flags = pStream->tagSchema.pSchema[i].flags;
|
||||
pField->type = pStream->tagSchema.pSchema[i].type;
|
||||
tstrncpy(pField->name, pStream->tagSchema.pSchema[i].name, TSDB_COL_NAME_LEN);
|
||||
}
|
||||
}
|
||||
|
||||
if (mndCheckCreateStbReq(&createReq) != 0) {
|
||||
goto _OVER;
|
||||
|
|
|
@ -131,7 +131,7 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) {
|
|||
taosThreadMutexUnlock(&pMgmt->lock);
|
||||
|
||||
if (pCont != NULL) {
|
||||
if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) {
|
||||
if (taosSendHttpReport(tsTelemServer, tsTelemUri, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) {
|
||||
mError("failed to send telemetry report");
|
||||
} else {
|
||||
mInfo("succeed to send telemetry report");
|
||||
|
|
|
@ -604,22 +604,19 @@ _OVER:
|
|||
}
|
||||
|
||||
static int32_t mndDropTopic(SMnode *pMnode, STrans *pTrans, SRpcMsg *pReq, SMqTopicObj *pTopic) {
|
||||
int32_t code = -1;
|
||||
if (mndUserRemoveTopic(pMnode, pTrans, pTopic->name) != 0) goto _OVER;
|
||||
|
||||
SSdbRaw *pCommitRaw = mndTopicActionEncode(pTopic);
|
||||
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
|
||||
mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
|
||||
mndTransDrop(pTrans);
|
||||
return -1;
|
||||
}
|
||||
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) goto _OVER;
|
||||
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_DROPPED);
|
||||
|
||||
if (mndTransPrepare(pMnode, pTrans) != 0) {
|
||||
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
|
||||
mndTransDrop(pTrans);
|
||||
return -1;
|
||||
}
|
||||
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
||||
code = 0;
|
||||
|
||||
_OVER:
|
||||
mndTransDrop(pTrans);
|
||||
return 0;
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
|
||||
|
@ -885,6 +882,7 @@ int32_t mndCheckTopicExist(SMnode *pMnode, SDbObj *pDb) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
|
||||
int32_t code = 0;
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
|
@ -912,3 +910,4 @@ int32_t mndDropTopicByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
|
|||
|
||||
return code;
|
||||
}
|
||||
#endif
|
|
@@ -285,14 +285,35 @@ static int32_t mndUserActionInsert(SSdb *pSdb, SUserObj *pUser) {
return 0;
}

static int32_t mndUserActionDelete(SSdb *pSdb, SUserObj *pUser) {
mTrace("user:%s, perform delete action, row:%p", pUser->user, pUser);
static int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) {
memcpy(pNew, pUser, sizeof(SUserObj));
pNew->authVersion++;
pNew->updateTime = taosGetTimestampMs();

taosRLockLatch(&pUser->lock);
pNew->readDbs = mndDupDbHash(pUser->readDbs);
pNew->writeDbs = mndDupDbHash(pUser->writeDbs);
pNew->topics = mndDupTopicHash(pUser->topics);
taosRUnLockLatch(&pUser->lock);

if (pNew->readDbs == NULL || pNew->writeDbs == NULL || pNew->topics == NULL) {
return -1;
}
return 0;
}

static void mndUserFreeObj(SUserObj *pUser) {
taosHashCleanup(pUser->readDbs);
taosHashCleanup(pUser->writeDbs);
taosHashCleanup(pUser->topics);
pUser->readDbs = NULL;
pUser->writeDbs = NULL;
pUser->topics = NULL;
}

static int32_t mndUserActionDelete(SSdb *pSdb, SUserObj *pUser) {
mTrace("user:%s, perform delete action, row:%p", pUser->user, pUser);
mndUserFreeObj(pUser);
return 0;
}
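The new mndUserDupObj/mndUserFreeObj helpers above factor out a copy-on-write pattern: take a read lock, deep-copy the user's mutable containers into a scratch object, bump the version, and later release only the copy's containers. Below is a small standalone sketch of the same shape; the User struct, its string members, and the dup/free names are illustrative stand-ins for SUserObj and its hash tables.

```c
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  pthread_rwlock_t lock;
  int              authVersion;
  char            *readDbs;   // stand-in for the readDbs hash table
  char            *writeDbs;  // stand-in for the writeDbs hash table
} User;

// copy the shared object under a read lock; the shallow memcpy also copies the
// embedded lock, but the copy never uses it (mirrors the original memcpy of SUserObj)
static int userDup(User *src, User *dst) {
  memcpy(dst, src, sizeof(User));
  dst->authVersion++;

  pthread_rwlock_rdlock(&src->lock);
  dst->readDbs = src->readDbs ? strdup(src->readDbs) : NULL;
  dst->writeDbs = src->writeDbs ? strdup(src->writeDbs) : NULL;
  pthread_rwlock_unlock(&src->lock);

  // on partial failure the caller is expected to call userFree(dst), as the mnode code does
  return (dst->readDbs != NULL && dst->writeDbs != NULL) ? 0 : -1;
}

// release only the containers owned by the copy and null them out
static void userFree(User *u) {
  free(u->readDbs);
  free(u->writeDbs);
  u->readDbs = NULL;
  u->writeDbs = NULL;
}
```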
@ -516,19 +537,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
memcpy(&newUser, pUser, sizeof(SUserObj));
|
||||
newUser.authVersion++;
|
||||
newUser.updateTime = taosGetTimestampMs();
|
||||
|
||||
taosRLockLatch(&pUser->lock);
|
||||
newUser.readDbs = mndDupDbHash(pUser->readDbs);
|
||||
newUser.writeDbs = mndDupDbHash(pUser->writeDbs);
|
||||
newUser.topics = mndDupTopicHash(pUser->topics);
|
||||
taosRUnLockLatch(&pUser->lock);
|
||||
|
||||
if (newUser.readDbs == NULL || newUser.writeDbs == NULL || newUser.topics == NULL) {
|
||||
goto _OVER;
|
||||
}
|
||||
if (mndUserDupObj(pUser, &newUser) != 0) goto _OVER;
|
||||
|
||||
if (alterReq.alterType == TSDB_ALTER_USER_PASSWD) {
|
||||
char pass[TSDB_PASSWORD_LEN + 1] = {0};
|
||||
|
@ -654,9 +663,7 @@ _OVER:
|
|||
|
||||
mndReleaseUser(pMnode, pOperUser);
|
||||
mndReleaseUser(pMnode, pUser);
|
||||
taosHashCleanup(newUser.writeDbs);
|
||||
taosHashCleanup(newUser.readDbs);
|
||||
taosHashCleanup(newUser.topics);
|
||||
mndUserFreeObj(&newUser);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -884,9 +891,9 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
|||
db = taosHashIterate(pUser->writeDbs, NULL);
|
||||
while (db != NULL) {
|
||||
cols = 0;
|
||||
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(userName, pUser->user, pShow->pMeta->pSchemas[cols].bytes);
|
||||
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)userName, false);
|
||||
|
||||
char privilege[20] = {0};
|
||||
|
@ -909,9 +916,9 @@ static int32_t mndRetrievePrivileges(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
|||
char *topic = taosHashIterate(pUser->topics, NULL);
|
||||
while (topic != NULL) {
|
||||
cols = 0;
|
||||
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
char userName[TSDB_USER_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
STR_WITH_MAXSIZE_TO_VARSTR(userName, pUser->user, pShow->pMeta->pSchemas[cols].bytes);
|
||||
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)userName, false);
|
||||
|
||||
char privilege[20] = {0};
|
||||
|
@ -1007,3 +1014,74 @@ _OVER:
|
|||
tFreeSUserAuthBatchRsp(&batchRsp);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db) {
|
||||
int32_t code = 0;
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
int32_t len = strlen(db) + 1;
|
||||
void *pIter = NULL;
|
||||
SUserObj *pUser = NULL;
|
||||
SUserObj newUser = {0};
|
||||
|
||||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_USER, pIter, (void **)&pUser);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
code = -1;
|
||||
if (mndUserDupObj(pUser, &newUser) != 0) break;
|
||||
|
||||
bool inRead = (taosHashGet(newUser.readDbs, db, len) != NULL);
|
||||
bool inWrite = (taosHashGet(newUser.writeDbs, db, len) != NULL);
|
||||
if (inRead || inWrite) {
|
||||
(void)taosHashRemove(newUser.readDbs, db, len);
|
||||
(void)taosHashRemove(newUser.writeDbs, db, len);
|
||||
|
||||
SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser);
|
||||
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) break;
|
||||
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
|
||||
}
|
||||
|
||||
mndUserFreeObj(&newUser);
|
||||
sdbRelease(pSdb, pUser);
|
||||
code = 0;
|
||||
}
|
||||
|
||||
if (pUser != NULL) sdbRelease(pSdb, pUser);
|
||||
if (pIter != NULL) sdbCancelFetch(pSdb, pIter);
|
||||
mndUserFreeObj(&newUser);
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic) {
|
||||
int32_t code = 0;
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
int32_t len = strlen(topic) + 1;
|
||||
void *pIter = NULL;
|
||||
SUserObj *pUser = NULL;
|
||||
SUserObj newUser = {0};
|
||||
|
||||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_USER, pIter, (void **)&pUser);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
code = -1;
|
||||
if (mndUserDupObj(pUser, &newUser) != 0) break;
|
||||
|
||||
bool inTopic = (taosHashGet(newUser.topics, topic, len) != NULL);
|
||||
if (inTopic) {
|
||||
(void)taosHashRemove(newUser.topics, topic, len);
|
||||
SSdbRaw *pCommitRaw = mndUserActionEncode(&newUser);
|
||||
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) break;
|
||||
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
|
||||
}
|
||||
|
||||
mndUserFreeObj(&newUser);
|
||||
sdbRelease(pSdb, pUser);
|
||||
code = 0;
|
||||
}
|
||||
|
||||
if (pUser != NULL) sdbRelease(pSdb, pUser);
|
||||
if (pIter != NULL) sdbCancelFetch(pSdb, pIter);
|
||||
mndUserFreeObj(&newUser);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -243,7 +243,7 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
|
|||
if (pFile == NULL) {
|
||||
taosMemoryFree(pRaw);
|
||||
terrno = TAOS_SYSTEM_ERROR(errno);
|
||||
mDebug("failed to read sdb file:%s since %s", file, terrstr());
|
||||
mInfo("read sdb file:%s finished since %s", file, terrstr());
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -636,15 +636,20 @@ int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) {
}

int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, int64_t term, int64_t config) {
int32_t code = 0;
int32_t code = -1;

if (!isApply) {
mInfo("sdbiter:%p, not apply to sdb", pIter);
sdbCloseIter(pIter);
return 0;
code = 0;
goto _OVER;
}

if (taosFsyncFile(pIter->file) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("sdbiter:%p, failed to fasync file %s since %s", pIter, pIter->name, terrstr());
goto _OVER;
}

taosFsyncFile(pIter->file);
taosCloseFile(&pIter->file);
pIter->file = NULL;

@@ -653,14 +658,12 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i
if (taosRenameFile(pIter->name, datafile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("sdbiter:%p, failed to rename file %s to %s since %s", pIter, pIter->name, datafile, terrstr());
sdbCloseIter(pIter);
return -1;
goto _OVER;
}

if (sdbReadFile(pSdb) != 0) {
mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr());
sdbCloseIter(pIter);
return -1;
goto _OVER;
}

if (config > 0) {

@@ -674,8 +677,11 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i
}

mInfo("sdbiter:%p, success applyed to sdb", pIter);
code = 0;

_OVER:
sdbCloseIter(pIter);
return 0;
return code;
}

int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) {
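The sdbStopWrite change above converges every exit path on a single _OVER label with a code variable, so the iterator is closed exactly once whether the snapshot is applied, skipped, or fails midway. A minimal sketch of that single-exit idiom, with an open file standing in for the iterator and illustrative file names:

```c
#include <stdio.h>
#include <unistd.h>

static int stopWrite(const char *tmpName, const char *dataName, int isApply) {
  int   code = -1;                     // pessimistic default, as in the refactored function
  FILE *fp = fopen(tmpName, "r+");
  if (fp == NULL) goto _over;

  if (!isApply) {                      // nothing to install: still succeed, still clean up below
    code = 0;
    goto _over;
  }

  if (fflush(fp) != 0 || fsync(fileno(fp)) != 0) goto _over;
  if (rename(tmpName, dataName) != 0) goto _over;

  code = 0;                            // only reached when every step above succeeded

_over:
  if (fp != NULL) fclose(fp);          // plays the role of sdbCloseIter in the real code
  return code;
}
```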
@ -124,6 +124,7 @@ FAIL:
|
|||
}
|
||||
|
||||
void sndClose(SSnode *pSnode) {
|
||||
streamMetaCommit(pSnode->pMeta);
|
||||
streamMetaClose(pSnode->pMeta);
|
||||
taosMemoryFree(pSnode->path);
|
||||
taosMemoryFree(pSnode);
|
||||
|
|
|
@ -54,7 +54,8 @@ int32_t vnodeAlter(const char *path, SAlterVnodeReplicaReq *pReq, STfs *pTfs);
|
|||
void vnodeDestroy(const char *path, STfs *pTfs);
|
||||
SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb);
|
||||
void vnodePreClose(SVnode *pVnode);
|
||||
void vnodeSyncCheckTimeout(SVnode* pVnode);
|
||||
void vnodePostClose(SVnode *pVnode);
|
||||
void vnodeSyncCheckTimeout(SVnode *pVnode);
|
||||
void vnodeClose(SVnode *pVnode);
|
||||
|
||||
int32_t vnodeStart(SVnode *pVnode);
|
||||
|
@ -88,6 +89,7 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg);
|
|||
int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo);
|
||||
void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
|
||||
void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs);
|
||||
void vnodeProposeCommitOnNeed(SVnode *pVnode);
|
||||
|
||||
// meta
|
||||
typedef struct SMeta SMeta; // todo: remove
|
||||
|
@ -175,7 +177,8 @@ int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableL
|
|||
void tsdbReaderClose(STsdbReader *pReader);
|
||||
bool tsdbNextDataBlock(STsdbReader *pReader);
|
||||
void tsdbRetrieveDataBlockInfo(const STsdbReader *pReader, int32_t *rows, uint64_t *uid, STimeWindow *pWindow);
|
||||
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock* pDataBlock, bool *allHave);
|
||||
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
|
||||
void tsdbReleaseDataBlock(STsdbReader *pReader);
|
||||
SSDataBlock *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
|
||||
int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
|
||||
int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
|
||||
|
@ -185,7 +188,7 @@ void *tsdbGetIvtIdx(SMeta *pMeta);
|
|||
uint64_t getReaderMaxVersion(STsdbReader *pReader);
|
||||
|
||||
int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
|
||||
uint64_t suid, void **pReader, const char* idstr);
|
||||
uint64_t suid, void **pReader, const char *idstr);
|
||||
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
|
||||
void *tsdbCacherowsReaderClose(void *pReader);
|
||||
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
|
||||
|
|
|
@ -209,10 +209,10 @@ int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, in
|
|||
// tsdbMemTable ==============================================================================================
|
||||
// SMemTable
|
||||
int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
|
||||
void tsdbMemTableDestroy(SMemTable *pMemTable);
|
||||
void tsdbMemTableDestroy(SMemTable *pMemTable, bool proactive);
|
||||
STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid);
|
||||
void tsdbRefMemTable(SMemTable *pMemTable);
|
||||
void tsdbUnrefMemTable(SMemTable *pMemTable);
|
||||
int32_t tsdbRefMemTable(SMemTable *pMemTable, SQueryNode *pQNode);
|
||||
int32_t tsdbUnrefMemTable(SMemTable *pMemTable, SQueryNode *pNode, bool proactive);
|
||||
SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable);
|
||||
// STbDataIter
|
||||
int32_t tsdbTbDataIterCreate(STbData *pTbData, TSDBKEY *pFrom, int8_t backward, STbDataIter **ppIter);
|
||||
|
@ -290,8 +290,8 @@ int32_t tsdbDelFReaderClose(SDelFReader **ppReader);
|
|||
int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData);
|
||||
int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx);
|
||||
// tsdbRead.c ==============================================================================================
|
||||
int32_t tsdbTakeReadSnap(STsdb *pTsdb, STsdbReadSnap **ppSnap, const char *id);
|
||||
void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap, const char *id);
|
||||
int32_t tsdbTakeReadSnap(STsdbReader *pReader, _query_reseek_func_t reseek, STsdbReadSnap **ppSnap);
|
||||
void tsdbUntakeReadSnap(STsdbReader *pReader, STsdbReadSnap *pSnap, bool proactive);
|
||||
// tsdbMerge.c ==============================================================================================
|
||||
int32_t tsdbMerge(STsdb *pTsdb);
|
||||
|
||||
|
@ -371,6 +371,8 @@ struct SMemTable {
|
|||
STsdb *pTsdb;
|
||||
SVBufPool *pPool;
|
||||
volatile int32_t nRef;
|
||||
int64_t minVer;
|
||||
int64_t maxVer;
|
||||
TSKEY minKey;
|
||||
TSKEY maxKey;
|
||||
int64_t nRow;
|
||||
|
@ -606,7 +608,9 @@ struct SDelFWriter {
|
|||
|
||||
struct STsdbReadSnap {
|
||||
SMemTable *pMem;
|
||||
SQueryNode *pNode;
|
||||
SMemTable *pIMem;
|
||||
SQueryNode *pINode;
|
||||
STsdbFS fs;
|
||||
};
|
||||
|
||||
|
@ -726,6 +730,9 @@ void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
|
|||
|
||||
// tsdbCache ==============================================================================================
|
||||
typedef struct SCacheRowsReader {
|
||||
STsdb *pTsdb;
|
||||
SVersionRange verRange;
|
||||
TdThreadMutex readerMutex;
|
||||
SVnode *pVnode;
|
||||
STSchema *pSchema;
|
||||
uint64_t uid;
|
||||
|
|
|
@ -61,10 +61,19 @@ struct SVBufPoolNode {
|
|||
};
|
||||
|
||||
struct SVBufPool {
|
||||
SVBufPool* next;
|
||||
SVBufPool* freeNext;
|
||||
SVBufPool* recycleNext;
|
||||
SVBufPool* recyclePrev;
|
||||
|
||||
// query handle list
|
||||
TdThreadMutex mutex;
|
||||
int32_t nQuery;
|
||||
SQueryNode qList;
|
||||
|
||||
SVnode* pVnode;
|
||||
TdThreadSpinlock* lock;
|
||||
int32_t id;
|
||||
volatile int32_t nRef;
|
||||
TdThreadSpinlock* lock;
|
||||
int64_t size;
|
||||
uint8_t* ptr;
|
||||
SVBufPoolNode* pTail;
|
||||
|
@ -74,6 +83,8 @@ struct SVBufPool {
|
|||
int32_t vnodeOpenBufPool(SVnode* pVnode);
|
||||
int32_t vnodeCloseBufPool(SVnode* pVnode);
|
||||
void vnodeBufPoolReset(SVBufPool* pPool);
|
||||
void vnodeBufPoolAddToFreeList(SVBufPool* pPool);
|
||||
int32_t vnodeBufPoolRecycle(SVBufPool* pPool);
|
||||
|
||||
// vnodeQuery.c
|
||||
int32_t vnodeQueryOpen(SVnode* pVnode);
|
||||
|
@ -86,6 +97,7 @@ int32_t vnodeGetBatchMeta(SVnode* pVnode, SRpcMsg* pMsg);
|
|||
// vnodeCommit.c
|
||||
int32_t vnodeBegin(SVnode* pVnode);
|
||||
int32_t vnodeShouldCommit(SVnode* pVnode);
|
||||
void vnodeUpdCommitSched(SVnode* pVnode);
|
||||
void vnodeRollback(SVnode* pVnode);
|
||||
int32_t vnodeSaveInfo(const char* dir, const SVnodeInfo* pCfg);
|
||||
int32_t vnodeCommitInfo(const char* dir, const SVnodeInfo* pInfo);
|
||||
|
@ -98,10 +110,12 @@ bool vnodeShouldRollback(SVnode* pVnode);
|
|||
int32_t vnodeSyncOpen(SVnode* pVnode, char* path);
|
||||
int32_t vnodeSyncStart(SVnode* pVnode);
|
||||
void vnodeSyncPreClose(SVnode* pVnode);
|
||||
void vnodeSyncPostClose(SVnode* pVnode);
|
||||
void vnodeSyncClose(SVnode* pVnode);
|
||||
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg, int32_t code);
|
||||
bool vnodeIsLeader(SVnode* pVnode);
|
||||
bool vnodeIsRoleLeader(SVnode* pVnode);
|
||||
int vnodeShouldCommit(SVnode* pVnode);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -76,6 +76,7 @@ typedef struct SRSmaSnapReader SRSmaSnapReader;
|
|||
typedef struct SRSmaSnapWriter SRSmaSnapWriter;
|
||||
typedef struct SSnapDataHdr SSnapDataHdr;
|
||||
typedef struct SCommitInfo SCommitInfo;
|
||||
typedef struct SQueryNode SQueryNode;
|
||||
|
||||
#define VNODE_META_DIR "meta"
|
||||
#define VNODE_TSDB_DIR "tsdb"
|
||||
|
@ -87,17 +88,29 @@ typedef struct SCommitInfo SCommitInfo;
|
|||
#define VNODE_RSMA1_DIR "rsma1"
|
||||
#define VNODE_RSMA2_DIR "rsma2"
|
||||
|
||||
#define VNODE_BUFPOOL_SEGMENTS 3
|
||||
|
||||
#define VND_INFO_FNAME "vnode.json"
|
||||
|
||||
// vnd.h
|
||||
typedef int32_t (*_query_reseek_func_t)(void* pQHandle);
|
||||
struct SQueryNode {
|
||||
SQueryNode* pNext;
|
||||
SQueryNode** ppNext;
|
||||
void* pQHandle;
|
||||
_query_reseek_func_t reseek;
|
||||
};
|
||||
|
||||
void* vnodeBufPoolMalloc(SVBufPool* pPool, int size);
|
||||
void* vnodeBufPoolMallocAligned(SVBufPool* pPool, int size);
|
||||
void vnodeBufPoolFree(SVBufPool* pPool, void* p);
|
||||
void vnodeBufPoolRef(SVBufPool* pPool);
|
||||
void vnodeBufPoolUnRef(SVBufPool* pPool);
|
||||
void vnodeBufPoolUnRef(SVBufPool* pPool, bool proactive);
|
||||
int vnodeDecodeInfo(uint8_t* pData, SVnodeInfo* pInfo);
|
||||
|
||||
int32_t vnodeBufPoolRegisterQuery(SVBufPool* pPool, SQueryNode* pQNode);
|
||||
void vnodeBufPoolDeregisterQuery(SVBufPool* pPool, SQueryNode* pQNode, bool proactive);
|
||||
|
||||
// meta
|
||||
typedef struct SMCtbCursor SMCtbCursor;
|
||||
typedef struct SMStbCursor SMStbCursor;
|
||||
|
@ -328,6 +341,11 @@ struct STsdbKeepCfg {
|
|||
int32_t keep2;
|
||||
};
|
||||
|
||||
typedef struct SVCommitSched {
|
||||
int64_t commitMs;
|
||||
int64_t maxWaitMs;
|
||||
} SVCommitSched;
|
||||
|
||||
struct SVnode {
|
||||
char* path;
|
||||
SVnodeCfg config;
|
||||
|
@ -335,10 +353,18 @@ struct SVnode {
|
|||
SVStatis statis;
|
||||
STfs* pTfs;
|
||||
SMsgCb msgCb;
|
||||
|
||||
// Buffer Pool
|
||||
TdThreadMutex mutex;
|
||||
TdThreadCond poolNotEmpty;
|
||||
SVBufPool* pPool;
|
||||
SVBufPool* aBufPool[VNODE_BUFPOOL_SEGMENTS];
|
||||
SVBufPool* freeList;
|
||||
SVBufPool* inUse;
|
||||
SVBufPool* onCommit;
|
||||
SVBufPool* recycleHead;
|
||||
SVBufPool* recycleTail;
|
||||
SVBufPool* onRecycle;
|
||||
|
||||
SMeta* pMeta;
|
||||
SSma* pSma;
|
||||
STsdb* pTsdb;
|
||||
|
@ -346,6 +372,7 @@ struct SVnode {
|
|||
STQ* pTq;
|
||||
SSink* pSink;
|
||||
tsem_t canCommit;
|
||||
SVCommitSched commitSched;
|
||||
int64_t sync;
|
||||
TdThreadMutex lock;
|
||||
bool blocked;
|
||||
|
|
|
@ -563,13 +563,13 @@ int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int
|
|||
}
|
||||
|
||||
// add to cache.
|
||||
taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL,
|
||||
int32_t ret = taosLRUCacheInsert(pCache, buf, sizeof(uint64_t) + keyLen, pPayload, payloadLen, freePayload, NULL,
|
||||
TAOS_LRU_PRIORITY_LOW);
|
||||
|
||||
taosThreadMutexUnlock(pLock);
|
||||
|
||||
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d", TD_VID(pMeta->pVnode), suid,
|
||||
(int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry));
|
||||
metaDebug("vgId:%d, suid:%" PRIu64 " list cache added into cache, total:%d, tables:%d, ret:%d", TD_VID(pMeta->pVnode),
|
||||
suid, (int32_t)taosLRUCacheGetUsage(pCache), taosHashGetSize(pTableEntry), ret);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -203,6 +203,7 @@ _err:
|
|||
|
||||
int metaClose(SMeta *pMeta) {
|
||||
if (pMeta) {
|
||||
if (pMeta->pEnv) tdbAbort(pMeta->pEnv, pMeta->txn);
|
||||
if (pMeta->pCache) metaCacheClose(pMeta);
|
||||
if (pMeta->pIdx) metaCloseIdx(pMeta);
|
||||
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
|
||||
|
|
|
@ -653,8 +653,10 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
|
|||
}
|
||||
|
||||
if (c == 0) {
|
||||
metaError("meta/query: incorrect c: %" PRId32 ".", c);
|
||||
metaULock(pMeta);
|
||||
tdbTbcClose(pSkmDbC);
|
||||
code = TSDB_CODE_FAILED;
|
||||
metaError("meta/query: incorrect c: %" PRId32 ".", c);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
|
|
|
@ -343,12 +343,17 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
|
|||
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL);
|
||||
ret = tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = pReq->suid, .version = oversion}), sizeof(STbDbKey), &c);
|
||||
if (!(ret == 0 && c == 0)) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
|
||||
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
|
||||
metaError("meta/table: invalide ret: %" PRId32 " or c: %" PRId32 "alter stb failed.", ret, c);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = tdbTbcGet(pTbDbc, NULL, NULL, &pData, &nData);
|
||||
if (ret < 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
|
||||
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
|
||||
|
@ -926,6 +931,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
|
|||
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL);
|
||||
tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
|
||||
if (c != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
metaError("meta/table: invalide c: %" PRId32 " alt tb column failed.", c);
|
||||
return -1;
|
||||
}
|
||||
|
@ -939,6 +945,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
|
|||
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL);
|
||||
tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c);
|
||||
if (c != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
metaError("meta/table: invalide c: %" PRId32 " alt tb column failed.", c);
|
||||
return -1;
|
||||
}
|
||||
|
@ -952,6 +960,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
|
|||
tDecoderInit(&dc, entry.pBuf, nData);
|
||||
ret = metaDecodeEntry(&dc, &entry);
|
||||
if (ret != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
tDecoderClear(&dc);
|
||||
metaError("meta/table: invalide ret: %" PRId32 " alt tb column failed.", ret);
|
||||
return -1;
|
||||
|
@ -1132,6 +1142,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
|
|||
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL);
|
||||
tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
|
||||
if (c != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1148,6 +1160,9 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
|
|||
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL);
|
||||
tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c);
|
||||
if (c != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
terrno = TSDB_CODE_TDB_TABLE_NOT_EXIST;
|
||||
metaError("meta/table: invalide c: %" PRId32 " update tb tag val failed.", c);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1309,6 +1324,7 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
|
|||
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, NULL);
|
||||
tdbTbcMoveTo(pUidIdxc, &uid, sizeof(uid), &c);
|
||||
if (c != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
metaError("meta/table: invalide c: %" PRId32 " update tb options failed.", c);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1322,6 +1338,8 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
|
|||
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, NULL);
|
||||
tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = uid, .version = oversion}), sizeof(STbDbKey), &c);
|
||||
if (c != 0) {
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
metaError("meta/table: invalide c: %" PRId32 " update tb options failed.", c);
|
||||
return -1;
|
||||
}
|
||||
|
@ -1336,6 +1354,8 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
|
|||
ret = metaDecodeEntry(&dc, &entry);
|
||||
if (ret != 0) {
|
||||
tDecoderClear(&dc);
|
||||
tdbTbcClose(pUidIdxc);
|
||||
tdbTbcClose(pTbDbc);
|
||||
metaError("meta/table: invalide ret: %" PRId32 " alt tb options failed.", ret);
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -239,6 +239,11 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma, SCommitInfo *pInfo) {
|
|||
int32_t lino = 0;
|
||||
SVnode *pVnode = pSma->pVnode;
|
||||
|
||||
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
|
||||
if (!pSmaEnv) {
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
code = tdRSmaFSCommit(pSma);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
|
||||
|
|
|
@ -148,10 +148,6 @@ int32_t smaOpen(SVnode *pVnode, int8_t rollback) {
|
|||
SMA_OPEN_RSMA_IMPL(pVnode, 1);
|
||||
} else if (i == TSDB_RETENTION_L2) {
|
||||
SMA_OPEN_RSMA_IMPL(pVnode, 2);
|
||||
} else {
|
||||
code = TSDB_CODE_APP_ERROR;
|
||||
smaError("vgId:%d, sma open failed since %s, level:%d", TD_VID(pVnode), tstrerror(code), i);
|
||||
TSDB_CHECK_CODE(code, lino, _exit);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -237,8 +237,8 @@ int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData) {
|
|||
|
||||
_exit:
|
||||
if (code) {
|
||||
rsmaSnapReaderClose(&pReader);
|
||||
smaError("vgId:%d, vnode snapshot rsma read failed since %s", SMA_VID(pReader->pSma), tstrerror(code));
|
||||
rsmaSnapReaderClose(&pReader);
|
||||
} else {
|
||||
smaInfo("vgId:%d, vnode snapshot rsma read succeed", SMA_VID(pReader->pSma));
|
||||
}
|
||||
|
@ -432,7 +432,7 @@ _exit:
|
|||
if (pInFD) taosCloseFile(&pInFD);
|
||||
smaError("vgId:%d, vnode snapshot rsma writer close failed since %s", SMA_VID(pSma), tstrerror(code));
|
||||
} else {
|
||||
smaInfo("vgId:%d, vnode snapshot rsma writer close succeed", SMA_VID(pSma));
|
||||
smaInfo("vgId:%d, vnode snapshot rsma writer close succeed", pSma ? SMA_VID(pSma) : 0);
|
||||
}
|
||||
|
||||
return code;
|
||||
|
|
|
@ -725,6 +725,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
|
||||
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg;
|
||||
|
||||
tqDebug("vgId:%d, tq process delete sub req %s", pTq->pVnode->config.vgId, pReq->subKey);
|
||||
|
||||
taosWLockLatch(&pTq->pushLock);
|
||||
int32_t code = taosHashRemove(pTq->pPushMgr, pReq->subKey, strlen(pReq->subKey));
|
||||
if (code != 0) {
|
||||
|
@ -791,6 +793,9 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
|
|||
SMqRebVgReq req = {0};
|
||||
tDecodeSMqRebVgReq(msg, &req);
|
||||
// todo lock
|
||||
|
||||
tqDebug("vgId:%d, tq process sub req %s", pTq->pVnode->config.vgId, req.subKey);
|
||||
|
||||
STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
|
||||
if (pHandle == NULL) {
|
||||
if (req.oldConsumerId != -1) {
|
||||
|
@ -881,6 +886,9 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t version, char* msg, int32_t msgL
|
|||
atomic_store_64(&pHandle->consumerId, req.newConsumerId);
|
||||
atomic_add_fetch_32(&pHandle->epoch, 1);
|
||||
taosMemoryFree(req.qmsg);
|
||||
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
|
||||
qStreamCloseTsdbReader(pHandle->execHandle.task);
|
||||
}
|
||||
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
|
||||
}
|
||||
// close handle
|
||||
|
|
|
@ -15,4 +15,11 @@
|
|||
|
||||
#include "tq.h"
|
||||
|
||||
int tqCommit(STQ* pTq) { return tqOffsetCommitFile(pTq->pOffsetStore); }
|
||||
int tqCommit(STQ* pTq) {
|
||||
if (streamMetaCommit(pTq->pStreamMeta) < 0) {
|
||||
tqError("vgId:%d, failed to commit stream meta since %s", TD_VID(pTq->pVnode), terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
return tqOffsetCommitFile(pTq->pOffsetStore);
|
||||
}
|
||||
|
|
|
@ -323,6 +323,70 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
|
|||
taosArrayDestroy(tagArray);
|
||||
}
|
||||
|
||||
static int32_t encodeCreateChildTableForRPC(SVCreateTbReq* req, int32_t vgId, void** pBuf, int32_t* contLen) {
|
||||
int32_t ret = 0;
|
||||
SVCreateTbBatchReq reqs = {0};
|
||||
|
||||
reqs.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq));
|
||||
if (NULL == reqs.pArray) {
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
taosArrayPush(reqs.pArray, req);
|
||||
reqs.nReqs = 1;
|
||||
|
||||
tEncodeSize(tEncodeSVCreateTbBatchReq, &reqs, *contLen, ret);
|
||||
if (ret < 0) {
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
*contLen += sizeof(SMsgHead);
|
||||
*pBuf = rpcMallocCont(*contLen);
|
||||
if (NULL == *pBuf) {
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
((SMsgHead*)(*pBuf))->vgId = vgId;
|
||||
((SMsgHead*)(*pBuf))->contLen = htonl(*contLen);
|
||||
SEncoder coder = {0};
|
||||
tEncoderInit(&coder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), (*contLen) - sizeof(SMsgHead) );
|
||||
if (tEncodeSVCreateTbBatchReq(&coder, &reqs) < 0) {
|
||||
rpcFreeCont(*pBuf);
|
||||
*pBuf = NULL;
|
||||
*contLen = 0;
|
||||
tEncoderClear(&coder);
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
tEncoderClear(&coder);
|
||||
|
||||
end:
|
||||
taosArrayDestroy(reqs.pArray);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbReq* pReq) {
|
||||
void* buf = NULL;
|
||||
int32_t tlen = 0;
|
||||
encodeCreateChildTableForRPC(pReq, TD_VID(pVnode), &buf, &tlen);
|
||||
|
||||
SRpcMsg msg = {
|
||||
.msgType = TDMT_VND_CREATE_TABLE,
|
||||
.pCont = buf,
|
||||
.contLen = tlen,
|
||||
};
|
||||
|
||||
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
|
||||
tqError("failed to put into write-queue since %s", terrstr());
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
||||
_error:
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
tqError("failed to encode submit req since %s", terrstr());
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
|
||||
const SArray* pBlocks = (const SArray*)data;
|
||||
SVnode* pVnode = (SVnode*)vnode;
|
||||
|
@ -339,12 +403,9 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
|
|||
SArray* tagArray = NULL;
|
||||
SArray* pVals = NULL;
|
||||
|
||||
if (!(tagArray = taosArrayInit(1, sizeof(STagVal)))) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < blockSz; i++) {
|
||||
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
|
||||
SBatchDeleteReq deleteReq = {0};
|
||||
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
|
||||
|
@ -380,9 +441,98 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
|
|||
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
|
||||
tqDebug("failed to put delete req into write-queue since %s", terrstr());
|
||||
}
|
||||
} else if (pDataBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
|
||||
for (int32_t rowId = 0; rowId < rows; rowId++) {
|
||||
SVCreateTbReq* pCreateTbReq = taosMemoryCalloc(1, sizeof(SVCreateStbReq));
|
||||
if (!pCreateTbReq) {
|
||||
goto _end;
|
||||
}
|
||||
|
||||
// set const
|
||||
pCreateTbReq->flags = 0;
|
||||
pCreateTbReq->type = TSDB_CHILD_TABLE;
|
||||
pCreateTbReq->ctb.suid = suid;
|
||||
|
||||
// set super table name
|
||||
SName name = {0};
|
||||
tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||
pCreateTbReq->ctb.stbName = strdup((char*)tNameGetTableName(&name)); // strdup(stbFullName);
|
||||
|
||||
// set tag content
|
||||
int32_t size = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
if (size == 2) {
|
||||
tagArray = taosArrayInit(1, sizeof(STagVal));
|
||||
if (!tagArray) {
|
||||
goto _end;
|
||||
}
|
||||
STagVal tagVal = {
|
||||
.cid = pTSchema->numOfCols + 1,
|
||||
.type = TSDB_DATA_TYPE_UBIGINT,
|
||||
.i64 = (int64_t)pDataBlock->info.id.groupId,
|
||||
};
|
||||
taosArrayPush(tagArray, &tagVal);
|
||||
|
||||
// set tag name
|
||||
SArray* tagName = taosArrayInit(1, TSDB_COL_NAME_LEN);
|
||||
char tagNameStr[TSDB_COL_NAME_LEN] = "group_id";
|
||||
taosArrayPush(tagName, tagNameStr);
|
||||
pCreateTbReq->ctb.tagName = tagName;
|
||||
} else {
|
||||
tagArray = taosArrayInit(size - 1, sizeof(STagVal));
|
||||
if (!tagArray) {
|
||||
goto _end;
|
||||
}
|
||||
for (int32_t tagId = UD_TAG_COLUMN_INDEX, step = 1; tagId < size; tagId++, step++) {
|
||||
SColumnInfoData* pTagData = taosArrayGet(pDataBlock->pDataBlock, tagId);
|
||||
STagVal tagVal = {
|
||||
.cid = pTSchema->numOfCols + step,
|
||||
.type = pTagData->info.type,
|
||||
};
|
||||
void* pData = colDataGetData(pTagData, rowId);
|
||||
if (colDataIsNull_s(pTagData, rowId)) {
|
||||
tagVal.type = TSDB_DATA_TYPE_NULL;
|
||||
tagVal.pData = NULL;
|
||||
tagVal.nData = 0;
|
||||
} else if (IS_VAR_DATA_TYPE(pTagData->info.type)) {
|
||||
tagVal.nData = varDataLen(pData);
|
||||
tagVal.pData = varDataVal(pData);
|
||||
} else {
|
||||
memcpy(&tagVal.i64, pData, pTagData->info.bytes);
|
||||
}
|
||||
taosArrayPush(tagArray, &tagVal);
|
||||
}
|
||||
}
|
||||
pCreateTbReq->ctb.tagNum = taosArrayGetSize(tagArray);
|
||||
|
||||
STag* pTag = NULL;
|
||||
tTagNew(tagArray, 1, false, &pTag);
|
||||
tagArray = taosArrayDestroy(tagArray);
|
||||
if (pTag == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _end;
|
||||
}
|
||||
pCreateTbReq->ctb.pTag = (uint8_t*)pTag;
|
||||
|
||||
// set table name
|
||||
SColumnInfoData* pTbColInfo = taosArrayGet(pDataBlock->pDataBlock, UD_TABLE_NAME_COLUMN_INDEX);
|
||||
if (colDataIsNull_s(pTbColInfo, rowId)) {
|
||||
SColumnInfoData* pGpIdColInfo = taosArrayGet(pDataBlock->pDataBlock, UD_GROUPID_COLUMN_INDEX);
|
||||
void* pGpIdData = colDataGetData(pGpIdColInfo, rowId);
|
||||
pCreateTbReq->name = buildCtbNameByGroupId(stbFullName, *(uint64_t*)pGpIdData);
|
||||
} else {
|
||||
void* pTbData = colDataGetData(pTbColInfo, rowId);
|
||||
pCreateTbReq->name = taosMemoryCalloc(1, varDataLen(pTbData) + 1);
|
||||
memcpy(pCreateTbReq->name, varDataVal(pTbData), varDataLen(pTbData));
|
||||
}
|
||||
|
||||
if (tqPutReqToQueue(pVnode, pCreateTbReq) != TSDB_CODE_SUCCESS) {
|
||||
goto _end;
|
||||
}
|
||||
tdDestroySVCreateTbReq(pCreateTbReq);
|
||||
taosMemoryFreeClear(pCreateTbReq);
|
||||
}
|
||||
} else {
|
||||
SSubmitTbData tbData = {0};
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
tqDebug("tq sink pipe2, convert block1 %d, rows: %d", i, rows);
|
||||
|
||||
if (!(tbData.aRowP = taosArrayInit(rows, sizeof(SRow*)))) {
|
||||
|
@ -394,6 +544,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
|
|||
tbData.sver = pTSchema->version;
|
||||
|
||||
char* ctbName = NULL;
|
||||
tqDebug("vgId:%d, stream write into %s, table auto created", TD_VID(pVnode), pDataBlock->info.parTbName);
|
||||
if (pDataBlock->info.parTbName[0]) {
|
||||
ctbName = strdup(pDataBlock->info.parTbName);
|
||||
} else {
|
||||
|
@ -423,7 +574,10 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
|
|||
pCreateTbReq->ctb.stbName = strdup((char*)tNameGetTableName(&name)); // strdup(stbFullName);
|
||||
|
||||
// set tag content
|
||||
taosArrayClear(tagArray);
|
||||
tagArray = taosArrayInit(1, sizeof(STagVal));
|
||||
if (!tagArray) {
|
||||
goto _end;
|
||||
}
|
||||
STagVal tagVal = {
|
||||
.cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1,
|
||||
.type = TSDB_DATA_TYPE_UBIGINT,
|
||||
|
@ -434,6 +588,7 @@ void tqSinkToTablePipeline2(SStreamTask* pTask, void* vnode, int64_t ver, void*
|
|||
|
||||
STag* pTag = NULL;
|
||||
tTagNew(tagArray, 1, false, &pTag);
|
||||
tagArray = taosArrayDestroy(tagArray);
|
||||
if (pTag == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _end;
|
||||
|
|
|
@ -1153,6 +1153,8 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) {
|
|||
|
||||
iMax[nMax] = i;
|
||||
max[nMax++] = pIter->input[i].pRow;
|
||||
} else {
|
||||
pIter->input[i].next = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -109,6 +109,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
|
|||
|
||||
p->type = type;
|
||||
p->pVnode = pVnode;
|
||||
p->pTsdb = p->pVnode->pTsdb;
|
||||
p->verRange = (SVersionRange){.minVer = 0, .maxVer = UINT64_MAX};
|
||||
p->numOfCols = numOfCols;
|
||||
p->suid = suid;
|
||||
|
||||
|
@ -145,6 +147,7 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
|
|||
}
|
||||
|
||||
p->idstr = taosMemoryStrDup(idstr);
|
||||
taosThreadMutexInit(&p->readerMutex, NULL);
|
||||
|
||||
*pReader = p;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -164,7 +167,9 @@ void* tsdbCacherowsReaderClose(void* pReader) {
|
|||
|
||||
destroyLastBlockLoadInfo(p->pLoadInfo);
|
||||
|
||||
taosMemoryFree((void*) p->idstr);
|
||||
taosMemoryFree((void*)p->idstr);
|
||||
taosThreadMutexDestroy(&p->readerMutex);
|
||||
|
||||
taosMemoryFree(pReader);
|
||||
return NULL;
|
||||
}
|
||||
|
@@ -199,6 +204,27 @@ static void freeItem(void* pItem) {
}
}

static int32_t tsdbCacheQueryReseek(void* pQHandle) {
int32_t code = 0;
SCacheRowsReader* pReader = pQHandle;

code = taosThreadMutexTryLock(&pReader->readerMutex);
if (code == 0) {
// pause current reader's state if not paused, save ts & version for resuming
// just wait for the big all tables' snapshot untaking for now

code = TSDB_CODE_VND_QUERY_BUSY;

taosThreadMutexUnlock(&pReader->readerMutex);

return code;
} else if (code == EBUSY) {
return TSDB_CODE_VND_QUERY_BUSY;
} else {
return -1;
}
}
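tsdbCacheQueryReseek above is a reseek callback handed to tsdbTakeReadSnap: it trylocks the reader mutex and reports "query busy" whether the reader is idle (lock taken, then released) or mid-fetch (EBUSY), without blocking the caller. A standalone sketch of that trylock pattern follows; the error code and the reader struct are illustrative, not the TDengine definitions.

```c
#include <errno.h>
#include <pthread.h>

#define QUERY_BUSY 1   // illustrative stand-in for TSDB_CODE_VND_QUERY_BUSY

typedef struct {
  pthread_mutex_t readerMutex;
} CacheReader;

static int cacheQueryReseek(void *handle) {
  CacheReader *reader = handle;

  int rc = pthread_mutex_trylock(&reader->readerMutex);
  if (rc == 0) {
    // the reader is idle right now; nothing to pause, just report busy to the caller
    pthread_mutex_unlock(&reader->readerMutex);
    return QUERY_BUSY;
  } else if (rc == EBUSY) {
    // a fetch is in progress; the caller should retry after the snapshot is released
    return QUERY_BUSY;
  }
  return -1;  // unexpected error from trylock
}
```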
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
if (pReader == NULL || pResBlock == NULL) {
return TSDB_CODE_INVALID_PARA;

@@ -241,7 +267,8 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
taosArrayPush(pLastCols, &p);
}

tsdbTakeReadSnap(pr->pVnode->pTsdb, &pr->pReadSnap, "cache-l");
taosThreadMutexLock(&pr->readerMutex);
tsdbTakeReadSnap((STsdbReader*)pr, tsdbCacheQueryReseek, &pr->pReadSnap);
pr->pDataFReader = NULL;
pr->pDataFReaderLast = NULL;

@@ -355,8 +382,9 @@ _end:
tsdbDataFReaderClose(&pr->pDataFReaderLast);
tsdbDataFReaderClose(&pr->pDataFReader);

tsdbUntakeReadSnap(pr->pVnode->pTsdb, pr->pReadSnap, "cache-l");
resetLastBlockLoadInfo(pr->pLoadInfo);
tsdbUntakeReadSnap((STsdbReader*)pr, pr->pReadSnap, true);
taosThreadMutexUnlock(&pr->readerMutex);

for (int32_t j = 0; j < pr->numOfCols; ++j) {
taosMemoryFree(pRes[j]);
Some files were not shown because too many files have changed in this diff.