Merge branch '3.0' into enh/TD-21161-3.0

commit fa822ff286 by kailixu, 2023-04-27 19:11:16 +08:00
52 changed files with 836 additions and 267 deletions

View File

@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.4.0")
SET(TD_VER_NUMBER "3.0.4.1")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG e02ddb2
GIT_TAG ae8d51c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -222,7 +222,7 @@ A database including one supertable and two subtables is created as follows:
```sql
DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb;
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
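
-- Not part of the original example: once the database and tables above exist, a topic is
-- typically created for consumers to subscribe to. The topic name and column list below
-- are hypothetical; adjust them to the actual schema.
CREATE TOPIC topic_ctb_column AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;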

View File

@ -6,7 +6,7 @@ description: Use Tag Index to Improve Query Performance
## Introduction
Prior to TDengine 3.0.3.0 (excluded)only one index is created by default on the first tag of each super talbe, but it's not allowed to dynamically create index on any other tags. From version 3.0.30, you can dynamically create index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use properly.
Prior to TDengine 3.0.3.0 (exclusive), only one index is created by default on the first tag of each super table, and indexes cannot be created dynamically on any other tag. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
## Syntax
@ -48,4 +48,4 @@ You can also add filter conditions to limit the results.
6. You can't create an index on a normal table or a child table.
7. If the unique values of a tag column are too few, it's better not to create index on such tag columns, the benefit would be very small.
7. If a tag column has only a few unique values, it's better not to create an index on it; the benefit would be very small.
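
Below is a minimal sketch of the dynamic tag index described above; the index, table, and tag names are hypothetical, and the exact form should be checked against the Syntax section of this page:

```sql
-- create an index on a non-first tag of an existing supertable (hypothetical names)
CREATE INDEX idx_location ON meters (location);
-- queries that filter on the indexed tag can then benefit from it
SELECT COUNT(*) FROM meters WHERE location = 'California.SanFrancisco';
-- drop the index when it no longer helps
DROP INDEX idx_location;
```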

View File

@ -5,9 +5,9 @@ description: This document describes the standard SQL functions available in TDe
toc_max_heading_level: 4
---
## Single Row Functions
## Scalar Functions
Single row functions return a result for each row.
Scalar functions return one result for each row.
### Mathematical Functions

View File

@ -13,8 +13,11 @@ Because stream processing is built in to TDengine, you are no longer reliant on
```sql
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
}
```
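
For illustration, here is a hedged example that exercises several of the options above; the stream, target table, source table, and column names are hypothetical:

```sql
-- compute per-minute averages, emit results at most 5 seconds after data arrives,
-- backfill historical data when the stream is created, and keep window state for one day
CREATE STREAM IF NOT EXISTS avg_stream
  TRIGGER MAX_DELAY 5s
  DELETE_MARK 1d
  FILL_HISTORY 1
INTO avg_stb AS
  SELECT _wstart, AVG(c1) FROM tmqdb.stb INTERVAL(1m);
```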
@ -141,3 +144,27 @@ The data in expired windows is tagged as expired. TDengine stream processing pro
2. Recalculate the data. In this method, all data in the window is reobtained from the database and recalculated. The latest results are then returned.
In both of these methods, configuring the watermark is essential for obtaining accurate results (if expired data is dropped) and avoiding repeated triggers that affect system performance (if expired data is recalculated).
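
As a rough sketch of how the two strategies map onto the IGNORE EXPIRED option (stream and table names are hypothetical):

```sql
-- IGNORE EXPIRED 0: re-read the affected window from the database and recalculate
CREATE STREAM s_recalc TRIGGER WINDOW_CLOSE WATERMARK 30s IGNORE EXPIRED 0 INTO stb_recalc AS
  SELECT _wstart, COUNT(*) FROM tmqdb.stb INTERVAL(1m);

-- IGNORE EXPIRED 1 (default): drop data that falls into windows that have already closed
CREATE STREAM s_drop TRIGGER WINDOW_CLOSE WATERMARK 30s IGNORE EXPIRED 1 INTO stb_drop AS
  SELECT _wstart, COUNT(*) FROM tmqdb.stb INTERVAL(1m);
```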
## Supported functions
All [scalar functions](../function/#scalar-functions) are available in stream processing. All [System information functions](../function/#system-information-functions) are <b>not</b> allowed in stream processing. All [Aggregate functions](../function/#aggregate-functions) and [Selection functions](../function/#selection-functions) are available in stream processing, except the following:
- [leastsquares](../function/#leastsquares)
- [percentile](../function/#percentile)
- [top](../function/#top)
- [bottom](../function/#bottom)
- [elapsed](../function/#elapsed)
- [interp](../function/#interp)
- [derivative](../function/#derivative)
- [irate](../function/#irate)
- [twa](../function/#twa)
- [histogram](../function/#histogram)
- [diff](../function/#diff)
- [statecount](../function/#statecount)
- [stateduration](../function/#stateduration)
- [csum](../function/#csum)
- [mavg](../function/#mavg)
- [sample](../function/#sample)
- [tail](../function/#tail)
- [unique](../function/#unique)
- [mode](../function/#mode)

View File

@ -27,7 +27,7 @@ The following data types can be used in the schema for standard tables.
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Deprecated | This Enterprise Edition-only statement has been removed. It returns the error "This statement is no longer supported."
| 2 | ALTER ALL DNODES | Added | Modifies the configuration of all dnodes.
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consitency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 3 | ALTER DATABASE | Modified | Deprecated<ul><li>QUORUM: Specified the required number of confirmations. TDengine 3.0 provides strict consistency by default and doesn't allow to change to weak consistency. </li><li>BLOCKS: Specified the memory blocks used by each vnode. BUFFER is now used to specify the size of the write cache pool for each vnode. </li><li>UPDATE: Specified whether update operations were supported. All databases now support updating data in certain columns. </li><li>CACHELAST: Specified how to cache the newest row of data. CACHEMODEL now replaces CACHELAST. </li><li>COMP: Cannot be modified. <br/>Added</li><li>CACHEMODEL: Specifies whether to cache the latest subtable data. </li><li>CACHESIZE: Specifies the size of the cache for the newest subtable data. </li><li>WAL_FSYNC_PERIOD: Replaces the FSYNC parameter. </li><li>WAL_LEVEL: Replaces the WAL parameter. </li><li>WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. </li><li>WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. <br/>Modified</li><li>REPLICA: Cannot be modified. </li><li>KEEP: Now supports units. </li></ul>
| 4 | ALTER STABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a supertable. </li></ul>
| 5 | ALTER TABLE | Modified | Deprecated<ul><li>CHANGE TAG: Modified the name of a tag. Replaced by RENAME TAG. <br/>Added</li><li>RENAME TAG: Replaces CHANGE TAG. </li><li>COMMENT: Specifies comments for a standard table. </li><li>TTL: Specifies the time-to-live for a standard table. </li></ul>
| 6 | ALTER USER | Modified | Deprecated<ul><li>PRIVILEGE: Specified user permissions. Replaced by GRANT and REVOKE. <br/>Added</li><li>ENABLE: Enables or disables a user. </li><li>SYSINFO: Specifies whether a user can query system information. </li></ul>
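
A few hedged examples of the revised syntax summarized in the rows above; the database, table, tag, and user names are hypothetical:

```sql
-- CACHEMODEL, CACHESIZE and the WAL_* options replace CACHELAST, FSYNC and WAL
ALTER DATABASE db1 CACHEMODEL 'last_row' CACHESIZE 16 WAL_RETENTION_PERIOD 3600;
-- RENAME TAG replaces the former CHANGE TAG clause
ALTER STABLE db1.stb1 RENAME TAG t_old t_new;
-- COMMENT and TTL can now be set on a standard table
ALTER TABLE db1.tb1 TTL 30 COMMENT 'demo table';
-- ENABLE and SYSINFO replace the former PRIVILEGE clause on users
ALTER USER user1 ENABLE 1;
```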

View File

@ -423,6 +423,6 @@ In addition to writing data using the SQL method or the parameter binding API, w
**Description**
- The above seven interfaces are extension interfaces, which are mainly used to pass ttl and reqid parameters, and can be used as needed.
- Withing _raw interfaces represent data through the passed parameters lines and len. In order to solve the problem that the original interface data contains '\0' and is truncated. The totalRows pointer returns the number of parsed data rows.
- Withing _ttl interfaces can pass the ttl parameter to control the ttl expiration time of the table.
- Withing _reqid interfaces can track the entire call chain by passing the reqid parameter.
- The _raw interfaces pass the data through the lines and len parameters, which avoids the truncation that occurred when data passed to the original interfaces contained '\0'. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces accept a ttl parameter that controls the TTL expiration time of the table.
- The _reqid interfaces accept a reqid parameter that can be used to trace the entire call chain (a usage sketch follows below).
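
The following is a minimal sketch, not taken from this patch, of how one of the _raw/_ttl/_reqid variants might be called. It assumes the taos_schemaless_insert_raw_ttl_with_reqid declaration and the TSDB_SML_* constants from taos.h, plus an already established connection; error handling is abbreviated.

```c
#include <stdio.h>
#include <string.h>
#include "taos.h"

// Insert one line-protocol record through the raw interface: the data is passed via
// (lines, len), so embedded '\0' bytes cannot truncate it; ttl sets the created table's
// time-to-live; reqid tags the request for end-to-end tracing; totalRows reports how
// many rows were parsed.
static int demo_sml_raw(TAOS *taos) {
  char lines[] =
      "meters,location=SanFrancisco,groupid=2 current=11.8,voltage=221 1648432611249";
  int32_t   totalRows = 0;
  TAOS_RES *res = taos_schemaless_insert_raw_ttl_with_reqid(
      taos, lines, (int)strlen(lines), &totalRows, TSDB_SML_LINE_PROTOCOL,
      TSDB_SML_TIMESTAMP_MILLI_SECONDS, /*ttl=*/3600, /*reqid=*/123456789LL);

  int code = taos_errno(res);
  if (code != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  } else {
    printf("parsed rows: %d\n", totalRows);
  }
  taos_free_result(res);
  return code;
}
```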

View File

@ -82,7 +82,7 @@ Add following dependency in the `pom.xml` file of your Maven project:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.1.0</version>
</dependency>
```
@ -227,7 +227,7 @@ In addition to getting the connection from the specified URL, you can use Proper
Note:
- The client parameters set in the application are process-level. If you want to update them, you need to restart the application, because client parameters are global and take effect only the first time they are set in the application.
- The following sample code is based on taos-jdbcdriver-3.0.0.
- The following sample code is based on taos-jdbcdriver-3.1.0.
```java
public Connection getConn() throws Exception{
@ -364,7 +364,7 @@ TDengine has significantly improved the bind APIs to support data writing (INSER
**Note:**
- JDBC REST connections do not currently support the bind interface
- The following sample code is based on taos-jdbcdriver-3.0.0
- The following sample code is based on taos-jdbcdriver-3.1.0
- The setString method should be called for binary type data, and the setNString method should be called for nchar type data
- both setString and setNString require the user to declare the width of the corresponding column in the size parameter of the table definition
@ -632,7 +632,7 @@ TDengine supports schemaless writing. It is compatible with InfluxDB's Line Prot
Note:
- JDBC REST connections do not currently support schemaless writes
- The following sample code is based on taos-jdbcdriver-3.0.0
- The following sample code is based on taos-jdbcdriver-3.1.0
```java
public class SchemalessInsertTest {

View File

@ -62,7 +62,7 @@ The different database framework specifications for various programming language
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Supported | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Schemaless** | Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |

View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
## 3.0.4.1
<Release type="tdengine" version="3.0.4.1" />
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />

View File

@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
## 2.5.0
<Release type="tools" version="2.5.0" />
## 2.4.12
<Release type="tools" version="2.4.12" />

View File

@ -100,7 +100,8 @@ sudo apt-get install tdengine
:::tip
apt-get 方式只适用于 Debian 或 Ubuntu 系统。
::::
:::
</TabItem>
<TabItem label="Windows 安装" value="windows">

View File

@ -221,7 +221,7 @@ void Close()
```sql
DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb;
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");

View File

@ -82,7 +82,7 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.1.0</version>
</dependency>
```
@ -230,7 +230,7 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFra
**注意**
- 应用中设置的 client parameter 为进程级别的,即如果要更新 client 的参数,需要重启应用。这是因为 client parameter 是全局参数,仅在应用程序的第一次设置生效。
- 以下示例代码基于 taos-jdbcdriver-3.0.0。
- 以下示例代码基于 taos-jdbcdriver-3.1.0。
```java
public Connection getConn() throws Exception{
@ -367,7 +367,7 @@ TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据
**注意**
- JDBC REST 连接目前不支持参数绑定
- 以下示例代码基于 taos-jdbcdriver-3.0.0
- 以下示例代码基于 taos-jdbcdriver-3.1.0
- binary 类型数据需要调用 setString 方法nchar 类型数据需要调用 setNString 方法
- setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽
@ -635,7 +635,7 @@ TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协
**注意**
- JDBC REST 连接目前不支持无模式写入
- 以下示例代码基于 taos-jdbcdriver-3.0.0
- 以下示例代码基于 taos-jdbcdriver-3.1.0
```java
public class SchemalessInsertTest {

View File

@ -61,7 +61,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **参数绑定** | 暂不支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 |
| **数据订阅TMQ** | 支持 | 支持 | 支持 | 暂不支持 | 暂不支持 | 支持 |
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
| **Schemaless** | 支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
| **批量拉取(基于 WebSocket** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |

View File

@ -15,6 +15,7 @@ stream_options: {
IGNORE EXPIRED [0|1]
DELETE_MARK time
FILL_HISTORY [0|1]
IGNORE UPDATE [0|1]
}
```
@ -169,7 +170,7 @@ T3 时刻最新事件到达T 向后推移超过了第二个窗口关闭的
在 window_close 或 max_delay 模式下,窗口关闭直接影响推送结果。在 at_once 模式下,窗口关闭只与内存占用有关。
## 流式计算的过期数据处理策略
## 流式计算对于过期数据的处理策略
对于已关闭的窗口,再次落入该窗口中的数据被标记为过期数据.
@ -177,11 +178,20 @@ TDengine 对于过期数据提供两种处理方式,由 IGNORE EXPIRED 选项
1. 重新计算,即 IGNORE EXPIRED 0从 TSDB 中重新查找对应窗口的所有数据并重新计算得到最新结果
2. 直接丢弃, 即 IGNORE EXPIRED 1默认配置忽略过期数据
2. 直接丢弃即 IGNORE EXPIRED 1默认配置忽略过期数据
无论在哪种模式下watermark 都应该被妥善设置,来得到正确结果(直接丢弃模式)或避免频繁触发重算带来的性能开销(重新计算模式)。
## 流式计算对于修改数据的处理策略
TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项指定:
1. 检查数据是否被修改,即 IGNORE UPDATE 0默认配置如果被修改则重新计算对应窗口。
2. 不检查数据是否被修改,全部按增量数据计算,即 IGNORE UPDATE 1。
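
A hypothetical sketch of the IGNORE UPDATE option described above (not part of this patch; the stream and table names are made up):

```sql
-- IGNORE UPDATE 0 (default): recompute the affected window when updated rows are detected
CREATE STREAM s_upd TRIGGER WINDOW_CLOSE IGNORE UPDATE 0 INTO stb_upd AS
  SELECT _wstart, MAX(c1) FROM tmqdb.stb INTERVAL(1m);

-- IGNORE UPDATE 1: skip the check and treat every incoming row as new incremental data
CREATE STREAM s_noupd TRIGGER WINDOW_CLOSE IGNORE UPDATE 1 INTO stb_noupd AS
  SELECT _wstart, MAX(c1) FROM tmqdb.stb INTERVAL(1m);
```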
## 写入已存在的超级表
```sql
[field1_name,...]
@ -216,26 +226,26 @@ T = 最新事件时间 - DELETE_MARK
## 流式计算支持的函数
1. 所有的单行函数均可用于流计算
2. 以下 19 个聚合函数不能在创建流计算的 SQL 语句中使用
```
leastsquares
percentile
top
bottom
elapsed
interp
derivative
irate
twa
histogram
diff
statecount
stateduration
csum
mavg
sample
tail
unique
mode
```
1. 所有的 [单行函数](../function/#单行函数) 均可用于流计算。
2. 以下 19 个聚合/选择函数 <b>不能</b> 应用在创建流计算的 SQL 语句,[系统信息函数](../function/#系统信息函数) 也不能用于流计算中。此外的其他类型的函数均可用于流计算。
- [leastsquares](../function/#leastsquares)
- [percentile](../function/#percentile)
- [top](../function/#top)
- [bottom](../function/#bottom)
- [elapsed](../function/#elapsed)
- [interp](../function/#interp)
- [derivative](../function/#derivative)
- [irate](../function/#irate)
- [twa](../function/#twa)
- [histogram](../function/#histogram)
- [diff](../function/#diff)
- [statecount](../function/#statecount)
- [stateduration](../function/#stateduration)
- [csum](../function/#csum)
- [mavg](../function/#mavg)
- [sample](../function/#sample)
- [tail](../function/#tail)
- [unique](../function/#unique)
- [mode](../function/#mode)

View File

@ -27,13 +27,13 @@ description: "TDengine 3.0 版本的语法变更说明"
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | 废除 | 2.x中为企业版功能3.0不再支持。语法暂时保留了执行报“This statement is no longer supported”错误。
| 2 | ALTER ALL DNODES | 新增 | 修改所有DNODE的参数。
| 3 | ALTER DATABASE | 调整 | 废除<ul><li>QUORUM写入需要的副本确认数。3.0 版本默认行为是强一致性,且不支持修改为弱一致性。</li><li>BLOCKSVNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>UPDATE更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>CACHELAST缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>COMP3.0版本暂不支持修改。</li><br/>新增<li>CACHEMODEL表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE表示缓存子表最近数据的内存大小。</li><li>WAL_FSYNC_PERIOD代替原FSYNC参数。</li><li>WAL_LEVEL代替原WAL参数。</li><li>WAL_RETENTION_PERIOD3.0.4.0版本新增wal文件的额外保留策略用于数据订阅。</li><li>WAL_RETENTION_SIZE3.0.4.0版本新增wal文件的额外保留策略用于数据订阅。<br/>调整</li><li>REPLICA3.0.0版本暂不支持修改。</li><li>KEEP3.0版本新增支持带单位的设置方式。</li></ul>
| 3 | ALTER DATABASE | 调整 | <p>废除</p><ul><li>QUORUM写入需要的副本确认数。3.0 版本默认行为是强一致性,且不支持修改为弱一致性。</li><li>BLOCKSVNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>UPDATE更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>CACHELAST缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>COMP3.0版本暂不支持修改。</li></ul><p>新增</p><ul><li>CACHEMODEL表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE表示缓存子表最近数据的内存大小。</li><li>WAL_FSYNC_PERIOD代替原FSYNC参数。</li><li>WAL_LEVEL代替原WAL参数。</li><li>WAL_RETENTION_PERIOD3.0.4.0版本新增wal文件的额外保留策略用于数据订阅。</li><li>WAL_RETENTION_SIZE3.0.4.0版本新增wal文件的额外保留策略用于数据订阅。</li></ul><p>调整</p><ul><li>REPLICA3.0.0版本暂不支持修改。</li><li>KEEP3.0版本新增支持带单位的设置方式。</li></ul>
| 4 | ALTER STABLE | 调整 | 废除<ul><li>CHANGE TAG修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增</li><li>RENAME TAG代替原CHANGE TAG子句。</li><li>COMMENT修改超级表的注释。</li></ul>
| 5 | ALTER TABLE | 调整 | 废除<ul><li>CHANGE TAG修改标签列的名称。3.0版本使用RENAME TAG代替。<br/>新增</li><li>RENAME TAG代替原CHANGE TAG子句。</li><li>COMMENT修改表的注释。</li><li>TTL修改表的生命周期。</li></ul>
| 6 | ALTER USER | 调整 | 废除<ul><li>PRIVILEGE修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。<br/>新增</li><li>ENABLE启用或停用此用户。</li><li>SYSINFO修改用户是否可查看系统信息。</li></ul>
| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能3.0不再支持。语法暂时保留了执行报“This statement is no longer supported”错误。
| 9 | CREATE DATABASE | 调整 | 废除<ul><li>BLOCKSVNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHEVNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WALWAL 级别。3.0版本使用WAL_LEVEL代替。<br/>新增</li><li>BUFFER一个 VNODE 写入内存池大小。</li><li>CACHEMODEL表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE表示缓存子表最近数据的内存大小。</li><li>DURATION代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS表示数据的聚合周期和保存时长。</li><li>STRICT表示数据同步的一致性要求。</li><li>SINGLE_STABLE表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD代替原FSYNC参数。</li><li>WAL_LEVEL代替原WAL参数。</li><li>WAL_RETENTION_PERIODwal文件的额外保留策略用于数据订阅。</li><li>WAL_RETENTION_SIZEwal文件的额外保留策略用于数据订阅。</li><li>WAL_ROLL_PERIODwal文件切换时长。</li><li>WAL_SEGMENT_SIZEwal单个文件大小。<br/>调整</li><li>KEEP3.0版本新增支持带单位的设置方式。</li></ul>
| 9 | CREATE DATABASE | 调整 | <p>废除</p><ul><li>BLOCKSVNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHEVNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。</li><li>CACHELAST缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。</li><li>DAYS数据文件存储数据的时间跨度。3.0版本使用DURATION代替。</li><li>FSYNC当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。</li><li>QUORUM写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。</li><li>UPDATE更新操作的支持模式。3.0版本所有数据库都支持部分列更新。</li><li>WALWAL 级别。3.0版本使用WAL_LEVEL代替。</li></ul><p>新增</p><ul><li>BUFFER一个 VNODE 写入内存池大小。</li><li>CACHEMODEL表示是否在内存中缓存子表的最近数据。</li><li>CACHESIZE表示缓存子表最近数据的内存大小。</li><li>DURATION代替原DAYS参数。新增支持带单位的设置方式。</li><li>PAGES一个 VNODE 中元数据存储引擎的缓存页个数。</li><li>PAGESIZE一个 VNODE 中元数据存储引擎的页大小。</li><li>RETENTIONS表示数据的聚合周期和保存时长。</li><li>STRICT表示数据同步的一致性要求。</li><li>SINGLE_STABLE表示此数据库中是否只可以创建一个超级表。</li><li>VGROUPS数据库中初始VGROUP的数目。</li><li>WAL_FSYNC_PERIOD代替原FSYNC参数。</li><li>WAL_LEVEL代替原WAL参数。</li><li>WAL_RETENTION_PERIODwal文件的额外保留策略用于数据订阅。</li><li>WAL_RETENTION_SIZEwal文件的额外保留策略用于数据订阅。</li><li>WAL_ROLL_PERIODwal文件切换时长。</li><li>WAL_SEGMENT_SIZEwal单个文件大小。</li></ul><p>调整</p><ul><li>KEEP3.0版本新增支持带单位的设置方式。</li></ul>
| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法<ul><li>CREATE DNODE dnode_host_name PORT port_val</li></ul>
| 11 | CREATE INDEX | 新增 | 创建SMA索引。
| 12 | CREATE MNODE | 新增 | 创建管理节点。

View File

@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.0.4.1
<Release type="tdengine" version="3.0.4.1" />
## 3.0.4.0
<Release type="tdengine" version="3.0.4.0" />

View File

@ -10,6 +10,14 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3";
## 2.5.0
<Release type="tools" version="2.5.0" />
## 2.4.12
<Release type="tools" version="2.4.12" />

View File

@ -24,6 +24,12 @@
extern "C" {
#endif
#define SLOW_LOG_TYPE_QUERY 0x1
#define SLOW_LOG_TYPE_INSERT 0x2
#define SLOW_LOG_TYPE_OTHERS 0x4
#define SLOW_LOG_TYPE_ALL 0xFFFFFFFF
// cluster
extern char tsFirst[];
extern char tsSecond[];
@ -118,6 +124,8 @@ extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;
extern int32_t tsMaxRetryWaitTime;
extern bool tsUseAdapter;
extern int32_t tsSlowLogThreshold;
extern int32_t tsSlowLogScope;
// client
extern int32_t tsMinSlidingTime;

View File

@ -103,7 +103,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_CHECKSUM_ERROR TAOS_DEF_ERROR_CODE(0, 0x011F) // internal
#define TSDB_CODE_COMPRESS_ERROR TAOS_DEF_ERROR_CODE(0, 0x0120)
#define TSDB_CODE_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0121) //
#define TSDB_CODE_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0121)
#define TSDB_CODE_CFG_NOT_FOUND TAOS_DEF_ERROR_CODE(0, 0x0122)
#define TSDB_CODE_REPEAT_INIT TAOS_DEF_ERROR_CODE(0, 0x0123)
#define TSDB_CODE_DUP_KEY TAOS_DEF_ERROR_CODE(0, 0x0124)
@ -118,9 +118,10 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MSG_ENCODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x012D)
#define TSDB_CODE_NO_ENOUGH_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x012E)
#define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130) //
#define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131) //
#define TSDB_CODE_IVLD_DATA_FMT TAOS_DEF_ERROR_CODE(0, 0x0132) //
#define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130)
#define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131)
#define TSDB_CODE_INVALID_DATA_FMT TAOS_DEF_ERROR_CODE(0, 0x0132)
#define TSDB_CODE_INVALID_CFG_VALUE TAOS_DEF_ERROR_CODE(0, 0x0133)
//client
#define TSDB_CODE_TSC_INVALID_OPERATION TAOS_DEF_ERROR_CODE(0, 0x0200)
@ -538,20 +539,20 @@ int32_t* taosGetErrno();
// #define TSDB_CODE_SYN_INVALID_CHECKSUM TAOS_DEF_ERROR_CODE(0, 0x0908) // 2.x
// #define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) // 2.x
// #define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) // 2.x
#define TSDB_CODE_SYN_IS_LEADER TAOS_DEF_ERROR_CODE(0, 0x090B)
// #define TSDB_CODE_SYN_IS_LEADER TAOS_DEF_ERROR_CODE(0, 0x090B) // unused
#define TSDB_CODE_SYN_NOT_LEADER TAOS_DEF_ERROR_CODE(0, 0x090C)
#define TSDB_CODE_SYN_ONE_REPLICA TAOS_DEF_ERROR_CODE(0, 0x090D)
#define TSDB_CODE_SYN_NOT_IN_NEW_CONFIG TAOS_DEF_ERROR_CODE(0, 0x090E)
#define TSDB_CODE_SYN_NEW_CONFIG_ERROR TAOS_DEF_ERROR_CODE(0, 0x090F) // internal
#define TSDB_CODE_SYN_RECONFIG_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0910)
// #define TSDB_CODE_SYN_ONE_REPLICA TAOS_DEF_ERROR_CODE(0, 0x090D) // unused
// #define TSDB_CODE_SYN_NOT_IN_NEW_CONFIG TAOS_DEF_ERROR_CODE(0, 0x090E) // unused
#define TSDB_CODE_SYN_NEW_CONFIG_ERROR TAOS_DEF_ERROR_CODE(0, 0x090F) // internal
// #define TSDB_CODE_SYN_RECONFIG_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0910) // unused
#define TSDB_CODE_SYN_PROPOSE_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0911)
#define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912)
#define TSDB_CODE_SYN_BATCH_ERROR TAOS_DEF_ERROR_CODE(0, 0x0913)
// #define TSDB_CODE_SYN_STANDBY_NOT_READY TAOS_DEF_ERROR_CODE(0, 0x0912) // unused
// #define TSDB_CODE_SYN_BATCH_ERROR TAOS_DEF_ERROR_CODE(0, 0x0913) // unused
#define TSDB_CODE_SYN_RESTORING TAOS_DEF_ERROR_CODE(0, 0x0914)
#define TSDB_CODE_SYN_INVALID_SNAPSHOT_MSG TAOS_DEF_ERROR_CODE(0, 0x0915) // internal
#define TSDB_CODE_SYN_INVALID_SNAPSHOT_MSG TAOS_DEF_ERROR_CODE(0, 0x0915) // internal
#define TSDB_CODE_SYN_BUFFER_FULL TAOS_DEF_ERROR_CODE(0, 0x0916)
#define TSDB_CODE_SYN_WRITE_STALL TAOS_DEF_ERROR_CODE(0, 0x0917)
#define TSDB_CODE_SYN_NEGO_WIN_EXCEEDED TAOS_DEF_ERROR_CODE(0, 0X0918)
#define TSDB_CODE_SYN_NEGOTIATION_WIN_FULL TAOS_DEF_ERROR_CODE(0, 0x0918)
#define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF)
// tq
@ -572,7 +573,7 @@ int32_t* taosGetErrno();
// wal
// #define TSDB_CODE_WAL_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x1000) // 2.x
#define TSDB_CODE_WAL_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x1001)
#define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002)
// #define TSDB_CODE_WAL_SIZE_LIMIT TAOS_DEF_ERROR_CODE(0, 0x1002) // unused
#define TSDB_CODE_WAL_INVALID_VER TAOS_DEF_ERROR_CODE(0, 0x1003)
// #define TSDB_CODE_WAL_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x1004) // 2.x
#define TSDB_CODE_WAL_LOG_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x1005)

View File

@ -83,6 +83,12 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
#endif
;
void taosPrintSlowLog(const char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 1, 2)))
#endif
;
bool taosAssertDebug(bool condition, const char *file, int32_t line, const char *format, ...);
bool taosAssertRelease(bool condition);

View File

@ -341,7 +341,7 @@ if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
tmp_pwd=`pwd`
cd ${install_dir}/connector
if [ ! -d taos-connector-jdbc ];then
git clone -b main --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
git clone -b 3.1.0 --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
fi
cd taos-connector-jdbc
mvn clean package -Dmaven.test.skip=true

View File

@ -42,7 +42,7 @@ SAppInfo appInfo;
int64_t lastClusterId = 0;
int32_t clientReqRefPool = -1;
int32_t clientConnRefPool = -1;
int32_t clientStop = 0;
int32_t clientStop = -1;
int32_t timestampDeltaLimit = 900; // s
@ -69,7 +69,6 @@ static int32_t registerRequest(SRequestObj *pRequest, STscObj *pTscObj) {
}
static void deregisterRequest(SRequestObj *pRequest) {
const static int64_t SLOW_QUERY_INTERVAL = 3000000L; // todo configurable
if (pRequest == NULL) {
tscError("pRequest == NULL");
return;
@ -80,6 +79,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);
int32_t reqType = SLOW_LOG_TYPE_OTHERS;
int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64
@ -95,6 +95,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
duration, pRequest->metric.parseCostUs, pRequest->metric.ctgCostUs, pRequest->metric.analyseCostUs,
pRequest->metric.planCostUs, pRequest->metric.execCostUs);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
reqType = SLOW_LOG_TYPE_INSERT;
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
tscDebug("query duration %" PRId64 "us: parseCost:%" PRId64 "us, ctgCost:%" PRId64 "us, analyseCost:%" PRId64
"us, planCost:%" PRId64 "us, exec:%" PRId64 "us",
@ -102,12 +103,16 @@ static void deregisterRequest(SRequestObj *pRequest) {
pRequest->metric.planCostUs, pRequest->metric.execCostUs);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
reqType = SLOW_LOG_TYPE_QUERY;
}
}
if (duration >= SLOW_QUERY_INTERVAL) {
if (duration >= (tsSlowLogThreshold * 1000000UL)) {
atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
tscWarnL("slow query: %s, duration:%" PRId64, pRequest->sqlstr, duration);
if (tsSlowLogScope & reqType) {
taosPrintSlowLog("PID:%d, Conn:%u, QID:0x%" PRIx64 ", Start:%" PRId64 ", Duration:%" PRId64 "us, SQL:%s",
taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration, pRequest->sqlstr);
}
}
releaseTscObj(pTscObj->id);
@ -427,8 +432,12 @@ static void *tscCrashReportThreadFp(void *param) {
}
#endif
if (-1 != atomic_val_compare_exchange_32(&clientStop, -1, 0)) {
return NULL;
}
while (1) {
if (clientStop) break;
if (clientStop > 0) break;
if (loopTimes++ < reportPeriodNum) {
taosMsleep(sleepTime);
continue;
@ -466,7 +475,7 @@ static void *tscCrashReportThreadFp(void *param) {
loopTimes = 0;
}
clientStop = -1;
clientStop = -2;
return NULL;
}

View File

@ -1248,6 +1248,11 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
return NULL;
}
pRequest->sqlstr = taosStrdup("taos_connect");
if (pRequest->sqlstr) {
pRequest->sqlLen = strlen(pRequest->sqlstr);
}
SMsgSendInfo* body = buildConnectMsg(pRequest);
int64_t transporterId = 0;
@ -1257,7 +1262,7 @@ STscObj* taosConnectImpl(const char* user, const char* auth, const char* db, __t
if (pRequest->code != TSDB_CODE_SUCCESS) {
const char* errorMsg =
(pRequest->code == TSDB_CODE_RPC_FQDN_ERROR) ? taos_errstr(pRequest) : tstrerror(pRequest->code);
fprintf(stderr, "failed to connect to server, reason: %s\n\n", errorMsg);
tscError("failed to connect to server, reason: %s", errorMsg);
terrno = pRequest->code;
destroyRequest(pRequest);

View File

@ -681,6 +681,7 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns,
SMCreateStbReq pReq = {0};
int32_t code = TSDB_CODE_SUCCESS;
SCmdMsgInfo pCmdMsg = {0};
char *pSql = NULL;
// put front for free
pReq.numOfColumns = taosArrayGetSize(pColumns);
@ -688,7 +689,27 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns,
pReq.numOfTags = taosArrayGetSize(pTags);
pReq.pTags = pTags;
code = buildRequest(info->taos->id, "", 0, NULL, false, &pRequest, 0);
if (action == SCHEMA_ACTION_CREATE_STABLE) {
pReq.colVer = 1;
pReq.tagVer = 1;
pReq.suid = 0;
pReq.source = TD_REQ_FROM_APP;
pSql = "sml_create_stable";
} else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) {
pReq.colVer = pTableMeta->sversion;
pReq.tagVer = pTableMeta->tversion + 1;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
pSql = (action == SCHEMA_ACTION_ADD_TAG) ? "sml_add_tag" : "sml_modify_tag_size";
} else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE) {
pReq.colVer = pTableMeta->sversion + 1;
pReq.tagVer = pTableMeta->tversion;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
pSql = (action == SCHEMA_ACTION_ADD_COLUMN) ? "sml_add_column" : "sml_modify_column_size";
}
code = buildRequest(info->taos->id, pSql, strlen(pSql), NULL, false, &pRequest, 0);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
@ -699,23 +720,6 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray *pColumns,
goto end;
}
if (action == SCHEMA_ACTION_CREATE_STABLE) {
pReq.colVer = 1;
pReq.tagVer = 1;
pReq.suid = 0;
pReq.source = TD_REQ_FROM_APP;
} else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE) {
pReq.colVer = pTableMeta->sversion;
pReq.tagVer = pTableMeta->tversion + 1;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
} else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE) {
pReq.colVer = pTableMeta->sversion + 1;
pReq.tagVer = pTableMeta->tversion;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
}
if (pReq.numOfTags == 0) {
pReq.numOfTags = 1;
SField field = {0};
@ -1543,6 +1547,44 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL
return code;
}
void smlSetReqSQL(SRequestObj *request, char *lines[], char *rawLine, char *rawLineEnd) {
if (tsSlowLogScope & SLOW_LOG_TYPE_INSERT) {
int32_t len = 0;
int32_t rlen = 0;
char* p = NULL;
if (lines && lines[0]) {
len = strlen(lines[0]);
p = lines[0];
} else if (rawLine) {
if (rawLineEnd) {
len = rawLineEnd - rawLine;
} else {
len = strlen(rawLine);
}
p = rawLine;
}
if (NULL == p) {
return;
}
rlen = TMIN(len, TSDB_MAX_ALLOWED_SQL_LEN);
rlen = TMAX(rlen, 0);
char *sql = taosMemoryMalloc(rlen + 1);
if (NULL == sql) {
uError("malloc %d for sml sql failed", rlen + 1);
return;
}
memcpy(sql, p, rlen);
sql[rlen] = 0;
request->sqlstr = sql;
request->sqlLen = rlen;
}
}
TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine, char *rawLineEnd, int numLines,
int protocol, int precision, int32_t ttl, int64_t reqid) {
int32_t code = TSDB_CODE_SUCCESS;
@ -1575,6 +1617,8 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine,
info->msgBuf.len = ERROR_MSG_BUF_DEFAULT_SIZE;
info->lineNum = numLines;
smlSetReqSQL(request, lines, rawLine, rawLineEnd);
SSmlMsgBuf msg = {ERROR_MSG_BUF_DEFAULT_SIZE, request->msgBuf};
if (request->pDb == NULL) {
request->code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;

View File

@ -500,7 +500,7 @@ int32_t tRowGet(SRow *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal)
break;
default:
ASSERTS(0, "invalid row format");
return TSDB_CODE_IVLD_DATA_FMT;
return TSDB_CODE_INVALID_DATA_FMT;
}
if (bv == BIT_FLG_NONE) {
@ -938,7 +938,7 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *
break;
default:
ASSERTS(0, "Invalid row flag");
return TSDB_CODE_IVLD_DATA_FMT;
return TSDB_CODE_INVALID_DATA_FMT;
}
while (pColData) {
@ -963,7 +963,7 @@ static int32_t tRowTupleUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *
break;
default:
ASSERTS(0, "Invalid row flag");
return TSDB_CODE_IVLD_DATA_FMT;
return TSDB_CODE_INVALID_DATA_FMT;
}
if (bv == BIT_FLG_NONE) {
@ -1054,7 +1054,7 @@ static int32_t tRowKVUpsertColData(SRow *pRow, STSchema *pTSchema, SColData *aCo
pData = pv + ((uint32_t *)pKVIdx->idx)[iCol];
} else {
ASSERTS(0, "Invalid KV row format");
return TSDB_CODE_IVLD_DATA_FMT;
return TSDB_CODE_INVALID_DATA_FMT;
}
int16_t cid;
@ -2503,9 +2503,11 @@ _exit:
int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind) {
int32_t code = 0;
ASSERT(pColData->type == pBind->buffer_type);
if (IS_VAR_DATA_TYPE(pBind->buffer_type)) { // var-length data type
if (!(pBind->num == 1 && pBind->is_null && *pBind->is_null)) {
ASSERT(pColData->type == pBind->buffer_type);
}
if (IS_VAR_DATA_TYPE(pColData->type)) { // var-length data type
for (int32_t i = 0; i < pBind->num; ++i) {
if (pBind->is_null && pBind->is_null[i]) {
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0);

View File

@ -117,6 +117,10 @@ int32_t tsRedirectFactor = 2;
int32_t tsRedirectMaxPeriod = 1000;
int32_t tsMaxRetryWaitTime = 10000;
bool tsUseAdapter = false;
int32_t tsSlowLogThreshold = 3; // seconds
int32_t tsSlowLogScope = SLOW_LOG_TYPE_ALL;
/*
@ -345,6 +349,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1;
if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, true) != 0) return -1;
if (cfgAddString(pCfg, "slowLogScope", "", true) != 0) return -1;
tsNumOfRpcThreads = tsNumOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 2, TSDB_MAX_RPC_THREADS);
@ -692,6 +698,42 @@ static void taosSetServerLogCfg(SConfig *pCfg) {
metaDebugFlag = cfgGetItem(pCfg, "metaDebugFlag")->i32;
}
static int32_t taosSetSlowLogScope(char *pScope) {
if (NULL == pScope || 0 == strlen(pScope)) {
tsSlowLogScope = SLOW_LOG_TYPE_ALL;
return 0;
}
if (0 == strcasecmp(pScope, "all")) {
tsSlowLogScope = SLOW_LOG_TYPE_ALL;
return 0;
}
if (0 == strcasecmp(pScope, "query")) {
tsSlowLogScope = SLOW_LOG_TYPE_QUERY;
return 0;
}
if (0 == strcasecmp(pScope, "insert")) {
tsSlowLogScope = SLOW_LOG_TYPE_INSERT;
return 0;
}
if (0 == strcasecmp(pScope, "others")) {
tsSlowLogScope = SLOW_LOG_TYPE_OTHERS;
return 0;
}
if (0 == strcasecmp(pScope, "none")) {
tsSlowLogScope = 0;
return 0;
}
uError("Invalid slowLog scope value:%s", pScope);
terrno = TSDB_CODE_INVALID_CFG_VALUE;
return -1;
}
static int32_t taosSetClientCfg(SConfig *pCfg) {
tstrncpy(tsLocalFqdn, cfgGetItem(pCfg, "fqdn")->str, TSDB_FQDN_LEN);
tsServerPort = (uint16_t)cfgGetItem(pCfg, "serverPort")->i32;
@ -742,6 +784,10 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsUseAdapter = cfgGetItem(pCfg, "useAdapter")->bval;
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
tsQueryMaxConcurrentTables = cfgGetItem(pCfg, "queryMaxConcurrentTables")->i64;
tsSlowLogThreshold = cfgGetItem(pCfg, "slowLogThreshold")->i32;
if (taosSetSlowLogScope(cfgGetItem(pCfg, "slowLogScope")->str)) {
return -1;
}
tsMaxRetryWaitTime = cfgGetItem(pCfg, "maxRetryWaitTime")->i32;
@ -1156,6 +1202,12 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32;
} else if (strcasecmp("smaDebugFlag", name) == 0) {
smaDebugFlag = cfgGetItem(pCfg, "smaDebugFlag")->i32;
} else if (strcasecmp("slowLogThreshold", name) == 0) {
tsSlowLogThreshold = cfgGetItem(pCfg, "slowLogThreshold")->i32;
} else if (strcasecmp("slowLogScope", name) == 0) {
if (taosSetSlowLogScope(cfgGetItem(pCfg, "slowLogScope")->str)) {
return -1;
}
}
break;
}

View File

@ -60,19 +60,19 @@ int vnodeCheckCfg(const SVnodeCfg *pCfg) {
const char* vnodeRoleToStr(ESyncRole role) {
switch (role) {
case TAOS_SYNC_ROLE_VOTER:
return "voter";
return "true";
case TAOS_SYNC_ROLE_LEARNER:
return "learner";
return "false";
default:
return "unknown";
}
}
const ESyncRole vnodeStrToRole(char* str) {
if(strcmp(str, "voter") == 0){
if(strcmp(str, "true") == 0){
return TAOS_SYNC_ROLE_VOTER;
}
if(strcmp(str, "learner") == 0){
if(strcmp(str, "false") == 0){
return TAOS_SYNC_ROLE_LEARNER;
}
@ -139,7 +139,6 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "hashSuffix", pCfg->hashSuffix) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.totalReplicaNum", pCfg->syncCfg.totalReplicaNum) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "vndStats.stables", pCfg->vndStats.numOfSTables) < 0) return -1;
@ -161,7 +160,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddStringToObject(info, "nodeFqdn", pNode->nodeFqdn) < 0) return -1;
if (tjsonAddIntegerToObject(info, "nodeId", pNode->nodeId) < 0) return -1;
if (tjsonAddIntegerToObject(info, "clusterId", pNode->clusterId) < 0) return -1;
if (tjsonAddStringToObject(info, "nodeRole", vnodeRoleToStr(pNode->nodeRole)) < 0) return -1;
if (tjsonAddStringToObject(info, "isReplica", vnodeRoleToStr(pNode->nodeRole)) < 0) return -1;
if (tjsonAddItemToArray(nodeInfo, info) < 0) return -1;
vDebug("vgId:%d, encode config, replica:%d ep:%s:%u dnode:%d", pCfg->vgId, i, pNode->nodeFqdn, pNode->nodePort,
pNode->nodeId);
@ -259,8 +258,6 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "syncCfg.totalReplicaNum", pCfg->syncCfg.totalReplicaNum, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex, code);
if (code < 0) return -1;
@ -277,10 +274,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
SJson *nodeInfo = tjsonGetObjectItem(pJson, "syncCfg.nodeInfo");
int arraySize = tjsonGetArraySize(nodeInfo);
if(pCfg->syncCfg.totalReplicaNum == 0 && pCfg->syncCfg.replicaNum > 0){
pCfg->syncCfg.totalReplicaNum = pCfg->syncCfg.replicaNum;
}
if (arraySize != pCfg->syncCfg.totalReplicaNum) return -1;
pCfg->syncCfg.totalReplicaNum = arraySize;
vDebug("vgId:%d, decode config, replicas:%d totalReplicas:%d selfIndex:%d", pCfg->vgId, pCfg->syncCfg.replicaNum,
pCfg->syncCfg.totalReplicaNum, pCfg->syncCfg.myIndex);
@ -296,7 +290,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
tjsonGetNumberValue(info, "clusterId", pNode->clusterId, code);
if (code < 0) return -1;
char role[10] = {0};
code = tjsonGetStringValue(info, "nodeRole", role);
code = tjsonGetStringValue(info, "isReplica", role);
if (code < 0) return -1;
if(strlen(role) != 0){
pNode->nodeRole = vnodeStrToRole(role);

View File

@ -366,6 +366,7 @@ typedef struct STagScanInfo {
int32_t curPos;
SReadHandle readHandle;
STableListInfo* pTableListInfo;
SLimitNode* pSlimit;
} STagScanInfo;
typedef enum EStreamScanMode {

View File

@ -871,9 +871,9 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
getBufferPgSize(pInfo->binfo.pRes->info.rowSize, &defaultPgsz, &defaultBufsz);
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_AVAIL_DISK;
terrno = TSDB_CODE_NO_DISKSPACE;
pTaskInfo->code = terrno;
qError("Create partition operator info failed since %s", terrstr(terrno));
qError("Create partition operator info failed since %s, tempDir:%s", terrstr(), tsTempDir);
goto _error;
}

View File

@ -2512,6 +2512,51 @@ _error:
return NULL;
}
static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes, int32_t count, SMetaReader* mr) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
STagScanInfo* pInfo = pOperator->info;
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0];
STableKeyInfo* item = tableListGetInfo(pInfo->pTableListInfo, pInfo->curPos);
int32_t code = metaGetTableEntryByUid(mr, item->uid);
tDecoderClear(&(*mr).coder);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", item->uid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
metaReaderClear(mr);
T_LONG_JMP(pTaskInfo->env, terrno);
}
char str[512];
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId);
// refactor later
if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) {
STR_TO_VARSTR(str, (*mr).me.name);
colDataSetVal(pDst, (count), str, false);
} else { // it is a tag value
STagVal val = {0};
val.cid = pExprInfo[j].base.pParam[0].pCol->colId;
const char* p = metaGetTableTagVal((*mr).me.ctbEntry.pTags, pDst->info.type, &val);
char* data = NULL;
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
data = tTagValToData((const STagVal*)p, false);
} else {
data = (char*)p;
}
colDataSetVal(pDst, (count), data,
(data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data)));
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) &&
data != NULL) {
taosMemoryFree(data);
}
}
}
}
static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@ -2536,48 +2581,22 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
metaReaderInit(&mr, pInfo->readHandle.meta, 0);
while (pInfo->curPos < size && count < pOperator->resultInfo.capacity) {
STableKeyInfo* item = tableListGetInfo(pInfo->pTableListInfo, pInfo->curPos);
int32_t code = metaGetTableEntryByUid(&mr, item->uid);
tDecoderClear(&mr.coder);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", item->uid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
metaReaderClear(&mr);
T_LONG_JMP(pTaskInfo->env, terrno);
}
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId);
// refactor later
if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) {
STR_TO_VARSTR(str, mr.me.name);
colDataSetVal(pDst, count, str, false);
} else { // it is a tag value
STagVal val = {0};
val.cid = pExprInfo[j].base.pParam[0].pCol->colId;
const char* p = metaGetTableTagVal(mr.me.ctbEntry.pTags, pDst->info.type, &val);
char* data = NULL;
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
data = tTagValToData((const STagVal*)p, false);
} else {
data = (char*)p;
}
colDataSetVal(pDst, count, data,
(data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data)));
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) &&
data != NULL) {
taosMemoryFree(data);
}
}
}
count += 1;
doTagScanOneTable(pOperator, pRes, count, &mr);
++count;
if (++pInfo->curPos >= size) {
setOperatorCompleted(pOperator);
}
// each table with tbname is a group, hence its own block, but only group when slimit exists for performance reason.
if (pInfo->pSlimit != NULL) {
if (pInfo->curPos < pInfo->pSlimit->offset) {
continue;
}
pInfo->pRes->info.id.groupId = calcGroupId(mr.me.name, strlen(mr.me.name));
if (pInfo->curPos >= (pInfo->pSlimit->offset + pInfo->pSlimit->limit) - 1) {
setOperatorCompleted(pOperator);
}
break;
}
}
metaReaderClear(&mr);
@ -2628,6 +2647,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
pInfo->pRes = createDataBlockFromDescNode(pDescNode);
pInfo->readHandle = *pReadHandle;
pInfo->curPos = 0;
pInfo->pSlimit = (SLimitNode*)pPhyNode->node.pSlimit; //TODO: slimit now only indicate group
setOperatorInfo(pOperator, "TagScanOperator", QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN, false, OP_NOT_OPENED, pInfo,
pTaskInfo);

View File

@ -2911,8 +2911,8 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SqlFunctionCtx* pCtx,
bufSize = pageSize * 4;
}
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_AVAIL_DISK;
qError("Init stream agg supporter failed since %s", terrstr(terrno));
terrno = TSDB_CODE_NO_DISKSPACE;
qError("Init stream agg supporter failed since %s, tempDir:%s", terrstr(), tsTempDir);
return terrno;
}
int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, "function", tsTempDir);

View File

@ -248,8 +248,8 @@ SLHashObj* tHashInit(int32_t inMemPages, int32_t pageSize, _hash_fn_t fn, int32_
}
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_AVAIL_DISK;
printf("tHash Init failed since %s", terrstr(terrno));
terrno = TSDB_CODE_NO_DISKSPACE;
printf("tHash Init failed since %s, tempDir:%s", terrstr(), tsTempDir);
taosMemoryFree(pHashObj);
return NULL;
}

View File

@ -195,8 +195,8 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
if (pHandle->pBuf == NULL) {
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_AVAIL_DISK;
qError("Add to buf failed since %s", terrstr(terrno));
terrno = TSDB_CODE_NO_DISKSPACE;
qError("Add to buf failed since %s, tempDir:%s", terrstr(), tsTempDir);
return terrno;
}
@ -261,9 +261,8 @@ static int32_t sortComparInit(SMsortComparParam* pParam, SArray* pSources, int32
// multi-pass internal merge sort is required
if (pHandle->pBuf == NULL) {
if (!osTempSpaceAvailable()) {
code = TSDB_CODE_NO_AVAIL_DISK;
terrno = code;
qError("Sort compare init failed since %s, %s", tstrerror(code), pHandle->idStr);
code = terrno = TSDB_CODE_NO_DISKSPACE;
qError("Sort compare init failed since %s, tempDir:%s, idStr:%s", terrstr(), tsTempDir, pHandle->idStr);
return code;
}

View File

@ -855,7 +855,9 @@ int32_t setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STu
int32_t numOfCols = pCtx->subsidiaries.num;
const char* p = loadTupleData(pCtx, pTuplePos);
if (p == NULL) {
terrno = TSDB_CODE_NO_AVAIL_DISK;
terrno = TSDB_CODE_NOT_FOUND;
qError("Load tuple data failed since %s, groupId:%" PRIu64 ", ts:%" PRId64, terrstr(),
pTuplePos->streamTupleKey.groupId, pTuplePos->streamTupleKey.ts);
return terrno;
}
@ -5098,7 +5100,9 @@ int32_t modeFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
if (maxCount != 0) {
const char* pData = loadTupleData(pCtx, &resDataPos);
if (pData == NULL) {
code = TSDB_CODE_NO_AVAIL_DISK;
code = terrno = TSDB_CODE_NOT_FOUND;
qError("Load tuple data failed since %s, groupId:%" PRIu64 ", ts:%" PRId64, terrstr(),
resDataPos.streamTupleKey.groupId, resDataPos.streamTupleKey.ts);
modeFunctionCleanup(pInfo);
return code;
}

View File

@ -277,7 +277,7 @@ tMemBucket *tMemBucketCreate(int32_t nElemSize, int16_t dataType, double minval,
resetSlotInfo(pBucket);
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_AVAIL_DISK;
terrno = TSDB_CODE_NO_DISKSPACE;
// qError("MemBucket create disk based Buf failed since %s", terrstr(terrno));
tMemBucketDestroy(pBucket);
return NULL;

View File

@ -844,7 +844,7 @@ void udfdGetFuncBodyPath(const SUdf *udf, char *path) {
int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) {
if (!osDataSpaceAvailable()) {
terrno = TSDB_CODE_NO_AVAIL_DISK;
terrno = TSDB_CODE_NO_DISKSPACE;
fnError("udfd create shared library failed since %s", terrstr(terrno));
return terrno;
}

View File

@ -251,7 +251,7 @@ int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, in
goto _return;
}
if (bind[c].buffer_type != pColSchema->type) {
if ((!(rowNum == 1 && bind[c].is_null && *bind[c].is_null)) && bind[c].buffer_type != pColSchema->type) { // for rowNum ==1 , connector may not set buffer_type
code = buildInvalidOperationMsg(&pBuf, "column type mis-match with buffer type");
goto _return;
}

View File

@ -2418,6 +2418,36 @@ static bool tagScanOptShouldBeOptimized(SLogicNode* pNode) {
return true;
}
static SLogicNode* tagScanOptFindAncestorWithSlimit(SLogicNode* pTableScanNode) {
SLogicNode* pNode = pTableScanNode->pParent;
while (NULL != pNode) {
if (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode) || QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode) ||
QUERY_NODE_LOGIC_PLAN_WINDOW == nodeType(pNode) || QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pNode)) {
return NULL;
}
if (NULL != pNode->pSlimit) {
return pNode;
}
pNode = pNode->pParent;
}
return NULL;
}
static void tagScanOptCloneAncestorSlimit(SLogicNode* pTableScanNode) {
if (NULL != pTableScanNode->pSlimit) {
return;
}
SLogicNode* pNode = tagScanOptFindAncestorWithSlimit(pTableScanNode);
if (NULL != pNode) {
//TODO: only set the slimit now. push down slimit later
pTableScanNode->pSlimit = nodesCloneNode(pNode->pSlimit);
((SLimitNode*)pTableScanNode->pSlimit)->limit += ((SLimitNode*)pTableScanNode->pSlimit)->offset;
((SLimitNode*)pTableScanNode->pSlimit)->offset = 0;
}
return;
}
static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
SScanLogicNode* pScanNode = (SScanLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, tagScanOptShouldBeOptimized);
if (NULL == pScanNode) {
@ -2458,6 +2488,7 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp
NODES_CLEAR_LIST(pAgg->pChildren);
}
nodesDestroyNode((SNode*)pAgg);
tagScanOptCloneAncestorSlimit((SLogicNode*)pScanNode);
pCxt->optimized = true;
return TSDB_CODE_SUCCESS;
}

View File

@ -75,10 +75,12 @@ bool syncNodeAgreedUpon(SSyncNode* pNode, SyncIndex index) {
SSyncIndexMgr* pMatches = pNode->pMatchIndex;
ASSERT(pNode->replicaNum == pMatches->replicaNum);
for (int i = 0; i < pNode->replicaNum; i++) {
SyncIndex matchIndex = pMatches->index[i];
if (matchIndex >= index) {
count++;
for (int i = 0; i < pNode->totalReplicaNum; i++) {
if(pNode->raftCfg.cfg.nodeInfo[i].nodeRole == TAOS_SYNC_ROLE_VOTER){
SyncIndex matchIndex = pMatches->index[i];
if (matchIndex >= index) {
count++;
}
}
}

View File

@ -463,8 +463,7 @@ bool syncSnapshotRecving(int64_t rid) {
int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) {
if (pSyncNode->peersNum == 0) {
sDebug("vgId:%d, only one replica, cannot leader transfer", pSyncNode->vgId);
terrno = TSDB_CODE_SYN_ONE_REPLICA;
return -1;
return 0;
}
int32_t ret = 0;
@ -486,7 +485,6 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) {
int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader) {
if (pSyncNode->replicaNum == 1) {
sDebug("vgId:%d, only one replica, cannot leader transfer", pSyncNode->vgId);
terrno = TSDB_CODE_SYN_ONE_REPLICA;
return -1;
}

View File

@ -54,7 +54,7 @@ int32_t syncLogBufferAppend(SSyncLogBuffer* pBuf, SSyncNode* pNode, SSyncRaftEnt
}
if (pNode->restoreFinish && index - pBuf->commitIndex >= TSDB_SYNC_NEGOTIATION_WIN) {
terrno = TSDB_CODE_SYN_NEGO_WIN_EXCEEDED;
terrno = TSDB_CODE_SYN_NEGOTIATION_WIN_FULL;
sError("vgId:%d, failed to append since %s, index:%" PRId64 ", commit-index:%" PRId64, pNode->vgId, terrstr(),
index, pBuf->commitIndex);
goto _err;

View File

@ -21,19 +21,19 @@
const char* syncRoleToStr(ESyncRole role) {
switch (role) {
case TAOS_SYNC_ROLE_VOTER:
return "voter";
return "true";
case TAOS_SYNC_ROLE_LEARNER:
return "learner";
return "false";
default:
return "unknown";
}
}
const ESyncRole syncStrToRole(char* str) {
if(strcmp(str, "voter") == 0){
if(strcmp(str, "true") == 0){
return TAOS_SYNC_ROLE_VOTER;
}
if(strcmp(str, "learner") == 0){
if(strcmp(str, "false") == 0){
return TAOS_SYNC_ROLE_LEARNER;
}
@ -42,7 +42,6 @@ const ESyncRole syncStrToRole(char* str) {
static int32_t syncEncodeSyncCfg(const void *pObj, SJson *pJson) {
SSyncCfg *pCfg = (SSyncCfg *)pObj;
if (tjsonAddDoubleToObject(pJson, "totalReplicaNum", pCfg->totalReplicaNum) < 0) return -1;
if (tjsonAddDoubleToObject(pJson, "replicaNum", pCfg->replicaNum) < 0) return -1;
if (tjsonAddDoubleToObject(pJson, "myIndex", pCfg->myIndex) < 0) return -1;
@ -56,7 +55,7 @@ static int32_t syncEncodeSyncCfg(const void *pObj, SJson *pJson) {
if (tjsonAddStringToObject(info, "nodeFqdn", pCfg->nodeInfo[i].nodeFqdn) < 0) return -1;
if (tjsonAddIntegerToObject(info, "nodeId", pCfg->nodeInfo[i].nodeId) < 0) return -1;
if (tjsonAddIntegerToObject(info, "clusterId", pCfg->nodeInfo[i].clusterId) < 0) return -1;
if (tjsonAddStringToObject(info, "nodeRole", syncRoleToStr(pCfg->nodeInfo[i].nodeRole)) < 0) return -1;
if (tjsonAddStringToObject(info, "isReplica", syncRoleToStr(pCfg->nodeInfo[i].nodeRole)) < 0) return -1;
if (tjsonAddItemToArray(nodeInfo, info) < 0) return -1;
}
@ -133,7 +132,6 @@ static int32_t syncDecodeSyncCfg(const SJson *pJson, void *pObj) {
SSyncCfg *pCfg = (SSyncCfg *)pObj;
int32_t code = 0;
tjsonGetInt32ValueFromDouble(pJson, "totalReplicaNum", pCfg->totalReplicaNum, code);
tjsonGetInt32ValueFromDouble(pJson, "replicaNum", pCfg->replicaNum, code);
if (code < 0) return -1;
tjsonGetInt32ValueFromDouble(pJson, "myIndex", pCfg->myIndex, code);
@ -153,7 +151,7 @@ static int32_t syncDecodeSyncCfg(const SJson *pJson, void *pObj) {
tjsonGetNumberValue(info, "nodeId", pCfg->nodeInfo[i].nodeId, code);
tjsonGetNumberValue(info, "clusterId", pCfg->nodeInfo[i].clusterId, code);
char role[10] = {0};
code = tjsonGetStringValue(info, "nodeRole", role);
code = tjsonGetStringValue(info, "isReplica", role);
if(code < 0) return -1;
if(strlen(role) != 0){
pCfg->nodeInfo[i].nodeRole = syncStrToRole(role);

View File

@ -98,7 +98,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NO_ENOUGH_DISKSPACE, "No enough disk space"
TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STARTING, "Database is starting up")
TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STOPPING, "Database is closing down")
TAOS_DEFINE_ERROR(TSDB_CODE_IVLD_DATA_FMT, "Invalid data format")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DATA_FMT, "Invalid data format")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CFG_VALUE, "Invalid configuration value")
//client
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation")
@ -413,20 +414,14 @@ TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_TABLE_LIMITED, "Table creation limite
// sync
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_TIMEOUT, "Sync timeout")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_IS_LEADER, "Sync is leader")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_LEADER, "Sync leader is unreachable")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_ONE_REPLICA, "Sync one replica")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_IN_NEW_CONFIG, "Sync not in new config")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NEW_CONFIG_ERROR, "Sync new config error")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_RECONFIG_NOT_READY, "Sync not ready for reconfig")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_PROPOSE_NOT_READY, "Sync not ready for propose")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_STANDBY_NOT_READY, "Sync not ready for standby")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_BATCH_ERROR, "Sync batch error")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_PROPOSE_NOT_READY, "Sync not ready to propose")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_RESTORING, "Sync leader is restoring")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_SNAPSHOT_MSG, "Sync invalid snapshot msg")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_BUFFER_FULL, "Sync buffer is full")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_WRITE_STALL, "Sync write stall")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NEGO_WIN_EXCEEDED, "Sync negotiation win exceeded")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NEGOTIATION_WIN_FULL, "Sync negotiation win is full")
TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INTERNAL_ERROR, "Sync internal error")
//tq
@ -444,8 +439,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TQ_NO_COMMITTED_OFFSET, "TQ no committed offse
// wal
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_FILE_CORRUPTED, "WAL file is corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_SIZE_LIMIT, "WAL size exceeds limit")
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_INVALID_VER, "WAL use invalid version")
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_INVALID_VER, "WAL invalid version")
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_LOG_NOT_EXIST, "WAL log not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_CHKSUM_MISMATCH, "WAL checksum mismatch")
TAOS_DEFINE_ERROR(TSDB_CODE_WAL_LOG_INCOMPLETE, "WAL log incomplete")

View File

@ -24,10 +24,11 @@
#define LOG_MAX_LINE_SIZE (10024)
#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3)
#define LOG_MAX_LINE_DUMP_SIZE (1024 * 1024)
#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 3)
#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128)
#define LOG_FILE_NAME_LEN 300
#define LOG_DEFAULT_BUF_SIZE (20 * 1024 * 1024) // 20MB
#define LOG_SLOW_BUF_SIZE (10 * 1024 * 1024) // 10MB
#define LOG_DEFAULT_INTERVAL 25
#define LOG_INTERVAL_STEP 5
@ -51,6 +52,8 @@ typedef struct {
int32_t stop;
TdThread asyncThread;
TdThreadMutex buffMutex;
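// pacing state for the async flush thread: current sleep interval and how long a near-empty buffer has gone unflushed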
int32_t writeInterval;
int32_t lastDuration;
} SLogBuff;
typedef struct {
@ -62,6 +65,7 @@ typedef struct {
pid_t pid;
char logName[LOG_FILE_NAME_LEN];
SLogBuff *logHandle;
SLogBuff *slowHandle;
TdThreadMutex logMutex;
} SLogObj;
@ -69,7 +73,6 @@ extern SConfig *tsCfg;
static int8_t tsLogInited = 0;
static SLogObj tsLogObj = {.fileNum = 1};
static int64_t tsAsyncLogLostLines = 0;
static int32_t tsWriteInterval = LOG_DEFAULT_INTERVAL;
static int32_t tsDaylightActive; /* Currently in daylight saving time. */
bool tsLogEmbedded = 0;
@ -82,6 +85,7 @@ int64_t tsNumOfErrorLogs = 0;
int64_t tsNumOfInfoLogs = 0;
int64_t tsNumOfDebugLogs = 0;
int64_t tsNumOfTraceLogs = 0;
int64_t tsNumOfSlowLogs = 0;
// log
int32_t dDebugFlag = 131;
@ -136,6 +140,34 @@ static int32_t taosStartLog() {
return 0;
}
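// Open the slow-query log: allocate a dedicated SLogBuff and an append-only file named taosSlowLog (or <CUS_PROMPT>SlowLog when CUS_PROMPT is defined) under tsLogDir.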
int32_t taosInitSlowLog() {
char fullName[PATH_MAX] = {0};
char logFileName[64] = {0};
#ifdef CUS_PROMPT
snprintf(logFileName, 64, "%sSlowLog", CUS_PROMPT);
#else
snprintf(logFileName, 64, "taosSlowLog");
#endif
if (strlen(tsLogDir) != 0) {
snprintf(fullName, PATH_MAX, "%s" TD_DIRSEP "%s", tsLogDir, logFileName);
} else {
snprintf(fullName, PATH_MAX, "%s", logFileName);
}
tsLogObj.slowHandle = taosLogBuffNew(LOG_SLOW_BUF_SIZE);
if (tsLogObj.slowHandle == NULL) return -1;
taosUmaskFile(0);
tsLogObj.slowHandle->pFile = taosOpenFile(fullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
if (tsLogObj.slowHandle->pFile == NULL) {
printf("\nfailed to open slow log file:%s, reason:%s\n", fullName, strerror(errno));
return -1;
}
return 0;
}
int32_t taosInitLog(const char *logName, int32_t maxFiles) {
if (atomic_val_compare_exchange_8(&tsLogInited, 0, 1) != 0) return 0;
osUpdate();
@ -151,6 +183,8 @@ int32_t taosInitLog(const char *logName, int32_t maxFiles) {
tsLogObj.logHandle = taosLogBuffNew(LOG_DEFAULT_BUF_SIZE);
if (tsLogObj.logHandle == NULL) return -1;
if (taosOpenLogFile(fullName, tsNumOfLogLines, maxFiles) < 0) return -1;
if (taosInitSlowLog() < 0) return -1;
if (taosStartLog() < 0) return -1;
return 0;
}
@ -159,25 +193,34 @@ static void taosStopLog() {
if (tsLogObj.logHandle) {
tsLogObj.logHandle->stop = 1;
}
if (tsLogObj.slowHandle) {
tsLogObj.slowHandle->stop = 1;
}
}
void taosCloseLog() {
taosStopLog();
if (tsLogObj.logHandle != NULL && taosCheckPthreadValid(tsLogObj.logHandle->asyncThread)) {
taosThreadJoin(tsLogObj.logHandle->asyncThread, NULL);
taosThreadClear(&tsLogObj.logHandle->asyncThread);
}
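// The slow-log buffer is flushed by the async thread joined above, so only its mutex, file and memory need to be released here.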
if (tsLogObj.slowHandle != NULL) {
taosThreadMutexDestroy(&tsLogObj.slowHandle->buffMutex);
taosCloseFile(&tsLogObj.slowHandle->pFile);
taosMemoryFreeClear(tsLogObj.slowHandle->buffer);
taosMemoryFreeClear(tsLogObj.slowHandle);
}
if (tsLogObj.logHandle != NULL) {
taosStopLog();
if (tsLogObj.logHandle != NULL && taosCheckPthreadValid(tsLogObj.logHandle->asyncThread)) {
taosThreadJoin(tsLogObj.logHandle->asyncThread, NULL);
taosThreadClear(&tsLogObj.logHandle->asyncThread);
}
tsLogInited = 0;
taosThreadMutexDestroy(&tsLogObj.logHandle->buffMutex);
taosCloseFile(&tsLogObj.logHandle->pFile);
taosMemoryFreeClear(tsLogObj.logHandle->buffer);
memset(&tsLogObj.logHandle->buffer, 0, sizeof(tsLogObj.logHandle->buffer));
taosThreadMutexDestroy(&tsLogObj.logMutex);
taosMemoryFreeClear(tsLogObj.logHandle);
memset(&tsLogObj.logHandle, 0, sizeof(tsLogObj.logHandle));
tsLogObj.logHandle = NULL;
}
}
@ -513,10 +556,9 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
va_list argpointer;
va_start(argpointer, format);
len += vsnprintf(buffer + len, LOG_MAX_LINE_DUMP_BUFFER_SIZE - len, format, argpointer);
len += vsnprintf(buffer + len, LOG_MAX_LINE_DUMP_BUFFER_SIZE - 2 - len, format, argpointer);
va_end(argpointer);
if (len > LOG_MAX_LINE_DUMP_SIZE) len = LOG_MAX_LINE_DUMP_SIZE;
buffer[len++] = '\n';
buffer[len] = 0;
@ -524,6 +566,31 @@ void taosPrintLongString(const char *flags, ELogLevel level, int32_t dflag, cons
taosMemoryFree(buffer);
}
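// Format one slow-query entry with the standard log header; push it to the slow-log buffer when async logging is enabled, otherwise write it to the slow-log file directly.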
void taosPrintSlowLog(const char *format, ...) {
if (!osLogSpaceAvailable()) return;
char *buffer = taosMemoryMalloc(LOG_MAX_LINE_DUMP_BUFFER_SIZE);
int32_t len = taosBuildLogHead(buffer, "");
va_list argpointer;
va_start(argpointer, format);
len += vsnprintf(buffer + len, LOG_MAX_LINE_DUMP_BUFFER_SIZE - 2 - len, format, argpointer);
va_end(argpointer);
if (len > LOG_MAX_LINE_DUMP_SIZE) len = LOG_MAX_LINE_DUMP_SIZE;  // clamp: vsnprintf returns the untruncated length
buffer[len++] = '\n';
buffer[len] = 0;
atomic_add_fetch_64(&tsNumOfSlowLogs, 1);
if (tsAsyncLog) {
taosPushLogBuffer(tsLogObj.slowHandle, buffer, len);
} else {
taosWriteFile(tsLogObj.slowHandle->pFile, buffer, len);
}
taosMemoryFree(buffer);
}
void taosDumpData(unsigned char *msg, int32_t len) {
if (!osLogSpaceAvailable()) return;
taosUpdateLogNums(DEBUG_DUMP);
@ -568,6 +635,7 @@ static SLogBuff *taosLogBuffNew(int32_t bufSize) {
LOG_BUF_SIZE(pLogBuf) = bufSize;
pLogBuf->minBuffSize = bufSize / 10;
pLogBuf->stop = 0;
pLogBuf->writeInterval = LOG_DEFAULT_INTERVAL;
if (taosThreadMutexInit(&LOG_BUF_MUTEX(pLogBuf), NULL) < 0) goto _err;
// tsem_init(&(pLogBuf->buffNotEmpty), 0, 0);
@ -651,83 +719,78 @@ static int32_t taosGetLogRemainSize(SLogBuff *pLogBuf, int32_t start, int32_t en
}
static void taosWriteLog(SLogBuff *pLogBuf) {
static int32_t lastDuration = 0;
int32_t remainChecked = 0;
int32_t start, end, pollSize;
int32_t start = LOG_BUF_START(pLogBuf);
int32_t end = LOG_BUF_END(pLogBuf);
do {
if (remainChecked == 0) {
start = LOG_BUF_START(pLogBuf);
end = LOG_BUF_END(pLogBuf);
if (start == end) {
dbgEmptyW++;
pLogBuf->writeInterval = LOG_MAX_INTERVAL;
return;
}
if (start == end) {
dbgEmptyW++;
tsWriteInterval = LOG_MAX_INTERVAL;
return;
}
pollSize = taosGetLogRemainSize(pLogBuf, start, end);
if (pollSize < pLogBuf->minBuffSize) {
lastDuration += tsWriteInterval;
if (lastDuration < LOG_MAX_WAIT_MSEC) {
break;
}
}
lastDuration = 0;
int32_t pollSize = taosGetLogRemainSize(pLogBuf, start, end);
if (pollSize < pLogBuf->minBuffSize) {
pLogBuf->lastDuration += pLogBuf->writeInterval;
if (pLogBuf->lastDuration < LOG_MAX_WAIT_MSEC) {
return;
}
}
if (start < end) {
taosWriteFile(pLogBuf->pFile, LOG_BUF_BUFFER(pLogBuf) + start, pollSize);
} else {
int32_t tsize = LOG_BUF_SIZE(pLogBuf) - start;
taosWriteFile(pLogBuf->pFile, LOG_BUF_BUFFER(pLogBuf) + start, tsize);
pLogBuf->lastDuration = 0;
taosWriteFile(pLogBuf->pFile, LOG_BUF_BUFFER(pLogBuf), end);
if (start < end) {
taosWriteFile(pLogBuf->pFile, LOG_BUF_BUFFER(pLogBuf) + start, pollSize);
} else {
int32_t tsize = LOG_BUF_SIZE(pLogBuf) - start;
taosWriteFile(pLogBuf->pFile, LOG_BUF_BUFFER(pLogBuf) + start, tsize);
taosWriteFile(pLogBuf->pFile, LOG_BUF_BUFFER(pLogBuf), end);
}
dbgWN++;
dbgWSize += pollSize;
if (pollSize < pLogBuf->minBuffSize) {
dbgSmallWN++;
if (pLogBuf->writeInterval < LOG_MAX_INTERVAL) {
pLogBuf->writeInterval += LOG_INTERVAL_STEP;
}
dbgWN++;
dbgWSize += pollSize;
if (pollSize < pLogBuf->minBuffSize) {
dbgSmallWN++;
if (tsWriteInterval < LOG_MAX_INTERVAL) {
tsWriteInterval += LOG_INTERVAL_STEP;
}
} else if (pollSize > LOG_BUF_SIZE(pLogBuf) / 3) {
dbgBigWN++;
tsWriteInterval = LOG_MIN_INTERVAL;
} else if (pollSize > LOG_BUF_SIZE(pLogBuf) / 4) {
if (tsWriteInterval > LOG_MIN_INTERVAL) {
tsWriteInterval -= LOG_INTERVAL_STEP;
}
} else if (pollSize > LOG_BUF_SIZE(pLogBuf) / 3) {
dbgBigWN++;
pLogBuf->writeInterval = LOG_MIN_INTERVAL;
} else if (pollSize > LOG_BUF_SIZE(pLogBuf) / 4) {
if (pLogBuf->writeInterval > LOG_MIN_INTERVAL) {
pLogBuf->writeInterval -= LOG_INTERVAL_STEP;
}
}
LOG_BUF_START(pLogBuf) = (LOG_BUF_START(pLogBuf) + pollSize) % LOG_BUF_SIZE(pLogBuf);
LOG_BUF_START(pLogBuf) = (LOG_BUF_START(pLogBuf) + pollSize) % LOG_BUF_SIZE(pLogBuf);
start = LOG_BUF_START(pLogBuf);
end = LOG_BUF_END(pLogBuf);
start = LOG_BUF_START(pLogBuf);
end = LOG_BUF_END(pLogBuf);
pollSize = taosGetLogRemainSize(pLogBuf, start, end);
if (pollSize < pLogBuf->minBuffSize) {
break;
}
pollSize = taosGetLogRemainSize(pLogBuf, start, end);
if (pollSize < pLogBuf->minBuffSize) {
return;
}
tsWriteInterval = LOG_MIN_INTERVAL;
remainChecked = 1;
} while (1);
pLogBuf->writeInterval = 0;
}
static void *taosAsyncOutputLog(void *param) {
SLogBuff *pLogBuf = (SLogBuff *)param;
SLogBuff *pLogBuf = (SLogBuff *)tsLogObj.logHandle;
SLogBuff *pSlowBuf = (SLogBuff *)tsLogObj.slowHandle;
setThreadName("log");
int32_t count = 0;
int32_t updateCron = 0;
int32_t writeInterval = 0;
while (1) {
count += tsWriteInterval;
writeInterval = TMIN(pLogBuf->writeInterval, pSlowBuf->writeInterval);
count += writeInterval;
updateCron++;
taosMsleep(tsWriteInterval);
taosMsleep(writeInterval);
if (count > 1000) {
osUpdate();
count = 0;
@ -735,13 +798,14 @@ static void *taosAsyncOutputLog(void *param) {
// Polling the buffer
taosWriteLog(pLogBuf);
taosWriteLog(pSlowBuf);
if (updateCron >= 3600 * 24 * 40 / 2) {
taosUpdateDaylight();
updateCron = 0;
}
if (pLogBuf->stop) break;
if (pLogBuf->stop || pSlowBuf->stop) break;
}
return NULL;

View File

@ -888,6 +888,7 @@
,,y,script,./test.sh -f tsim/query/emptyTsRange.sim
,,y,script,./test.sh -f tsim/query/partitionby.sim
,,y,script,./test.sh -f tsim/query/tableCount.sim
,,y,script,./test.sh -f tsim/query/tag_scan.sim
,,y,script,./test.sh -f tsim/query/nullColSma.sim
,,y,script,./test.sh -f tsim/qnode/basic1.sim
,,y,script,./test.sh -f tsim/snode/basic1.sim

tests/script/local.supp (new file, 227 lines)
View File

@ -0,0 +1,227 @@
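# Memcheck suppressions for still-reachable allocations made inside RocksDB (thread-local registries, Env/Options singletons, stats collectors, background threads), triggered mainly from tsdbOpenRocksCache and static initialization.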
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN9__gnu_cxx13new_allocatorINSt8__detail10_Hash_nodeISt4pairIKjPFvPvEELb0EEEE8allocateEmPKv
fun:_ZNSt16allocator_traitsISaINSt8__detail10_Hash_nodeISt4pairIKjPFvPvEELb0EEEEE8allocateERS9_m
fun:_ZNSt8__detail16_Hashtable_allocISaINS_10_Hash_nodeISt4pairIKjPFvPvEELb0EEEEE16_M_allocate_nodeIJRKSt21piecewise_construct_tSt5tupleIJRS3_EESF_IJEEEEEPS8_DpOT_
fun:_ZNSt10_HashtableIjSt4pairIKjPFvPvEESaIS5_ENSt8__detail10_Select1stESt8equal_toIjESt4hashIjENS7_18_Mod_range_hashingENS7_20_Default_ranged_hashENS7_20_Prime_rehash_policyENS7_17_Hashtable_traitsILb0ELb0ELb1EEEE12_Scoped_nodeC1IJRKSt21piecewise_construct_tSt5tupleIJRS1_EESO_IJEEEEEPNS7_16_Hashtable_allocISaINS7_10_Hash_nodeIS5_Lb0EEEEEEDpOT_
fun:_ZNSt8__detail9_Map_baseIjSt4pairIKjPFvPvEESaIS6_ENS_10_Select1stESt8equal_toIjESt4hashIjENS_18_Mod_range_hashingENS_20_Default_ranged_hashENS_20_Prime_rehash_policyENS_17_Hashtable_traitsILb0ELb0ELb1EEELb1EEixERS2_
fun:_ZNSt13unordered_mapIjPFvPvESt4hashIjESt8equal_toIjESaISt4pairIKjS2_EEEixERS8_
fun:_ZN7rocksdb14ThreadLocalPtr10StaticMeta10SetHandlerEjPFvPvE
fun:_ZN7rocksdb14ThreadLocalPtrC1EPFvPvE
fun:_ZN7rocksdb16ColumnFamilyDataC1EjRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPNS_7VersionEPNS_5CacheEPNS_18WriteBufferManagerERKNS_19ColumnFamilyOptionsERKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_15ColumnFamilySetEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb15ColumnFamilySetC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_5CacheEPNS_18WriteBufferManagerEPNS_15WriteControllerEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb10VersionSetC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_5CacheEPNS_18WriteBufferManagerEPNS_15WriteControllerEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb6DBImplC1ERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEbbb
fun:_ZN7rocksdb6DBImpl4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPNS_2DBEbb
fun:_ZN7rocksdb2DB4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPS0_
fun:_ZN7rocksdb2DB4OpenERKNS_7OptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPPS0_
fun:rocksdb_open
fun:tsdbOpenRocksCache
fun:tsdbOpenCache
fun:tsdbOpen
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN9__gnu_cxx13new_allocatorINSt8__detail10_Hash_nodeISt4pairIKjPFvPvEELb0EEEE8allocateEmPKv
fun:_ZNSt16allocator_traitsISaINSt8__detail10_Hash_nodeISt4pairIKjPFvPvEELb0EEEEE8allocateERS9_m
fun:_ZNSt8__detail16_Hashtable_allocISaINS_10_Hash_nodeISt4pairIKjPFvPvEELb0EEEEE16_M_allocate_nodeIJRKSt21piecewise_construct_tSt5tupleIJRS3_EESF_IJEEEEEPS8_DpOT_
fun:_ZNSt10_HashtableIjSt4pairIKjPFvPvEESaIS5_ENSt8__detail10_Select1stESt8equal_toIjESt4hashIjENS7_18_Mod_range_hashingENS7_20_Default_ranged_hashENS7_20_Prime_rehash_policyENS7_17_Hashtable_traitsILb0ELb0ELb1EEEE12_Scoped_nodeC1IJRKSt21piecewise_construct_tSt5tupleIJRS1_EESO_IJEEEEEPNS7_16_Hashtable_allocISaINS7_10_Hash_nodeIS5_Lb0EEEEEEDpOT_
fun:_ZNSt8__detail9_Map_baseIjSt4pairIKjPFvPvEESaIS6_ENS_10_Select1stESt8equal_toIjESt4hashIjENS_18_Mod_range_hashingENS_20_Default_ranged_hashENS_20_Prime_rehash_policyENS_17_Hashtable_traitsILb0ELb0ELb1EEELb1EEixERS2_
fun:_ZNSt13unordered_mapIjPFvPvESt4hashIjESt8equal_toIjESaISt4pairIKjS2_EEEixERS8_
fun:_ZN7rocksdb14ThreadLocalPtr10StaticMeta10SetHandlerEjPFvPvE
fun:_ZN7rocksdb14ThreadLocalPtrC1EPFvPvE
fun:_ZN7rocksdb16ColumnFamilyDataC1EjRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPNS_7VersionEPNS_5CacheEPNS_18WriteBufferManagerERKNS_19ColumnFamilyOptionsERKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_15ColumnFamilySetEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb15ColumnFamilySet18CreateColumnFamilyERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEjPNS_7VersionERKNS_19ColumnFamilyOptionsE
fun:_ZN7rocksdb10VersionSet18CreateColumnFamilyERKNS_19ColumnFamilyOptionsEPKNS_11VersionEditE
fun:_ZN7rocksdb18VersionEditHandler15CreateCfAndInitERKNS_19ColumnFamilyOptionsERKNS_11VersionEditE
fun:_ZN7rocksdb18VersionEditHandler10InitializeEv
fun:_ZN7rocksdb22VersionEditHandlerBase7IterateERNS_3log6ReaderEPNS_6StatusE
fun:_ZN7rocksdb10VersionSet7RecoverERKSt6vectorINS_22ColumnFamilyDescriptorESaIS2_EEbPNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
fun:_ZN7rocksdb6DBImpl7RecoverERKSt6vectorINS_22ColumnFamilyDescriptorESaIS2_EEbbbPm
fun:_ZN7rocksdb6DBImpl4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPNS_2DBEbb
fun:_ZN7rocksdb2DB4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPS0_
fun:_ZN7rocksdb2DB4OpenERKNS_7OptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPPS0_
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:calloc
fun:__cxa_thread_atexit_impl
fun:__cxa_thread_atexit
fun:__tls_init
fun:_ZTWN7rocksdb12perf_contextE
fun:_ZN7rocksdb17InstrumentedMutex4LockEv
fun:_ZN7rocksdb21InstrumentedMutexLockC1EPNS_17InstrumentedMutexE
fun:_ZN7rocksdb5Timer8ShutdownEv
fun:_ZN7rocksdb5TimerD1Ev
fun:_ZNKSt14default_deleteIN7rocksdb5TimerEEclEPS1_
fun:_ZNSt10unique_ptrIN7rocksdb5TimerESt14default_deleteIS1_EED1Ev
fun:_ZN7rocksdb21PeriodicWorkSchedulerD1Ev
fun:__run_exit_handlers
fun:exit
fun:(below main)
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN7rocksdb24CacheEntryStatsCollectorINS_13InternalStats19CacheEntryRoleStatsEE9GetSharedEPNS_5CacheEPNS_11SystemClockEPSt10shared_ptrIS3_E
fun:_ZN7rocksdb13InternalStatsC1EiPNS_11SystemClockEPNS_16ColumnFamilyDataE
fun:_ZN7rocksdb16ColumnFamilyDataC1EjRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPNS_7VersionEPNS_5CacheEPNS_18WriteBufferManagerERKNS_19ColumnFamilyOptionsERKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_15ColumnFamilySetEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb15ColumnFamilySet18CreateColumnFamilyERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEjPNS_7VersionERKNS_19ColumnFamilyOptionsE
fun:_ZN7rocksdb10VersionSet18CreateColumnFamilyERKNS_19ColumnFamilyOptionsEPKNS_11VersionEditE
fun:_ZN7rocksdb18VersionEditHandler15CreateCfAndInitERKNS_19ColumnFamilyOptionsERKNS_11VersionEditE
fun:_ZN7rocksdb18VersionEditHandler10InitializeEv
fun:_ZN7rocksdb22VersionEditHandlerBase7IterateERNS_3log6ReaderEPNS_6StatusE
fun:_ZN7rocksdb10VersionSet7RecoverERKSt6vectorINS_22ColumnFamilyDescriptorESaIS2_EEbPNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
fun:_ZN7rocksdb6DBImpl7RecoverERKSt6vectorINS_22ColumnFamilyDescriptorESaIS2_EEbbbPm
fun:_ZN7rocksdb6DBImpl4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPNS_2DBEbb
fun:_ZN7rocksdb2DB4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPS0_
fun:_ZN7rocksdb2DB4OpenERKNS_7OptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPPS0_
fun:rocksdb_open
fun:tsdbOpenRocksCache
fun:tsdbOpenCache
fun:tsdbOpen
fun:vnodeOpen
fun:vmProcessCreateVnodeReq
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN7rocksdb12_GLOBAL__N_111GetRegistryEv
fun:_ZN7rocksdb23CopyCacheDeleterRoleMapEv
fun:_ZN7rocksdb13InternalStats19CacheEntryRoleStats15BeginCollectionEPNS_5CacheEPNS_11SystemClockEm
fun:_ZN7rocksdb24CacheEntryStatsCollectorINS_13InternalStats19CacheEntryRoleStatsEE12CollectStatsEii
fun:_ZN7rocksdb13InternalStats22CollectCacheEntryStatsEb
fun:_ZN7rocksdb6DBImpl9DumpStatsEv
fun:_ZZN7rocksdb21PeriodicWorkScheduler8RegisterEPNS_6DBImplEjjENKUlvE_clEv
fun:_ZSt13__invoke_implIvRZN7rocksdb21PeriodicWorkScheduler8RegisterEPNS0_6DBImplEjjEUlvE_JEET_St14__invoke_otherOT0_DpOT1_
fun:_ZSt10__invoke_rIvRZN7rocksdb21PeriodicWorkScheduler8RegisterEPNS0_6DBImplEjjEUlvE_JEENSt9enable_ifIXsrSt6__and_IJSt7is_voidIT_ESt14__is_invocableIT0_JDpT1_EEEE5valueES9_E4typeEOSC_DpOSD_
fun:_ZNSt17_Function_handlerIFvvEZN7rocksdb21PeriodicWorkScheduler8RegisterEPNS1_6DBImplEjjEUlvE_E9_M_invokeERKSt9_Any_data
fun:_ZNKSt8functionIFvvEEclEv
fun:_ZN7rocksdb5Timer3RunEv
fun:_ZSt13__invoke_implIvMN7rocksdb5TimerEFvvEPS1_JEET_St21__invoke_memfun_derefOT0_OT1_DpOT2_
fun:_ZSt8__invokeIMN7rocksdb5TimerEFvvEJPS1_EENSt15__invoke_resultIT_JDpT0_EE4typeEOS6_DpOS7_
fun:_ZNSt6thread8_InvokerISt5tupleIJMN7rocksdb5TimerEFvvEPS3_EEE9_M_invokeIJLm0ELm1EEEEvSt12_Index_tupleIJXspT_EEE
fun:_ZNSt6thread8_InvokerISt5tupleIJMN7rocksdb5TimerEFvvEPS3_EEEclEv
fun:_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJMN7rocksdb5TimerEFvvEPS4_EEEEE6_M_runEv
obj:/usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.28
fun:start_thread
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN9__gnu_cxx13new_allocatorIPNSt8__detail15_Hash_node_baseEE8allocateEmPKv
fun:_ZNSt16allocator_traitsISaIPNSt8__detail15_Hash_node_baseEEE8allocateERS3_m
fun:_ZNSt8__detail16_Hashtable_allocISaINS_10_Hash_nodeIPN7rocksdb16ThreadStatusDataELb0EEEEE19_M_allocate_bucketsEm
fun:_ZNSt10_HashtableIPN7rocksdb16ThreadStatusDataES2_SaIS2_ENSt8__detail9_IdentityESt8equal_toIS2_ESt4hashIS2_ENS4_18_Mod_range_hashingENS4_20_Default_ranged_hashENS4_20_Prime_rehash_policyENS4_17_Hashtable_traitsILb0ELb1ELb1EEEE19_M_allocate_bucketsEm
fun:_ZNSt10_HashtableIPN7rocksdb16ThreadStatusDataES2_SaIS2_ENSt8__detail9_IdentityESt8equal_toIS2_ESt4hashIS2_ENS4_18_Mod_range_hashingENS4_20_Default_ranged_hashENS4_20_Prime_rehash_policyENS4_17_Hashtable_traitsILb0ELb1ELb1EEEE13_M_rehash_auxEmSt17integral_constantIbLb1EE
fun:_ZNSt10_HashtableIPN7rocksdb16ThreadStatusDataES2_SaIS2_ENSt8__detail9_IdentityESt8equal_toIS2_ESt4hashIS2_ENS4_18_Mod_range_hashingENS4_20_Default_ranged_hashENS4_20_Prime_rehash_policyENS4_17_Hashtable_traitsILb0ELb1ELb1EEEE9_M_rehashEmRKm
fun:_ZNSt10_HashtableIPN7rocksdb16ThreadStatusDataES2_SaIS2_ENSt8__detail9_IdentityESt8equal_toIS2_ESt4hashIS2_ENS4_18_Mod_range_hashingENS4_20_Default_ranged_hashENS4_20_Prime_rehash_policyENS4_17_Hashtable_traitsILb0ELb1ELb1EEEE21_M_insert_unique_nodeERKS2_mmPNS4_10_Hash_nodeIS2_Lb0EEEm
fun:_ZNSt10_HashtableIPN7rocksdb16ThreadStatusDataES2_SaIS2_ENSt8__detail9_IdentityESt8equal_toIS2_ESt4hashIS2_ENS4_18_Mod_range_hashingENS4_20_Default_ranged_hashENS4_20_Prime_rehash_policyENS4_17_Hashtable_traitsILb0ELb1ELb1EEEE9_M_insertIRKS2_NS4_10_AllocNodeISaINS4_10_Hash_nodeIS2_Lb0EEEEEEEESt4pairINS4_14_Node_iteratorIS2_Lb1ELb0EEEbEOT_RKT0_St17integral_constantIbLb1EEm
fun:_ZNSt8__detail12_Insert_baseIPN7rocksdb16ThreadStatusDataES3_SaIS3_ENS_9_IdentityESt8equal_toIS3_ESt4hashIS3_ENS_18_Mod_range_hashingENS_20_Default_ranged_hashENS_20_Prime_rehash_policyENS_17_Hashtable_traitsILb0ELb1ELb1EEEE6insertERKS3_
fun:_ZNSt13unordered_setIPN7rocksdb16ThreadStatusDataESt4hashIS2_ESt8equal_toIS2_ESaIS2_EE6insertERKS2_
fun:_ZN7rocksdb19ThreadStatusUpdater14RegisterThreadENS_12ThreadStatus10ThreadTypeEm
fun:_ZN7rocksdb16ThreadStatusUtil14RegisterThreadEPKNS_3EnvENS_12ThreadStatus10ThreadTypeE
fun:_ZN7rocksdb14ThreadPoolImpl4Impl15BGThreadWrapperEPv
fun:_ZSt13__invoke_implIvPFvPvEJPN7rocksdb16BGThreadMetadataEEET_St14__invoke_otherOT0_DpOT1_
fun:_ZSt8__invokeIPFvPvEJPN7rocksdb16BGThreadMetadataEEENSt15__invoke_resultIT_JDpT0_EE4typeEOS7_DpOS8_
fun:_ZNSt6thread8_InvokerISt5tupleIJPFvPvEPN7rocksdb16BGThreadMetadataEEEE9_M_invokeIJLm0ELm1EEEEvSt12_Index_tupleIJXspT_EEE
fun:_ZNSt6thread8_InvokerISt5tupleIJPFvPvEPN7rocksdb16BGThreadMetadataEEEEclEv
fun:_ZNSt6thread11_State_implINS_8_InvokerISt5tupleIJPFvPvEPN7rocksdb16BGThreadMetadataEEEEEE6_M_runEv
obj:/usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.28
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN9__gnu_cxx13new_allocatorIPNSt8__detail15_Hash_node_baseEE8allocateEmPKv
fun:_ZNSt16allocator_traitsISaIPNSt8__detail15_Hash_node_baseEEE8allocateERS3_m
fun:_ZNSt8__detail16_Hashtable_allocISaINS_10_Hash_nodeISt4pairIKjPFvPvEELb0EEEEE19_M_allocate_bucketsEm
fun:_ZNSt10_HashtableIjSt4pairIKjPFvPvEESaIS5_ENSt8__detail10_Select1stESt8equal_toIjESt4hashIjENS7_18_Mod_range_hashingENS7_20_Default_ranged_hashENS7_20_Prime_rehash_policyENS7_17_Hashtable_traitsILb0ELb0ELb1EEEE19_M_allocate_bucketsEm
fun:_ZNSt10_HashtableIjSt4pairIKjPFvPvEESaIS5_ENSt8__detail10_Select1stESt8equal_toIjESt4hashIjENS7_18_Mod_range_hashingENS7_20_Default_ranged_hashENS7_20_Prime_rehash_policyENS7_17_Hashtable_traitsILb0ELb0ELb1EEEE13_M_rehash_auxEmSt17integral_constantIbLb1EE
fun:_ZNSt10_HashtableIjSt4pairIKjPFvPvEESaIS5_ENSt8__detail10_Select1stESt8equal_toIjESt4hashIjENS7_18_Mod_range_hashingENS7_20_Default_ranged_hashENS7_20_Prime_rehash_policyENS7_17_Hashtable_traitsILb0ELb0ELb1EEEE9_M_rehashEmRKm
fun:_ZNSt10_HashtableIjSt4pairIKjPFvPvEESaIS5_ENSt8__detail10_Select1stESt8equal_toIjESt4hashIjENS7_18_Mod_range_hashingENS7_20_Default_ranged_hashENS7_20_Prime_rehash_policyENS7_17_Hashtable_traitsILb0ELb0ELb1EEEE21_M_insert_unique_nodeERS1_mmPNS7_10_Hash_nodeIS5_Lb0EEEm
fun:_ZNSt8__detail9_Map_baseIjSt4pairIKjPFvPvEESaIS6_ENS_10_Select1stESt8equal_toIjESt4hashIjENS_18_Mod_range_hashingENS_20_Default_ranged_hashENS_20_Prime_rehash_policyENS_17_Hashtable_traitsILb0ELb0ELb1EEELb1EEixERS2_
fun:_ZNSt13unordered_mapIjPFvPvESt4hashIjESt8equal_toIjESaISt4pairIKjS2_EEEixERS8_
fun:_ZN7rocksdb14ThreadLocalPtr10StaticMeta10SetHandlerEjPFvPvE
fun:_ZN7rocksdb14ThreadLocalPtrC1EPFvPvE
fun:_ZN7rocksdb16ColumnFamilyDataC1EjRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPNS_7VersionEPNS_5CacheEPNS_18WriteBufferManagerERKNS_19ColumnFamilyOptionsERKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_15ColumnFamilySetEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb15ColumnFamilySetC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_5CacheEPNS_18WriteBufferManagerEPNS_15WriteControllerEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb10VersionSetC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_5CacheEPNS_18WriteBufferManagerEPNS_15WriteControllerEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb6DBImplC1ERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEbbb
fun:_ZN7rocksdb6DBImpl4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPNS_2DBEbb
fun:_ZN7rocksdb2DB4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPS0_
fun:_ZN7rocksdb2DB4OpenERKNS_7OptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPPS0_
fun:rocksdb_open
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN7rocksdb12_GLOBAL__N_125CreateThreadStatusUpdaterEv
fun:_ZN7rocksdb12_GLOBAL__N_18PosixEnvC1Ev
fun:_ZN7rocksdb3Env7DefaultEv
fun:_ZN7rocksdb9DBOptionsC1Ev
fun:_ZN7rocksdb7OptionsC1Ev
fun:_ZN7rocksdb18ImmutableCFOptionsC1Ev
fun:_Z41__static_initialization_and_destruction_0ii
fun:_GLOBAL__sub_I_cf_options.cc
fun:__libc_csu_init
fun:(below main)
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN7rocksdb14ThreadLocalPtr8InstanceEv
fun:_ZN7rocksdb14ThreadLocalPtr14InitSingletonsEv
fun:_ZN7rocksdb3Env7DefaultEv
fun:_ZN7rocksdb9DBOptionsC1Ev
fun:_ZN7rocksdb7OptionsC1Ev
fun:_ZN7rocksdb18ImmutableCFOptionsC1Ev
fun:_Z41__static_initialization_and_destruction_0ii
fun:_GLOBAL__sub_I_cf_options.cc
fun:__libc_csu_init
fun:(below main)
}
{
<insert_a_suppression_name_here>
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN7rocksdb24CacheEntryStatsCollectorINS_13InternalStats19CacheEntryRoleStatsEE9GetSharedEPNS_5CacheEPNS_11SystemClockEPSt10shared_ptrIS3_E
fun:_ZN7rocksdb13InternalStatsC1EiPNS_11SystemClockEPNS_16ColumnFamilyDataE
fun:_ZN7rocksdb16ColumnFamilyDataC1EjRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPNS_7VersionEPNS_5CacheEPNS_18WriteBufferManagerERKNS_19ColumnFamilyOptionsERKNS_18ImmutableDBOptionsERKNS_11FileOptionsEPNS_15ColumnFamilySetEPNS_16BlockCacheTracerERKSt10shared_ptrINS_8IOTracerEES8_
fun:_ZN7rocksdb15ColumnFamilySet18CreateColumnFamilyERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEjPNS_7VersionERKNS_19ColumnFamilyOptionsE
fun:_ZN7rocksdb10VersionSet18CreateColumnFamilyERKNS_19ColumnFamilyOptionsEPKNS_11VersionEditE
fun:_ZN7rocksdb18VersionEditHandler15CreateCfAndInitERKNS_19ColumnFamilyOptionsERKNS_11VersionEditE
fun:_ZN7rocksdb18VersionEditHandler10InitializeEv
fun:_ZN7rocksdb22VersionEditHandlerBase7IterateERNS_3log6ReaderEPNS_6StatusE
fun:_ZN7rocksdb10VersionSet7RecoverERKSt6vectorINS_22ColumnFamilyDescriptorESaIS2_EEbPNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE
fun:_ZN7rocksdb6DBImpl7RecoverERKSt6vectorINS_22ColumnFamilyDescriptorESaIS2_EEbbbPm
fun:_ZN7rocksdb6DBImpl4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPNS_2DBEbb
fun:_ZN7rocksdb2DB4OpenERKNS_9DBOptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt6vectorINS_22ColumnFamilyDescriptorESaISD_EEPSC_IPNS_18ColumnFamilyHandleESaISJ_EEPPS0_
fun:_ZN7rocksdb2DB4OpenERKNS_7OptionsERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPPS0_
fun:rocksdb_open
fun:tsdbOpenRocksCache
fun:tsdbOpenCache
fun:tsdbOpen
fun:vnodeOpen
fun:vmOpenVnodeInThread
}

View File

@ -109,8 +109,10 @@ if [ "$EXEC_OPTON" = "start" ]; then
if [ "$VALGRIND_OPTION" = "true" ]; then
TT=`date +%s`
#mkdir ${LOG_DIR}/${TT}
echo "nohup valgrind --log-file=${LOG_DIR}/valgrind-taosd-${NODE_NAME}-${TT}.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &"
nohup valgrind --log-file=${LOG_DIR}/valgrind-taosd-${NODE_NAME}-${TT}.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
#echo "nohup valgrind --log-file=${LOG_DIR}/valgrind-taosd-${NODE_NAME}-${TT}.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &"
#nohup valgrind --log-file=${LOG_DIR}/valgrind-taosd-${NODE_NAME}-${TT}.log --gen-suppressions=all --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
echo "nohup valgrind --log-file=${LOG_DIR}/valgrind-taosd-${NODE_NAME}-${TT}.log --suppressions=${SCRIPT_DIR}/local.supp --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &"
nohup valgrind --log-file=${LOG_DIR}/valgrind-taosd-${NODE_NAME}-${TT}.log --suppressions=${SCRIPT_DIR}/local.supp --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
else
echo "nohup $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &"
nohup $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2> $ASAN_DIR/$NODE_NAME.asan &

View File

@ -0,0 +1,48 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql drop database if exists test
sql create database test;
sql use test;
sql create table st(ts timestamp, f int) tags (t int);
sql insert into ct1 using st tags(1) values(now, 1);
sql insert into ct2 using st tags(2) values(now, 2);
sql insert into ct3 using st tags(3) values(now, 3);
sql insert into ct4 using st tags(4) values(now, 4);
sql create table st2(ts timestamp, f int) tags (t int);
sql insert into ct21 using st2 tags(1) values(now, 1);
sql insert into ct22 using st2 tags(2) values(now, 2);
sql insert into ct23 using st2 tags(3) values(now, 3);
sql insert into ct24 using st2 tags(4) values(now, 4);
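# group by tbname on both super tables; check row counts and ordering with order by and slimit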
sql select tbname, 1 from st group by tbname order by tbname;
print $rows $data00 $data10 $data20
if $rows != 4 then
return -1
endi
if $data00 != @ct1@ then
return -1
endi
if $data10 != @ct2@ then
return -1
endi
sql select tbname, 1 from st group by tbname slimit 0, 1;
print $rows
if $rows != 1 then
return -1
endi
sql select tbname, 1 from st group by tbname slimit 2, 2;
print $rows $data00 $data10
if $rows != 2 then
return -1
endi
sql select tbname, 1 from st group by tbname order by tbname slimit 0, 1;
print $rows $data00 $data10 $data20
if $rows != 4 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -1115,6 +1115,7 @@ int32_t shellExecute() {
}
if (shell.conn == NULL) {
printf("failed to connect to server, reason: %s\n", taos_errstr(NULL));
fflush(stdout);
return -1;
}