yihaoDeng 2023-09-25 18:40:10 +08:00
commit b134365d08
266 changed files with 74470 additions and 5290 deletions

View File

@ -314,9 +314,9 @@ def pre_test_build_win() {
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.10
python -m pip install taospy==2.7.12
python -m pip uninstall taos-ws-py -y
python -m pip install taos-ws-py==0.2.8
python -m pip install taos-ws-py==0.2.9
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1

View File

@ -83,6 +83,16 @@ ELSE ()
SET(TAOS_LIB taos)
ENDIF ()
# build TSZ by default
IF ("${TSZ_ENABLED}" MATCHES "false")
set(VAR_TSZ "" CACHE INTERNAL "global variant empty" )
ELSE()
# define add
MESSAGE(STATUS "build with TSZ enabled")
ADD_DEFINITIONS(-DTD_TSZ)
set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" )
ENDIF()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")

View File

@ -23,20 +23,30 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
Tips (using the C interface as an example):
1. A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the same topic. A group can consist of multiple consumers, but each vgroup is consumed by only one consumer; if there are more consumers than vgroups, the extra consumers consume no data;
3. On the server side, only one offset is saved per vgroup. The offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. Each poll, the server returns one result block, which belongs to a single vgroup and may contain data from multiple WAL versions. The offset of the first record in this block can be obtained through tmq_get_vgroup_offset;
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within one consumer lifecycle, the client locally records the offset of the most recent pull and does not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server becomes LOST, and after about 1 day the consumer is deleted automatically. On a normal exit, the consumer is deleted immediately. When a new consumer is added, a rebalance is triggered after about 2 seconds and the consumer's status on the server becomes ready;
7. A consumer group rebalance reassigns vgroups to all group members in the ready state; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
8. A consumer can use tmq_position to obtain the offset of its current consumption, seek to a specified offset, and consume again;
9. Seek moves the position to the specified offset without performing a commit; once the seek succeeds, poll returns the data at the specified offset and beyond;
10. Before a seek operation, tmq_get_topic_assignment must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. Because of the WAL expiration and deletion mechanism, the offset may already have expired when data is polled even if the seek succeeded. If the polled offset is smaller than the minimum WAL version, consumption starts from the minimum WAL version;
12. The tmq_get_vgroup_offset interface returns the offset of the first record in the result block that contains the record; seeking to this offset consumes all the data in that block. Refer to the fourth point;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the data in the deleted files can no longer be consumed. Set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes the data in a timely manner so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
The following notes on data subscription require some understanding of the TDengine architecture and are meant to be read together with the interfaces of the various language connectors.
- A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
- A consumer group consumes all vgroups of the same topic. A group can consist of multiple consumers, but each vgroup is consumed by only one consumer; if there are more consumers than vgroups, the extra consumers consume no data;
- On the server side, only one offset is saved per vgroup. The offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
- Each poll, the server returns one result block, which belongs to a single vgroup and may contain data from multiple WAL versions. The offset of the first record in this block can be obtained through the offset interface;
- If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within one consumer lifecycle, the client locally records the offset of the most recent pull and does not pull duplicate data;
- If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server becomes LOST, and after about 1 day the consumer is deleted automatically. On a normal exit, the consumer is deleted immediately. When a new consumer is added, a rebalance is triggered after about 2 seconds and the consumer's status on the server becomes ready;
- A consumer group rebalance reassigns vgroups to all group members in the ready state; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
- A consumer can call the position interface to obtain the offset of its current consumption, seek to a specified offset, and consume again;
- Seek moves the position to the specified offset without performing a commit; once the seek succeeds, poll returns the data at the specified offset and beyond;
- Position returns the current consumption position, which is the position to be fetched next, not the position already consumed;
- Commit commits the consumption position. Without a parameter, it commits the current consumption position (the position to be fetched next, not the one already consumed); with a parameter, it commits the position given in the parameter (i.e. the position to be fetched after the next exit and restart);
- Seek sets the consumer's consumption position; whatever position seek points to is what position returns afterwards, and all of these are positions to be fetched next;
- Seek does not affect commit, and commit does not affect seek; the two are independent and are different concepts;
- The begin interface returns the offset of the first record in the WAL, and the end interface returns the offset of the last record in the WAL plus 1;
- Before a seek operation, the assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
- Because of the WAL expiration and deletion mechanism, the offset may already have expired when data is polled even if the seek succeeded. If the polled offset is smaller than the minimum WAL version, consumption starts from the minimum WAL version;
- The offset interface returns the offset of the first record in the result block that contains the record; seeking to this offset consumes all the data in that block. Refer to the fourth point;
- Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the data in the deleted files can no longer be consumed. Set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes the data in a timely manner so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
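The notes above apply to every language connector. As a rough illustration only, the sketch below uses the Java TMQ classes that appear in this commit's ConsumerLoop example (TaosConsumer, ReferenceDeserializer); the class name SubscribeSketch, the topic topic_speed, and its two columns are assumptions, and seek/position/commit calls are intentionally omitted.

```java
package com.taosdata.example;

import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.ReferenceDeserializer;
import com.taosdata.jdbc.tmq.TaosConsumer;

import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class SubscribeSketch {
    // value object matching the columns of the assumed topic "topic_speed" (ts, speed)
    public static class ResultBean {
        private Timestamp ts;
        private int speed;
        public Timestamp getTs() { return ts; }
        public void setTs(Timestamp ts) { this.ts = ts; }
        public int getSpeed() { return speed; }
        public void setSpeed(int speed) { this.speed = speed; }
    }

    public static class ResultDeserializer extends ReferenceDeserializer<ResultBean> {
    }

    public static void main(String[] args) throws SQLException {
        Properties config = new Properties();
        config.setProperty("td.connect.type", "jni");
        config.setProperty("bootstrap.servers", "localhost:6030");
        config.setProperty("td.connect.user", "root");
        config.setProperty("td.connect.pass", "taosdata");
        // consumers that share a group.id split the vgroups of the topic among themselves
        config.setProperty("group.id", "group1");
        // only used when this group has never committed an offset for the topic
        config.setProperty("auto.offset.reset", "earliest");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("value.deserializer", "com.taosdata.example.SubscribeSketch$ResultDeserializer");

        TaosConsumer<ResultBean> consumer = new TaosConsumer<>(config);
        try {
            consumer.subscribe(Collections.singletonList("topic_speed"));
            for (int i = 0; i < 100; i++) {
                // each poll returns one result block from one vgroup (possibly empty)
                ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<ResultBean> record : records) {
                    ResultBean bean = record.value();
                    System.out.println(bean.getTs() + " " + bean.getSpeed());
                }
            }
            consumer.unsubscribe();
        } finally {
            consumer.close();
        }
    }
}
```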
This document does not further introduce message queue concepts; if you want to learn more, please look them up on your own.
Starting from version 3.2.0.0, data subscription supports vnode migration and splitting.
Because data subscription depends on WAL files, and the WAL is not synchronized during vnode migration and splitting, any WAL data that was not consumed before the migration or split cannot be consumed afterwards. Please make sure all data has been consumed before migrating or splitting a vnode; otherwise data may be lost during consumption.
## Data Schema and API

View File

@ -142,8 +142,15 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
| BINARY | byte array |
| NCHAR | java.lang.String |
| JSON | java.lang.String |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |
**Note**: The JSON type is supported only in tags.
Due to historical reasons, the BINARY type in TDengine does not hold true binary data and is no longer recommended. Use the VARBINARY type instead.
The GEOMETRY type is binary data in little-endian byte order that complies with the WKB specification. For details, see [Data Type](/taos-sql/data-type/).
For the WKB specification, see [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/).
For the Java connector, the JTS library can be used to conveniently create GEOMETRY objects, serialize them, and write them to TDengine. See the [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java).
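As a rough sketch of that workflow (assuming the org.locationtech.jts:jts-core dependency used by the example; the class name below is made up), a point can be turned into little-endian WKB bytes suitable for a GEOMETRY column and parsed back after a query:

```java
import org.locationtech.jts.geom.Coordinate;
import org.locationtech.jts.geom.GeometryFactory;
import org.locationtech.jts.geom.Point;
import org.locationtech.jts.io.ByteOrderValues;
import org.locationtech.jts.io.WKBReader;
import org.locationtech.jts.io.WKBWriter;

public class GeometryWkbSketch {
    public static void main(String[] args) throws Exception {
        GeometryFactory gf = new GeometryFactory();
        Point p = gf.createPoint(new Coordinate(1, 2));

        // TDengine currently supports only 2D geometries in little-endian WKB
        WKBWriter writer = new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, false);
        byte[] wkb = writer.write(p); // bytes that can be bound to a GEOMETRY column or tag

        // the same bytes can be parsed back with WKBReader after a query
        Point parsed = (Point) new WKBReader().read(wkb);
        System.out.println(parsed + " (" + wkb.length + " bytes)");
    }
}
```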
## Installation Steps
@ -354,7 +361,7 @@ The configuration parameters in properties are as follows.
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only works when using JDBC native connection. Client configuration file directory path, default value `/etc/taos` on Linux OS, default value `C:/TDengine/cfg` on Windows OS, default value `/etc/taos` on macOS.
- TSDBDriver.PROPERTY_KEY_CHARSET: the character set used by the client; the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: only takes effect when using JDBC native connection. Client locale; the default value is the system's current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. The time zone used by the client; the default value is the system's current time zone.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. The time zone used by the client; the default value is the system's current time zone. For historical reasons, only some POSIX-style specifications are supported, such as UTC-8 (which corresponds to Shanghai, China), GMT-7, and Europe/Paris.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: REST connection timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
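For reference, a minimal sketch of passing these properties on a native connection, following the pattern of the demos added in this commit (host and credentials are placeholders):

```java
import com.taosdata.jdbc.TSDBDriver;

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class ConnectionPropsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
        props.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
        // POSIX-style value; UTC-8 corresponds to the Asia/Shanghai time zone
        props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");

        String url = "jdbc:TAOS://localhost:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url, props)) {
            System.out.println("connected: " + !conn.isClosed());
        }
    }
}
```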
@ -456,13 +463,15 @@ public class ParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 20;
private static final int BINARY_COLUMN_SIZE = 50;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))",
"create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))",
"create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))",
};
private static final int numOfSubTable = 10, numOfRow = 10;
@ -474,21 +483,20 @@ public class ParameterBindingDemo {
init(conn);
bindInteger(conn);
bindFloat(conn);
bindBoolean(conn);
bindBytes(conn);
bindString(conn);
bindVarbinary(conn);
bindGeometry(conn);
clean(conn);
conn.close();
}
private static void init(Connection conn) throws SQLException {
clean(conn);
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_parabind");
stmt.execute("create database if not exists test_parabind");
stmt.execute("use test_parabind");
for (int i = 0; i < schemaList.length; i++) {
@ -496,6 +504,11 @@ public class ParameterBindingDemo {
}
}
}
private static void clean(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_parabind");
}
}
private static void bindInteger(Connection conn) throws SQLException {
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
@ -674,10 +687,84 @@ public class ParameterBindingDemo {
pstmt.columnDataExecuteBatch();
}
}
private static void bindVarbinary(Connection conn) throws SQLException {
String sql = "insert into ? using stable6 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t6_" + i);
// set tags
byte[] bTag = new byte[]{0,2,3,4,5};
bTag[0] = (byte) i;
pstmt.setTagVarbinary(0, bTag);
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<byte[]> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
byte[] v = new byte[]{0,2,3,4,5,6};
v[0] = (byte)j;
f1List.add(v);
}
pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
private static void bindGeometry(Connection conn) throws SQLException {
String sql = "insert into ? using stable7 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040");
byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040");
List<byte[]> listGeo = new ArrayList<>();
listGeo.add(g1);
listGeo.add(g2);
for (int i = 1; i <= 2; i++) {
// set table name
pstmt.setTableName("t7_" + i);
// set tags
pstmt.setTagGeometry(0, listGeo.get(i - 1));
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<byte[]> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
f1List.add(listGeo.get(i - 1));
}
pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
}
```
**Note**: both setString and setNString require the user to declare, in the size parameter, the column width of the corresponding column in the table definition
**Note**: both String and byte[] types require the user to declare, in the size parameter, the column width of the corresponding column in the table definition
The methods to set VALUES columns:
@ -692,6 +779,8 @@ public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setVarbinary(int columnIndex, ArrayList<byte[]> list, int size) throws SQLException
public void setGeometry(int columnIndex, ArrayList<byte[]> list, int size) throws SQLException
```
</TabItem>
@ -880,6 +969,9 @@ public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
public void setTagJson(int index, String value)
public void setTagVarbinary(int index, byte[] value)
public void setTagGeometry(int index, byte[] value)
```
### Schemaless Writing

View File

@ -22,7 +22,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.2.4</version>
<version>3.2.7-SNAPSHOT</version>
</dependency>
<!-- ANCHOR_END: dep-->
<dependency>

View File

@ -23,22 +23,30 @@ import CDemo from "./_sub_c.mdx";
To implement these features, TDengine automatically creates an index for the WAL (Write-Ahead Log) file to support fast random access, and provides a flexible, configurable file rollover and retention mechanism: users can specify the retention time and size of the WAL files as needed (see the CREATE DATABASE statement). In this way, the WAL becomes a persistent storage engine that preserves the order in which events arrive (but since TSDB compresses data far better than the WAL, we do not recommend retaining it for too long; generally no more than a few days). For queries created in the form of topics, TDengine uses the WAL instead of TSDB as its storage engine. During consumption, TDengine reads data directly from the WAL based on the current consumption progress, performs filtering, transformation, and other operations with a unified query engine, and then pushes the data to consumers.
This document does not introduce the basics of message queues themselves; if you need to learn about them, please look them up on your own.
The following notes on data subscription require some understanding of the TDengine architecture and are meant to be read together with the interfaces of the various language connectors.
- A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
- A consumer group consumes all vgroups of the same topic. A group can consist of multiple consumers, but each vgroup is consumed by only one consumer; if there are more consumers than vgroups, the extra consumers consume no data;
- On the server side, only one offset is saved per vgroup. The offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
- Each poll, the server returns one result block, which belongs to a single vgroup and may contain data from multiple WAL versions. The offset of the first record in this block can be obtained through the offset interface;
- If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within one consumer lifecycle, the client locally records the offset of the most recent pull and does not pull duplicate data;
- If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server becomes LOST, and after about 1 day the consumer is deleted automatically. On a normal exit, the consumer is deleted immediately. When a new consumer is added, a rebalance is triggered after about 2 seconds and the consumer's status on the server becomes ready;
- A consumer group rebalance reassigns vgroups to all group members in the ready state; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
- A consumer can use position to obtain the offset of its current consumption, seek to a specified offset, and consume again;
- Seek moves the position to the specified offset without performing a commit; once the seek succeeds, poll returns the data at the specified offset and beyond;
- Before a seek operation, the assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
- Position returns the current consumption position, which is the position to be fetched next, not the position already consumed;
- Commit commits the consumption position. Without a parameter, it commits the current consumption position (the position to be fetched next, not the one already consumed); with a parameter, it commits the position given in the parameter (i.e. the position to be fetched after the next exit and restart);
- Seek sets the consumer's consumption position; whatever position seek points to is what position returns afterwards, and all of these are positions to be fetched next;
- Seek does not affect commit, and commit does not affect seek; the two are independent and are different concepts;
- The begin interface returns the offset of the first record in the WAL, and the end interface returns the offset of the last record in the WAL plus 1;
- The offset interface returns the offset of the first record in the result block that contains the record; seeking to this offset consumes all the data in that block. Refer to the fourth point;
- Because of the WAL expiration and deletion mechanism, the offset may already have expired when data is polled even if the seek succeeded. If the polled offset is smaller than the minimum WAL version, consumption starts from the minimum WAL version;
- Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the data in the deleted files can no longer be consumed. Set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes the data in a timely manner so that no data is lost. This behavior is similar to Kafka and other widely used message queue products;
Notes (using the C interface as an example):
1. A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the same topic. A group can consist of multiple consumers, but each vgroup is consumed by only one consumer; if there are more consumers than vgroups, the extra consumers consume no data;
3. On the server side, only one offset is saved per vgroup. The offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. Each poll, the server returns one result block, which belongs to a single vgroup and may contain data from multiple WAL versions. The offset of the first record in this block can be obtained through the tmq_get_vgroup_offset interface;
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within one consumer lifecycle, the client locally records the offset of the most recent pull and does not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server becomes LOST, and after about 1 day the consumer is deleted automatically. On a normal exit, the consumer is deleted immediately. When a new consumer is added, a rebalance is triggered after about 2 seconds and the consumer's status on the server becomes ready;
7. A consumer group rebalance reassigns vgroups to all group members in the ready state; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
8. A consumer can use tmq_position to obtain the offset of its current consumption, seek to a specified offset, and consume again;
9. Seek moves the position to the specified offset without performing a commit; once the seek succeeds, poll returns the data at the specified offset and beyond;
10. Before a seek operation, the tmq_get_topic_assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. The tmq_get_vgroup_offset interface returns the offset of the first record in the result block that contains the record; seeking to this offset consumes all the data in that block. Refer to the fourth point;
12. Because of the WAL expiration and deletion mechanism, the offset may already have expired when data is polled even if the seek succeeded. If the polled offset is smaller than the minimum WAL version, consumption starts from the minimum WAL version;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the data in the deleted files can no longer be consumed. Set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes the data in a timely manner so that no data is lost. This behavior is similar to Kafka and other widely used message queue products;
This document does not further introduce message queue concepts; if you need to learn more, please look them up on your own.
Starting from version 3.2.0.0, data subscription supports vnode migration and splitting.
Because data subscription depends on WAL files, and the WAL is not synchronized during vnode migration and splitting, any WAL data that was not consumed before the migration or split cannot be consumed afterwards. Please make sure all data has been consumed before migrating or splitting a vnode; otherwise data may be lost during consumption.
## Main Data Structures and APIs

View File

@ -142,8 +142,14 @@ TDengine currently supports timestamp, number, character, and Boolean types, which correspond to Java
| BINARY | byte array |
| NCHAR | java.lang.String |
| JSON | java.lang.String |
| VARBINARY | byte[] |
| GEOMETRY | byte[] |
**Note**: The JSON type is supported only in tags.
**Note**: The JSON type is supported only in tags.
Due to historical reasons, the BINARY type in TDengine does not hold true binary data and is no longer recommended. Use the VARBINARY type instead.
The GEOMETRY type is binary data in little-endian byte order that complies with the WKB specification. For details, see [Data Type](/taos-sql/data-type/#数据类型).
For the WKB specification, see [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/).
For the Java connector, the JTS library can be used to conveniently create GEOMETRY objects, serialize them, and write them to TDengine. See the [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java).
## Installation Steps
@ -357,7 +363,7 @@ The configuration parameters in properties are as follows:
- TSDBDriver.PROPERTY_KEY_CONFIG_DIR: only takes effect when using JDBC native connection. Client configuration file directory path; the default value is `/etc/taos` on Linux and `C:/TDengine/cfg` on Windows.
- TSDBDriver.PROPERTY_KEY_CHARSET: the character set used by the client; the default value is the system character set.
- TSDBDriver.PROPERTY_KEY_LOCALE: only takes effect when using JDBC native connection. Client locale; the default value is the system's current locale.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. The time zone used by the client; the default value is the system's current time zone.
- TSDBDriver.PROPERTY_KEY_TIME_ZONE: only takes effect when using JDBC native connection. The time zone used by the client; the default value is the system's current time zone. For historical reasons, only part of the POSIX standard is supported, such as UTC-8 (which corresponds to Shanghai, China), GMT-8, and Asia/Shanghai.
- TSDBDriver.HTTP_CONNECT_TIMEOUT: connection timeout in milliseconds; the default value is 60000. Only takes effect when using a REST connection.
- TSDBDriver.HTTP_SOCKET_TIMEOUT: socket timeout in milliseconds; the default value is 60000. Only takes effect when using a REST connection with batchfetch set to false.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message timeout in milliseconds; the default value is 60000. Only takes effect when using a REST connection with batchfetch set to true.
@ -459,13 +465,15 @@ public class ParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 30;
private static final int BINARY_COLUMN_SIZE = 50;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))",
"create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))",
"create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))",
};
private static final int numOfSubTable = 10, numOfRow = 10;
@ -477,21 +485,20 @@ public class ParameterBindingDemo {
init(conn);
bindInteger(conn);
bindFloat(conn);
bindBoolean(conn);
bindBytes(conn);
bindString(conn);
bindVarbinary(conn);
bindGeometry(conn);
clean(conn);
conn.close();
}
private static void init(Connection conn) throws SQLException {
clean(conn);
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_parabind");
stmt.execute("create database if not exists test_parabind");
stmt.execute("use test_parabind");
for (int i = 0; i < schemaList.length; i++) {
@ -499,6 +506,11 @@ public class ParameterBindingDemo {
}
}
}
private static void clean(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_parabind");
}
}
private static void bindInteger(Connection conn) throws SQLException {
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
@ -677,10 +689,84 @@ public class ParameterBindingDemo {
pstmt.columnDataExecuteBatch();
}
}
private static void bindVarbinary(Connection conn) throws SQLException {
String sql = "insert into ? using stable6 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t6_" + i);
// set tags
byte[] bTag = new byte[]{0,2,3,4,5};
bTag[0] = (byte) i;
pstmt.setTagVarbinary(0, bTag);
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<byte[]> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
byte[] v = new byte[]{0,2,3,4,5,6};
v[0] = (byte)j;
f1List.add(v);
}
pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
private static void bindGeometry(Connection conn) throws SQLException {
String sql = "insert into ? using stable7 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040");
byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040");
List<byte[]> listGeo = new ArrayList<>();
listGeo.add(g1);
listGeo.add(g2);
for (int i = 1; i <= 2; i++) {
// set table name
pstmt.setTableName("t7_" + i);
// set tags
pstmt.setTagGeometry(0, listGeo.get(i - 1));
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<byte[]> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
f1List.add(listGeo.get(i - 1));
}
pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
}
```
**Note**: both setString and setNString require the user to declare, in the size parameter, the column width of the corresponding column in the table definition
**Note**: both string and byte-array types require the user to declare, in the size parameter, the column width of the corresponding column in the table definition
The methods for setting the VALUES column data are:
@ -695,6 +781,8 @@ public void setByte(int columnIndex, ArrayList<Byte> list) throws SQLException
public void setShort(int columnIndex, ArrayList<Short> list) throws SQLException
public void setString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setNString(int columnIndex, ArrayList<String> list, int size) throws SQLException
public void setVarbinary(int columnIndex, ArrayList<byte[]> list, int size) throws SQLException
public void setGeometry(int columnIndex, ArrayList<byte[]> list, int size) throws SQLException
```
</TabItem>
@ -883,6 +971,9 @@ public void setTagFloat(int index, float value)
public void setTagDouble(int index, double value)
public void setTagString(int index, String value)
public void setTagNString(int index, String value)
public void setTagJson(int index, String value)
public void setTagVarbinary(int index, byte[] value)
public void setTagGeometry(int index, byte[] value)
```
### Schemaless Writing

View File

@ -11,13 +11,20 @@
<properties>
<project.assembly.dir>src/main/resources/assembly</project.assembly.dir>
<java.version>1.8</java.version>
</properties>
<dependencies>
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.0.0</version>
<version>3.2.7</version>
</dependency>
<dependency>
<groupId>org.locationtech.jts</groupId>
<artifactId>jts-core</artifactId>
<version>1.19.0</version>
</dependency>
</dependencies>
@ -68,12 +75,12 @@
</execution>
<execution>
<id>SubscribeDemo</id>
<id>GeometryDemo</id>
<configuration>
<finalName>SubscribeDemo</finalName>
<finalName>GeometryDemo</finalName>
<archive>
<manifest>
<mainClass>com.taosdata.example.SubscribeDemo</mainClass>
<mainClass>com.taosdata.example.GeometryDemo</mainClass>
</manifest>
</archive>
<descriptorRefs>

View File

@ -0,0 +1,94 @@
package com.taosdata.example;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.ReferenceDeserializer;
import com.taosdata.jdbc.tmq.TaosConsumer;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
public abstract class ConsumerLoop {
private final TaosConsumer<ResultBean> consumer;
private final List<String> topics;
private final AtomicBoolean shutdown;
private final CountDownLatch shutdownLatch;
public ConsumerLoop() throws SQLException {
Properties config = new Properties();
config.setProperty("td.connect.type", "jni");
config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("td.connect.user", "root");
config.setProperty("td.connect.pass", "taosdata");
config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group1");
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
this.shutdown = new AtomicBoolean(false);
this.shutdownLatch = new CountDownLatch(1);
}
public abstract void process(ResultBean result);
public void pollData() throws SQLException {
try {
consumer.subscribe(topics);
while (!shutdown.get()) {
ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
for (ConsumerRecord<ResultBean> record : records) {
ResultBean bean = record.value();
process(bean);
}
}
consumer.unsubscribe();
} finally {
consumer.close();
shutdownLatch.countDown();
}
}
public void shutdown() throws InterruptedException {
shutdown.set(true);
shutdownLatch.await();
}
public static class ResultDeserializer extends ReferenceDeserializer<ResultBean> {
}
public static class ResultBean {
private Timestamp ts;
private int speed;
public Timestamp getTs() {
return ts;
}
public void setTs(Timestamp ts) {
this.ts = ts;
}
public int getSpeed() {
return speed;
}
public void setSpeed(int speed) {
this.speed = speed;
}
}
}

View File

@ -0,0 +1,190 @@
package com.taosdata.example;
import com.taosdata.jdbc.TSDBPreparedStatement;
import org.locationtech.jts.geom.*;
import org.locationtech.jts.io.ByteOrderValues;
import org.locationtech.jts.io.ParseException;
import org.locationtech.jts.io.WKBReader;
import org.locationtech.jts.io.WKBWriter;
import java.sql.*;
import java.util.ArrayList;
import java.util.Properties;
public class GeometryDemo {
private static String host = "localhost";
private static final String dbName = "test";
private static final String tbName = "weather";
private static final String user = "root";
private static final String password = "taosdata";
private Connection connection;
public static void main(String[] args) throws SQLException {
for (int i = 0; i < args.length; i++) {
if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1)
host = args[++i];
}
if (host == null) {
printHelp();
}
GeometryDemo demo = new GeometryDemo();
demo.init();
demo.createDatabase();
demo.useDatabase();
demo.dropTable();
demo.createTable();
demo.insert();
demo.stmtInsert();
demo.select();
demo.dropTable();
demo.close();
}
private void init() {
final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password;
// get connection
try {
Properties properties = new Properties();
properties.setProperty("charset", "UTF-8");
properties.setProperty("locale", "en_US.UTF-8");
properties.setProperty("timezone", "UTC-8");
System.out.println("get connection starting...");
connection = DriverManager.getConnection(url, properties);
if (connection != null)
System.out.println("[ OK ] Connection established.");
} catch (SQLException e) {
e.printStackTrace();
}
}
private void createDatabase() {
String sql = "create database if not exists " + dbName;
execute(sql);
}
private void useDatabase() {
String sql = "use " + dbName;
execute(sql);
}
private void dropTable() {
final String sql = "drop table if exists " + dbName + "." + tbName + "";
execute(sql);
}
private void createTable() {
final String sql = "create table if not exists " + dbName + "." + tbName + " (ts timestamp, temperature float, humidity int, location geometry(50))";
execute(sql);
}
private void insert() {
final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity, location) values(now, 20.5, 34, 'POINT(1 2)')";
execute(sql);
}
private void stmtInsert() throws SQLException {
TSDBPreparedStatement preparedStatement = (TSDBPreparedStatement) connection.prepareStatement("insert into " + dbName + "." + tbName + " values (?, ?, ?, ?)");
long current = System.currentTimeMillis();
ArrayList<Long> tsList = new ArrayList<>();
tsList.add(current);
tsList.add(current + 1);
preparedStatement.setTimestamp(0, tsList);
ArrayList<Float> tempList = new ArrayList<>();
tempList.add(20.1F);
tempList.add(21.2F);
preparedStatement.setFloat(1, tempList);
ArrayList<Integer> humList = new ArrayList<>();
humList.add(30);
humList.add(31);
preparedStatement.setInt(2, humList);
ArrayList<byte[]> list = new ArrayList<>();
GeometryFactory gf = new GeometryFactory();
Point p1 = gf.createPoint(new Coordinate(1,2));
p1.setSRID(1234);
// NOTE: TDengine current version only support 2D dimension and little endian byte order
WKBWriter w = new WKBWriter(2, ByteOrderValues.LITTLE_ENDIAN, true);
byte[] wkb = w.write(p1);
list.add(wkb);
Coordinate[] coordinates = { new Coordinate(10, 20),
new Coordinate(30, 40)};
LineString lineString = gf.createLineString(coordinates);
lineString.setSRID(2345);
byte[] wkb2 = w.write(lineString);
list.add(wkb2);
preparedStatement.setGeometry(3, list, 50);
preparedStatement.columnDataAddBatch();
preparedStatement.columnDataExecuteBatch();
}
private void select() {
final String sql = "select * from " + dbName + "." + tbName;
executeQuery(sql);
}
private void close() {
try {
if (connection != null) {
this.connection.close();
System.out.println("connection closed.");
}
} catch (SQLException e) {
e.printStackTrace();
}
}
private void executeQuery(String sql) {
long start = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
ResultSet resultSet = statement.executeQuery(sql);
long end = System.currentTimeMillis();
printSql(sql, true, (end - start));
while (resultSet.next()){
byte[] result1 = resultSet.getBytes(4);
WKBReader reader = new WKBReader();
Geometry g1 = reader.read(result1);
System.out.println("GEO OBJ: " + g1 + ", SRID: " + g1.getSRID());
}
} catch (SQLException e) {
long end = System.currentTimeMillis();
printSql(sql, false, (end - start));
e.printStackTrace();
} catch (ParseException e) {
throw new RuntimeException(e);
}
}
private void printSql(String sql, boolean succeed, long cost) {
System.out.println("[ " + (succeed ? "OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql);
}
private void execute(String sql) {
long start = System.currentTimeMillis();
try (Statement statement = connection.createStatement()) {
boolean execute = statement.execute(sql);
long end = System.currentTimeMillis();
printSql(sql, true, (end - start));
} catch (SQLException e) {
long end = System.currentTimeMillis();
printSql(sql, false, (end - start));
e.printStackTrace();
}
}
private static void printHelp() {
System.out.println("Usage: java -jar JDBCDemo.jar -host <hostname>");
System.exit(0);
}
}

View File

@ -0,0 +1,316 @@
package com.taosdata.example;
import com.taosdata.jdbc.TSDBPreparedStatement;
import com.taosdata.jdbc.utils.StringUtils;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
public class ParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 50;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))",
"create table stable6(ts timestamp, f1 varbinary(" + BINARY_COLUMN_SIZE + ")) tags(t1 varbinary(" + BINARY_COLUMN_SIZE + "))",
"create table stable7(ts timestamp, f1 geometry(" + BINARY_COLUMN_SIZE + ")) tags(t1 geometry(" + BINARY_COLUMN_SIZE + "))",
};
private static final int numOfSubTable = 10, numOfRow = 10;
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS://" + host + ":6030/";
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
init(conn);
bindInteger(conn);
bindFloat(conn);
bindBoolean(conn);
bindBytes(conn);
bindString(conn);
bindVarbinary(conn);
bindGeometry(conn);
clean(conn);
conn.close();
}
private static void init(Connection conn) throws SQLException {
clean(conn);
try (Statement stmt = conn.createStatement()) {
stmt.execute("create database if not exists test_parabind");
stmt.execute("use test_parabind");
for (int i = 0; i < schemaList.length; i++) {
stmt.execute(schemaList[i]);
}
}
}
private static void clean(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_parabind");
}
}
private static void bindInteger(Connection conn) throws SQLException {
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t1_" + i);
// set tags
pstmt.setTagByte(0, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setTagShort(1, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setTagInt(2, random.nextInt(Integer.MAX_VALUE));
pstmt.setTagLong(3, random.nextLong());
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<Byte> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f1List.add(Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setByte(1, f1List);
ArrayList<Short> f2List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f2List.add(Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setShort(2, f2List);
ArrayList<Integer> f3List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f3List.add(random.nextInt(Integer.MAX_VALUE));
pstmt.setInt(3, f3List);
ArrayList<Long> f4List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f4List.add(random.nextLong());
pstmt.setLong(4, f4List);
// add column
pstmt.columnDataAddBatch();
}
// execute column
pstmt.columnDataExecuteBatch();
}
}
private static void bindFloat(Connection conn) throws SQLException {
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class);
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t2_" + i);
// set tags
pstmt.setTagFloat(0, random.nextFloat());
pstmt.setTagDouble(1, random.nextDouble());
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<Float> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f1List.add(random.nextFloat());
pstmt.setFloat(1, f1List);
ArrayList<Double> f2List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f2List.add(random.nextDouble());
pstmt.setDouble(2, f2List);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
// close if no try-with-catch statement is used
pstmt.close();
}
private static void bindBoolean(Connection conn) throws SQLException {
String sql = "insert into ? using stable3 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t3_" + i);
// set tags
pstmt.setTagBoolean(0, random.nextBoolean());
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<Boolean> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++)
f1List.add(random.nextBoolean());
pstmt.setBoolean(1, f1List);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
private static void bindBytes(Connection conn) throws SQLException {
String sql = "insert into ? using stable4 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t4_" + i);
// set tags
pstmt.setTagString(0, new String("abc"));
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<String> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
f1List.add(new String("abc"));
}
pstmt.setString(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
private static void bindString(Connection conn) throws SQLException {
String sql = "insert into ? using stable5 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t5_" + i);
// set tags
pstmt.setTagNString(0, "California.SanFrancisco");
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<String> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
f1List.add("California.LosAngeles");
}
pstmt.setNString(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
private static void bindVarbinary(Connection conn) throws SQLException {
String sql = "insert into ? using stable6 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t6_" + i);
// set tags
byte[] bTag = new byte[]{0,2,3,4,5};
bTag[0] = (byte) i;
pstmt.setTagVarbinary(0, bTag);
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<byte[]> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
byte[] v = new byte[]{0,2,3,4,5,6};
v[0] = (byte)j;
f1List.add(v);
}
pstmt.setVarbinary(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
private static void bindGeometry(Connection conn) throws SQLException {
String sql = "insert into ? using stable7 tags(?) values(?,?)";
try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
byte[] g1 = StringUtils.hexToBytes("0101000000000000000000F03F0000000000000040");
byte[] g2 = StringUtils.hexToBytes("0102000020E610000002000000000000000000F03F000000000000004000000000000008400000000000001040");
List<byte[]> listGeo = new ArrayList<>();
listGeo.add(g1);
listGeo.add(g2);
for (int i = 1; i <= 2; i++) {
// set table name
pstmt.setTableName("t7_" + i);
// set tags
pstmt.setTagGeometry(0, listGeo.get(i - 1));
// set columns
ArrayList<Long> tsList = new ArrayList<>();
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++)
tsList.add(current + j);
pstmt.setTimestamp(0, tsList);
ArrayList<byte[]> f1List = new ArrayList<>();
for (int j = 0; j < numOfRow; j++) {
f1List.add(listGeo.get(i - 1));
}
pstmt.setGeometry(1, f1List, BINARY_COLUMN_SIZE);
// add column
pstmt.columnDataAddBatch();
}
// execute
pstmt.columnDataExecuteBatch();
}
}
}

View File

@ -1,74 +0,0 @@
package com.taosdata.example;
import com.taosdata.jdbc.TSDBConnection;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.TSDBResultSet;
import com.taosdata.jdbc.TSDBSubscribe;
import java.sql.DriverManager;
import java.sql.ResultSetMetaData;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class SubscribeDemo {
private static final String usage = "java -jar SubscribeDemo.jar -host <hostname> -database <database name> -topic <topic> -sql <sql>";
public static void main(String[] args) {
// parse args from command line
String host = "", database = "", topic = "", sql = "";
for (int i = 0; i < args.length; i++) {
if ("-host".equalsIgnoreCase(args[i]) && i < args.length - 1) {
host = args[++i];
}
if ("-database".equalsIgnoreCase(args[i]) && i < args.length - 1) {
database = args[++i];
}
if ("-topic".equalsIgnoreCase(args[i]) && i < args.length - 1) {
topic = args[++i];
}
if ("-sql".equalsIgnoreCase(args[i]) && i < args.length - 1) {
sql = args[++i];
}
}
if (host.isEmpty() || database.isEmpty() || topic.isEmpty() || sql.isEmpty()) {
System.out.println(usage);
return;
}
try {
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
final String url = "jdbc:TAOS://" + host + ":6030/" + database + "?user=root&password=taosdata";
// get TSDBConnection
TSDBConnection connection = (TSDBConnection) DriverManager.getConnection(url, properties);
// create TSDBSubscribe
TSDBSubscribe sub = connection.subscribe(topic, sql, false);
int total = 0;
while (true) {
TSDBResultSet rs = sub.consume();
int count = 0;
ResultSetMetaData meta = rs.getMetaData();
while (rs.next()) {
for (int i = 1; i <= meta.getColumnCount(); i++) {
System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t");
}
System.out.println();
count++;
}
total += count;
// System.out.printf("%d rows consumed, total %d\n", count, total);
if (total >= 10)
break;
TimeUnit.SECONDS.sleep(1);
}
sub.close(false);
connection.close();
} catch (Exception e) {
System.out.println("host: " + host + ", database: " + database + ", topic: " + topic + ", sql: " + sql);
e.printStackTrace();
}
}
}

View File

@ -0,0 +1,170 @@
package com.taosdata.example;
import com.taosdata.jdbc.ws.TSWSPreparedStatement;
import java.sql.*;
import java.util.Random;
public class WSParameterBindingDemo {
private static final String host = "127.0.0.1";
private static final Random random = new Random(System.currentTimeMillis());
private static final int BINARY_COLUMN_SIZE = 30;
private static final String[] schemaList = {
"create table stable1(ts timestamp, f1 tinyint, f2 smallint, f3 int, f4 bigint) tags(t1 tinyint, t2 smallint, t3 int, t4 bigint)",
"create table stable2(ts timestamp, f1 float, f2 double) tags(t1 float, t2 double)",
"create table stable3(ts timestamp, f1 bool) tags(t1 bool)",
"create table stable4(ts timestamp, f1 binary(" + BINARY_COLUMN_SIZE + ")) tags(t1 binary(" + BINARY_COLUMN_SIZE + "))",
"create table stable5(ts timestamp, f1 nchar(" + BINARY_COLUMN_SIZE + ")) tags(t1 nchar(" + BINARY_COLUMN_SIZE + "))"
};
private static final int numOfSubTable = 10, numOfRow = 10;
public static void main(String[] args) throws SQLException {
String jdbcUrl = "jdbc:TAOS-RS://" + host + ":6041/?batchfetch=true";
Connection conn = DriverManager.getConnection(jdbcUrl, "root", "taosdata");
init(conn);
bindInteger(conn);
bindFloat(conn);
bindBoolean(conn);
bindBytes(conn);
bindString(conn);
conn.close();
}
private static void init(Connection conn) throws SQLException {
try (Statement stmt = conn.createStatement()) {
stmt.execute("drop database if exists test_ws_parabind");
stmt.execute("create database if not exists test_ws_parabind");
stmt.execute("use test_ws_parabind");
for (int i = 0; i < schemaList.length; i++) {
stmt.execute(schemaList[i]);
}
}
}
private static void bindInteger(Connection conn) throws SQLException {
String sql = "insert into ? using stable1 tags(?,?,?,?) values(?,?,?,?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t1_" + i);
// set tags
pstmt.setTagByte(1, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setTagShort(2, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setTagInt(3, random.nextInt(Integer.MAX_VALUE));
pstmt.setTagLong(4, random.nextLong());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setByte(2, Byte.parseByte(Integer.toString(random.nextInt(Byte.MAX_VALUE))));
pstmt.setShort(3, Short.parseShort(Integer.toString(random.nextInt(Short.MAX_VALUE))));
pstmt.setInt(4, random.nextInt(Integer.MAX_VALUE));
pstmt.setLong(5, random.nextLong());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindFloat(Connection conn) throws SQLException {
String sql = "insert into ? using stable2 tags(?,?) values(?,?,?)";
try(TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t2_" + i);
// set tags
pstmt.setTagFloat(1, random.nextFloat());
pstmt.setTagDouble(2, random.nextDouble());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setFloat(2, random.nextFloat());
pstmt.setDouble(3, random.nextDouble());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindBoolean(Connection conn) throws SQLException {
String sql = "insert into ? using stable3 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t3_" + i);
// set tags
pstmt.setTagBoolean(1, random.nextBoolean());
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setBoolean(2, random.nextBoolean());
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindBytes(Connection conn) throws SQLException {
String sql = "insert into ? using stable4 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t4_" + i);
// set tags
pstmt.setTagString(1, new String("abc"));
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setString(2, "abc");
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
private static void bindString(Connection conn) throws SQLException {
String sql = "insert into ? using stable5 tags(?) values(?,?)";
try (TSWSPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSWSPreparedStatement.class)) {
for (int i = 1; i <= numOfSubTable; i++) {
// set table name
pstmt.setTableName("t5_" + i);
// set tags
pstmt.setTagNString(1, "California.SanFrancisco");
// set columns
long current = System.currentTimeMillis();
for (int j = 0; j < numOfRow; j++) {
pstmt.setTimestamp(1, new Timestamp(current + j));
pstmt.setNString(2, "California.SanFrancisco");
pstmt.addBatch();
}
pstmt.executeBatch();
}
}
}
}

View File

@ -125,7 +125,8 @@ typedef enum {
typedef enum {
TAOS_NOTIFY_PASSVER = 0,
TAOS_NOTIFY_WHITELIST_VER = 1
TAOS_NOTIFY_WHITELIST_VER = 1,
TAOS_NOTIFY_USER_DROPPED = 2,
} TAOS_NOTIFY_TYPE;
#define RET_MSG_LENGTH 1024
@ -240,6 +241,11 @@ DLL_EXPORT int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param
typedef void (*__taos_async_whitelist_fn_t)(void *param, int code, TAOS *taos, int numOfWhiteLists, uint64_t* pWhiteLists);
DLL_EXPORT void taos_fetch_whitelist_a(TAOS *taos, __taos_async_whitelist_fn_t fp, void *param);
typedef enum {
TAOS_CONN_MODE_BI = 0,
} TAOS_CONN_MODE;
DLL_EXPORT int taos_set_conn_mode(TAOS* taos, int mode, int value);
/* --------------------------schemaless INTERFACE------------------------------- */
DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int protocol, int precision);
@ -313,6 +319,7 @@ DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT TAOS *tmq_get_connect(tmq_t *tmq);
DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);

View File

@ -145,6 +145,7 @@ extern bool tsUseAdapter;
extern int32_t tsMetaCacheMaxSize;
extern int32_t tsSlowLogThreshold;
extern int32_t tsSlowLogScope;
extern int32_t tsTimeSeriesThreshold;
// client
extern int32_t tsMinSlidingTime;
@ -159,10 +160,11 @@ extern char buildinfo[];
// lossy
extern char tsLossyColumns[];
extern double tsFPrecision;
extern float tsFPrecision;
extern double tsDPrecision;
extern uint32_t tsMaxRange;
extern uint32_t tsCurRange;
extern bool tsIfAdtFse;
extern char tsCompressor[];
// tfs

View File

@ -30,6 +30,8 @@ extern "C" {
#define GRANTS_COL_MAX_LEN 196
#endif
#define GRANT_HEART_BEAT_MIN 2
typedef enum {
TSDB_GRANT_ALL,
TSDB_GRANT_TIME,

View File

@ -959,7 +959,7 @@ typedef struct {
int8_t superAuth;
int8_t sysInfo;
int8_t enable;
int8_t reserve;
int8_t dropped;
SHashObj* createdDbs;
SHashObj* readDbs;
SHashObj* writeDbs;
@ -1464,6 +1464,11 @@ typedef struct {
int32_t learnerProgress; // use one reserved
} SVnodeLoad;
typedef struct {
int32_t vgId;
int64_t nTimeSeries;
} SVnodeLoadLite;
typedef struct {
int8_t syncState;
int64_t syncTerm;
@ -1511,6 +1516,16 @@ int32_t tSerializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
int32_t tDeserializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
void tFreeSStatusReq(SStatusReq* pReq);
typedef struct {
int32_t dnodeId;
int64_t clusterId;
SArray* pVloads;
} SNotifyReq;
int32_t tSerializeSNotifyReq(void* buf, int32_t bufLen, SNotifyReq* pReq);
int32_t tDeserializeSNotifyReq(void* buf, int32_t bufLen, SNotifyReq* pReq);
void tFreeSNotifyReq(SNotifyReq* pReq);
typedef struct {
int32_t dnodeId;
int64_t clusterId;

View File

@ -179,8 +179,7 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_HEARTBEAT, "stream-heartbeat", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_RETRIEVE_IP_WHITE, "retrieve-ip-white", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_WHITELIST, "get-user-whitelist", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_NOTIFY, "notify", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
@ -189,6 +188,8 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GRANT_NOTIFY, "grant-notify", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)

View File

@ -186,177 +186,179 @@
#define TK_SUBSCRIPTIONS 167
#define TK_VNODES 168
#define TK_ALIVE 169
#define TK_LIKE 170
#define TK_TBNAME 171
#define TK_QTAGS 172
#define TK_AS 173
#define TK_INDEX 174
#define TK_FUNCTION 175
#define TK_INTERVAL 176
#define TK_COUNT 177
#define TK_LAST_ROW 178
#define TK_META 179
#define TK_ONLY 180
#define TK_TOPIC 181
#define TK_CONSUMER 182
#define TK_GROUP 183
#define TK_DESC 184
#define TK_DESCRIBE 185
#define TK_RESET 186
#define TK_QUERY 187
#define TK_CACHE 188
#define TK_EXPLAIN 189
#define TK_ANALYZE 190
#define TK_VERBOSE 191
#define TK_NK_BOOL 192
#define TK_RATIO 193
#define TK_NK_FLOAT 194
#define TK_OUTPUTTYPE 195
#define TK_AGGREGATE 196
#define TK_BUFSIZE 197
#define TK_LANGUAGE 198
#define TK_REPLACE 199
#define TK_STREAM 200
#define TK_INTO 201
#define TK_PAUSE 202
#define TK_RESUME 203
#define TK_TRIGGER 204
#define TK_AT_ONCE 205
#define TK_WINDOW_CLOSE 206
#define TK_IGNORE 207
#define TK_EXPIRED 208
#define TK_FILL_HISTORY 209
#define TK_UPDATE 210
#define TK_SUBTABLE 211
#define TK_UNTREATED 212
#define TK_KILL 213
#define TK_CONNECTION 214
#define TK_TRANSACTION 215
#define TK_BALANCE 216
#define TK_VGROUP 217
#define TK_LEADER 218
#define TK_MERGE 219
#define TK_REDISTRIBUTE 220
#define TK_SPLIT 221
#define TK_DELETE 222
#define TK_INSERT 223
#define TK_NULL 224
#define TK_NK_QUESTION 225
#define TK_NK_ARROW 226
#define TK_ROWTS 227
#define TK_QSTART 228
#define TK_QEND 229
#define TK_QDURATION 230
#define TK_WSTART 231
#define TK_WEND 232
#define TK_WDURATION 233
#define TK_IROWTS 234
#define TK_ISFILLED 235
#define TK_CAST 236
#define TK_NOW 237
#define TK_TODAY 238
#define TK_TIMEZONE 239
#define TK_CLIENT_VERSION 240
#define TK_SERVER_VERSION 241
#define TK_SERVER_STATUS 242
#define TK_CURRENT_USER 243
#define TK_CASE 244
#define TK_WHEN 245
#define TK_THEN 246
#define TK_ELSE 247
#define TK_BETWEEN 248
#define TK_IS 249
#define TK_NK_LT 250
#define TK_NK_GT 251
#define TK_NK_LE 252
#define TK_NK_GE 253
#define TK_NK_NE 254
#define TK_MATCH 255
#define TK_NMATCH 256
#define TK_CONTAINS 257
#define TK_IN 258
#define TK_JOIN 259
#define TK_INNER 260
#define TK_SELECT 261
#define TK_NK_HINT 262
#define TK_DISTINCT 263
#define TK_WHERE 264
#define TK_PARTITION 265
#define TK_BY 266
#define TK_SESSION 267
#define TK_STATE_WINDOW 268
#define TK_EVENT_WINDOW 269
#define TK_SLIDING 270
#define TK_FILL 271
#define TK_VALUE 272
#define TK_VALUE_F 273
#define TK_NONE 274
#define TK_PREV 275
#define TK_NULL_F 276
#define TK_LINEAR 277
#define TK_NEXT 278
#define TK_HAVING 279
#define TK_RANGE 280
#define TK_EVERY 281
#define TK_ORDER 282
#define TK_SLIMIT 283
#define TK_SOFFSET 284
#define TK_LIMIT 285
#define TK_OFFSET 286
#define TK_ASC 287
#define TK_NULLS 288
#define TK_ABORT 289
#define TK_AFTER 290
#define TK_ATTACH 291
#define TK_BEFORE 292
#define TK_BEGIN 293
#define TK_BITAND 294
#define TK_BITNOT 295
#define TK_BITOR 296
#define TK_BLOCKS 297
#define TK_CHANGE 298
#define TK_COMMA 299
#define TK_CONCAT 300
#define TK_CONFLICT 301
#define TK_COPY 302
#define TK_DEFERRED 303
#define TK_DELIMITERS 304
#define TK_DETACH 305
#define TK_DIVIDE 306
#define TK_DOT 307
#define TK_EACH 308
#define TK_FAIL 309
#define TK_FILE 310
#define TK_FOR 311
#define TK_GLOB 312
#define TK_ID 313
#define TK_IMMEDIATE 314
#define TK_IMPORT 315
#define TK_INITIALLY 316
#define TK_INSTEAD 317
#define TK_ISNULL 318
#define TK_KEY 319
#define TK_MODULES 320
#define TK_NK_BITNOT 321
#define TK_NK_SEMI 322
#define TK_NOTNULL 323
#define TK_OF 324
#define TK_PLUS 325
#define TK_PRIVILEGE 326
#define TK_RAISE 327
#define TK_RESTRICT 328
#define TK_ROW 329
#define TK_SEMI 330
#define TK_STAR 331
#define TK_STATEMENT 332
#define TK_STRICT 333
#define TK_STRING 334
#define TK_TIMES 335
#define TK_VALUES 336
#define TK_VARIABLE 337
#define TK_VIEW 338
#define TK_WAL 339
#define TK_NORMAL 170
#define TK_CHILD 171
#define TK_LIKE 172
#define TK_TBNAME 173
#define TK_QTAGS 174
#define TK_AS 175
#define TK_SYSTEM 176
#define TK_INDEX 177
#define TK_FUNCTION 178
#define TK_INTERVAL 179
#define TK_COUNT 180
#define TK_LAST_ROW 181
#define TK_META 182
#define TK_ONLY 183
#define TK_TOPIC 184
#define TK_CONSUMER 185
#define TK_GROUP 186
#define TK_DESC 187
#define TK_DESCRIBE 188
#define TK_RESET 189
#define TK_QUERY 190
#define TK_CACHE 191
#define TK_EXPLAIN 192
#define TK_ANALYZE 193
#define TK_VERBOSE 194
#define TK_NK_BOOL 195
#define TK_RATIO 196
#define TK_NK_FLOAT 197
#define TK_OUTPUTTYPE 198
#define TK_AGGREGATE 199
#define TK_BUFSIZE 200
#define TK_LANGUAGE 201
#define TK_REPLACE 202
#define TK_STREAM 203
#define TK_INTO 204
#define TK_PAUSE 205
#define TK_RESUME 206
#define TK_TRIGGER 207
#define TK_AT_ONCE 208
#define TK_WINDOW_CLOSE 209
#define TK_IGNORE 210
#define TK_EXPIRED 211
#define TK_FILL_HISTORY 212
#define TK_UPDATE 213
#define TK_SUBTABLE 214
#define TK_UNTREATED 215
#define TK_KILL 216
#define TK_CONNECTION 217
#define TK_TRANSACTION 218
#define TK_BALANCE 219
#define TK_VGROUP 220
#define TK_LEADER 221
#define TK_MERGE 222
#define TK_REDISTRIBUTE 223
#define TK_SPLIT 224
#define TK_DELETE 225
#define TK_INSERT 226
#define TK_NULL 227
#define TK_NK_QUESTION 228
#define TK_NK_ARROW 229
#define TK_ROWTS 230
#define TK_QSTART 231
#define TK_QEND 232
#define TK_QDURATION 233
#define TK_WSTART 234
#define TK_WEND 235
#define TK_WDURATION 236
#define TK_IROWTS 237
#define TK_ISFILLED 238
#define TK_CAST 239
#define TK_NOW 240
#define TK_TODAY 241
#define TK_TIMEZONE 242
#define TK_CLIENT_VERSION 243
#define TK_SERVER_VERSION 244
#define TK_SERVER_STATUS 245
#define TK_CURRENT_USER 246
#define TK_CASE 247
#define TK_WHEN 248
#define TK_THEN 249
#define TK_ELSE 250
#define TK_BETWEEN 251
#define TK_IS 252
#define TK_NK_LT 253
#define TK_NK_GT 254
#define TK_NK_LE 255
#define TK_NK_GE 256
#define TK_NK_NE 257
#define TK_MATCH 258
#define TK_NMATCH 259
#define TK_CONTAINS 260
#define TK_IN 261
#define TK_JOIN 262
#define TK_INNER 263
#define TK_SELECT 264
#define TK_NK_HINT 265
#define TK_DISTINCT 266
#define TK_WHERE 267
#define TK_PARTITION 268
#define TK_BY 269
#define TK_SESSION 270
#define TK_STATE_WINDOW 271
#define TK_EVENT_WINDOW 272
#define TK_SLIDING 273
#define TK_FILL 274
#define TK_VALUE 275
#define TK_VALUE_F 276
#define TK_NONE 277
#define TK_PREV 278
#define TK_NULL_F 279
#define TK_LINEAR 280
#define TK_NEXT 281
#define TK_HAVING 282
#define TK_RANGE 283
#define TK_EVERY 284
#define TK_ORDER 285
#define TK_SLIMIT 286
#define TK_SOFFSET 287
#define TK_LIMIT 288
#define TK_OFFSET 289
#define TK_ASC 290
#define TK_NULLS 291
#define TK_ABORT 292
#define TK_AFTER 293
#define TK_ATTACH 294
#define TK_BEFORE 295
#define TK_BEGIN 296
#define TK_BITAND 297
#define TK_BITNOT 298
#define TK_BITOR 299
#define TK_BLOCKS 300
#define TK_CHANGE 301
#define TK_COMMA 302
#define TK_CONCAT 303
#define TK_CONFLICT 304
#define TK_COPY 305
#define TK_DEFERRED 306
#define TK_DELIMITERS 307
#define TK_DETACH 308
#define TK_DIVIDE 309
#define TK_DOT 310
#define TK_EACH 311
#define TK_FAIL 312
#define TK_FILE 313
#define TK_FOR 314
#define TK_GLOB 315
#define TK_ID 316
#define TK_IMMEDIATE 317
#define TK_IMPORT 318
#define TK_INITIALLY 319
#define TK_INSTEAD 320
#define TK_ISNULL 321
#define TK_KEY 322
#define TK_MODULES 323
#define TK_NK_BITNOT 324
#define TK_NK_SEMI 325
#define TK_NOTNULL 326
#define TK_OF 327
#define TK_PLUS 328
#define TK_PRIVILEGE 329
#define TK_RAISE 330
#define TK_RESTRICT 331
#define TK_ROW 332
#define TK_SEMI 333
#define TK_STAR 334
#define TK_STATEMENT 335
#define TK_STRICT 336
#define TK_STRING 337
#define TK_TIMES 338
#define TK_VALUES 339
#define TK_VARIABLE 340
#define TK_VIEW 341
#define TK_WAL 342
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601

View File

@ -22,7 +22,7 @@
typedef struct SExplainCtx SExplainCtx;
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp);
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp, int8_t biMode);
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs);

View File

@ -142,6 +142,7 @@ typedef struct SSnapContext {
typedef struct {
int64_t uid;
int64_t ctbNum;
int32_t colNum;
} SMetaStbStats;
// void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
@ -285,8 +286,8 @@ typedef struct SStoreMeta {
// db name, vgId, numOfTables, numOfSTables
int32_t (*getNumOfChildTables)(
void* pVnode, int64_t uid,
int64_t* numOfTables); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
void* pVnode, int64_t uid, int64_t* numOfTables,
int32_t* numOfCols); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);

View File

@ -236,6 +236,7 @@ bool fmIsInterpPseudoColumnFunc(int32_t funcId);
bool fmIsGroupKeyFunc(int32_t funcId);
bool fmIsBlockDistFunc(int32_t funcId);
bool fmIsConstantResFunc(SFunctionNode* pFunc);
bool fmIsSkipScanCheckFunc(int32_t funcId);
void getLastCacheDataType(SDataType* pType);
SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList);

View File

@ -191,7 +191,7 @@ typedef struct {
} SMonBmInfo;
typedef struct {
SArray *pVloads; // SVnodeLoad
SArray *pVloads; // SVnodeLoad/SVnodeLoadLite
} SMonVloadInfo;
typedef struct {

View File

@ -273,6 +273,7 @@ typedef struct SShowStmt {
SNode* pDbName; // SValueNode
SNode* pTbName; // SValueNode
EOperatorType tableCondType;
EShowKind showKind; // show databases: user/system, show tables: normal/child, others NULL
} SShowStmt;
typedef struct SShowCreateDatabaseStmt {

View File

@ -124,6 +124,7 @@ int32_t nodesListStrictAppendList(SNodeList* pTarget, SNodeList* pSrc);
int32_t nodesListPushFront(SNodeList* pList, SNode* pNode);
SListCell* nodesListErase(SNodeList* pList, SListCell* pCell);
void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc);
void nodesListInsertListAfterPos(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc);
SNode* nodesListGetNode(SNodeList* pList, int32_t index);
SListCell* nodesListGetCell(SNodeList* pList, int32_t index);
void nodesDestroyList(SNodeList* pList);

View File

@ -293,6 +293,7 @@ typedef struct SPartitionLogicNode {
SNodeList* pPartitionKeys;
SNodeList* pTags;
SNode* pSubtable;
SNodeList* pAggFuncs;
bool needBlockOutputTsOrder; // if true, partition output block will have ts order maintained
int32_t pkTsColId;

View File

@ -277,6 +277,14 @@ typedef enum ETimeLineMode {
TIME_LINE_GLOBAL,
} ETimeLineMode;
typedef enum EShowKind {
SHOW_KIND_ALL = 1,
SHOW_KIND_TABLES_NORMAL,
SHOW_KIND_TABLES_CHILD,
SHOW_KIND_DATABASES_USER,
SHOW_KIND_DATABASES_SYSTEM
} EShowKind;
typedef struct SFillNode {
ENodeType type; // QUERY_NODE_FILL
EFillMode mode;
@ -519,6 +527,8 @@ void* nodesGetValueFromNode(SValueNode* pNode);
int32_t nodesSetValueNodeValue(SValueNode* pNode, void* value);
char* nodesGetStrValueFromNode(SValueNode* pNode);
void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
SValueNode* nodesMakeValueNodeFromString(char* literal);
SValueNode* nodesMakeValueNodeFromBool(bool b);
char* nodesGetFillModeString(EFillMode mode);
int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);
@ -526,6 +536,9 @@ int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);
const char* operatorTypeStr(EOperatorType type);
const char* logicConditionTypeStr(ELogicConditionType type);
bool nodesIsStar(SNode* pNode);
bool nodesIsTableStar(SNode* pNode);
#ifdef __cplusplus
}
#endif

View File

@ -64,6 +64,7 @@ typedef struct SParseContext {
SArray* pTableMetaPos; // sql table pos => catalog data pos
SArray* pTableVgroupPos; // sql table pos => catalog data pos
int64_t allocatorId;
int8_t biMode;
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);

View File

@ -22,6 +22,15 @@
extern "C" {
#endif
#if defined(WINDOWS) && !defined(__USE_PTHREAD)
#include <windows.h>
#define __USE_WIN_THREAD
// https://learn.microsoft.com/en-us/windows/win32/winprog/using-the-windows-headers
// #ifndef _WIN32_WINNT
// #define _WIN32_WINNT 0x0600
// #endif
#endif
#if !defined(WINDOWS) && !defined(_ALPINE)
#ifndef __USE_XOPEN2K
#define TD_USE_SPINLOCK_AS_MUTEX
@ -29,6 +38,22 @@ typedef pthread_mutex_t pthread_spinlock_t;
#endif
#endif
#ifdef __USE_WIN_THREAD
typedef pthread_t TdThread; // pthread api
typedef pthread_spinlock_t TdThreadSpinlock; // pthread api
typedef CRITICAL_SECTION TdThreadMutex; // windows api
typedef HANDLE TdThreadMutexAttr; // windows api
typedef struct {
SRWLOCK lock;
int8_t excl;
} TdThreadRwlock; // windows api
typedef pthread_attr_t TdThreadAttr; // pthread api
typedef pthread_once_t TdThreadOnce; // pthread api
typedef HANDLE TdThreadRwlockAttr; // windows api
typedef CONDITION_VARIABLE TdThreadCond; // windows api
typedef HANDLE TdThreadCondAttr; // windows api
typedef pthread_key_t TdThreadKey; // pthread api
#else
typedef pthread_t TdThread;
typedef pthread_spinlock_t TdThreadSpinlock;
typedef pthread_mutex_t TdThreadMutex;
@ -40,11 +65,14 @@ typedef pthread_rwlockattr_t TdThreadRwlockAttr;
typedef pthread_cond_t TdThreadCond;
typedef pthread_condattr_t TdThreadCondAttr;
typedef pthread_key_t TdThreadKey;
#endif
#define taosThreadCleanupPush pthread_cleanup_push
#define taosThreadCleanupPop pthread_cleanup_pop
#ifdef WINDOWS
#if defined(WINDOWS) && !defined(__USE_PTHREAD)
#define TD_PTHREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER_FORBID
#elif defined(WINDOWS)
#define TD_PTHREAD_MUTEX_INITIALIZER (TdThreadMutex)(-1)
#else
#define TD_PTHREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER

51
include/td_sz.h Normal file
View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_SZ_H
#define _TD_SZ_H
#include "defines.h"
#ifdef __cplusplus
extern "C" {
#endif
void cost_start();
double cost_end(const char* tag);
//
// Initialize the lossy compression module with the given precision and interval settings
//
void tdszInit(float fPrecision, double dPrecision, unsigned int maxIntervals, unsigned int intervals, int ifAdtFse, const char* compressor);
//
// Compression interface for TDengine; the return value is the number of output bytes
//
int tdszCompress(int type, const char * input, const int nelements, const char * output);
//
// Decompression interface for TDengine; the return value is the number of output bytes
//
int tdszDecompress(int type, const char * input, int compressedSize, const int nelements, const char * output);
//
// Exit call
//
void tdszExit();
#ifdef __cplusplus
}
#endif
#endif /* ----- #ifndef _TD_SZ_H ----- */
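Since td_sz.h is a new public header, a short usage sketch may help. The precision, interval, and compressor values below mirror the defaults added to tglobal.c later in this commit; the element type id and the buffer sizing are assumptions and are labeled as such.
#include "td_sz.h"
// a minimal sketch, assuming `type` identifies the column data type (0 used as a placeholder)
// and that the output/restore buffers are at least as large as the input
void demo_tdsz(const float *values, int nelements, char *out, char *restored) {
  tdszInit(1E-8f, 1E-16, 500, 100, 0, "ZSTD_COMPRESSOR");  // precision/interval defaults from this commit
  int nBytes = tdszCompress(/*type (assumed)*/ 0, (const char *)values, nelements, out);
  tdszDecompress(/*type (assumed)*/ 0, out, nBytes, nelements, restored);
  tdszExit();
}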

View File

@ -54,8 +54,15 @@ extern "C" {
#ifdef TD_TSZ
extern bool lossyFloat;
extern bool lossyDouble;
int32_t tsCompressInit();
void tsCompressExit();
int32_t tsCompressInit(char* lossyColumns, float fPrecision, double dPrecision, uint32_t maxIntervals, uint32_t intervals,
int32_t ifAdtFse, const char* compressor);
void tsCompressExit();
int32_t tsCompressFloatLossyImp(const char *const input, const int32_t nelements, char *const output);
int32_t tsDecompressFloatLossyImp(const char *const input, int32_t compressedSize, const int32_t nelements, char *const output);
int32_t tsCompressDoubleLossyImp(const char *const input, const int32_t nelements, char *const output);
int32_t tsDecompressDoubleLossyImp(const char *const input, int32_t compressedSize, const int32_t nelements, char *const output);
static FORCE_INLINE int32_t tsCompressFloatLossy(const char *const input, int32_t inputSize, const int32_t nelements,
char *const output, int32_t outputSize, char algorithm,

View File

@ -43,6 +43,7 @@ typedef enum {
CFG_DTYPE_INT32,
CFG_DTYPE_INT64,
CFG_DTYPE_FLOAT,
CFG_DTYPE_DOUBLE,
CFG_DTYPE_STRING,
CFG_DTYPE_DIR,
CFG_DTYPE_LOCALE,
@ -64,6 +65,7 @@ typedef struct SConfigItem {
union {
bool bval;
float fval;
double dval;
int32_t i32;
int64_t i64;
char *str;
@ -101,7 +103,8 @@ int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfg
int32_t cfgAddBool(SConfig *pCfg, const char *name, bool defaultVal, int8_t scope);
int32_t cfgAddInt32(SConfig *pCfg, const char *name, int32_t defaultVal, int64_t minval, int64_t maxval, int8_t scope);
int32_t cfgAddInt64(SConfig *pCfg, const char *name, int64_t defaultVal, int64_t minval, int64_t maxval, int8_t scope);
int32_t cfgAddFloat(SConfig *pCfg, const char *name, float defaultVal, double minval, double maxval, int8_t scope);
int32_t cfgAddFloat(SConfig *pCfg, const char *name, float defaultVal, float minval, float maxval, int8_t scope);
int32_t cfgAddDouble(SConfig *pCfg, const char *name, double defaultVal, double minval, double maxval, int8_t scope);
int32_t cfgAddString(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope);
int32_t cfgAddDir(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope);
int32_t cfgAddLocale(SConfig *pCfg, const char *name, const char *defaultVal, int8_t scope);
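The new CFG_DTYPE_DOUBLE entry and cfgAddDouble make double-precision options possible; the commit itself uses this for DPrecision further down. A hedged sketch of registering and reading such an item (the demo function name and the surrounding error handling are assumptions):
// register a double option and read it back; mirrors the DPrecision usage in this commit
static int32_t demoRegisterDouble(SConfig *pCfg) {
  if (cfgAddDouble(pCfg, "DPrecision", 1E-16, 0.0, 1000000.0, CFG_SCOPE_SERVER) != 0) return -1;
  double v = cfgGetItem(pCfg, "DPrecision")->dval;  // stored in the new dval union member
  (void)v;
  return 0;
}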

View File

@ -33,14 +33,17 @@ adapterName="taosadapter"
benchmarkName="taosBenchmark"
dumpName="taosdump"
demoName="taosdemo"
xname="taosx"
clientName2="taos"
serverName2="${clientName2}d"
configFile2="${clientName2}.cfg"
productName2="TDengine"
emailName2="taosdata.com"
xname2="${clientName2}x"
adapterName2="${clientName2}adapter"
explorerName="${clientName2}-explorer"
benchmarkName2="${clientName2}Benchmark"
demoName2="${clientName2}demo"
dumpName2="${clientName2}dump"
@ -235,6 +238,12 @@ function install_bin() {
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
${csudo}rm -f ${bin_link_dir}/${xname2} || :
${csudo}rm -f ${bin_link_dir}/${explorerName} || :
#Make link
[ -x ${install_main_dir}/bin/${xname2} ] && ${csudo}ln -sf ${install_main_dir}/bin/${xname2} ${bin_link_dir}/${xname2} || :
[ -x ${install_main_dir}/bin/${explorerName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${explorerName} ${bin_link_dir}/${explorerName} || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript2} || :
fi
}
@ -693,9 +702,29 @@ function clean_service_on_systemd() {
fi
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config}
# if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
# ${csudo}rm -f ${service_config_dir}/${serverName2}.service
# fi
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
x_service_config="${service_config_dir}/${xName2}.service"
if [ -e "$x_service_config" ]; then
if systemctl is-active --quiet ${xName2}; then
echo "${productName2} ${xName2} is running, stopping it..."
${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${x_service_config}
fi
explorer_service_config="${service_config_dir}/${explorerName2}.service"
if [ -e "$explorer_service_config" ]; then
if systemctl is-active --quiet ${explorerName2}; then
echo "${productName2} ${explorerName2} is running, stopping it..."
${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${explorer_service_config}
${csudo}rm -f /etc/${clientName2}/explorer.toml
fi
fi
}
function install_service_on_systemd() {

View File

@ -123,10 +123,11 @@ function clean_bin() {
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
${csudo}rm -f ${bin_link_dir}/${keeperName2} || :
# ${csudo}rm -f ${bin_link_dir}/${xName2} || :
# ${csudo}rm -f ${bin_link_dir}/${explorerName2} || :
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
${csudo}rm -f ${bin_link_dir}/${xName2} || :
${csudo}rm -f ${bin_link_dir}/${explorerName2} || :
${csudo}rm -f ${bin_link_dir}/${clientName2} || :
${csudo}rm -f ${bin_link_dir}/${benchmarkName2} || :
${csudo}rm -f ${bin_link_dir}/${dumpName2} || :
@ -194,27 +195,29 @@ function clean_service_on_systemd() {
${csudo}systemctl stop ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then
x_service_config="${service_config_dir}/${xName2}.service"
if [ -e "$x_service_config" ]; then
if systemctl is-active --quiet ${xName2}; then
echo "${productName2} ${xName2} is running, stopping it..."
${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${x_service_config}
fi
# x_service_config="${service_config_dir}/${xName2}.service"
# if [ -e "$x_service_config" ]; then
# if systemctl is-active --quiet ${xName2}; then
# echo "${productName2} ${xName2} is running, stopping it..."
# ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null
# fi
# ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null
# ${csudo}rm -f ${x_service_config}
# fi
# explorer_service_config="${service_config_dir}/${explorerName2}.service"
# if [ -e "$explorer_service_config" ]; then
# if systemctl is-active --quiet ${explorerName2}; then
# echo "${productName2} ${explorerName2} is running, stopping it..."
# ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
# fi
# ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
# ${csudo}rm -f ${explorer_service_config}
# ${csudo}rm -f /etc/${clientName2}/explorer.toml
# fi
explorer_service_config="${service_config_dir}/${explorerName2}.service"
if [ -e "$explorer_service_config" ]; then
if systemctl is-active --quiet ${explorerName2}; then
echo "${productName2} ${explorerName2} is running, stopping it..."
${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null
fi
${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${explorer_service_config}
${csudo}rm -f /etc/${clientName2}/explorer.toml
fi
fi
}
function clean_service_on_sysvinit() {

View File

@ -133,32 +133,37 @@ typedef struct {
int32_t ver;
void* param;
__taos_notify_fn_t fp;
} SPassInfo;
} STscNotifyInfo;
typedef STscNotifyInfo SPassInfo;
typedef struct {
int64_t ver;
void* param;
int64_t ver;
void* param;
__taos_notify_fn_t fp;
} SWhiteListInfo;
typedef struct STscObj {
char user[TSDB_USER_LEN];
char pass[TSDB_PASSWORD_LEN];
char db[TSDB_DB_FNAME_LEN];
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
int8_t sysInfo;
int8_t connType;
int32_t acctId;
uint32_t connId;
int64_t id; // ref ID returned by taosAddRef
TdThreadMutex mutex; // used to protect the operation on db
int32_t numOfReqs; // number of sqlObj bound to this connection
int32_t authVer;
SAppInstInfo* pAppInfo;
SHashObj* pRequests;
SPassInfo passInfo;
char user[TSDB_USER_LEN];
char pass[TSDB_PASSWORD_LEN];
char db[TSDB_DB_FNAME_LEN];
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
int8_t sysInfo;
int8_t connType;
int8_t dropped;
int8_t biMode;
int32_t acctId;
uint32_t connId;
int64_t id; // ref ID returned by taosAddRef
TdThreadMutex mutex; // used to protect the operation on db
int32_t numOfReqs; // number of sqlObj bound to this connection
int32_t authVer;
SAppInstInfo* pAppInfo;
SHashObj* pRequests;
SPassInfo passInfo;
SWhiteListInfo whiteListInfo;
STscNotifyInfo userDroppedInfo;
} STscObj;
typedef struct STscDbg {

View File

@ -96,6 +96,19 @@ static int32_t hbUpdateUserAuthInfo(SAppHbMgr *pAppHbMgr, SUserAuthBatchRsp *bat
}
}
if (pRsp->dropped == 1) {
if (atomic_val_compare_exchange_8(&pTscObj->dropped, 0, 1) == 0) {
if (pTscObj->userDroppedInfo.fp) {
SPassInfo *dropInfo = &pTscObj->userDroppedInfo;
if (dropInfo->fp) {
(*dropInfo->fp)(dropInfo->param, NULL, TAOS_NOTIFY_USER_DROPPED);
}
}
}
releaseTscObj(pReq->connKey.tscRid);
continue;
}
pTscObj->authVer = pRsp->version;
if (pTscObj->sysInfo != pRsp->sysInfo) {
@ -842,7 +855,8 @@ SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
SClientHbKey *connKey = &pOneReq->connKey;
STscObj *pTscObj = (STscObj *)acquireTscObj(connKey->tscRid);
if (!pTscObj) {
if (!pTscObj || atomic_load_8(&pTscObj->dropped) == 1) {
if (pTscObj) releaseTscObj(connKey->tscRid);
continue;
}
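The heartbeat path above invokes the user-dropped callback at most once per connection, guarded by the dropped flag. A sketch of registering that callback from an application follows; the __taos_notify_fn_t parameter list is assumed from the (*fp)(param, NULL, TAOS_NOTIFY_USER_DROPPED) call in the hunk above, and the taos_connect-based connection is a placeholder.
#include <stdio.h>
#include "taos.h"
// signature assumed from the invocation (*fp)(param, NULL, TAOS_NOTIFY_USER_DROPPED) shown above
static void on_user_dropped(void *param, void *ext, int type) {
  (void)ext;
  if (type == TAOS_NOTIFY_USER_DROPPED) {
    printf("current user has been dropped; connection %p should be closed\n", param);
  }
}
// assumed usage: conn comes from taos_connect(...)
void demo_register_drop_notify(TAOS *conn) {
  taos_set_notify_cb(conn, on_user_dropped, conn, TAOS_NOTIFY_USER_DROPPED);
}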

View File

@ -297,7 +297,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
SRetrieveTableRsp* pRsp = NULL;
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
int8_t biMode = atomic_load_8(&pRequest->pTscObj->biMode);
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, biMode);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@ -335,7 +336,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
return;
}
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, atomic_load_8(&pRequest->pTscObj->biMode));
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@ -2553,6 +2554,14 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
}
void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param) {
if (pRequest->syncQuery && pRequest->body.param != param) {
if (pRequest->body.param) {
tsem_destroy(&((SSyncQueryParam *)pRequest->body.param)->sem);
}
taosMemoryFree(pRequest->body.param);
pRequest->syncQuery = false;
}
pRequest->body.fetchFp = fp;
pRequest->body.param = param;

View File

@ -147,6 +147,13 @@ int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)
taosThreadMutexUnlock(&pObj->mutex);
break;
}
case TAOS_NOTIFY_USER_DROPPED: {
taosThreadMutexLock(&pObj->mutex);
pObj->userDroppedInfo.fp = fp;
pObj->userDroppedInfo.param = param;
taosThreadMutexUnlock(&pObj->mutex);
break;
}
default: {
terrno = TSDB_CODE_INVALID_PARA;
releaseTscObj(*(int64_t *)taos);
@ -1166,6 +1173,8 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
.svrVer = pTscObj->sVer,
.nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes),
.allocatorId = pRequest->allocatorRefId};
int8_t biMode = atomic_load_8(&((STscObj *)pTscObj)->biMode);
(*pCxt)->biMode = biMode;
return TSDB_CODE_SUCCESS;
}
@ -1829,3 +1838,26 @@ int taos_stmt_close(TAOS_STMT *stmt) {
return stmtClose(stmt);
}
int taos_set_conn_mode(TAOS* taos, int mode, int value) {
if (taos == NULL) {
terrno = TSDB_CODE_INVALID_PARA;
return terrno;
}
STscObj *pObj = acquireTscObj(*(int64_t *)taos);
if (NULL == pObj) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
tscError("invalid parameter for %s", __func__);
return terrno;
}
switch (mode) {
case TAOS_CONN_MODE_BI:
atomic_store_8(&pObj->biMode, value);
break;
default:
tscError("not supported mode.");
return TSDB_CODE_INVALID_PARA;
}
return 0;
}
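taos_set_conn_mode above simply stores the value in the connection's biMode flag, which createParseContext and execLocalCmd then pass into the parser. A minimal sketch of enabling BI mode right after connecting; the host and credentials are placeholders, not values from this commit.
#include "taos.h"
// assumed usage; taos_connect parameters are placeholders
TAOS *demo_connect_bi_mode(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn != NULL) {
    taos_set_conn_mode(conn, TAOS_CONN_MODE_BI, 1);  // value 1 turns BI mode on for this connection
  }
  return conn;
}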

View File

@ -1411,7 +1411,7 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
code = smlBuildOutput(pQuery, pVgHash);
if (code != TSDB_CODE_SUCCESS) {
uError("smlBuildOutput failed");
return code;
goto end;
}
launchQueryImpl(pRequest, pQuery, true, NULL);
@ -1496,7 +1496,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
code = smlBuildOutput(pQuery, pVgHash);
if (code != TSDB_CODE_SUCCESS) {
uError("smlBuildOutput failed");
return code;
goto end;
}
launchQueryImpl(pRequest, pQuery, true, NULL);

View File

@ -1415,6 +1415,8 @@ static void initClientTopicFromRsp(SMqClientTopic* pTopic, SMqSubTopicEp* pTopic
STqOffsetVal offsetNew = {0};
offsetNew.type = tmq->resetOffsetCfg;
tscInfo("consumer:0x%" PRIx64 ", update topic:%s, new numOfVgs:%d, num:%d, port:%d", tmq->consumerId, pTopic->topicName, vgNumGet, pVgEp->epSet.numOfEps,pVgEp->epSet.eps[pVgEp->epSet.inUse].port);
SMqClientVg clientVg = {
.pollCnt = 0,
.vgId = pVgEp->vgId,
@ -1766,7 +1768,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout) {
tscError("consumer:0x%" PRIx64 " msg from vgId:%d discarded, since %s", tmq->consumerId, pollRspWrapper->vgId, tstrerror(pRspWrapper->code));
taosWLockLatch(&tmq->lock);
SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
pVg->emptyBlockReceiveTs = taosGetTimestampMs();
if(pVg) pVg->emptyBlockReceiveTs = taosGetTimestampMs();
taosWUnLockLatch(&tmq->lock);
}
setVgIdle(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
@ -3104,3 +3106,10 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
return code;
}
TAOS *tmq_get_connect(tmq_t *tmq){
if (tmq && tmq->pTscObj) {
return (TAOS *)(&(tmq->pTscObj->id));
}
return NULL;
}
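tmq_get_connect exposes the consumer's underlying TAOS handle, so a subscriber can reuse the same connection for ad-hoc SQL instead of opening a second one. A hedged sketch; taos_query, taos_errno, and taos_free_result are the regular client APIs, not part of this diff.
#include "taos.h"
// run a query over the consumer's own connection; returns the taos error code
int demo_query_on_consumer(tmq_t *tmq, const char *sql) {
  TAOS *conn = tmq_get_connect(tmq);
  if (conn == NULL) return -1;
  TAOS_RES *res = taos_query(conn, sql);
  int code = taos_errno(res);
  taos_free_result(res);
  return code;
}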

View File

@ -20,6 +20,7 @@
#include "tgrant.h"
#include "tlog.h"
#include "tmisce.h"
#include "defines.h"
#if defined(CUS_NAME) || defined(CUS_PROMPT) || defined(CUS_EMAIL)
#include "cus_name.h"
@ -144,6 +145,7 @@ bool tsUseAdapter = false;
int32_t tsMetaCacheMaxSize = -1; // MB
int32_t tsSlowLogThreshold = 3; // seconds
int32_t tsSlowLogScope = SLOW_LOG_TYPE_ALL;
int32_t tsTimeSeriesThreshold = 50;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@ -211,14 +213,15 @@ int64_t tsTickPerMin[] = {60000L, 60000000L, 60000000000L};
*/
int64_t tsTickPerHour[] = {3600000L, 3600000000L, 3600000000000L};
// lossy compress 6
// lossy compress 7
char tsLossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty
// can close lossy compress.
// below option can take effect when tsLossyColumns not empty
double tsFPrecision = 1E-8; // float column precision
float tsFPrecision = 1E-8; // float column precision
double tsDPrecision = 1E-16; // double column precision
uint32_t tsMaxRange = 500; // max range
uint32_t tsCurRange = 100; // range
uint32_t tsMaxRange = 500; // max quantization intervals
uint32_t tsCurRange = 100; // current quantization intervals
bool tsIfAdtFse = false; // use the ADT-FSE algorithm instead of the original Huffman algorithm
char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
// udf
@ -264,6 +267,9 @@ char tsS3BucketName[TSDB_FQDN_LEN] = "<bucketname>";
char tsS3AppId[TSDB_FQDN_LEN] = "<appid>";
int8_t tsS3Enabled = false;
int32_t tsS3BlockSize = 4096; // number of tsdb pages
int32_t tsS3BlockCacheSize = 16; // number of blocks
int32_t tsCheckpointInterval = 20;
#ifndef _STORAGE
@ -321,7 +327,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) {
return 0;
}
struct SConfig *taosGetCfg() { return tsCfg; }
struct SConfig *taosGetCfg() {
return tsCfg;
}
static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
char *apolloUrl) {
@ -630,6 +638,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "trimVDbIntervalSec", tsTrimVDbIntervalSec, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "timeseriesThreshold", tsTimeSeriesThreshold, 0, 2000, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX,
CFG_SCOPE_SERVER) != 0)
@ -646,6 +655,14 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0)
return -1;
if (cfgAddString(pCfg, "LossyColumns", tsLossyColumns, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddFloat(pCfg, "FPrecision", tsFPrecision, 0.0f, 100000.0f, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddDouble(pCfg, "DPrecision", tsDPrecision, 0.0f, 1000000.0f, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "MaxRange", tsMaxRange, 0, 65536, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "CurRange", tsCurRange, 0, 65536, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddBool(pCfg, "IfAdtFse", tsIfAdtFse, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddString(pCfg, "Compressor", tsCompressor, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1;
@ -655,6 +672,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "s3BlockSize", tsS3BlockSize, 2048, 1024 * 1024, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "s3BlockCacheSize", tsS3BlockCacheSize, 4, 1024 * 1024, CFG_SCOPE_SERVER) != 0) return -1;
// min free disk space used to check if the disk is full [50MB, 1GB]
if (cfgAddInt64(pCfg, "minDiskFreeSize", tsMinDiskFreeSize, TFS_MIN_DISK_FREE_SIZE, 1024 * 1024 * 1024,
@ -1036,6 +1055,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsTrimVDbIntervalSec = cfgGetItem(pCfg, "trimVDbIntervalSec")->i32;
tsUptimeInterval = cfgGetItem(pCfg, "uptimeInterval")->i32;
tsQueryRsmaTolerance = cfgGetItem(pCfg, "queryRsmaTolerance")->i32;
tsTimeSeriesThreshold = cfgGetItem(pCfg, "timeseriesThreshold")->i32;
tsWalFsyncDataSizeLimit = cfgGetItem(pCfg, "walFsyncDataSizeLimit")->i64;
@ -1060,6 +1080,15 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsCacheLazyLoadThreshold = cfgGetItem(pCfg, "cacheLazyLoadThreshold")->i32;
tstrncpy(tsLossyColumns, cfgGetItem(pCfg, "LossyColumns")->str, sizeof(tsLossyColumns));
tsFPrecision = cfgGetItem(pCfg, "FPrecision")->fval;
tsDPrecision = cfgGetItem(pCfg, "DPrecision")->dval;
tsMaxRange = cfgGetItem(pCfg, "MaxRange")->i32;
tsCurRange = cfgGetItem(pCfg, "CurRange")->i32;
tsIfAdtFse = cfgGetItem(pCfg, "IfAdtFse")->bval;
tstrncpy(tsCompressor, cfgGetItem(pCfg, "Compressor")->str, sizeof(tsCompressor));
tsDisableStream = cfgGetItem(pCfg, "disableStream")->bval;
tsStreamBufferSize = cfgGetItem(pCfg, "streamBufferSize")->i64;
@ -1070,6 +1099,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsResolveFQDNRetryTime = cfgGetItem(pCfg, "resolveFQDNRetryTime")->i32;
tsMinDiskFreeSize = cfgGetItem(pCfg, "minDiskFreeSize")->i64;
tsS3BlockSize = cfgGetItem(pCfg, "s3BlockSize")->i32;
tsS3BlockCacheSize = cfgGetItem(pCfg, "s3BlockCacheSize")->i32;
GRANT_CFG_GET;
return 0;
}
@ -1448,6 +1480,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
tqDebugFlag = cfgGetItem(pCfg, "tqDebugFlag")->i32;
} else if (strcasecmp("ttlFlushThreshold", name) == 0) {
tsTtlFlushThreshold = cfgGetItem(pCfg, "ttlFlushThreshold")->i32;
} else if (strcasecmp("timeseriesThreshold", name) == 0) {
tsTimeSeriesThreshold = cfgGetItem(pCfg, "timeseriesThreshold")->i32;
}
break;
}
@ -1643,6 +1677,20 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
return;
}
if (strcasecmp(option, "s3BlockSize") == 0) {
int32_t newS3BlockSize = atoi(value);
uInfo("s3BlockSize set from %d to %d", tsS3BlockSize, newS3BlockSize);
tsS3BlockSize = newS3BlockSize;
return;
}
if (strcasecmp(option, "s3BlockCacheSize") == 0) {
int32_t newS3BlockCacheSize = atoi(value);
uInfo("s3BlockCacheSize set from %d to %d", tsS3BlockCacheSize, newS3BlockCacheSize);
tsS3BlockCacheSize = newS3BlockCacheSize;
return;
}
if (strcasecmp(option, "keepTimeOffset") == 0) {
int32_t newKeepTimeOffset = atoi(value);
uInfo("keepTimeOffset set from %d to %d", tsKeepTimeOffset, newKeepTimeOffset);

View File

@ -1035,6 +1035,68 @@ int32_t tDeserializeSMDropFullTextReq(void *buf, int32_t bufLen, SMDropFullTextR
return 0;
}
int32_t tSerializeSNotifyReq(void *buf, int32_t bufLen, SNotifyReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1;
if (tEncodeI64(&encoder, pReq->clusterId) < 0) return -1;
int32_t nVgroup = taosArrayGetSize(pReq->pVloads);
if (tEncodeI32(&encoder, nVgroup) < 0) return -1;
for (int32_t i = 0; i < nVgroup; ++i) {
SVnodeLoadLite *vload = TARRAY_GET_ELEM(pReq->pVloads, i);
if (tEncodeI32(&encoder, vload->vgId) < 0) return -1;
if (tEncodeI64(&encoder, vload->nTimeSeries) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSNotifyReq(void *buf, int32_t bufLen, SNotifyReq *pReq) {
int32_t code = TSDB_CODE_INVALID_MSG;
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) goto _exit;
if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) goto _exit;
if (tDecodeI64(&decoder, &pReq->clusterId) < 0) goto _exit;
int32_t nVgroup = 0;
if (tDecodeI32(&decoder, &nVgroup) < 0) goto _exit;
if (nVgroup > 0) {
pReq->pVloads = taosArrayInit(nVgroup, sizeof(SVnodeLoadLite));
if (!pReq->pVloads) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (int32_t i = 0; i < nVgroup; ++i) {
SVnodeLoadLite vload;
if (tDecodeI32(&decoder, &(vload.vgId)) < 0) goto _exit;
if (tDecodeI64(&decoder, &(vload.nTimeSeries)) < 0) goto _exit;
taosArrayPush(pReq->pVloads, &vload);
}
}
code = 0;
_exit:
tEndDecode(&decoder);
tDecoderClear(&decoder);
return code;
}
void tFreeSNotifyReq(SNotifyReq *pReq) {
if (pReq) {
taosArrayDestroy(pReq->pVloads);
}
}
int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -1652,7 +1714,7 @@ int32_t tSerializeSGetUserAuthRspImpl(SEncoder *pEncoder, SGetUserAuthRsp *pRsp)
if (tEncodeI8(pEncoder, pRsp->superAuth) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->sysInfo) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->enable) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->reserve) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->dropped) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->version) < 0) return -1;
int32_t numOfCreatedDbs = taosHashGetSize(pRsp->createdDbs);
@ -1767,7 +1829,7 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
if (tDecodeI8(pDecoder, &pRsp->superAuth) < 0) goto _err;
if (tDecodeI8(pDecoder, &pRsp->sysInfo) < 0) goto _err;
if (tDecodeI8(pDecoder, &pRsp->enable) < 0) goto _err;
if (tDecodeI8(pDecoder, &pRsp->reserve) < 0) goto _err;
if (tDecodeI8(pDecoder, &pRsp->dropped) < 0) goto _err;
if (tDecodeI32(pDecoder, &pRsp->version) < 0) goto _err;
int32_t numOfCreatedDbs = 0;

View File

@ -28,6 +28,7 @@ typedef struct SDnodeMgmt {
const char *path;
const char *name;
TdThread statusThread;
TdThread notifyThread;
TdThread monitorThread;
TdThread crashReportThread;
SSingleWorker mgmtWorker;
@ -36,6 +37,7 @@ typedef struct SDnodeMgmt {
ProcessDropNodeFp processDropNodeFp;
SendMonitorReportFp sendMonitorReportFp;
GetVnodeLoadsFp getVnodeLoadsFp;
GetVnodeLoadsFp getVnodeLoadsLiteFp;
GetMnodeLoadsFp getMnodeLoadsFp;
GetQnodeLoadsFp getQnodeLoadsFp;
int32_t statusSeq;
@ -44,17 +46,21 @@ typedef struct SDnodeMgmt {
// dmHandle.c
SArray *dmGetMsgHandles();
void dmSendStatusReq(SDnodeMgmt *pMgmt);
void dmSendNotifyReq(SDnodeMgmt *pMgmt);
int32_t dmProcessConfigReq(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessAuthRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessGrantRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessServerRunStatus(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessGrantReq(void *pInfo, SRpcMsg *pMsg);
int32_t dmProcessGrantNotify(void *pInfo, SRpcMsg *pMsg);
// dmWorker.c
int32_t dmPutNodeMsgToMgmtQueue(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmStartStatusThread(SDnodeMgmt *pMgmt);
void dmStopStatusThread(SDnodeMgmt *pMgmt);
int32_t dmStartNotifyThread(SDnodeMgmt *pMgmt);
void dmStopNotifyThread(SDnodeMgmt *pMgmt);
int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt);
void dmStopMonitorThread(SDnodeMgmt *pMgmt);
int32_t dmStartCrashReportThread(SDnodeMgmt *pMgmt);

View File

@ -170,6 +170,36 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
dmProcessStatusRsp(pMgmt, &rpcRsp);
}
void dmSendNotifyReq(SDnodeMgmt *pMgmt) {
SNotifyReq req = {0};
taosThreadRwlockRdlock(&pMgmt->pData->lock);
req.dnodeId = pMgmt->pData->dnodeId;
taosThreadRwlockUnlock(&pMgmt->pData->lock);
req.clusterId = pMgmt->pData->clusterId;
SMonVloadInfo vinfo = {0};
(*pMgmt->getVnodeLoadsLiteFp)(&vinfo);
req.pVloads = vinfo.pVloads;
int32_t contLen = tSerializeSNotifyReq(NULL, 0, &req);
void *pHead = rpcMallocCont(contLen);
tSerializeSNotifyReq(pHead, contLen, &req);
tFreeSNotifyReq(&req);
SRpcMsg rpcMsg = {.pCont = pHead,
.contLen = contLen,
.msgType = TDMT_MND_NOTIFY,
.info.ahandle = (void *)0x9527,
.info.refId = 0,
.info.noResp = 1};
SEpSet epSet = {0};
dmGetMnodeEpSet(pMgmt->pData, &epSet);
rpcSendRequest(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, NULL);
}
int32_t dmProcessAuthRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
dError("auth rsp is received, but not supported yet");
return 0;
@ -395,6 +425,7 @@ SArray *dmGetMsgHandles() {
// Requests handled by MNODE
if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT_NOTIFY, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_AUTH_RSP, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
code = 0;

View File

@ -20,6 +20,11 @@ static int32_t dmStartMgmt(SDnodeMgmt *pMgmt) {
if (dmStartStatusThread(pMgmt) != 0) {
return -1;
}
#ifdef TD_ENTERPRISE
if (dmStartNotifyThread(pMgmt) != 0) {
return -1;
}
#endif
if (dmStartMonitorThread(pMgmt) != 0) {
return -1;
}
@ -33,6 +38,7 @@ static void dmStopMgmt(SDnodeMgmt *pMgmt) {
pMgmt->pData->stopped = true;
dmStopMonitorThread(pMgmt);
dmStopStatusThread(pMgmt);
dmStopNotifyThread(pMgmt);
dmStopCrashReportThread(pMgmt);
}
@ -52,6 +58,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
pMgmt->processDropNodeFp = pInput->processDropNodeFp;
pMgmt->sendMonitorReportFp = pInput->sendMonitorReportFp;
pMgmt->getVnodeLoadsFp = pInput->getVnodeLoadsFp;
pMgmt->getVnodeLoadsLiteFp = pInput->getVnodeLoadsLiteFp;
pMgmt->getMnodeLoadsFp = pInput->getMnodeLoadsFp;
pMgmt->getQnodeLoadsFp = pInput->getQnodeLoadsFp;

View File

@ -53,6 +53,26 @@ static void *dmStatusThreadFp(void *param) {
return NULL;
}
tsem_t dmNotifySem;
static void *dmNotifyThreadFp(void *param) {
SDnodeMgmt *pMgmt = param;
int64_t lastTime = taosGetTimestampMs();
setThreadName("dnode-notify");
if (tsem_init(&dmNotifySem, 0, 0) != 0) {
return NULL;
}
while (1) {
if (pMgmt->pData->dropped || pMgmt->pData->stopped) break;
tsem_wait(&dmNotifySem);
dmSendNotifyReq(pMgmt);
}
return NULL;
}
static void *dmMonitorThreadFp(void *param) {
SDnodeMgmt *pMgmt = param;
int64_t lastTime = taosGetTimestampMs();
@ -132,7 +152,6 @@ static void *dmCrashReportThreadFp(void *param) {
return NULL;
}
int32_t dmStartStatusThread(SDnodeMgmt *pMgmt) {
TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);
@ -154,6 +173,29 @@ void dmStopStatusThread(SDnodeMgmt *pMgmt) {
}
}
int32_t dmStartNotifyThread(SDnodeMgmt *pMgmt) {
TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);
taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
if (taosThreadCreate(&pMgmt->notifyThread, &thAttr, dmNotifyThreadFp, pMgmt) != 0) {
dError("failed to create notify thread since %s", strerror(errno));
return -1;
}
taosThreadAttrDestroy(&thAttr);
tmsgReportStartup("dnode-notify", "initialized");
return 0;
}
void dmStopNotifyThread(SDnodeMgmt *pMgmt) {
if (taosCheckPthreadValid(pMgmt->notifyThread)) {
tsem_post(&dmNotifySem);
taosThreadJoin(pMgmt->notifyThread, NULL);
taosThreadClear(&pMgmt->notifyThread);
}
tsem_destroy(&dmNotifySem);
}
int32_t dmStartMonitorThread(SDnodeMgmt *pMgmt) {
TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);
@ -251,6 +293,11 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
case TDMT_MND_GRANT:
code = dmProcessGrantReq(&pMgmt->pData->clusterId, pMsg);
break;
case TDMT_MND_GRANT_NOTIFY:
#ifdef MAKE_JENKINS_HAPPY
code = dmProcessGrantNotify(NULL, pMsg);
#endif
break;
default:
terrno = TSDB_CODE_MSG_NOT_PROCESSED;
dGError("msg:%p, not processed in mgmt queue", pMsg);

View File

@ -178,6 +178,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_CONN, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_HEARTBEAT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_STATUS, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_NOTIFY, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_SYSTABLE_RETRIEVE, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_AUTH, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_SHOW_VARIABLES, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;

View File

@ -40,6 +40,28 @@ void vmGetVnodeLoads(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo, bool isReset) {
taosThreadRwlockUnlock(&pMgmt->lock);
}
void vmGetVnodeLoadsLite(SVnodeMgmt *pMgmt, SMonVloadInfo *pInfo) {
pInfo->pVloads = taosArrayInit(pMgmt->state.totalVnodes, sizeof(SVnodeLoadLite));
if (!pInfo->pVloads) return;
taosThreadRwlockRdlock(&pMgmt->lock);
void *pIter = taosHashIterate(pMgmt->hash, NULL);
while (pIter) {
SVnodeObj **ppVnode = pIter;
if (ppVnode == NULL || *ppVnode == NULL) { pIter = taosHashIterate(pMgmt->hash, pIter); continue; }  // advance the iterator before skipping to avoid an infinite loop
SVnodeObj *pVnode = *ppVnode;
SVnodeLoadLite vload = {0};
if (vnodeGetLoadLite(pVnode->pImpl, &vload) == 0) {
taosArrayPush(pInfo->pVloads, &vload);
}
pIter = taosHashIterate(pMgmt->hash, pIter);
}
taosThreadRwlockUnlock(&pMgmt->lock);
}
void vmGetMonitorInfo(SVnodeMgmt *pMgmt, SMonVmInfo *pInfo) {
SMonVloadInfo vloads = {0};
vmGetVnodeLoads(pMgmt, &vloads, true);

View File

@ -119,6 +119,7 @@ int32_t dmProcessNodeMsg(SMgmtWrapper *pWrapper, SRpcMsg *pMsg);
// dmMonitor.c
void dmSendMonitorReport();
void dmGetVnodeLoads(SMonVloadInfo *pInfo);
void dmGetVnodeLoadsLite(SMonVloadInfo *pInfo);
void dmGetMnodeLoads(SMonMloadInfo *pInfo);
void dmGetQnodeLoads(SQnodeLoad *pInfo);

View File

@ -35,6 +35,7 @@ void smGetMonitorInfo(void *pMgmt, SMonSmInfo *pInfo);
void bmGetMonitorInfo(void *pMgmt, SMonBmInfo *pInfo);
void vmGetVnodeLoads(void *pMgmt, SMonVloadInfo *pInfo, bool isReset);
void vmGetVnodeLoadsLite(void *pMgmt, SMonVloadInfo *pInfo);
void mmGetMnodeLoads(void *pMgmt, SMonMloadInfo *pInfo);
void qmGetQnodeLoads(void *pMgmt, SQnodeLoad *pInfo);

View File

@ -419,6 +419,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) {
.processDropNodeFp = dmProcessDropNodeReq,
.sendMonitorReportFp = dmSendMonitorReport,
.getVnodeLoadsFp = dmGetVnodeLoads,
.getVnodeLoadsLiteFp = dmGetVnodeLoadsLite,
.getMnodeLoadsFp = dmGetMnodeLoads,
.getQnodeLoadsFp = dmGetQnodeLoads,
};

View File

@ -19,6 +19,10 @@
#include "index.h"
#include "qworker.h"
#include "tstream.h"
#ifdef TD_TSZ
#include "tglobal.h"
#include "tcompression.h"
#endif
static bool dmRequireNode(SDnode *pDnode, SMgmtWrapper *pWrapper) {
SMgmtInputOpt input = dmBuildMgmtInputOpt(pWrapper);
@ -111,6 +115,11 @@ int32_t dmInitDnode(SDnode *pDnode) {
goto _OVER;
}
#ifdef TD_TSZ
// compress module init
tsCompressInit(tsLossyColumns, tsFPrecision, tsDPrecision, tsMaxRange, tsCurRange, (int)tsIfAdtFse, tsCompressor);
#endif
pDnode->wrappers[DNODE].func = dmGetMgmtFunc();
pDnode->wrappers[MNODE].func = mmGetMgmtFunc();
pDnode->wrappers[VNODE].func = vmGetMgmtFunc();
@ -180,6 +189,12 @@ void dmCleanupDnode(SDnode *pDnode) {
streamMetaCleanup();
indexCleanup();
taosConvDestroy();
#ifdef TD_TSZ
// compress destroy
tsCompressExit();
#endif
dDebug("dnode is closed, ptr:%p", pDnode);
}

View File

@ -119,6 +119,17 @@ void dmGetVnodeLoads(SMonVloadInfo *pInfo) {
}
}
void dmGetVnodeLoadsLite(SMonVloadInfo *pInfo) {
SDnode *pDnode = dmInstance();
SMgmtWrapper *pWrapper = &pDnode->wrappers[VNODE];
if (dmMarkWrapper(pWrapper) == 0) {
if (pWrapper->pMgmt != NULL) {
vmGetVnodeLoadsLite(pWrapper->pMgmt, pInfo);
}
dmReleaseWrapper(pWrapper);
}
}
void dmGetMnodeLoads(SMonMloadInfo *pInfo) {
SDnode *pDnode = dmInstance();
SMgmtWrapper *pWrapper = &pDnode->wrappers[MNODE];

View File

@ -121,6 +121,7 @@ typedef struct {
ProcessDropNodeFp processDropNodeFp;
SendMonitorReportFp sendMonitorReportFp;
GetVnodeLoadsFp getVnodeLoadsFp;
GetVnodeLoadsFp getVnodeLoadsLiteFp;
GetMnodeLoadsFp getMnodeLoadsFp;
GetQnodeLoadsFp getQnodeLoadsFp;
} SMgmtInputOpt;

View File

@ -32,11 +32,11 @@ void mndReleaseSubscribe(SMnode *pMnode, SMqSubscribeObj *pSub);
int32_t mndMakeSubscribeKey(char *key, const char *cgroup, const char *topicName);
static FORCE_INLINE int32_t mndMakePartitionKey(char *key, const char *cgroup, const char *topicName, int32_t vgId) {
return snprintf(key, TSDB_PARTITION_KEY_LEN, "%d:%s:%s", vgId, cgroup, topicName);
}
//static FORCE_INLINE int32_t mndMakePartitionKey(char *key, const char *cgroup, const char *topicName, int32_t vgId) {
// return snprintf(key, TSDB_PARTITION_KEY_LEN, "%d:%s:%s", vgId, cgroup, topicName);
//}
int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
//int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topic);
int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj *pSub);

View File

@ -311,6 +311,34 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId);
}
taosRUnLockLatch(&pConsumer->lock);
}else{
int32_t newTopicNum = taosArrayGetSize(pConsumer->currentTopics);
for (int32_t i = 0; i < newTopicNum; i++) {
char * topic = taosArrayGetP(pConsumer->currentTopics, i);
SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic);
if (pSub == NULL) {
continue;
}
taosRLockLatch(&pSub->lock);
// 2.2 iterate all vg assigned to the consumer of that topic
SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &pConsumer->consumerId, sizeof(int64_t));
int32_t vgNum = taosArrayGetSize(pConsumerEp->vgs);
for (int32_t j = 0; j < vgNum; j++) {
SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j);
SVgObj * pVgroup = mndAcquireVgroup(pMnode, pVgEp->vgId);
if (!pVgroup) {
char key[TSDB_SUBSCRIBE_KEY_LEN];
mndMakeSubscribeKey(key, pConsumer->cgroup, topic);
mndGetOrCreateRebSub(pRebMsg->rebSubHash, key);
mInfo("vnode splitted, vgId:%d rebalance will be triggered", pVgEp->vgId);
}
mndReleaseVgroup(pMnode, pVgroup);
}
taosRUnLockLatch(&pSub->lock);
mndReleaseSubscribe(pMnode, pSub);
}
}
} else if (status == MQ_CONSUMER_STATUS_LOST) {
if (hbStatus > MND_CONSUMER_LOST_CLEAR_THRESHOLD) { // clear consumer if lost a day
@ -343,7 +371,7 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
}
if (taosHashGetSize(pRebMsg->rebSubHash) != 0) {
mInfo("mq rebalance will be triggered");
mInfo("mq rebalance will be triggered");
SRpcMsg rpcMsg = {
.msgType = TDMT_MND_TMQ_DO_REBALANCE,
.pCont = pRebMsg,
@ -548,8 +576,8 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
for (int32_t j = 0; j < vgNum; j++) {
SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j);
char offsetKey[TSDB_PARTITION_KEY_LEN];
mndMakePartitionKey(offsetKey, pConsumer->cgroup, topic, pVgEp->vgId);
// char offsetKey[TSDB_PARTITION_KEY_LEN];
// mndMakePartitionKey(offsetKey, pConsumer->cgroup, topic, pVgEp->vgId);
if(epoch == -1){
SVgObj *pVgroup = mndAcquireVgroup(pMnode, pVgEp->vgId);

View File

@ -71,6 +71,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq);
static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq);
static int32_t mndProcessConfigDnodeRsp(SRpcMsg *pRsp);
static int32_t mndProcessStatusReq(SRpcMsg *pReq);
static int32_t mndProcessNotifyReq(SRpcMsg *pReq);
static int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq);
static int32_t mndRetrieveConfigs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
@ -80,6 +81,12 @@ static void mndCancelGetNextDnode(SMnode *pMnode, void *pIter);
static int32_t mndMCfgGetValInt32(SMCfgDnodeReq *pInMCfgReq, int32_t opLen, int32_t *pOutValue);
#ifndef TD_ENTERPRISE
static int32_t mndUpdClusterInfo(SRpcMsg *pReq) { return 0; }
#else
int32_t mndUpdClusterInfo(SRpcMsg *pReq);
#endif
int32_t mndInitDnode(SMnode *pMnode) {
SSdbTable table = {
.sdbType = SDB_DNODE,
@ -97,6 +104,7 @@ int32_t mndInitDnode(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_CONFIG_DNODE, mndProcessConfigDnodeReq);
mndSetMsgHandle(pMnode, TDMT_DND_CONFIG_DNODE_RSP, mndProcessConfigDnodeRsp);
mndSetMsgHandle(pMnode, TDMT_MND_STATUS, mndProcessStatusReq);
mndSetMsgHandle(pMnode, TDMT_MND_NOTIFY, mndProcessNotifyReq);
mndSetMsgHandle(pMnode, TDMT_MND_DNODE_LIST, mndProcessDnodeListReq);
mndSetMsgHandle(pMnode, TDMT_MND_SHOW_VARIABLES, mndProcessShowVariablesReq);
mndSetMsgHandle(pMnode, TDMT_MND_RESTORE_DNODE, mndProcessRestoreDnodeReq);
@ -543,6 +551,10 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
mGTrace("dnode:%d, status received, accessTimes:%d check:%d online:%d reboot:%d changed:%d statusSeq:%d", pDnode->id,
pDnode->accessTimes, needCheck, online, reboot, dnodeChanged, statusReq.statusSeq);
if (reboot) {
tsGrantHBInterval = GRANT_HEART_BEAT_MIN;
}
for (int32_t v = 0; v < taosArrayGetSize(statusReq.pVloads); ++v) {
SVnodeLoad *pVload = taosArrayGet(statusReq.pVloads, v);
@ -676,6 +688,45 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
_OVER:
mndReleaseDnode(pMnode, pDnode);
taosArrayDestroy(statusReq.pVloads);
#ifdef MAKE_JENKINS_HAPPY
mndUpdClusterInfo(pReq);
#endif
return code;
}
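// mndProcessNotifyReq handles the TDMT_MND_NOTIFY message registered above: a dnode pushes
// its latest per-vgroup time-series counts out of band, and the mnode copies them into the
// matching SVgObj, likely so grant checks see fresh numbers without waiting for the next
// status report.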
static int32_t mndProcessNotifyReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SNotifyReq notifyReq = {0};
int32_t code = 0;
if ((code = tDeserializeSNotifyReq(pReq->pCont, pReq->contLen, &notifyReq)) != 0) {
terrno = code;
goto _OVER;
}
int64_t clusterid = mndGetClusterId(pMnode);
if (notifyReq.clusterId != 0 && notifyReq.clusterId != clusterid) {
code = TSDB_CODE_MND_DNODE_DIFF_CLUSTER;
mWarn("dnode:%d, its clusterid:%" PRId64 " differ from current cluster:%" PRId64 " since %s", notifyReq.dnodeId,
notifyReq.clusterId, clusterid, tstrerror(code));
goto _OVER;
}
int32_t nVgroup = taosArrayGetSize(notifyReq.pVloads);
for (int32_t v = 0; v < nVgroup; ++v) {
SVnodeLoadLite *pVload = taosArrayGet(notifyReq.pVloads, v);
SVgObj *pVgroup = mndAcquireVgroup(pMnode, pVload->vgId);
if (pVgroup != NULL) {
pVgroup->numOfTimeSeries = pVload->nTimeSeries;
mndReleaseVgroup(pMnode, pVgroup);
}
}
_OVER:
#ifdef MAKE_JENKINS_HAPPY
mndUpdClusterInfo(pReq);
#endif
tFreeSNotifyReq(&notifyReq);
return code;
}

View File

@ -35,7 +35,6 @@ int32_t mndCheckTopicPrivilegeByName(SMnode *pMnode, const char *user, EOperType
}
// TODO: for the community version, use the commented-out implementation below
int32_t mndSetUserWhiteListRsp(SMnode *pMnode, SUserObj *pUser, SGetUserWhiteListRsp *pWhiteListRsp) {
memcpy(pWhiteListRsp->user, pUser->user, TSDB_USER_LEN);
pWhiteListRsp->numWhiteLists = 1;
@ -43,14 +42,26 @@ int32_t mndSetUserWhiteListRsp(SMnode *pMnode, SUserObj *pUser, SGetUserWhiteLis
if (pWhiteListRsp->pWhiteLists == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
memset(pWhiteListRsp->pWhiteLists, 0, pWhiteListRsp->numWhiteLists * sizeof(SIpV4Range));
// pWhiteListRsp->numWhiteLists = pUser->pIpWhiteList->num;
// pWhiteListRsp->pWhiteLists = taosMemoryMalloc(pWhiteListRsp->numWhiteLists * sizeof(SIpV4Range));
// if (pWhiteListRsp->pWhiteLists == NULL) {
// return TSDB_CODE_OUT_OF_MEMORY;
// }
// memcpy(pWhiteListRsp->pWhiteLists, pUser->pIpWhiteList->pIpRange,
// pWhiteListRsp->numWhiteLists * sizeof(SIpV4Range));
memset(pWhiteListRsp->pWhiteLists, 0, pWhiteListRsp->numWhiteLists * sizeof(SIpV4Range));
// if (tsEnableWhiteList) {
// memcpy(pWhiteListRsp->user, pUser->user, TSDB_USER_LEN);
// pWhiteListRsp->numWhiteLists = pUser->pIpWhiteList->num;
// pWhiteListRsp->pWhiteLists = taosMemoryMalloc(pWhiteListRsp->numWhiteLists * sizeof(SIpV4Range));
// if (pWhiteListRsp->pWhiteLists == NULL) {
// return TSDB_CODE_OUT_OF_MEMORY;
// }
// memcpy(pWhiteListRsp->pWhiteLists, pUser->pIpWhiteList->pIpRange,
// pWhiteListRsp->numWhiteLists * sizeof(SIpV4Range));
// } else {
// memcpy(pWhiteListRsp->user, pUser->user, TSDB_USER_LEN);
// pWhiteListRsp->numWhiteLists = 1;
// pWhiteListRsp->pWhiteLists = taosMemoryMalloc(pWhiteListRsp->numWhiteLists * sizeof(SIpV4Range));
// if (pWhiteListRsp->pWhiteLists == NULL) {
// return TSDB_CODE_OUT_OF_MEMORY;
// }
// memset(pWhiteListRsp->pWhiteLists, 0, pWhiteListRsp->numWhiteLists * sizeof(SIpV4Range));
// }
return 0;
}

View File

@ -1678,6 +1678,10 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray
return -1;
}
if ((terrno = grantCheck(TSDB_GRANT_TIMESERIES)) != 0) {
return -1;
}
pNew->numOfColumns = pNew->numOfColumns + ncols;
if (mndAllocStbSchemas(pOld, pNew) != 0) {
return -1;

View File

@ -361,8 +361,71 @@ static void transferVgroupsForConsumers(SMqRebOutputObj *pOutput, SHashObj *pHas
}
}
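// processRemoveAddVgs reconciles the subscription with the vgroups currently in the database:
// vgroups that no longer exist are removed from every consumer's assignment, vgroups that are
// not assigned to any consumer are collected, and if the subscription has no unassigned vgroups
// yet, the newly found ones are appended to unassignedVgs. The returned total vgroup count
// feeds mndDoRebalance below.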
static int32_t processRemoveAddVgs(SMnode *pMnode, SMqRebOutputObj *pOutput){
int32_t totalVgNum = 0;
SVgObj* pVgroup = NULL;
void* pIter = NULL;
SArray* newVgs = taosArrayInit(0, POINTER_BYTES);
while (1) {
pIter = sdbFetch(pMnode->pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
if (pIter == NULL) {
break;
}
if (!mndVgroupInDb(pVgroup, pOutput->pSub->dbUid)) {
sdbRelease(pMnode->pSdb, pVgroup);
continue;
}
totalVgNum++;
SMqVgEp* pVgEp = taosMemoryMalloc(sizeof(SMqVgEp));
pVgEp->epSet = mndGetVgroupEpset(pMnode, pVgroup);
pVgEp->vgId = pVgroup->vgId;
taosArrayPush(newVgs, &pVgEp);
sdbRelease(pMnode->pSdb, pVgroup);
}
pIter = NULL;
while (1) {
pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter);
if (pIter == NULL) break;
SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
int32_t j = 0;
while (j < taosArrayGetSize(pConsumerEp->vgs)) {
SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j);
bool find = false;
for(int32_t k = 0; k < taosArrayGetSize(newVgs); k++){
SMqVgEp *pnewVgEp = taosArrayGetP(newVgs, k);
if(pVgEp->vgId == pnewVgEp->vgId){
tDeleteSMqVgEp(pnewVgEp);
taosArrayRemove(newVgs, k);
find = true;
break;
}
}
if(!find){
mInfo("processRemoveAddVgs old vgId:%d", pVgEp->vgId);
tDeleteSMqVgEp(pVgEp);
taosArrayRemove(pConsumerEp->vgs, j);
continue;
}
j++;
}
}
if(taosArrayGetSize(pOutput->pSub->unassignedVgs) == 0 && taosArrayGetSize(newVgs) != 0){
taosArrayAddAll(pOutput->pSub->unassignedVgs, newVgs);
mInfo("processRemoveAddVgs add new vg num:%d", (int)taosArrayGetSize(newVgs));
taosArrayDestroy(newVgs);
}else{
taosArrayDestroyP(newVgs, (FDelete)tDeleteSMqVgEp);
}
return totalVgNum;
}
static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqRebOutputObj *pOutput) {
int32_t totalVgNum = pOutput->pSub->vgNum;
int32_t totalVgNum = processRemoveAddVgs(pMnode, pOutput);
const char *pSubKey = pOutput->pSub->key;
int32_t numOfRemoved = taosArrayGetSize(pInput->pRebInfo->removedConsumers);
@ -771,6 +834,29 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
return 0;
}
static int32_t sendDeleteSubToVnode(SMqSubscribeObj *pSub, STrans *pTrans){
// iterate over the vnodes of this subscription and ask each one to delete its tq handle
int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
for (int32_t i = 0; i < sz; i++) {
SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
SMqVDeleteReq *pReq = taosMemoryCalloc(1, sizeof(SMqVDeleteReq));
pReq->head.vgId = htonl(pVgEp->vgId);
pReq->vgId = pVgEp->vgId;
pReq->consumerId = -1;
memcpy(pReq->subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN);
STransAction action = {0};
action.epSet = pVgEp->epSet;
action.pCont = pReq;
action.contLen = sizeof(SMqVDeleteReq);
action.msgType = TDMT_VND_TMQ_DELETE_SUB;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
return -1;
}
}
return 0;
}
static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) {
SMnode *pMnode = pMsg->info.node;
SMDropCgroupReq dropReq = {0};
@ -831,6 +917,11 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pMsg) {
mInfo("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic);
code = sendDeleteSubToVnode(pSub, pTrans);
if (code != 0) {
goto end;
}
if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) {
mError("cgroup %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr());
code = -1;
@ -1065,33 +1156,33 @@ int32_t mndSetDropSubCommitLogs(SMnode *pMnode, STrans *pTrans, SMqSubscribeObj
return 0;
}
int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
int32_t code = 0;
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
SMqSubscribeObj *pSub = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pIter, (void **)&pSub);
if (pIter == NULL) break;
if (pSub->dbUid != pDb->uid) {
sdbRelease(pSdb, pSub);
continue;
}
if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) {
sdbRelease(pSdb, pSub);
sdbCancelFetch(pSdb, pIter);
code = -1;
break;
}
sdbRelease(pSdb, pSub);
}
return code;
}
//int32_t mndDropSubByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
// int32_t code = 0;
// SSdb *pSdb = pMnode->pSdb;
//
// void *pIter = NULL;
// SMqSubscribeObj *pSub = NULL;
// while (1) {
// pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pIter, (void **)&pSub);
// if (pIter == NULL) break;
//
// if (pSub->dbUid != pDb->uid) {
// sdbRelease(pSdb, pSub);
// continue;
// }
//
// if (mndSetDropSubCommitLogs(pMnode, pTrans, pSub) < 0) {
// sdbRelease(pSdb, pSub);
// sdbCancelFetch(pSdb, pIter);
// code = -1;
// break;
// }
//
// sdbRelease(pSdb, pSub);
// }
//
// return code;
//}
int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName) {
SSdb *pSdb = pMnode->pSdb;
@ -1117,25 +1208,10 @@ int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName)
sdbCancelFetch(pSdb, pIter);
return -1;
}
int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
for (int32_t i = 0; i < sz; i++) {
SMqVgEp *pVgEp = taosArrayGetP(pSub->unassignedVgs, i);
SMqVDeleteReq *pReq = taosMemoryCalloc(1, sizeof(SMqVDeleteReq));
pReq->head.vgId = htonl(pVgEp->vgId);
pReq->vgId = pVgEp->vgId;
pReq->consumerId = -1;
memcpy(pReq->subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN);
STransAction action = {0};
action.epSet = pVgEp->epSet;
action.pCont = pReq;
action.contLen = sizeof(SMqVDeleteReq);
action.msgType = TDMT_VND_TMQ_DELETE_SUB;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
sdbRelease(pSdb, pSub);
sdbCancelFetch(pSdb, pIter);
return -1;
}
if (sendDeleteSubToVnode(pSub, pTrans) != 0) {
sdbRelease(pSdb, pSub);
sdbCancelFetch(pSdb, pIter);
return -1;
}
if (mndSetDropSubRedoLogs(pMnode, pTrans, pSub) < 0) {

View File

@ -2289,6 +2289,11 @@ int32_t mndValidateUserAuthInfo(SMnode *pMnode, SUserAuthVersion *pUsers, int32_
for (int32_t i = 0; i < numOfUses; ++i) {
SUserObj *pUser = mndAcquireUser(pMnode, pUsers[i].user);
if (pUser == NULL) {
if (TSDB_CODE_MND_USER_NOT_EXIST == terrno) {
SGetUserAuthRsp rsp = {.dropped = 1};
memcpy(rsp.user, pUsers[i].user, TSDB_USER_LEN);
taosArrayPush(batchRsp.pArray, &rsp);
}
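// Returning a rsp flagged dropped = 1 for a user that no longer exists presumably lets the
// requesting node evict its cached auth info for that user rather than treat the miss as a
// transient failure.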
mError("user:%s, failed to auth user since %s", pUsers[i].user, terrstr());
continue;
}

View File

@ -2672,14 +2672,14 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
SDbObj dbObj = {0};
SArray *pArray = mndBuildDnodesArray(pMnode, 0);
int32_t numOfTopics = 0;
if (mndGetNumOfTopics(pMnode, pDb->name, &numOfTopics) != 0) {
goto _OVER;
}
if (numOfTopics > 0) {
terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED;
goto _OVER;
}
// int32_t numOfTopics = 0;
// if (mndGetNumOfTopics(pMnode, pDb->name, &numOfTopics) != 0) {
// goto _OVER;
// }
// if (numOfTopics > 0) {
// terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED;
// goto _OVER;
// }
int32_t numOfStreams = 0;
if (mndGetNumOfStreams(pMnode, pDb->name, &numOfStreams) != 0) {

View File

@ -65,7 +65,8 @@ set(
"src/tq/tqSink.c"
"src/tq/tqCommit.c"
"src/tq/tqStreamTask.c"
"src/tq/tqSnapshot.c"
"src/tq/tqHandleSnapshot.c"
"src/tq/tqCheckInfoSnapshot.c"
"src/tq/tqOffsetSnapshot.c"
"src/tq/tqStreamStateSnap.c"
"src/tq/tqStreamTaskSnap.c"

View File

@ -86,11 +86,13 @@ void *vnodeGetIdx(void *pVnode);
void *vnodeGetIvtIdx(void *pVnode);
int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num);
int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num);
int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num);
int32_t vnodeGetAllCtbNum(SVnode *pVnode, int64_t *num);
void vnodeResetLoad(SVnode *pVnode, SVnodeLoad *pLoad);
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad);
int32_t vnodeGetLoadLite(SVnode *pVnode, SVnodeLoadLite *pLoad);
int32_t vnodeValidateTableHash(SVnode *pVnode, char *tableFName);
int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg);
@ -134,7 +136,7 @@ bool metaTbInFilterCache(void *pVnode, tb_uid_t suid, int8_t type);
int32_t metaPutTbToFilterCache(void *pVnode, tb_uid_t suid, int8_t type);
int32_t metaSizeOfTbFilterCache(void *pVnode, int8_t type);
int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables);
int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables, int32_t *numOfCols);
// tsdb
typedef struct STsdbReader STsdbReader;
@ -288,10 +290,10 @@ typedef struct {
int64_t numOfSTables;
int64_t numOfCTables;
int64_t numOfNTables;
int64_t numOfCmprTables;
int64_t numOfReportedTimeSeries;
int64_t numOfNTimeSeries;
int64_t numOfTimeSeries;
int64_t itvTimeSeries;
// int64_t itvTimeSeries;
int64_t pointsWritten;
int64_t totalStorage;
int64_t compStorage;

View File

@ -71,7 +71,7 @@ int32_t metaCacheDrop(SMeta* pMeta, int64_t uid);
int32_t metaStatsCacheUpsert(SMeta* pMeta, SMetaStbStats* pInfo);
int32_t metaStatsCacheDrop(SMeta* pMeta, int64_t uid);
int32_t metaStatsCacheGet(SMeta* pMeta, int64_t uid, SMetaStbStats* pInfo);
void metaUpdateStbStats(SMeta* pMeta, int64_t uid, int64_t delta);
void metaUpdateStbStats(SMeta* pMeta, int64_t uid, int64_t deltaCtb, int32_t deltaCol);
int32_t metaUidFilterCacheGet(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, LRUHandle** pHandle);
struct SMeta {

View File

@ -134,7 +134,7 @@ int32_t tqMetaOpen(STQ* pTq);
int32_t tqMetaClose(STQ* pTq);
int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle);
int32_t tqMetaDeleteHandle(STQ* pTq, const char* key);
int32_t tqMetaRestoreHandle(STQ* pTq);
//int32_t tqMetaRestoreHandle(STQ* pTq);
int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen);
int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key);
int32_t tqMetaRestoreCheckInfo(STQ* pTq);

View File

@ -380,6 +380,8 @@ struct STsdb {
TdThreadMutex lruMutex;
SLRUCache *biCache;
TdThreadMutex biMutex;
SLRUCache *bCache;
TdThreadMutex bMutex;
struct STFileSystem *pFS; // new
SRocksCache rCache;
};
@ -643,13 +645,19 @@ struct SRowMerger {
};
typedef struct {
char *path;
int32_t szPage;
int32_t flag;
TdFilePtr pFD;
int64_t pgno;
uint8_t *pBuf;
int64_t szFile;
char *path;
int32_t szPage;
int32_t flag;
TdFilePtr pFD;
int64_t pgno;
uint8_t *pBuf;
int64_t szFile;
STsdb *pTsdb;
const char *objName;
uint8_t s3File;
int32_t fid;
int64_t cid;
int64_t blkno;
} STsdbFD;
struct SDelFWriter {
@ -716,9 +724,9 @@ typedef struct SSttBlockLoadCostInfo {
} SSttBlockLoadCostInfo;
typedef struct SSttBlockLoadInfo {
SBlockData blockData[2]; // buffered block data
int32_t statisBlockIndex; // buffered statistics block index
void *statisBlock; // buffered statistics block data
SBlockData blockData[2]; // buffered block data
int32_t statisBlockIndex; // buffered statistics block index
void *statisBlock; // buffered statistics block data
void *pSttStatisBlkArray;
SArray *aSttBlk;
int32_t blockIndex[2]; // to denote the loaded block in the corresponding position.
@ -861,6 +869,9 @@ int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h);
int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHandle **handle);
int32_t tsdbBICacheRelease(SLRUCache *pCache, LRUHandle *h);
int32_t tsdbCacheGetBlockS3(SLRUCache *pCache, STsdbFD *pFD, LRUHandle **handle);
int32_t tsdbBCacheRelease(SLRUCache *pCache, LRUHandle *h);
int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);

View File

@ -22,15 +22,21 @@
extern "C" {
#endif
extern int8_t tsS3Enabled;
#define S3_BLOCK_CACHE
extern int8_t tsS3Enabled;
extern int32_t tsS3BlockSize;
extern int32_t tsS3BlockCacheSize;
int32_t s3Init();
void s3CleanUp();
int32_t s3PutObjectFromFile(const char *file, const char *object);
int32_t s3PutObjectFromFile2(const char *file, const char *object);
void s3DeleteObjectsByPrefix(const char *prefix);
void s3DeleteObjects(const char *object_name[], int nobject);
bool s3Exists(const char *object_name);
bool s3Get(const char *object_name, const char *path);
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, uint8_t **ppBlock);
void s3EvictCache(const char *path, long object_size);
long s3Size(const char *object_name);

View File

@ -69,6 +69,8 @@ typedef struct STqSnapReader STqSnapReader;
typedef struct STqSnapWriter STqSnapWriter;
typedef struct STqOffsetReader STqOffsetReader;
typedef struct STqOffsetWriter STqOffsetWriter;
typedef struct STqCheckInfoReader STqCheckInfoReader;
typedef struct STqCheckInfoWriter STqCheckInfoWriter;
typedef struct SStreamTaskReader SStreamTaskReader;
typedef struct SStreamTaskWriter SStreamTaskWriter;
typedef struct SStreamStateReader SStreamStateReader;
@ -168,7 +170,8 @@ int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid);
int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq);
int64_t metaGetTimeSeriesNum(SMeta* pMeta);
int64_t metaGetTimeSeriesNum(SMeta* pMeta, int type);
void metaUpdTimeSeriesNum(SMeta* pMeta);
SMCtbCursor* metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock);
int32_t metaResumeCtbCursor(SMCtbCursor* pCtbCur, int8_t first);
void metaPauseCtbCursor(SMCtbCursor* pCtbCur);
@ -308,6 +311,14 @@ int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData);
int32_t tqSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqSnapWriter** ppWriter);
int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback);
int32_t tqSnapWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
// STqCheckInfoReader ========================================
int32_t tqCheckInfoReaderOpen(STQ* pTq, int64_t sver, int64_t ever, STqCheckInfoReader** ppReader);
int32_t tqCheckInfoReaderClose(STqCheckInfoReader** ppReader);
int32_t tqCheckInfoRead(STqCheckInfoReader* pReader, uint8_t** ppData);
// STqCheckInfoWriter ======================================
int32_t tqCheckInfoWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqCheckInfoWriter** ppWriter);
int32_t tqCheckInfoWriterClose(STqCheckInfoWriter** ppWriter, int8_t rollback);
int32_t tqCheckInfoWrite(STqCheckInfoWriter* pWriter, uint8_t* pData, uint32_t nData);
// STqOffsetReader ========================================
int32_t tqOffsetReaderOpen(STQ* pTq, int64_t sver, int64_t ever, STqOffsetReader** ppReader);
int32_t tqOffsetReaderClose(STqOffsetReader** ppReader);
@ -503,6 +514,7 @@ enum {
SNAP_DATA_STREAM_TASK_CHECKPOINT = 10,
SNAP_DATA_STREAM_STATE = 11,
SNAP_DATA_STREAM_STATE_BACKEND = 12,
SNAP_DATA_TQ_CHECKINFO = 13,
};
struct SSnapDataHdr {

View File

@ -696,22 +696,30 @@ int64_t metaGetTbNum(SMeta *pMeta) {
return pMeta->pVnode->config.vndStats.numOfCTables + pMeta->pVnode->config.vndStats.numOfNTables;
}
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
int64_t nTables = metaGetTbNum(pMeta);
if (nTables - pMeta->pVnode->config.vndStats.numOfCmprTables > 100 ||
pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 ||
++pMeta->pVnode->config.vndStats.itvTimeSeries % (60 * 5) == 0) {
int64_t num = 0;
vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
void metaUpdTimeSeriesNum(SMeta *pMeta) {
int64_t nCtbTimeSeries = 0;
if (vnodeGetTimeSeriesNum(pMeta->pVnode, &nCtbTimeSeries) == 0) {
atomic_store_64(&pMeta->pVnode->config.vndStats.numOfTimeSeries, nCtbTimeSeries);
}
}
pMeta->pVnode->config.vndStats.itvTimeSeries = (TD_VID(pMeta->pVnode) % 100) * 2;
pMeta->pVnode->config.vndStats.numOfCmprTables = nTables;
static FORCE_INLINE int64_t metaGetTimeSeriesNumImpl(SMeta *pMeta, bool forceUpd) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
SVnodeStats *pStats = &pMeta->pVnode->config.vndStats;
if (forceUpd || pStats->numOfTimeSeries <= 0) {
metaUpdTimeSeriesNum(pMeta);
}
return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
return pStats->numOfTimeSeries + pStats->numOfNTimeSeries;
}
// type 1: also record the result as the reported timeseries count
int64_t metaGetTimeSeriesNum(SMeta *pMeta, int type) {
int64_t nTimeSeries = metaGetTimeSeriesNumImpl(pMeta, false);
if (type == 1) {
atomic_store_64(&pMeta->pVnode->config.vndStats.numOfReportedTimeSeries, nTimeSeries);
}
return nTimeSeries;
}
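// Worked example of the formula above (hypothetical numbers): a super table with 4 columns
// (1 timestamp + 3 metrics) and 1000 child tables contributes (4 - 1) * 1000 = 3000 time
// series; columns of normal tables are tracked separately in numOfNTimeSeries and added on top.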
typedef struct {
@ -1509,9 +1517,10 @@ _exit:
return code;
}
int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables) {
int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables, int32_t *numOfCols) {
int32_t code = 0;
*numOfTables = 0;
if (!numOfTables && !numOfCols) goto _exit;
SVnode *pVnodeObj = pVnode;
metaRLock(pVnodeObj->pMeta);
@ -1520,19 +1529,26 @@ int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables) {
SMetaStbStats state = {0};
if (metaStatsCacheGet(pVnodeObj->pMeta, uid, &state) == TSDB_CODE_SUCCESS) {
metaULock(pVnodeObj->pMeta);
*numOfTables = state.ctbNum;
if (numOfTables) *numOfTables = state.ctbNum;
if (numOfCols) *numOfCols = state.colNum;
ASSERTS(state.colNum > 0, "vgId:%d, suid:%" PRIi64 " nCols:%d <= 0 in metaCache", TD_VID(pVnodeObj), uid,
state.colNum);
goto _exit;
}
// slow path: search TDB
int64_t ctbNum = 0;
int32_t colNum = 0;
vnodeGetCtbNum(pVnode, uid, &ctbNum);
vnodeGetStbColumnNum(pVnode, uid, &colNum);
metaULock(pVnodeObj->pMeta);
*numOfTables = ctbNum;
if (numOfTables) *numOfTables = ctbNum;
if (numOfCols) *numOfCols = colNum;
state.uid = uid;
state.ctbNum = ctbNum;
state.colNum = colNum;
// upsert the cache
metaWLock(pVnodeObj->pMeta);
@ -1543,12 +1559,12 @@ _exit:
return code;
}
void metaUpdateStbStats(SMeta *pMeta, int64_t uid, int64_t delta) {
void metaUpdateStbStats(SMeta *pMeta, int64_t uid, int64_t deltaCtb, int32_t deltaCol) {
SMetaStbStats stats = {0};
if (metaStatsCacheGet(pMeta, uid, &stats) == TSDB_CODE_SUCCESS) {
stats.ctbNum += delta;
stats.ctbNum += deltaCtb;
stats.colNum += deltaCol;
metaStatsCacheUpsert(pMeta, &stats);
}
}

View File

@ -15,6 +15,8 @@
#include "meta.h"
extern tsem_t dmNotifySem;
static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const SSchema *pSchema);
static int metaDelJsonVarFromIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const SSchema *pSchema);
static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME);
@ -26,7 +28,7 @@ static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry);
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type);
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type, tb_uid_t *pSuid);
static void metaDestroyTagIdxKey(STagIdxKey *pTagIdxKey);
// opt ins_tables query
static int metaUpdateBtimeIdx(SMeta *pMeta, const SMetaEntry *pME);
@ -34,6 +36,7 @@ static int metaDeleteBtimeIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateNcolIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaDeleteNcolIdx(SMeta *pMeta, const SMetaEntry *pME);
static void metaGetEntryInfo(const SMetaEntry *pEntry, SMetaInfo *pInfo) {
pInfo->uid = pEntry->uid;
pInfo->version = pEntry->version;
@ -191,6 +194,14 @@ int metaDelJsonVarFromIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const SSche
return 0;
}
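// When the time-series count has grown by more than tsTimeSeriesThreshold beyond the last
// reported value, dmNotifySem is posted; the dnode side presumably uses this to push the
// out-of-band notify handled by mndProcessNotifyReq instead of waiting for the next status
// heartbeat (enterprise builds only).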
static inline void metaTimeSeriesNotifyCheck(SMeta *pMeta) {
#ifdef TD_ENTERPRISE
int64_t nTimeSeries = metaGetTimeSeriesNum(pMeta, 0);
int64_t deltaTS = nTimeSeries - pMeta->pVnode->config.vndStats.numOfReportedTimeSeries;
if (deltaTS > tsTimeSeriesThreshold) tsem_post(&dmNotifySem);
#endif
}
int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
SMetaEntry me = {0};
int kLen = 0;
@ -292,7 +303,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq, SArray *tb
for (int32_t iChild = 0; iChild < taosArrayGetSize(tbUidList); iChild++) {
tb_uid_t uid = *(tb_uid_t *)taosArrayGet(tbUidList, iChild);
metaDropTableByUid(pMeta, uid, NULL);
metaDropTableByUid(pMeta, uid, NULL, NULL);
}
// drop super table
@ -304,8 +315,12 @@ _drop_super_table:
tdbTbDelete(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), pMeta->txn);
tdbTbDelete(pMeta->pSuidIdx, &pReq->suid, sizeof(tb_uid_t), pMeta->txn);
metaStatsCacheDrop(pMeta, pReq->suid);
metaULock(pMeta);
metaUpdTimeSeriesNum(pMeta);
_exit:
tdbFree(pKey);
tdbFree(pData);
@ -376,6 +391,8 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
nStbEntry.stbEntry.schemaRow = pReq->schemaRow;
nStbEntry.stbEntry.schemaTag = pReq->schemaTag;
int32_t deltaCol = pReq->schemaRow.nCols - oStbEntry.stbEntry.schemaRow.nCols;
metaWLock(pMeta);
// compare two entry
if (oStbEntry.stbEntry.schemaRow.version != pReq->schemaRow.version) {
@ -390,8 +407,18 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
// metaStatsCacheDrop(pMeta, nStbEntry.uid);
if (deltaCol != 0) {
metaUpdateStbStats(pMeta, pReq->suid, 0, deltaCol);
}
metaULock(pMeta);
if (deltaCol != 0) {
int64_t ctbNum;
metaGetStbStats(pMeta->pVnode, pReq->suid, &ctbNum, NULL);
pMeta->pVnode->config.vndStats.numOfTimeSeries += (ctbNum * deltaCol);
}
_exit:
if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf);
tDecoderClear(&dc);
tdbTbcClose(pTbDbc);
@ -734,6 +761,7 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs
metaReaderClear(&mr);
// build SMetaEntry
SVnodeStats *pStats = &pMeta->pVnode->config.vndStats;
me.version = ver;
me.type = pReq->type;
me.uid = pReq->uid;
@ -767,10 +795,13 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs
}
#endif
++pMeta->pVnode->config.vndStats.numOfCTables;
++pStats->numOfCTables;
int32_t nCols = 0;
metaGetStbStats(pMeta->pVnode, me.ctbEntry.suid, 0, &nCols);
pStats->numOfTimeSeries += nCols - 1;
metaWLock(pMeta);
metaUpdateStbStats(pMeta, me.ctbEntry.suid, 1);
metaUpdateStbStats(pMeta, me.ctbEntry.suid, 1, 0);
metaUidCacheClear(pMeta, me.ctbEntry.suid);
metaTbGroupCacheClear(pMeta, me.ctbEntry.suid);
metaULock(pMeta);
@ -782,12 +813,14 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs
me.ntbEntry.schemaRow = pReq->ntb.schemaRow;
me.ntbEntry.ncid = me.ntbEntry.schemaRow.pSchema[me.ntbEntry.schemaRow.nCols - 1].colId + 1;
++pMeta->pVnode->config.vndStats.numOfNTables;
pMeta->pVnode->config.vndStats.numOfNTimeSeries += me.ntbEntry.schemaRow.nCols - 1;
++pStats->numOfNTables;
pStats->numOfNTimeSeries += me.ntbEntry.schemaRow.nCols - 1;
}
if (metaHandleEntry(pMeta, &me) < 0) goto _err;
metaTimeSeriesNotifyCheck(pMeta);
if (pMetaRsp) {
*pMetaRsp = taosMemoryCalloc(1, sizeof(STableMetaRsp));
@ -817,7 +850,8 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
void *pData = NULL;
int nData = 0;
int rc = 0;
tb_uid_t uid;
tb_uid_t uid = 0;
tb_uid_t suid = 0;
int type;
rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData);
@ -828,9 +862,19 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
uid = *(tb_uid_t *)pData;
metaWLock(pMeta);
metaDropTableByUid(pMeta, uid, &type);
rc = metaDropTableByUid(pMeta, uid, &type, &suid);
metaULock(pMeta);
if (rc < 0) goto _exit;
if (type == TSDB_CHILD_TABLE) {
int32_t nCols = 0;
SVnodeStats *pStats = &pMeta->pVnode->config.vndStats;
if (metaGetStbStats(pMeta->pVnode, suid, NULL, &nCols) == 0) {
pStats->numOfTimeSeries -= nCols - 1;
}
}
if ((type == TSDB_CHILD_TABLE || type == TSDB_NORMAL_TABLE) && tbUids) {
taosArrayPush(tbUids, &uid);
}
@ -839,20 +883,48 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
*tbUid = uid;
}
_exit:
tdbFree(pData);
return 0;
return rc;
}
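// metaDropTables counts dropped child tables per super table (suid -> count) while holding the
// meta lock, then, after unlocking, decreases numOfTimeSeries by count * (stable columns - 1)
// for every affected super table.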
void metaDropTables(SMeta *pMeta, SArray *tbUids) {
if (taosArrayGetSize(tbUids) == 0) return;
int64_t nCtbDropped = 0;
SSHashObj *suidHash = tSimpleHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
metaWLock(pMeta);
for (int i = 0; i < taosArrayGetSize(tbUids); ++i) {
tb_uid_t uid = *(tb_uid_t *)taosArrayGet(tbUids, i);
metaDropTableByUid(pMeta, uid, NULL);
tb_uid_t suid = 0;
int type;
metaDropTableByUid(pMeta, uid, &type, &suid);
if (type == TSDB_CHILD_TABLE && suid != 0 && suidHash) {
int64_t *pVal = tSimpleHashGet(suidHash, &suid, sizeof(tb_uid_t));
if (pVal) {
nCtbDropped = *pVal + 1;
} else {
nCtbDropped = 1;
}
tSimpleHashPut(suidHash, &suid, sizeof(tb_uid_t), &nCtbDropped, sizeof(int64_t));
}
metaDebug("batch drop table:%" PRId64, uid);
}
metaULock(pMeta);
// update timeseries
void *pCtbDropped = NULL;
int32_t iter = 0;
while ((pCtbDropped = tSimpleHashIterate(suidHash, pCtbDropped, &iter))) {
tb_uid_t *pSuid = tSimpleHashGetKey(pCtbDropped, NULL);
int32_t nCols = 0;
SVnodeStats *pStats = &pMeta->pVnode->config.vndStats;
if (metaGetStbStats(pMeta->pVnode, *pSuid, NULL, &nCols) == 0) {
pStats->numOfTimeSeries -= *(int64_t *)pCtbDropped * (nCols - 1);
}
}
tSimpleHashCleanup(suidHash);
}
static int32_t metaFilterTableByHash(SMeta *pMeta, SArray *uidList) {
@ -987,7 +1059,7 @@ static int metaDeleteTtl(SMeta *pMeta, const SMetaEntry *pME) {
return ttlMgrDeleteTtl(pMeta->pTtlMgr, &ctx);
}
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type, tb_uid_t *pSuid) {
void *pData = NULL;
int nData = 0;
int rc = 0;
@ -1012,9 +1084,11 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
if (type) *type = e.type;
if (e.type == TSDB_CHILD_TABLE) {
if (pSuid) *pSuid = e.ctbEntry.suid;
void *tData = NULL;
int tLen = 0;
if (tdbTbGet(pMeta->pUidIdx, &e.ctbEntry.suid, sizeof(tb_uid_t), &tData, &tLen) == 0) {
STbDbKey tbDbKey = {.uid = e.ctbEntry.suid, .version = ((SUidIdxVal *)tData)[0].version};
if (tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &tData, &tLen) == 0) {
@ -1075,8 +1149,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
tdbTbDelete(pMeta->pCtbIdx, &(SCtbIdxKey){.suid = e.ctbEntry.suid, .uid = uid}, sizeof(SCtbIdxKey), pMeta->txn);
--pMeta->pVnode->config.vndStats.numOfCTables;
metaUpdateStbStats(pMeta, e.ctbEntry.suid, -1);
metaUpdateStbStats(pMeta, e.ctbEntry.suid, -1, 0);
metaUidCacheClear(pMeta, e.ctbEntry.suid);
metaTbGroupCacheClear(pMeta, e.ctbEntry.suid);
} else if (e.type == TSDB_NORMAL_TABLE) {
@ -1243,6 +1316,9 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_COL_ALREADY_EXISTS;
goto _err;
}
if ((terrno = grantCheck(TSDB_GRANT_TIMESERIES)) < 0) {
goto _err;
}
pSchema->version++;
pSchema->nCols++;
pNewSchema = taosMemoryMalloc(sizeof(SSchema) * pSchema->nCols);
@ -1255,6 +1331,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
strcpy(pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].name, pAlterTbReq->colName);
++pMeta->pVnode->config.vndStats.numOfNTimeSeries;
metaTimeSeriesNotifyCheck(pMeta);
break;
case TSDB_ALTER_TABLE_DROP_COLUMN:
if (pColumn == NULL) {

View File

@ -697,7 +697,9 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
tqDestroyTqHandle(&handle);
goto end;
}
taosWLockLatch(&pTq->lock);
ret = tqMetaSaveHandle(pTq, req.subKey, &handle);
taosWUnLockLatch(&pTq->lock);
} else {
while(1){
taosWLockLatch(&pTq->lock);
@ -710,7 +712,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
continue;
}
if (pHandle->consumerId == req.newConsumerId) { // do nothing
tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains, because redo wal log", req.vgId, req.newConsumerId);
tqInfo("vgId:%d no switch consumer:0x%" PRIx64 " remains", req.vgId, req.newConsumerId);
} else {
tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId,
req.newConsumerId);

View File

@ -0,0 +1,196 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "meta.h"
#include "tdbInt.h"
#include "tq.h"
// STqCheckInfoReader ========================================
struct STqCheckInfoReader {
STQ* pTq;
int64_t sver;
int64_t ever;
TBC* pCur;
};
int32_t tqCheckInfoReaderOpen(STQ* pTq, int64_t sver, int64_t ever, STqCheckInfoReader** ppReader) {
int32_t code = 0;
STqCheckInfoReader* pReader = NULL;
// alloc
pReader = (STqCheckInfoReader*)taosMemoryCalloc(1, sizeof(STqCheckInfoReader));
if (pReader == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
pReader->pTq = pTq;
pReader->sver = sver;
pReader->ever = ever;
// impl
code = tdbTbcOpen(pTq->pCheckStore, &pReader->pCur, NULL);
if (code) {
taosMemoryFree(pReader);
goto _err;
}
code = tdbTbcMoveToFirst(pReader->pCur);
if (code) {
taosMemoryFree(pReader);
goto _err;
}
tqInfo("vgId:%d, vnode checkinfo tq reader opened", TD_VID(pTq->pVnode));
*ppReader = pReader;
return code;
_err:
tqError("vgId:%d, vnode checkinfo tq reader open failed since %s", TD_VID(pTq->pVnode), tstrerror(code));
*ppReader = NULL;
return code;
}
int32_t tqCheckInfoReaderClose(STqCheckInfoReader** ppReader) {
int32_t code = 0;
tdbTbcClose((*ppReader)->pCur);
taosMemoryFree(*ppReader);
*ppReader = NULL;
return code;
}
int32_t tqCheckInfoRead(STqCheckInfoReader* pReader, uint8_t** ppData) {
int32_t code = 0;
void* pKey = NULL;
void* pVal = NULL;
int32_t kLen = 0;
int32_t vLen = 0;
if (tdbTbcNext(pReader->pCur, &pKey, &kLen, &pVal, &vLen)) {
goto _exit;
}
*ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + vLen);
if (*ppData == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
pHdr->type = SNAP_DATA_TQ_CHECKINFO;
pHdr->size = vLen;
memcpy(pHdr->data, pVal, vLen);
_exit:
tdbFree(pKey);
tdbFree(pVal);
tqInfo("vgId:%d, vnode check info tq read data, vLen:%d", TD_VID(pReader->pTq->pVnode), vLen);
return code;
_err:
tdbFree(pKey);
tdbFree(pVal);
tqError("vgId:%d, vnode check info tq read data failed since %s", TD_VID(pReader->pTq->pVnode), tstrerror(code));
return code;
}
// STqCheckInfoWriter ========================================
struct STqCheckInfoWriter {
STQ* pTq;
int64_t sver;
int64_t ever;
TXN* txn;
};
int32_t tqCheckInfoWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqCheckInfoWriter** ppWriter) {
int32_t code = 0;
STqCheckInfoWriter* pWriter;
// alloc
pWriter = (STqCheckInfoWriter*)taosMemoryCalloc(1, sizeof(*pWriter));
if (pWriter == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
pWriter->pTq = pTq;
pWriter->sver = sver;
pWriter->ever = ever;
if (tdbBegin(pTq->pMetaDB, &pWriter->txn, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) {
code = -1;
taosMemoryFree(pWriter);
goto _err;
}
*ppWriter = pWriter;
return code;
_err:
tqError("vgId:%d, tq check info writer open failed since %s", TD_VID(pTq->pVnode), tstrerror(code));
*ppWriter = NULL;
return code;
}
int32_t tqCheckInfoWriterClose(STqCheckInfoWriter** ppWriter, int8_t rollback) {
int32_t code = 0;
STqCheckInfoWriter* pWriter = *ppWriter;
STQ* pTq = pWriter->pTq;
if (rollback) {
tdbAbort(pWriter->pTq->pMetaDB, pWriter->txn);
} else {
code = tdbCommit(pWriter->pTq->pMetaDB, pWriter->txn);
if (code) goto _err;
code = tdbPostCommit(pWriter->pTq->pMetaDB, pWriter->txn);
if (code) goto _err;
}
taosMemoryFree(pWriter);
*ppWriter = NULL;
return code;
_err:
tqError("vgId:%d, tq check info writer close failed since %s", TD_VID(pTq->pVnode), tstrerror(code));
return code;
}
int32_t tqCheckInfoWrite(STqCheckInfoWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
STQ* pTq = pWriter->pTq;
STqCheckInfo info = {0};
SDecoder decoder;
SDecoder* pDecoder = &decoder;
tDecoderInit(pDecoder, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
code = tDecodeSTqCheckInfo(pDecoder, &info);
if (code) goto _err;
code = taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo));
if (code) goto _err;
code = tqMetaSaveCheckInfo(pTq, info.topic, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
if (code) goto _err;
tDecoderClear(pDecoder);
return code;
_err:
tDecoderClear(pDecoder);
tqError("vgId:%d, vnode check info tq write failed since %s", TD_VID(pTq->pVnode), tstrerror(code));
return code;
}

View File

@ -75,29 +75,13 @@ int32_t tqSnapReaderClose(STqSnapReader** ppReader) {
int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData) {
int32_t code = 0;
const void* pKey = NULL;
const void* pVal = NULL;
void* pKey = NULL;
void* pVal = NULL;
int32_t kLen = 0;
int32_t vLen = 0;
SDecoder decoder;
STqHandle handle;
*ppData = NULL;
for (;;) {
if (tdbTbcGet(pReader->pCur, &pKey, &kLen, &pVal, &vLen)) {
goto _exit;
}
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
tDecodeSTqHandle(&decoder, &handle);
tDecoderClear(&decoder);
if (handle.snapshotVer <= pReader->sver && handle.snapshotVer >= pReader->ever) {
tdbTbcMoveToNext(pReader->pCur);
break;
} else {
tdbTbcMoveToNext(pReader->pCur);
}
if (tdbTbcNext(pReader->pCur, &pKey, &kLen, &pVal, &vLen)) {
goto _exit;
}
*ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + vLen);
@ -111,13 +95,15 @@ int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData) {
pHdr->size = vLen;
memcpy(pHdr->data, pVal, vLen);
tqInfo("vgId:%d, vnode snapshot tq read data, version:%" PRId64 " subKey: %s vLen:%d", TD_VID(pReader->pTq->pVnode),
handle.snapshotVer, handle.subKey, vLen);
_exit:
tdbFree(pKey);
tdbFree(pVal);
tqInfo("vgId:%d, vnode snapshot tq read data, vLen:%d", TD_VID(pReader->pTq->pVnode), vLen);
return code;
_err:
tdbFree(pKey);
tdbFree(pVal);
tqError("vgId:%d, vnode snapshot tq read data failed since %s", TD_VID(pReader->pTq->pVnode), tstrerror(code));
return code;
}
@ -173,20 +159,13 @@ int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback) {
if (code) goto _err;
}
int vgId = TD_VID(pWriter->pTq->pVnode);
taosMemoryFree(pWriter);
*ppWriter = NULL;
// restore from metastore
if (tqMetaRestoreHandle(pTq) < 0) {
goto _err;
}
return code;
_err:
tqError("vgId:%d, tq snapshot writer close failed since %s", vgId, tstrerror(code));
tqError("vgId:%d, tq snapshot writer close failed since %s", TD_VID(pTq->pVnode), tstrerror(code));
return code;
}
@ -195,19 +174,18 @@ int32_t tqSnapWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
STQ* pTq = pWriter->pTq;
SDecoder decoder = {0};
SDecoder* pDecoder = &decoder;
STqHandle handle;
STqHandle handle = {0};
tDecoderInit(pDecoder, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
code = tDecodeSTqHandle(pDecoder, &handle);
if (code) goto _err;
if (code) goto end;
taosWLockLatch(&pTq->lock);
code = tqMetaSaveHandle(pTq, handle.subKey, &handle);
if (code < 0) goto _err;
tDecoderClear(pDecoder);
taosWUnLockLatch(&pTq->lock);
return code;
_err:
end:
tDecoderClear(pDecoder);
tqError("vgId:%d, vnode snapshot tq write failed since %s", TD_VID(pTq->pVnode), tstrerror(code));
tqDestroyTqHandle(&handle);
tqInfo("vgId:%d, vnode snapshot tq write result:%d", TD_VID(pTq->pVnode), code);
return code;
}

View File

@ -198,58 +198,51 @@ int32_t tqMetaRestoreCheckInfo(STQ* pTq) {
int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
int32_t code;
int32_t vlen;
void* buf = NULL;
SEncoder encoder;
tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);
if (code < 0) {
return -1;
goto end;
}
tqDebug("tq save %s(%d) handle consumer:0x%" PRIx64 " epoch:%d vgId:%d", pHandle->subKey,
(int32_t)strlen(pHandle->subKey), pHandle->consumerId, pHandle->epoch, TD_VID(pTq->pVnode));
void* buf = taosMemoryCalloc(1, vlen);
buf = taosMemoryCalloc(1, vlen);
if (buf == NULL) {
return -1;
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
SEncoder encoder;
tEncoderInit(&encoder, buf, vlen);
if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
code = tEncodeSTqHandle(&encoder, pHandle);
if (code < 0) {
goto end;
}
TXN* txn;
if (tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
TXN* txn = NULL;
code = tdbBegin(pTq->pMetaDB, &txn, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED);
if (code < 0) {
goto end;
}
if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, txn) < 0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
code = tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, txn);
if (code < 0) {
goto end;
}
if (tdbCommit(pTq->pMetaDB, txn) < 0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
}
if (tdbPostCommit(pTq->pMetaDB, txn) < 0) {
tEncoderClear(&encoder);
taosMemoryFree(buf);
return -1;
code = tdbCommit(pTq->pMetaDB, txn);
if (code < 0) {
goto end;
}
code = tdbPostCommit(pTq->pMetaDB, txn);
end:
tEncoderClear(&encoder);
taosMemoryFree(buf);
return 0;
return code;
}
int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
@ -349,15 +342,18 @@ static int buildHandle(STQ* pTq, STqHandle* handle){
static int restoreHandle(STQ* pTq, void* pVal, int vLen, STqHandle* handle){
int32_t vgId = TD_VID(pTq->pVnode);
SDecoder decoder;
int32_t code = 0;
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
tDecodeSTqHandle(&decoder, handle);
tDecoderClear(&decoder);
if(buildHandle(pTq, handle) < 0){
return -1;
}
code = tDecodeSTqHandle(&decoder, handle);
if (code) goto end;
code = buildHandle(pTq, handle);
if (code) goto end;
tqInfo("restoreHandle %s consumer 0x%" PRIx64 " vgId:%d", handle->subKey, handle->consumerId, vgId);
return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
code = taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
end:
tDecoderClear(&decoder);
return code;
}
int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle){
@ -388,34 +384,34 @@ int32_t tqCreateHandle(STQ* pTq, SMqRebVgReq* req, STqHandle* handle){
return taosHashPut(pTq->pHandle, handle->subKey, strlen(handle->subKey), handle, sizeof(STqHandle));
}
int32_t tqMetaRestoreHandle(STQ* pTq) {
int code = 0;
TBC* pCur = NULL;
if (tdbTbcOpen(pTq->pExecStore, &pCur, NULL) < 0) {
return -1;
}
void* pKey = NULL;
int kLen = 0;
void* pVal = NULL;
int vLen = 0;
tdbTbcMoveToFirst(pCur);
while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
STqHandle handle = {0};
code = restoreHandle(pTq, pVal, vLen, &handle);
if (code < 0) {
tqDestroyTqHandle(&handle);
break;
}
}
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
return code;
}
//int32_t tqMetaRestoreHandle(STQ* pTq) {
// int code = 0;
// TBC* pCur = NULL;
// if (tdbTbcOpen(pTq->pExecStore, &pCur, NULL) < 0) {
// return -1;
// }
//
// void* pKey = NULL;
// int kLen = 0;
// void* pVal = NULL;
// int vLen = 0;
//
// tdbTbcMoveToFirst(pCur);
//
// while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
// STqHandle handle = {0};
// code = restoreHandle(pTq, pVal, vLen, &handle);
// if (code < 0) {
// tqDestroyTqHandle(&handle);
// break;
// }
// }
//
// tdbFree(pKey);
// tdbFree(pVal);
// tdbTbcClose(pCur);
// return code;
//}
int32_t tqMetaGetHandle(STQ* pTq, const char* key) {
void* pVal = NULL;

View File

@ -85,6 +85,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) {
*ppData = (uint8_t*)buf;
pReader->readEnd = 1;
taosCloseFile(&pFile);
return 0;
}
@ -159,6 +160,7 @@ int32_t tqOffsetSnapWrite(STqOffsetWriter* pWriter, uint8_t* pData, uint32_t nDa
taosCloseFile(&pFile);
return -1;
}
taosCloseFile(&pFile);
} else {
return -1;
}

View File

@ -104,6 +104,7 @@ int32_t streamStateSnapRead(SStreamStateReader* pReader, uint8_t** ppData) {
pHdr->type = SNAP_DATA_STREAM_STATE_BACKEND;
pHdr->size = len;
memcpy(pHdr->data, rowData, len);
taosMemoryFree(rowData);
tqDebug("vgId:%d, vnode stream-state snapshot read data success", TD_VID(pReader->pTq->pVnode));
return code;

View File

@ -16,6 +16,7 @@
#include "tsdbDataFileRW.h"
#include "tsdbReadUtil.h"
#include "vnd.h"
#include "vndCos.h"
#define ROCKS_BATCH_SIZE (4096)
@ -51,6 +52,41 @@ static void tsdbCloseBICache(STsdb *pTsdb) {
}
}
static int32_t tsdbOpenBCache(STsdb *pTsdb) {
int32_t code = 0;
// SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5);
int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
SLRUCache *pCache = taosLRUCacheInit(tsS3BlockCacheSize * tsS3BlockSize * szPage, 0, .5);
if (pCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
taosLRUCacheSetStrictCapacity(pCache, false);
taosThreadMutexInit(&pTsdb->bMutex, NULL);
_err:
pTsdb->bCache = pCache;
return code;
}
static void tsdbCloseBCache(STsdb *pTsdb) {
SLRUCache *pCache = pTsdb->bCache;
if (pCache) {
int32_t elems = taosLRUCacheGetElems(pCache);
tsdbTrace("vgId:%d, elems: %d", TD_VID(pTsdb->pVnode), elems);
taosLRUCacheEraseUnrefEntries(pCache);
elems = taosLRUCacheGetElems(pCache);
tsdbTrace("vgId:%d, elems: %d", TD_VID(pTsdb->pVnode), elems);
taosLRUCacheCleanup(pCache);
taosThreadMutexDestroy(&pTsdb->bMutex);
}
}
#define ROCKS_KEY_LEN (sizeof(tb_uid_t) + sizeof(int16_t) + sizeof(int8_t))
typedef struct {
@ -1149,6 +1185,12 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
goto _err;
}
code = tsdbOpenBCache(pTsdb);
if (code != TSDB_CODE_SUCCESS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
code = tsdbOpenRocksCache(pTsdb);
if (code != TSDB_CODE_SUCCESS) {
code = TSDB_CODE_OUT_OF_MEMORY;
@ -1178,6 +1220,7 @@ void tsdbCloseCache(STsdb *pTsdb) {
}
tsdbCloseBICache(pTsdb);
tsdbCloseBCache(pTsdb);
tsdbCloseRocksCache(pTsdb);
}
@ -2987,3 +3030,100 @@ int32_t tsdbBICacheRelease(SLRUCache *pCache, LRUHandle *h) {
return code;
}
// block cache
static void getBCacheKey(int32_t fid, int64_t commitID, int64_t blkno, char *key, int *len) {
struct {
int32_t fid;
int64_t commitID;
int64_t blkno;
} bKey = {0};
bKey.fid = fid;
bKey.commitID = commitID;
bKey.blkno = blkno;
*len = sizeof(bKey);
memcpy(key, &bKey, *len);
}
static int32_t tsdbCacheLoadBlockS3(STsdbFD *pFD, uint8_t **ppBlock) {
int32_t code = 0;
/*
uint8_t *pBlock = taosMemoryCalloc(1, tsS3BlockSize * pFD->szPage);
if (pBlock == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
*/
int64_t block_offset = (pFD->blkno - 1) * tsS3BlockSize * pFD->szPage;
// int64_t size = 4096;
code = s3GetObjectBlock(pFD->objName, block_offset, tsS3BlockSize * pFD->szPage, ppBlock);
if (code != TSDB_CODE_SUCCESS) {
// taosMemoryFree(pBlock);
code = TSDB_CODE_OUT_OF_MEMORY;
return code;
}
//*ppBlock = pBlock;
tsdbTrace("block:%p load from s3", *ppBlock);
_exit:
return code;
}
static void deleteBCache(const void *key, size_t keyLen, void *value, void *ud) {
(void)ud;
uint8_t *pBlock = (uint8_t *)value;
taosMemoryFree(pBlock);
}
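// tsdbCacheGetBlockS3 does a double-checked lookup: an unlocked LRU probe first, then a second
// probe under bMutex before fetching the block from S3, so concurrent readers of the same block
// do not trigger duplicate downloads. A NULL handle with code 0 means the block could not be
// loaded.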
int32_t tsdbCacheGetBlockS3(SLRUCache *pCache, STsdbFD *pFD, LRUHandle **handle) {
int32_t code = 0;
char key[128] = {0};
int keyLen = 0;
getBCacheKey(pFD->fid, pFD->cid, pFD->blkno, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
STsdb *pTsdb = pFD->pTsdb;
taosThreadMutexLock(&pTsdb->bMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
uint8_t *pBlock = NULL;
code = tsdbCacheLoadBlockS3(pFD, &pBlock);
// if the block is empty or the load failed, return 0 with a NULL handle
if (code != TSDB_CODE_SUCCESS || pBlock == NULL) {
taosThreadMutexUnlock(&pTsdb->bMutex);
*handle = NULL;
return 0;
}
size_t charge = tsS3BlockSize * pFD->szPage;
_taos_lru_deleter_t deleter = deleteBCache;
LRUStatus status =
taosLRUCacheInsert(pCache, key, keyLen, pBlock, charge, deleter, &h, TAOS_LRU_PRIORITY_LOW, NULL);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
}
taosThreadMutexUnlock(&pTsdb->bMutex);
}
*handle = h;
return code;
}
int32_t tsdbBCacheRelease(SLRUCache *pCache, LRUHandle *h) {
int32_t code = 0;
taosLRUCacheRelease(pCache, h, false);
return code;
}

View File

@ -101,7 +101,7 @@ int32_t tsdbDataFileReaderOpen(const char *fname[], const SDataFileReaderConfig
if (fname) {
for (int32_t i = 0; i < TSDB_FTYPE_MAX; ++i) {
if (fname[i]) {
code = tsdbOpenFile(fname[i], config->szPage, TD_FILE_READ, &reader[0]->fd[i]);
code = tsdbOpenFile(fname[i], config->tsdb, TD_FILE_READ, &reader[0]->fd[i]);
TSDB_CHECK_CODE(code, lino, _exit);
}
}
@ -110,7 +110,7 @@ int32_t tsdbDataFileReaderOpen(const char *fname[], const SDataFileReaderConfig
if (config->files[i].exist) {
char fname1[TSDB_FILENAME_LEN];
tsdbTFileName(config->tsdb, &config->files[i].file, fname1);
code = tsdbOpenFile(fname1, config->szPage, TD_FILE_READ, &reader[0]->fd[i]);
code = tsdbOpenFile(fname1, config->tsdb, TD_FILE_READ, &reader[0]->fd[i]);
TSDB_CHECK_CODE(code, lino, _exit);
}
}
@ -1466,7 +1466,7 @@ static int32_t tsdbDataFileWriterOpenDataFD(SDataFileWriter *writer) {
}
tsdbTFileName(writer->config->tsdb, &writer->files[ftype], fname);
code = tsdbOpenFile(fname, writer->config->szPage, flag, &writer->fd[ftype]);
code = tsdbOpenFile(fname, writer->config->tsdb, flag, &writer->fd[ftype]);
TSDB_CHECK_CODE(code, lino, _exit);
if (writer->files[ftype].size == 0) {
@ -1634,7 +1634,7 @@ static int32_t tsdbDataFileWriterOpenTombFD(SDataFileWriter *writer) {
int32_t flag = (TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
tsdbTFileName(writer->config->tsdb, writer->files + ftype, fname);
code = tsdbOpenFile(fname, writer->config->szPage, flag, &writer->fd[ftype]);
code = tsdbOpenFile(fname, writer->config->tsdb, flag, &writer->fd[ftype]);
TSDB_CHECK_CODE(code, lino, _exit);
uint8_t hdr[TSDB_FHDR_SIZE] = {0};
@ -1684,4 +1684,4 @@ _exit:
TSDB_ERROR_LOG(TD_VID(writer->config->tsdb->pVnode), lino, code);
}
return code;
}
}

View File

@ -31,7 +31,7 @@ typedef struct SFDataPtr {
int64_t size;
} SFDataPtr;
extern int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD);
extern int32_t tsdbOpenFile(const char *path, STsdb *pTsdb, int32_t flag, STsdbFD **ppFD);
extern void tsdbCloseFile(STsdbFD **ppFD);
extern int32_t tsdbWriteFile(STsdbFD *pFD, int64_t offset, const uint8_t *pBuf, int64_t size);
extern int32_t tsdbReadFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t size);
@ -41,4 +41,4 @@ extern int32_t tsdbFsyncFile(STsdbFD *pFD);
}
#endif
#endif /*_TD_TSDB_DEF_H_*/
#endif /*_TD_TSDB_DEF_H_*/

View File

@ -486,10 +486,15 @@ typedef enum {
} ETombBlkCheckEnum;
static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_t numOfTables, int32_t* j,
STableBlockScanInfo** pScanInfo, ETombBlkCheckEnum* pRet) {
ETombBlkCheckEnum* pRet) {
int32_t code = 0;
STombRecord record = {0};
uint64_t uid = pReader->status.uidList.tableUidList[*j];
STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, uid, pReader->idStr);
if (pScanInfo->pFileDelData == NULL) {
pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData));
}
for (int32_t k = 0; k < TARRAY2_SIZE(pBlock->suid); ++k) {
code = tTombBlockGet(pBlock, k, &record);
@ -507,11 +512,9 @@ static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_
return TSDB_CODE_SUCCESS;
}
bool newTable = false;
if (uid < record.uid) {
while ((*j) < numOfTables && pReader->status.uidList.tableUidList[*j] < record.uid) {
(*j) += 1;
newTable = true;
}
if ((*j) >= numOfTables) {
@ -520,6 +523,10 @@ static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_
}
uid = pReader->status.uidList.tableUidList[*j];
pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, uid, pReader->idStr);
if (pScanInfo->pFileDelData == NULL) {
pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData));
}
}
if (record.uid < uid) {
@ -528,16 +535,9 @@ static int32_t doCheckTombBlock(STombBlock* pBlock, STsdbReader* pReader, int32_
ASSERT(record.suid == pReader->info.suid && uid == record.uid);
if (newTable) {
(*pScanInfo) = getTableBlockScanInfo(pReader->status.pTableMap, uid, pReader->idStr);
if ((*pScanInfo)->pFileDelData == NULL) {
(*pScanInfo)->pFileDelData = taosArrayInit(4, sizeof(SDelData));
}
}
if (record.version <= pReader->info.verRange.maxVer) {
SDelData delData = {.version = record.version, .sKey = record.skey, .eKey = record.ekey};
taosArrayPush((*pScanInfo)->pFileDelData, &delData);
taosArrayPush(pScanInfo->pFileDelData, &delData);
}
}
@ -581,15 +581,15 @@ static int32_t doLoadTombDataFromTombBlk(const TTombBlkArray* pTombBlkArray, STs
return code;
}
uint64_t uid = pReader->status.uidList.tableUidList[j];
// uint64_t uid = pReader->status.uidList.tableUidList[j];
STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, uid, pReader->idStr);
if (pScanInfo->pFileDelData == NULL) {
pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData));
}
// STableBlockScanInfo* pScanInfo = getTableBlockScanInfo(pReader->status.pTableMap, uid, pReader->idStr);
// if (pScanInfo->pFileDelData == NULL) {
// pScanInfo->pFileDelData = taosArrayInit(4, sizeof(SDelData));
// }
ETombBlkCheckEnum ret = 0;
code = doCheckTombBlock(&block, pReader, numOfTables, &j, &pScanInfo, &ret);
code = doCheckTombBlock(&block, pReader, numOfTables, &j, &ret);
tTombBlockDestroy(&block);
if (code != TSDB_CODE_SUCCESS || ret == BLK_CHECK_QUIT) {

View File

@ -28,6 +28,7 @@ static int32_t tsdbOpenFileImpl(STsdbFD *pFD) {
const char *object_name = taosDirEntryBaseName((char *)path);
long s3_size = tsS3Enabled ? s3Size(object_name) : 0;
if (tsS3Enabled && !strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) {
#ifndef S3_BLOCK_CACHE
s3EvictCache(path, s3_size);
s3Get(object_name, path);
@ -38,6 +39,14 @@ static int32_t tsdbOpenFileImpl(STsdbFD *pFD) {
// taosMemoryFree(pFD);
goto _exit;
}
#else
pFD->s3File = 1;
pFD->pFD = (TdFilePtr)&pFD->s3File;
int32_t vid = 0;
sscanf(object_name, "v%df%dver%" PRId64 ".data", &vid, &pFD->fid, &pFD->cid);
pFD->objName = object_name;
// pFD->szFile = s3_size;
#endif
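// With S3_BLOCK_CACHE defined, the .data file is not pulled back from S3 on open; the descriptor
// is only marked as an S3 object and fid/cid are parsed from the object name, so later reads can
// be served block by block through the LRU block cache (tsdbCacheGetBlockS3) instead of
// downloading the whole file.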
} else {
code = TAOS_SYSTEM_ERROR(errsv);
// taosMemoryFree(pFD);
@ -72,9 +81,10 @@ _exit:
}
// =============== PAGE-WISE FILE ===============
int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) {
int32_t tsdbOpenFile(const char *path, STsdb *pTsdb, int32_t flag, STsdbFD **ppFD) {
int32_t code = 0;
STsdbFD *pFD = NULL;
int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
*ppFD = NULL;
@ -90,6 +100,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p
pFD->flag = flag;
pFD->szPage = szPage;
pFD->pgno = 0;
pFD->pTsdb = pTsdb;
*ppFD = pFD;
@ -101,7 +112,9 @@ void tsdbCloseFile(STsdbFD **ppFD) {
STsdbFD *pFD = *ppFD;
if (pFD) {
taosMemoryFree(pFD->pBuf);
taosCloseFile(&pFD->pFD);
if (!pFD->s3File) {
taosCloseFile(&pFD->pFD);
}
taosMemoryFree(pFD);
*ppFD = NULL;
}
@ -153,22 +166,41 @@ static int32_t tsdbReadFilePage(STsdbFD *pFD, int64_t pgno) {
}
}
// seek
int64_t offset = PAGE_OFFSET(pgno, pFD->szPage);
int64_t n = taosLSeekFile(pFD->pFD, offset, SEEK_SET);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
}
// read
n = taosReadFile(pFD->pFD, pFD->pBuf, pFD->szPage);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
} else if (n < pFD->szPage) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _exit;
if (pFD->s3File) {
LRUHandle *handle = NULL;
pFD->blkno = (pgno + tsS3BlockSize - 1) / tsS3BlockSize;
int32_t code = tsdbCacheGetBlockS3(pFD->pTsdb->bCache, pFD, &handle);
if (code != TSDB_CODE_SUCCESS || handle == NULL) {
tsdbBCacheRelease(pFD->pTsdb->bCache, handle);
goto _exit;
}
uint8_t *pBlock = (uint8_t *)taosLRUCacheValue(pFD->pTsdb->bCache, handle);
int64_t blk_offset = (pFD->blkno - 1) * tsS3BlockSize * pFD->szPage;
memcpy(pFD->pBuf, pBlock + (offset - blk_offset), pFD->szPage);
tsdbBCacheRelease(pFD->pTsdb->bCache, handle);
} else {
// seek
int64_t n = taosLSeekFile(pFD->pFD, offset, SEEK_SET);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
}
// read
n = taosReadFile(pFD->pFD, pFD->pBuf, pFD->szPage);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
} else if (n < pFD->szPage) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _exit;
}
}
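// Page-to-block mapping example (hypothetical sizes): with tsS3BlockSize = 256 pages and
// szPage = 4096 bytes, pgno = 300 maps to blkno = (300 + 255) / 256 = 2; that block starts at
// blk_offset = (2 - 1) * 256 * 4096 bytes, and the requested page is copied from
// pBlock + (offset - blk_offset) within the cached S3 block above.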
// check
@ -293,7 +325,7 @@ int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pS
// head
flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
tsdbHeadFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fHead, fname);
code = tsdbOpenFile(fname, szPage, flag, &pWriter->pHeadFD);
code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pHeadFD);
if (code) goto _err;
code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE);
@ -307,7 +339,7 @@ int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pS
flag = TD_FILE_READ | TD_FILE_WRITE;
}
tsdbDataFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fData, fname);
code = tsdbOpenFile(fname, szPage, flag, &pWriter->pDataFD);
code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pDataFD);
if (code) goto _err;
if (pWriter->fData.size == 0) {
code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE);
@ -322,7 +354,7 @@ int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pS
flag = TD_FILE_READ | TD_FILE_WRITE;
}
tsdbSmaFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fSma, fname);
code = tsdbOpenFile(fname, szPage, flag, &pWriter->pSmaFD);
code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pSmaFD);
if (code) goto _err;
if (pWriter->fSma.size == 0) {
code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE);
@@ -335,7 +367,7 @@ int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pS
ASSERT(pWriter->fStt[pSet->nSttF - 1].size == 0);
flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
tsdbSttFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fStt[pSet->nSttF - 1], fname);
code = tsdbOpenFile(fname, szPage, flag, &pWriter->pSttFD);
code = tsdbOpenFile(fname, pTsdb, flag, &pWriter->pSttFD);
if (code) goto _err;
code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE);
if (code) goto _err;
@@ -907,23 +939,23 @@ int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pS
// head
tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname);
code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pHeadFD);
code = tsdbOpenFile(fname, pTsdb, TD_FILE_READ, &pReader->pHeadFD);
TSDB_CHECK_CODE(code, lino, _exit);
// data
tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname);
code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pDataFD);
code = tsdbOpenFile(fname, pTsdb, TD_FILE_READ, &pReader->pDataFD);
TSDB_CHECK_CODE(code, lino, _exit);
// sma
tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname);
code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pSmaFD);
code = tsdbOpenFile(fname, pTsdb, TD_FILE_READ, &pReader->pSmaFD);
TSDB_CHECK_CODE(code, lino, _exit);
// stt
for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname);
code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->aSttFD[iStt]);
code = tsdbOpenFile(fname, pTsdb, TD_FILE_READ, &pReader->aSttFD[iStt]);
TSDB_CHECK_CODE(code, lino, _exit);
}
@@ -1323,8 +1355,7 @@ int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb
pDelFWriter->fDel = *pFile;
tsdbDelFileName(pTsdb, pFile, fname);
code = tsdbOpenFile(fname, pTsdb->pVnode->config.tsdbPageSize, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE,
&pDelFWriter->pWriteH);
code = tsdbOpenFile(fname, pTsdb, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE, &pDelFWriter->pWriteH);
TSDB_CHECK_CODE(code, lino, _exit);
// update header
@@ -1498,7 +1529,7 @@ int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb
pDelFReader->fDel = *pFile;
tsdbDelFileName(pTsdb, pFile, fname);
code = tsdbOpenFile(fname, pTsdb->pVnode->config.tsdbPageSize, TD_FILE_READ, &pDelFReader->pReadH);
code = tsdbOpenFile(fname, pTsdb, TD_FILE_READ, &pDelFReader->pReadH);
if (code) {
taosMemoryFree(pDelFReader);
goto _exit;
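The S3 branch of tsdbReadFilePage above never touches the local file descriptor: it maps the requested page to an S3 block, fetches that block through the vnode's block cache (tsdbCacheGetBlockS3), and copies the page out of the cached block. Below is a minimal, self-contained sketch of the page-to-block arithmetic, assuming PAGE_OFFSET(pgno, szPage) equals (pgno - 1) * szPage and using made-up page and block sizes; it is an illustration, not the actual tsdb code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Map a 1-based page number to the 1-based S3 block that holds it and to the
 * byte offset of that page inside the cached block. Mirrors the arithmetic in
 * the S3 branch above; PAGE_OFFSET() is assumed to be (pgno - 1) * szPage. */
static void s3PageToBlock(int64_t pgno, int64_t szPage, int64_t blockPages, int64_t *blkno, int64_t *offInBlk) {
  int64_t fileOffset = (pgno - 1) * szPage;                /* assumed PAGE_OFFSET(pgno, szPage) */
  *blkno = (pgno + blockPages - 1) / blockPages;           /* 1-based block number, as in pFD->blkno */
  int64_t blkOffset = (*blkno - 1) * blockPages * szPage;  /* file offset where the block starts */
  *offInBlk = fileOffset - blkOffset;                      /* source offset of the memcpy above */
}

int main(void) {
  int64_t blkno = 0, off = 0;
  s3PageToBlock(10, 4096, 8, &blkno, &off); /* page 10, 4 KiB pages, 8 pages per S3 block */
  printf("page 10 -> block %" PRId64 ", offset %" PRId64 " inside the block\n", blkno, off);
  return 0;
}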

View File

@@ -114,7 +114,7 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile
TSDB_CHECK_CODE(code, lino, _exit);
char *object_name = taosDirEntryBaseName(fname);
code = s3PutObjectFromFile(from->fname, object_name);
code = s3PutObjectFromFile2(from->fname, object_name);
TSDB_CHECK_CODE(code, lino, _exit);
taosCloseFile(&fdFrom);

View File

@@ -46,12 +46,12 @@ int32_t tsdbSttFileReaderOpen(const char *fname, const SSttFileReaderConfig *con
// open file
if (fname) {
code = tsdbOpenFile(fname, config->szPage, TD_FILE_READ, &reader[0]->fd);
code = tsdbOpenFile(fname, config->tsdb, TD_FILE_READ, &reader[0]->fd);
TSDB_CHECK_CODE(code, lino, _exit);
} else {
char fname1[TSDB_FILENAME_LEN];
tsdbTFileName(config->tsdb, config->file, fname1);
code = tsdbOpenFile(fname1, config->szPage, TD_FILE_READ, &reader[0]->fd);
code = tsdbOpenFile(fname1, config->tsdb, TD_FILE_READ, &reader[0]->fd);
TSDB_CHECK_CODE(code, lino, _exit);
}
@@ -705,7 +705,7 @@ static int32_t tsdbSttFWriterDoOpen(SSttFileWriter *writer) {
char fname[TSDB_FILENAME_LEN];
tsdbTFileName(writer->config->tsdb, writer->file, fname);
code = tsdbOpenFile(fname, writer->config->szPage, flag, &writer->fd);
code = tsdbOpenFile(fname, writer->config->tsdb, flag, &writer->fd);
TSDB_CHECK_CODE(code, lino, _exit);
uint8_t hdr[TSDB_FHDR_SIZE] = {0};
@@ -984,4 +984,4 @@ _exit:
return code;
}
bool tsdbSttFileWriterIsOpened(SSttFileWriter *writer) { return writer->ctx->opened; }
bool tsdbSttFileWriterIsOpened(SSttFileWriter *writer) { return writer->ctx->opened; }

View File

@@ -87,7 +87,7 @@ static int32_t tsdbUpgradeHead(STsdb *tsdb, SDFileSet *pDFileSet, SDataFReader *
char fname[TSDB_FILENAME_LEN];
tsdbTFileName(tsdb, &file, fname);
code = tsdbOpenFile(fname, ctx->szPage, TD_FILE_READ | TD_FILE_WRITE, &ctx->fd);
code = tsdbOpenFile(fname, tsdb, TD_FILE_READ | TD_FILE_WRITE, &ctx->fd);
TSDB_CHECK_CODE(code, lino, _exit);
// convert
@@ -257,7 +257,7 @@ static int32_t tsdbUpgradeSttFile(STsdb *tsdb, SDFileSet *pDFileSet, SDataFReade
code = tsdbTFileObjInit(tsdb, &file, &fobj);
TSDB_CHECK_CODE(code, lino, _exit1);
code = tsdbOpenFile(fobj->fname, ctx->szPage, TD_FILE_READ | TD_FILE_WRITE, &ctx->fd);
code = tsdbOpenFile(fobj->fname, tsdb, TD_FILE_READ | TD_FILE_WRITE, &ctx->fd);
TSDB_CHECK_CODE(code, lino, _exit1);
for (int32_t iSttBlk = 0; iSttBlk < taosArrayGetSize(aSttBlk); iSttBlk++) {
@@ -408,8 +408,7 @@ static int32_t tsdbUpgradeOpenTombFile(STsdb *tsdb, STFileSet *fset, STsdbFD **f
}
char fname[TSDB_FILENAME_LEN] = {0};
code = tsdbOpenFile(fobj[0]->fname, tsdb->pVnode->config.tsdbPageSize,
TD_FILE_READ | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_CREATE, fd);
code = tsdbOpenFile(fobj[0]->fname, tsdb, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_CREATE, fd);
TSDB_CHECK_CODE(code, lino, _exit);
uint8_t hdr[TSDB_FHDR_SIZE] = {0};
@@ -633,4 +632,4 @@ int32_t tsdbCheckAndUpgradeFileSystem(STsdb *tsdb, int8_t rollback) {
taosRemoveFile(fname);
return 0;
}
}

View File

@@ -59,17 +59,19 @@ int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
cos_request_options_t *options = NULL;
cos_string_t bucket, object, file;
cos_table_t *resp_headers;
int traffic_limit = 0;
// int traffic_limit = 0;
cos_pool_create(&p, NULL);
options = cos_request_options_create(p);
s3InitRequestOptions(options, is_cname);
cos_table_t *headers = NULL;
/*
if (traffic_limit) {
// The rate limit must be in the range 819200 - 838860800, i.e. 100 KB/s - 100 MB/s; values outside this range cause a 400 error
headers = cos_table_make(p, 1);
cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
}
*/
cos_str_set(&bucket, tsS3BucketName);
cos_str_set(&file, file_str);
cos_str_set(&object, object_str);
@@ -85,6 +87,48 @@ int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
return code;
}
int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) {
int32_t code = 0;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket, object, file;
cos_table_t *resp_headers;
int traffic_limit = 0;
cos_table_t *headers = NULL;
cos_resumable_clt_params_t *clt_params = NULL;
cos_pool_create(&p, NULL);
options = cos_request_options_create(p);
s3InitRequestOptions(options, is_cname);
headers = cos_table_make(p, 0);
cos_str_set(&bucket, tsS3BucketName);
cos_str_set(&file, file_str);
cos_str_set(&object, object_str);
// upload
clt_params = cos_create_resumable_clt_params_content(p, 1024 * 1024, 8, COS_FALSE, NULL);
s = cos_resumable_upload_file(options, &bucket, &object, &file, headers, NULL, clt_params, NULL, &resp_headers, NULL);
if (!cos_status_is_ok(s)) {
vError("s3: %s", s->error_msg);
vError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
code = terrno;
cos_pool_destroy(p);
return code;
}
log_status(s);
// read the status code before destroying the pool that owns s
if (s->code != 200) {
code = s->code;
}
cos_pool_destroy(p);
return code;
}
void s3DeleteObjectsByPrefix(const char *prefix_str) {
cos_pool_t *p = NULL;
cos_request_options_t *options = NULL;
@@ -217,6 +261,77 @@ bool s3Get(const char *object_name, const char *path) {
return ret;
}
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t block_size, uint8_t **ppBlock) {
int32_t code = 0;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket;
cos_string_t object;
cos_table_t *resp_headers;
cos_table_t *headers = NULL;
cos_buf_t *content = NULL;
// cos_string_t file;
// int traffic_limit = 0;
char range_buf[64];
// create the memory pool
cos_pool_create(&p, NULL);
// initialize the request options
options = cos_request_options_create(p);
// init_test_request_options(options, is_cname);
s3InitRequestOptions(options, is_cname);
cos_str_set(&bucket, tsS3BucketName);
cos_str_set(&object, object_name);
cos_list_t download_buffer;
cos_list_init(&download_buffer);
/*
if (traffic_limit) {
// The rate limit must be in the range 819200 - 838860800, default unit bit/s, i.e. 800 Kb/s - 800 Mb/s; values outside this range cause a 400 error
headers = cos_table_make(p, 1);
cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
}
*/
headers = cos_table_create_if_null(options, headers, 1);
apr_snprintf(range_buf, sizeof(range_buf), "bytes=%" APR_INT64_T_FMT "-%" APR_INT64_T_FMT, offset,
offset + block_size - 1);
apr_table_add(headers, COS_RANGE, range_buf);
s = cos_get_object_to_buffer(options, &bucket, &object, headers, NULL, &download_buffer, &resp_headers);
if (!cos_status_is_ok(s)) {
vError("s3: %s", s->error_msg);
vError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
code = terrno;
return code;
}
log_status(s);
// print_headers(resp_headers);
int64_t len = 0;
int64_t size = 0;
int64_t pos = 0;
cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); }
// char *buf = cos_pcalloc(p, (apr_size_t)(len + 1));
char *buf = taosMemoryCalloc(1, (apr_size_t)(len));
// buf[len] = '\0';
cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) {
size = cos_buf_size(content);
memcpy(buf + pos, content->pos, (size_t)size);
pos += size;
}
// cos_warn_log("Download data=%s", buf);
// destroy the memory pool
cos_pool_destroy(p);
*ppBlock = buf;
return code;
}
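s3GetObjectBlock above issues a ranged GET (the Range header covers bytes offset through offset + block_size - 1) and hands back a buffer allocated with taosMemoryCalloc, so the caller owns the memory. Below is a hedged usage sketch; the caller name is hypothetical, and only s3GetObjectBlock and taosMemoryFree from the code above are assumed.

/* Hypothetical caller of s3GetObjectBlock; error handling is illustrative. */
static int32_t readOneBlock(const char *objName, int64_t offset, int64_t size) {
  uint8_t *pBlock = NULL;
  int32_t  code = s3GetObjectBlock(objName, offset, size, &pBlock);
  if (code != 0 || pBlock == NULL) {
    return code;  /* the ranged GET failed, nothing was allocated */
  }
  /* ... consume the size bytes starting at pBlock ... */
  taosMemoryFree(pBlock);  /* the buffer was allocated inside s3GetObjectBlock */
  return 0;
}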
typedef struct {
int64_t size;
int32_t atime;
@@ -333,10 +448,12 @@ long s3Size(const char *object_name) {
int32_t s3Init() { return 0; }
void s3CleanUp() {}
int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
int32_t s3PutObjectFromFile2(const char *file, const char *object) { return 0; }
void s3DeleteObjectsByPrefix(const char *prefix) {}
void s3DeleteObjects(const char *object_name[], int nobject) {}
bool s3Exists(const char *object_name) { return false; }
bool s3Get(const char *object_name, const char *path) { return false; }
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, uint8_t **ppBlock) { return 0; }
void s3EvictCache(const char *path, long object_size) {}
long s3Size(const char *object_name) { return 0; }

View File

@@ -388,7 +388,7 @@ int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
pLoad->cacheUsage = tsdbCacheGetUsage(pVnode);
pLoad->numOfCachedTables = tsdbCacheGetElems(pVnode);
pLoad->numOfTables = metaGetTbNum(pVnode->pMeta);
pLoad->numOfTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta);
pLoad->numOfTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta, 1);
pLoad->totalStorage = (int64_t)3 * 1073741824;
pLoad->compStorage = (int64_t)2 * 1073741824;
pLoad->pointsWritten = 100;
@@ -400,6 +400,15 @@ int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
return 0;
}
int32_t vnodeGetLoadLite(SVnode *pVnode, SVnodeLoadLite *pLoad) {
SSyncState syncState = syncGetState(pVnode->sync);
if (syncState.state == TAOS_SYNC_STATE_LEADER) {
pLoad->vgId = TD_VID(pVnode);
pLoad->nTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta, 1);
return 0;
}
return -1;
}
/**
* @brief Reset the statistics value by monitor interval
*
@@ -544,8 +553,8 @@ int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
return TSDB_CODE_SUCCESS;
}
static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 1);
int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0);
if (pSW) {
*num = pSW->nCols;
tDeleteSchemaWrapper(pSW);
@@ -634,10 +643,8 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
tb_uid_t suid = *(tb_uid_t *)taosArrayGet(suidList, i);
int64_t ctbNum = 0;
metaGetStbStats(pVnode, suid, &ctbNum);
int numOfCols = 0;
vnodeGetStbColumnNum(pVnode, suid, &numOfCols);
int32_t numOfCols = 0;
metaGetStbStats(pVnode, suid, &ctbNum, &numOfCols);
*num += ctbNum * (numOfCols - 1);
}
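The rewritten loop above takes the child-table count and the column count for each super table from a single metaGetStbStats call and accumulates ctbNum * (numOfCols - 1), so every non-timestamp column of every child table counts as one time series. A self-contained sketch of that accumulation with made-up numbers:

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One entry per super table: number of child tables and number of columns
 * (including the timestamp column). Values are illustrative only. */
typedef struct {
  int64_t ctbNum;
  int32_t numOfCols;
} SStbStats;

int main(void) {
  SStbStats stbs[] = {{.ctbNum = 1000, .numOfCols = 4}, {.ctbNum = 50, .numOfCols = 11}};
  int64_t   num = 0;
  for (size_t i = 0; i < sizeof(stbs) / sizeof(stbs[0]); i++) {
    /* the first column is the timestamp, so it is excluded from the count */
    num += stbs[i].ctbNum * (stbs[i].numOfCols - 1);
  }
  printf("numOfTimeSeries = %" PRId64 "\n", num); /* 1000*3 + 50*10 = 3500 */
  return 0;
}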

View File

@@ -34,6 +34,8 @@ struct SVSnapReader {
STqSnapReader *pTqSnapReader;
int8_t tqOffsetDone;
STqOffsetReader *pTqOffsetReader;
int8_t tqCheckInfoDone;
STqCheckInfoReader *pTqCheckInfoReader;
// stream
int8_t streamTaskDone;
SStreamTaskReader *pStreamTaskReader;
@@ -81,6 +83,18 @@ void vnodeSnapReaderClose(SVSnapReader *pReader) {
metaSnapReaderClose(&pReader->pMetaReader);
}
if (pReader->pTqSnapReader) {
tqSnapReaderClose(&pReader->pTqSnapReader);
}
if (pReader->pTqOffsetReader) {
tqOffsetReaderClose(&pReader->pTqOffsetReader);
}
if (pReader->pTqCheckInfoReader) {
tqCheckInfoReaderClose(&pReader->pTqCheckInfoReader);
}
taosMemoryFree(pReader);
}
@@ -181,6 +195,7 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
}
// TQ ================
vInfo("vgId:%d tq transform start", vgId);
if (!pReader->tqHandleDone) {
if (pReader->pTqSnapReader == NULL) {
code = tqSnapReaderOpen(pReader->pVnode->pTq, pReader->sver, pReader->ever, &pReader->pTqSnapReader);
@@ -200,6 +215,25 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
}
}
}
if (!pReader->tqCheckInfoDone) {
if (pReader->pTqCheckInfoReader == NULL) {
code = tqCheckInfoReaderOpen(pReader->pVnode->pTq, pReader->sver, pReader->ever, &pReader->pTqCheckInfoReader);
if (code < 0) goto _err;
}
code = tqCheckInfoRead(pReader->pTqCheckInfoReader, ppData);
if (code) {
goto _err;
} else {
if (*ppData) {
goto _exit;
} else {
pReader->tqCheckInfoDone = 1;
code = tqCheckInfoReaderClose(&pReader->pTqCheckInfoReader);
if (code) goto _err;
}
}
}
if (!pReader->tqOffsetDone) {
if (pReader->pTqOffsetReader == NULL) {
code = tqOffsetReaderOpen(pReader->pVnode->pTq, pReader->sver, pReader->ever, &pReader->pTqOffsetReader);
@@ -334,6 +368,7 @@ struct SVSnapWriter {
// tq
STqSnapWriter *pTqSnapWriter;
STqOffsetWriter *pTqOffsetWriter;
STqCheckInfoWriter *pTqCheckInfoWriter;
// stream
SStreamTaskWriter *pStreamTaskWriter;
SStreamStateWriter *pStreamStateWriter;
@@ -411,6 +446,21 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
if (code) goto _exit;
}
if (pWriter->pTqSnapWriter) {
code = tqSnapWriterClose(&pWriter->pTqSnapWriter, rollback);
if (code) goto _exit;
}
if (pWriter->pTqCheckInfoWriter) {
code = tqCheckInfoWriterClose(&pWriter->pTqCheckInfoWriter, rollback);
if (code) goto _exit;
}
if (pWriter->pTqOffsetWriter) {
code = tqOffsetWriterClose(&pWriter->pTqOffsetWriter, rollback);
if (code) goto _exit;
}
if (pWriter->pStreamTaskWriter) {
code = streamTaskSnapWriterClose(pWriter->pStreamTaskWriter, rollback);
if (code) goto _exit;
@@ -519,8 +569,34 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
if (code) goto _err;
} break;
case SNAP_DATA_TQ_HANDLE: {
// tq handle
if (pWriter->pTqSnapWriter == NULL) {
code = tqSnapWriterOpen(pVnode->pTq, pWriter->sver, pWriter->ever, &pWriter->pTqSnapWriter);
if (code) goto _err;
}
code = tqSnapWrite(pWriter->pTqSnapWriter, pData, nData);
if (code) goto _err;
} break;
case SNAP_DATA_TQ_CHECKINFO: {
// tq checkinfo
if (pWriter->pTqCheckInfoWriter == NULL) {
code = tqCheckInfoWriterOpen(pVnode->pTq, pWriter->sver, pWriter->ever, &pWriter->pTqCheckInfoWriter);
if (code) goto _err;
}
code = tqCheckInfoWrite(pWriter->pTqCheckInfoWriter, pData, nData);
if (code) goto _err;
} break;
case SNAP_DATA_TQ_OFFSET: {
// tq offset
if (pWriter->pTqOffsetWriter == NULL) {
code = tqOffsetWriterOpen(pVnode->pTq, pWriter->sver, pWriter->ever, &pWriter->pTqOffsetWriter);
if (code) goto _err;
}
code = tqOffsetSnapWrite(pWriter->pTqOffsetWriter, pData, nData);
if (code) goto _err;
} break;
case SNAP_DATA_STREAM_TASK:
case SNAP_DATA_STREAM_TASK_CHECKPOINT: {

View File

@@ -975,6 +975,7 @@ void ctgFreeQNode(SCtgQNode* node);
void ctgClearHandle(SCatalog* pCtg);
void ctgFreeTbCacheImpl(SCtgTbCache* pCache, bool lock);
int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName);
int32_t ctgRemoveCacheUser(SCatalog* pCtg, const char* user);
int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SVgroupInfo* pVgroup,
bool* exists);
SName* ctgGetFetchName(SArray* pNames, SCtgFetch* pFetch);

View File

@@ -2243,11 +2243,15 @@ int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) {
SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user));
if (NULL == pUser) {
if (msg->userAuth.dropped == 1) {
goto _return;
}
SCtgUserAuth userAuth = {0};
memcpy(&userAuth.userAuth, &msg->userAuth, sizeof(msg->userAuth));
userAuth.userCacheSize = ctgGetUserCacheSize(&userAuth.userAuth);
if (taosHashPut(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user), &userAuth, sizeof(userAuth))) {
ctgError("taosHashPut user %s to cache failed", msg->userAuth.user);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
@@ -2258,6 +2262,11 @@ int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) {
CTG_CACHE_NUM_INC(CTG_CI_USER, 1);
return TSDB_CODE_SUCCESS;
} else if (msg->userAuth.dropped == 1) {
if (ctgRemoveCacheUser(pCtg, msg->userAuth.user) == 0) {
CTG_CACHE_NUM_DEC(CTG_CI_USER, 1);
}
goto _return;
}
CTG_LOCK(CTG_WRITE, &pUser->lock);

View File

@@ -311,6 +311,22 @@ void ctgFreeHandleImpl(SCatalog* pCtg) {
taosMemoryFree(pCtg);
}
int32_t ctgRemoveCacheUser(SCatalog* pCtg, const char* user) {
if (!pCtg || !user) {
return -1;
}
SCtgUserAuth* pUser = (SCtgUserAuth*)taosHashGet(pCtg->userCache, user, strlen(user));
if (pUser) {
ctgFreeSCtgUserAuth(pUser);
if (taosHashRemove(pCtg->userCache, user, strlen(user)) == 0) {
return 0; // user found and removed
}
}
return -1;
}
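ctgRemoveCacheUser returns 0 only when the user entry was found, freed, and removed from userCache, and -1 otherwise. The dropped-user branch added to ctgOpUpdateUser earlier in this diff keys the CTG_CI_USER counter off that return value; the short sketch below restates that call pattern rather than adding new behavior.

/* Sketch of the dropped-user path in ctgOpUpdateUser; pCtg and msg come from
 * the surrounding operation, CTG_CACHE_NUM_DEC is the existing counter macro. */
if (msg->userAuth.dropped == 1) {
  if (ctgRemoveCacheUser(pCtg, msg->userAuth.user) == 0) {
    CTG_CACHE_NUM_DEC(CTG_CI_USER, 1); /* decrement only when an entry was actually removed */
  }
  goto _return;
}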
void ctgFreeHandle(SCatalog* pCtg) {
if (NULL == pCtg) {
return;

View File

@@ -104,6 +104,7 @@ extern "C" {
#define EXPLAIN_VGROUP_SLOT_FORMAT "vgroup_slot=%d,%d"
#define EXPLAIN_UID_SLOT_FORMAT "uid_slot=%d,%d"
#define EXPLAIN_SRC_SCAN_FORMAT "src_scan=%d,%d"
#define EXPLAIN_PLAN_BLOCKING "blocking=%d"
#define COMMAND_RESET_LOG "resetLog"
#define COMMAND_SCHEDULE_POLICY "schedulePolicy"

View File

@@ -88,8 +88,9 @@ static int32_t buildDescResultDataBlock(SSDataBlock** pOutput) {
return code;
}
static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) {
blockDataEnsureCapacity(pBlock, numOfRows);
static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta, int8_t biMode) {
int32_t blockCap = (biMode != 0) ? numOfRows + 1 : numOfRows;
blockDataEnsureCapacity(pBlock, blockCap);
pBlock->info.rows = 0;
// field
@@ -115,6 +116,17 @@ static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock,
colDataSetVal(pCol4, pBlock->info.rows, buf, false);
++(pBlock->info.rows);
}
if (pMeta->tableType == TSDB_SUPER_TABLE && biMode != 0) {
STR_TO_VARSTR(buf, "tbname");
colDataSetVal(pCol1, pBlock->info.rows, buf, false);
STR_TO_VARSTR(buf, "VARCHAR");
colDataSetVal(pCol2, pBlock->info.rows, buf, false);
int32_t bytes = TSDB_TABLE_NAME_LEN - 1;
colDataSetVal(pCol3, pBlock->info.rows, (const char*)&bytes, false);
STR_TO_VARSTR(buf, "TAG");
colDataSetVal(pCol4, pBlock->info.rows, buf, false);
++(pBlock->info.rows);
}
if (pBlock->info.rows <= 0) {
qError("no permission to view any columns");
return TSDB_CODE_PAR_PERMISSION_DENIED;
@@ -122,14 +134,14 @@ static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock,
return TSDB_CODE_SUCCESS;
}
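In BI mode, setDescResultIntoDataBlock above reserves one extra row and, for super tables only, appends a pseudo column that exposes tbname as a VARCHAR tag. A small illustrative sketch of the row that ends up in the DESCRIBE output follows; the printed layout and the assumed value of TSDB_TABLE_NAME_LEN are illustrative, not taken from the code.

#include <stdio.h>

/* Illustrative only: the extra DESCRIBE row appended for super tables when
 * BI mode is enabled, as built by setDescResultIntoDataBlock above. */
typedef struct {
  const char *field;
  const char *type;
  int         bytes;
  const char *note;
} SDescRow;

int main(void) {
  int      tableNameLen = 193; /* assumed value of TSDB_TABLE_NAME_LEN */
  SDescRow biRow = {"tbname", "VARCHAR", tableNameLen - 1, "TAG"};
  printf("%-10s %-8s %4d %s\n", biRow.field, biRow.type, biRow.bytes, biRow.note);
  return 0;
}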
static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) {
static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp, int8_t biMode) {
SDescribeStmt* pDesc = (SDescribeStmt*)pStmt;
int32_t numOfRows = TABLE_TOTAL_COL_NUM(pDesc->pMeta);
SSDataBlock* pBlock = NULL;
int32_t code = buildDescResultDataBlock(&pBlock);
if (TSDB_CODE_SUCCESS == code) {
code = setDescResultIntoDataBlock(sysInfoUser, pBlock, numOfRows, pDesc->pMeta);
code = setDescResultIntoDataBlock(sysInfoUser, pBlock, numOfRows, pDesc->pMeta, biMode);
}
if (TSDB_CODE_SUCCESS == code) {
code = buildRetrieveTableRsp(pBlock, DESCRIBE_RESULT_COLS, pRsp);
@@ -926,10 +938,10 @@ static int32_t execSelectWithoutFrom(SSelectStmt* pSelect, SRetrieveTableRsp** p
return code;
}
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) {
int32_t qExecCommand(int64_t* pConnId, bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp, int8_t biMode) {
switch (nodeType(pStmt)) {
case QUERY_NODE_DESCRIBE_STMT:
return execDescribe(sysInfoUser, pStmt, pRsp);
return execDescribe(sysInfoUser, pStmt, pRsp, biMode);
case QUERY_NODE_RESET_QUERY_CACHE_STMT:
return execResetQueryCache();
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:

View File

@@ -629,6 +629,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->outputRowSize);
EXPLAIN_ROW_APPEND_LIMIT(pAggNode->node.pLimit);
EXPLAIN_ROW_APPEND_SLIMIT(pAggNode->node.pSlimit);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_PLAN_BLOCKING, !pAggNode->node.forceCreateNonBlockingOptr);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));

View File

@@ -3928,7 +3928,7 @@ static void buildVnodeFilteredTbCount(SOperatorInfo* pOperator, STableCountScanO
pAPI->metaFn.getTableUidByName(pInfo->readHandle.vnode, pSupp->stbNameFilter, &uid);
int64_t numOfChildTables = 0;
pAPI->metaFn.getNumOfChildTables(pInfo->readHandle.vnode, uid, &numOfChildTables);
pAPI->metaFn.getNumOfChildTables(pInfo->readHandle.vnode, uid, &numOfChildTables, NULL);
fillTableCountScanDataBlock(pSupp, dbName, pSupp->stbNameFilter, numOfChildTables, pRes);
} else {
@@ -3979,7 +3979,7 @@ static void buildVnodeGroupedStbTableCount(STableCountScanOperatorInfo* pInfo, S
pRes->info.id.groupId = groupId;
int64_t ctbNum = 0;
int32_t code = pAPI->metaFn.getNumOfChildTables(pInfo->readHandle.vnode, stbUid, &ctbNum);
int32_t code = pAPI->metaFn.getNumOfChildTables(pInfo->readHandle.vnode, stbUid, &ctbNum, NULL);
fillTableCountScanDataBlock(pSupp, dbName, varDataVal(stbName), ctbNum, pRes);
}

View File

@@ -75,7 +75,7 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32
double v = 0;
GET_TYPED_DATA(v, double, pVar->nType, &pVar->d);
colDataSetVal(pDst, rowIndex, (char*)&v, isNull);
} else if (IS_SIGNED_NUMERIC_TYPE(pDst->info.type)) {
} else if (IS_SIGNED_NUMERIC_TYPE(pDst->info.type) || pDst->info.type == TSDB_DATA_TYPE_BOOL) {
int64_t v = 0;
GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i);
colDataSetVal(pDst, rowIndex, (char*)&v, isNull);
@@ -85,7 +85,10 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32
colDataSetVal(pDst, rowIndex, (char*)&v, isNull);
} else if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
colDataSetVal(pDst, rowIndex, (const char*)&currentKey, isNull);
} else { // varchar/nchar data
} else if (pDst->info.type == TSDB_DATA_TYPE_NCHAR || pDst->info.type == TSDB_DATA_TYPE_VARCHAR ||
pDst->info.type == TSDB_DATA_TYPE_VARBINARY) {
colDataSetVal(pDst, rowIndex, pVar->pz, isNull);
} else { // others data
colDataSetNULL(pDst, rowIndex);
}
}
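With the branch added above, a FILL(VALUE, ...) constant is materialized per column type: float types as double, signed integers and now BOOL as int64, the TIMESTAMP column with the current window key, varchar/nchar/varbinary with the user string, and anything else as NULL. A self-contained sketch of that dispatch follows; the enum and helper are made up and only mirror the branch structure above.

#include <stdio.h>

/* Hypothetical stand-ins for the column types handled above. */
typedef enum { T_FLOAT, T_INT, T_BOOL, T_TIMESTAMP, T_VARCHAR, T_NCHAR, T_VARBINARY, T_JSON } EColType;

/* Returns a label describing how a FILL(VALUE, ...) constant is materialized
 * for a column of the given type, mirroring doSetUserSpecifiedValue above. */
static const char *fillValueStrategy(EColType t) {
  if (t == T_FLOAT) return "as double";
  if (t == T_INT || t == T_BOOL) return "as int64"; /* BOOL now shares the signed-integer path */
  if (t == T_TIMESTAMP) return "current window key";
  if (t == T_VARCHAR || t == T_NCHAR || t == T_VARBINARY) return "user string as-is";
  return "NULL"; /* any remaining type falls back to NULL */
}

int main(void) {
  printf("bool      -> %s\n", fillValueStrategy(T_BOOL));
  printf("varbinary -> %s\n", fillValueStrategy(T_VARBINARY));
  printf("json      -> %s\n", fillValueStrategy(T_JSON));
  return 0;
}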

View File

@@ -52,6 +52,7 @@ extern "C" {
#define FUNC_MGT_INTERP_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23)
#define FUNC_MGT_GEOMETRY_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(24)
#define FUNC_MGT_FORBID_SYSTABLE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(25)
#define FUNC_MGT_SKIP_SCAN_CHECK_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(26)
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)
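FUNC_MGT_TEST_MASK above is a plain bit test, so a function carries a classification exactly when that classification's bit is set in its classification word (the _group_key definition later in this diff, for example, now ORs in FUNC_MGT_SKIP_SCAN_CHECK_FUNC). A tiny self-contained example, assuming each FUNC_MGT_FUNC_CLASSIFICATION_MASK(n) expands to a distinct bit; the mask values below are made up, not the real ones.

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the classification masks: one distinct bit per index. */
#define CLASSIFICATION_MASK(n)   (1ULL << (n))
#define AGG_FUNC                 CLASSIFICATION_MASK(0)
#define SELECT_FUNC              CLASSIFICATION_MASK(1)
#define SKIP_SCAN_CHECK_FUNC     CLASSIFICATION_MASK(26)

/* Same test as FUNC_MGT_TEST_MASK above. */
#define TEST_MASK(val, mask) (((val) & (mask)) != 0)

int main(void) {
  /* e.g. a function classified as AGG | SELECT | SKIP_SCAN_CHECK */
  uint64_t classification = AGG_FUNC | SELECT_FUNC | SKIP_SCAN_CHECK_FUNC;
  printf("skip-scan-check? %s\n", TEST_MASK(classification, SKIP_SCAN_CHECK_FUNC) ? "yes" : "no");
  printf("bit 5 set?       %s\n", TEST_MASK(classification, CLASSIFICATION_MASK(5)) ? "yes" : "no");
  return 0;
}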

View File

@@ -3446,7 +3446,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_group_key",
.type = FUNCTION_TYPE_GROUP_KEY,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_KEEP_ORDER_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_KEEP_ORDER_FUNC | FUNC_MGT_SKIP_SCAN_CHECK_FUNC,
.translateFunc = translateGroupKey,
.getEnvFunc = getGroupKeyFuncEnv,
.initFunc = functionSetup,

View File

@@ -346,6 +346,10 @@ bool fmIsConstantResFunc(SFunctionNode* pFunc) {
return true;
}
bool fmIsSkipScanCheckFunc(int32_t funcId) {
return isSpecificClassifyFunc(funcId, FUNC_MGT_SKIP_SCAN_CHECK_FUNC);
}
void getLastCacheDataType(SDataType* pType) {
pType->bytes = getFirstLastInfoSize(pType->bytes) + VARSTR_HEADER_SIZE;
pType->type = TSDB_DATA_TYPE_BINARY;

View File

@@ -543,6 +543,7 @@ static int32_t logicPartitionCopy(const SPartitionLogicNode* pSrc, SPartitionLog
CLONE_NODE_LIST_FIELD(pPartitionKeys);
CLONE_NODE_LIST_FIELD(pTags);
CLONE_NODE_FIELD(pSubtable);
CLONE_NODE_LIST_FIELD(pAggFuncs);
COPY_SCALAR_FIELD(needBlockOutputTsOrder);
COPY_SCALAR_FIELD(pkTsColId);
COPY_SCALAR_FIELD(pkTsColTbId);

Some files were not shown because too many files have changed in this diff.