Merge branch '3.0' of github.com:taosdata/TDengine into feat/TS-4243-3.0

commit 87c8f1aa82
@ -91,6 +91,7 @@ TDengine 会为 WAL 文件自动创建索引以支持快速随机访问,并提

The TMQ subscription APIs and data structures for each language are listed below (see the connector chapters for detailed interface descriptions; note that the consumer structure is not thread-safe: while a consumer is being used in one thread, do not close it from another thread):

<Tabs defaultValue="java" groupId="lang">

<TabItem value="c" label="C">

```c
@ -101,17 +102,17 @@ TDengine 会为 WAL 文件自动创建索引以支持快速随机访问,并提

typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));

typedef enum tmq_conf_res_t {
  TMQ_CONF_UNKNOWN = -2,
  TMQ_CONF_INVALID = -1,
  TMQ_CONF_OK = 0,
} tmq_conf_res_t;

typedef struct tmq_topic_assignment {
  int32_t vgId;
  int64_t currentOffset;
  int64_t begin;
  int64_t end;
} tmq_topic_assignment;

DLL_EXPORT tmq_conf_t    *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
@ -146,7 +147,6 @@ TDengine 会为 WAL 文件自动创建索引以支持快速随机访问,并提

DLL_EXPORT int64_t     tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);
```

</TabItem>

<TabItem value="java" label="Java">
@ -250,281 +250,280 @@ TDengine 会为 WAL 文件自动创建索引以支持快速随机访问,并提

+ futures::Stream<
      Item = Result<(Self::Offset, MessageSet<Self::Meta, Self::Data>), Self::Error>,
  >,
>,
>;

async fn commit(&self, offset: Self::Offset) -> Result<(), Self::Error>;

async fn unsubscribe(self);
```

See <https://docs.rs/taos> for the detailed API documentation.

</TabItem>

<TabItem label="Node.JS" value="Node.JS">

```js
function TMQConsumer(config)

function subscribe(topic)

function consume(timeout)

function subscription()

function unsubscribe()

function commit(msg)

function close()
```

</TabItem>

<TabItem value="C#" label="C#">

```csharp
class ConsumerBuilder<TValue>

ConsumerBuilder(IEnumerable<KeyValuePair<string, string>> config)

public IConsumer<TValue> Build()

void Subscribe(IEnumerable<string> topics)

void Subscribe(string topic)

ConsumeResult<TValue> Consume(int millisecondsTimeout)

List<string> Subscription()

void Unsubscribe()

List<TopicPartitionOffset> Commit()

void Close()
```

</TabItem>
</Tabs>

# Data Subscription Examples

## Write data

First create a database, a supertable, and several subtables; then data can be written, for example:

```sql
DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb WAL_RETENTION_PERIOD 3600;
CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
```

## Create a topic

Create a topic with SQL:

```sql
CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
```
## Create a consumer

The consumer is configured as follows in each language:

<Tabs defaultValue="java" groupId="lang">
<TabItem value="c" label="C">

```c
/* Set parameters such as the consumer group (group.id), auto commit (enable.auto.commit),
   auto commit interval (auto.commit.interval.ms), user name (td.connect.user), and password (td.connect.pass) as needed */
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "latest");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);

tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
tmq_conf_destroy(conf);
```

</TabItem>
<TabItem value="java" label="Java">

Java programs can additionally use the following configuration options:

| Parameter                     | Type   | Description                                                                                                                                                          |
| ----------------------------- | ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `td.connect.type`             | string | Connection type: "jni" means native connection, "ws" means WebSocket connection; the default is "jni"                                                                 |
| `bootstrap.servers`           | string | Connection address, e.g. `localhost:6030`                                                                                                                              |
| `value.deserializer`          | string | Value deserializer; the class should implement the `com.taosdata.jdbc.tmq.Deserializer` interface or extend the `com.taosdata.jdbc.tmq.ReferenceDeserializer` class    |
| `value.deserializer.encoding` | string | Character set used when deserializing strings                                                                                                                          |

Note: `bootstrap.servers` is used here instead of `td.connect.ip` and `td.connect.port` to provide an interface consistent with Kafka.

```java
Properties properties = new Properties();
properties.setProperty("enable.auto.commit", "true");
properties.setProperty("auto.commit.interval.ms", "1000");
properties.setProperty("group.id", "cgrpName");
properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
properties.setProperty("td.connect.user", "root");
properties.setProperty("td.connect.pass", "taosdata");
properties.setProperty("auto.offset.reset", "latest");
properties.setProperty("msg.with.table.name", "true");
properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");

TaosConsumer<Meters> consumer = new TaosConsumer<>(properties);

/* value deserializer definition. */
import com.taosdata.jdbc.tmq.ReferenceDeserializer;

public class MetersDeserializer extends ReferenceDeserializer<Meters> {
}
```

</TabItem>

<TabItem label="Go" value="Go">

```go
conf := &tmq.ConfigMap{
    "group.id":            "test",
    "auto.offset.reset":   "latest",
    "td.connect.ip":       "127.0.0.1",
    "td.connect.user":     "root",
    "td.connect.pass":     "taosdata",
    "td.connect.port":     "6030",
    "client.id":           "test_tmq_c",
    "enable.auto.commit":  "false",
    "msg.with.table.name": "true",
}
consumer, err := NewConsumer(conf)
```

</TabItem>

<TabItem label="Rust" value="Rust">

```rust
let mut dsn: Dsn = "taos://".parse()?;
dsn.set("group.id", "group1");
dsn.set("client.id", "test");
dsn.set("auto.offset.reset", "latest");

let tmq = TmqBuilder::from_dsn(dsn)?;

let mut consumer = tmq.build()?;
```

</TabItem>

<TabItem value="Python" label="Python">

In Python, import the `Consumer` class from the `taos` package and create a Consumer instance:

```python
from taos.tmq import Consumer

# Syntax: `consumer = Consumer(configs)`
#
# Example:
consumer = Consumer(
    {
        "group.id": "local",
        "client.id": "1",
        "enable.auto.commit": "true",
        "auto.commit.interval.ms": "1000",
        "td.connect.ip": "127.0.0.1",
        "td.connect.user": "root",
        "td.connect.pass": "taosdata",
        "auto.offset.reset": "latest",
        "msg.with.table.name": "true",
    }
)
```

</TabItem>

<TabItem label="Node.JS" value="Node.JS">

```js
// Set parameters such as the consumer group (group.id), auto commit (enable.auto.commit),
// auto commit interval (auto.commit.interval.ms), user name (td.connect.user), and password (td.connect.pass) as needed

let consumer = taos.consumer({
  'enable.auto.commit': 'true',
  'auto.commit.interval.ms': '1000',
  'group.id': 'tg2',
  'td.connect.user': 'root',
  'td.connect.pass': 'taosdata',
  'auto.offset.reset': 'latest',
  'msg.with.table.name': 'true',
  'td.connect.ip': '127.0.0.1',
  'td.connect.port': '6030'
});
```

</TabItem>

<TabItem value="C#" label="C#">

```csharp
var cfg = new Dictionary<string, string>()
{
    { "group.id", "group1" },
    { "auto.offset.reset", "latest" },
    { "td.connect.ip", "127.0.0.1" },
    { "td.connect.user", "root" },
    { "td.connect.pass", "taosdata" },
    { "td.connect.port", "6030" },
    { "client.id", "tmq_example" },
    { "enable.auto.commit", "true" },
    { "msg.with.table.name", "false" },
};
var consumer = new ConsumerBuilder<Dictionary<string, object>>(cfg).Build();
```

</TabItem>

</Tabs>

The configuration above includes the consumer group ID. If multiple consumers specify the same consumer group ID, they automatically form a consumer group and share the consumption progress.
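For example, the following minimal C sketch (an illustration only; it reuses the `tmq_conf_*`, `tmq_consumer_new`, and `tmq_consumer_close` calls shown above, and `cgrpName` is just a placeholder group ID) creates two consumers with the same `group.id`, so they form one consumer group and divide the vgroups of any topic they subscribe to between them:

```c
#include "taos.h"  // TDengine client header that declares the tmq_* API

// Build one consumer that joins the given consumer group.
static tmq_t *create_group_member(const char *group_id) {
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "group.id", group_id);  // same value => same consumer group
  tmq_conf_set(conf, "td.connect.user", "root");
  tmq_conf_set(conf, "td.connect.pass", "taosdata");
  tmq_conf_set(conf, "auto.offset.reset", "latest");
  tmq_t *tmq = tmq_consumer_new(conf, NULL, 0);
  tmq_conf_destroy(conf);
  return tmq;
}

int main() {
  // Two consumers with the same group.id automatically form one consumer group:
  // the vgroups of a subscribed topic are split between them, and the committed
  // consumption progress is shared by the group.
  tmq_t *c1 = create_group_member("cgrpName");
  tmq_t *c2 = create_group_member("cgrpName");

  // ... subscribe both consumers to the same topic and poll, as in the sections below ...

  tmq_consumer_close(c1);
  tmq_consumer_close(c2);
  return 0;
}
```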
## Subscribe to *topics*

A single consumer can subscribe to multiple topics at the same time.
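As a small illustration (a sketch only, reusing the same `tmq_list_*` and `tmq_subscribe` calls shown in the C tab below; `topicName1` and `topicName2` are placeholder topic names), subscribing to several topics just means appending each name to one topic list before calling `tmq_subscribe`; the per-language examples follow.

```c
// Subscribe one consumer to two topics by appending both names to the same list.
// topicName1 / topicName2 stand for topics created earlier with CREATE TOPIC.
tmq_list_t *topicList = tmq_list_new();
tmq_list_append(topicList, "topicName1");
tmq_list_append(topicList, "topicName2");
tmq_subscribe(tmq, topicList);  // tmq is the consumer created in the previous step
tmq_list_destroy(topicList);
```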
<Tabs defaultValue="java" groupId="lang">
<TabItem value="c" label="C">

```c
// Build the list of topics to subscribe to
tmq_list_t* topicList = tmq_list_new();
tmq_list_append(topicList, "topicName");
// Start the subscription
tmq_subscribe(tmq, topicList);
tmq_list_destroy(topicList);
```

</TabItem>
<TabItem value="java" label="Java">

```java
List<String> topics = new ArrayList<>();
topics.add("tmq_topic");
consumer.subscribe(topics);
```

</TabItem>
<TabItem value="Go" label="Go">

```go
err = consumer.Subscribe("example_tmq_topic", nil)
@ -533,26 +532,26 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
}
```

</TabItem>
<TabItem value="Rust" label="Rust">

```rust
consumer.subscribe(["tmq_meters"]).await?;
```

</TabItem>

<TabItem value="Python" label="Python">

```python
consumer.subscribe(['topic1', 'topic2'])
```

</TabItem>

<TabItem label="Node.JS" value="Node.JS">

```js
// Build the list of topics to subscribe to
let topics = ['topic_test']

@ -560,11 +559,11 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
consumer.subscribe(topics);
```

</TabItem>

<TabItem value="C#" label="C#">

```csharp
// Build the list of topics to subscribe to
List<String> topics = new List<string>();
topics.Add("tmq_topic");

@ -572,18 +571,19 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
consumer.Subscribe(topics);
```

</TabItem>

</Tabs>

## Consume messages

The following code shows how TMQ messages are consumed in each language.

<Tabs defaultValue="java" groupId="lang">
<TabItem value="c" label="C">

```c
// Consume data
while (running) {
  TAOS_RES* msg = tmq_consumer_poll(tmq, timeOut);
@ -591,11 +591,11 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
}
```

This is a **while** loop: each call to tmq_consumer_poll() fetches one message, and that message is exactly the same as a result set returned by an ordinary query, so the same parsing APIs can be used to parse its content.
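As an illustration, here is a hedged sketch of handling one polled message with the regular result-set API; it assumes the standard TDengine client functions `taos_fetch_row`, `taos_num_fields`, `taos_fetch_fields`, `taos_print_row`, and `taos_free_result`, which are not part of the TMQ listing above.

```c
// Sketch: parse one message returned by tmq_consumer_poll() exactly like an
// ordinary query result set (printf requires <stdio.h> in a full program).
TAOS_RES *msg = tmq_consumer_poll(tmq, timeOut);
if (msg != NULL) {
  int32_t     numOfFields = taos_num_fields(msg);      // column count of the message
  TAOS_FIELD *fields      = taos_fetch_fields(msg);    // column metadata
  TAOS_ROW    row         = NULL;
  char        buf[1024];

  while ((row = taos_fetch_row(msg)) != NULL) {
    taos_print_row(buf, row, fields, numOfFields);     // format one row as text
    printf("row: %s\n", buf);
  }
  taos_free_result(msg);  // release the message like any other result set
}
```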
</TabItem>
<TabItem value="java" label="Java">

```java
while(running){
  ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));

@ -604,10 +604,10 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
  }
}
```

</TabItem>

<TabItem value="Go" label="Go">

```go
for {

@ -625,9 +625,9 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
}
```

</TabItem>

<TabItem value="Rust" label="Rust">

```rust
{

@ -660,8 +660,8 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
}
```

</TabItem>
<TabItem value="Python" label="Python">

```python
while True:

@ -677,9 +677,9 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
    print(block.fetchall())
```

</TabItem>

<TabItem label="Node.JS" value="Node.JS">

```js
while(true){

@ -691,11 +691,11 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
}
```

</TabItem>

<TabItem value="C#" label="C#">

```csharp
// Consume data
while (true)
{

@ -708,18 +708,19 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
}
```

</TabItem>

</Tabs>

## End consumption

When consumption is finished, unsubscribe.

<Tabs defaultValue="java" groupId="lang">

<TabItem value="c" label="C">

```c
/* Unsubscribe */
tmq_unsubscribe(tmq);

@ -727,10 +728,10 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
tmq_consumer_close(tmq);
```

</TabItem>
<TabItem value="java" label="Java">

```java
/* Unsubscribe */
consumer.unsubscribe();

@ -738,11 +739,11 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
consumer.close();
```

</TabItem>

<TabItem value="Go" label="Go">

```go
/* Unsubscribe */
_ = consumer.Unsubscribe()

@ -750,38 +751,38 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
_ = consumer.Close()
```

</TabItem>

<TabItem value="Rust" label="Rust">

```rust
consumer.unsubscribe().await;
```

</TabItem>

<TabItem value="Python" label="Python">

```py
# Unsubscribe
consumer.unsubscribe()
# Close the consumer
consumer.close()
```

</TabItem>
<TabItem label="Node.JS" value="Node.JS">

```js
consumer.unsubscribe();
consumer.close();
```

</TabItem>

<TabItem value="C#" label="C#">

```csharp
// Unsubscribe
consumer.Unsubscribe();

@ -789,7 +790,7 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
consumer.Close();
```

</TabItem>

</Tabs>
@ -799,41 +800,41 @@ INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');

<Tabs defaultValue="java" groupId="lang">

<TabItem label="C" value="c">
<CDemo />
</TabItem>

<TabItem label="Java" value="java">
<Tabs defaultValue="native">
<TabItem value="native" label="Native connection">
<Java />
</TabItem>
<TabItem value="ws" label="WebSocket connection">
<JavaWS />
</TabItem>
</Tabs>
</TabItem>

<TabItem label="Go" value="Go">
<Go/>
</TabItem>

<TabItem label="Rust" value="Rust">
<Rust />
</TabItem>

<TabItem label="Python" value="Python">
<Python />
</TabItem>

<TabItem label="Node.JS" value="Node.JS">
<Node/>
</TabItem>

<TabItem label="C#" value="C#">
<CSharp/>
</TabItem>

</Tabs>

# Advanced subscription features
@ -54,6 +54,8 @@ typedef struct SSessionKey {
  uint64_t groupId;
} SSessionKey;

typedef int64_t COUNT_TYPE;

typedef struct SVersionRange {
  int64_t minVer;
  int64_t maxVer;
@ -247,6 +247,7 @@ typedef enum ENodeType {
  QUERY_NODE_EVENT_WINDOW,
  QUERY_NODE_HINT,
  QUERY_NODE_VIEW,
  QUERY_NODE_COUNT_WINDOW,

  // Statement nodes are used in parser and planner module.
  QUERY_NODE_SET_OPERATOR = 100,

@ -432,7 +433,9 @@ typedef enum ENodeType {
  QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
  QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
  QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL
  QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT,
  QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL,
} ENodeType;

typedef struct {
@ -3183,18 +3186,11 @@ typedef struct {

typedef struct {
  SMsgHead head;
  int64_t  leftForVer;
  int64_t  resetRelHalt;  // reset related stream task halt status
  int64_t  streamId;
  int32_t  taskId;
} SVDropStreamTaskReq;

typedef struct {
  SMsgHead head;
  int64_t  streamId;
  int32_t  taskId;
  int64_t  dataVer;
} SVStreamTaskVerUpdateReq;

typedef struct {
  int8_t reserved;
} SVDropStreamTaskRsp;

@ -3785,12 +3781,12 @@ typedef struct {
} SMqHbReq;

typedef struct {
  char   topic[TSDB_TOPIC_FNAME_LEN];
  int8_t noPrivilege;
} STopicPrivilege;

typedef struct {
  SArray* topicPrivileges;  // SArray<STopicPrivilege>
} SMqHbRsp;

typedef struct {
@ -3928,8 +3924,8 @@ int32_t tDeserializeSMqSeekReq(void* buf, int32_t bufLen, SMqSeekReq* pReq);
#define SUBMIT_REQ_COLUMN_DATA_FORMAT 0x2
#define SUBMIT_REQ_FROM_FILE          0x4

#define SOURCE_NULL  0
#define SOURCE_TAOSX 1

typedef struct {
  int32_t flags;

@ -3941,8 +3937,8 @@ typedef struct {
    SArray* aRowP;
    SArray* aCol;
  };
  int64_t ctimeMs;
  int8_t  source;
} SSubmitTbData;

typedef struct {
@ -299,74 +299,77 @@
#define TK_SESSION 280
#define TK_STATE_WINDOW 281
#define TK_EVENT_WINDOW 282
#define TK_SLIDING 283
#define TK_FILL 284
#define TK_VALUE 285
#define TK_VALUE_F 286
#define TK_NONE 287
#define TK_PREV 288
#define TK_NULL_F 289
#define TK_LINEAR 290
#define TK_NEXT 291
#define TK_HAVING 292
#define TK_RANGE 293
#define TK_EVERY 294
#define TK_ORDER 295
#define TK_SLIMIT 296
#define TK_SOFFSET 297
#define TK_LIMIT 298
#define TK_OFFSET 299
#define TK_ASC 300
#define TK_NULLS 301
#define TK_ABORT 302
#define TK_AFTER 303
#define TK_ATTACH 304
#define TK_BEFORE 305
#define TK_BEGIN 306
#define TK_BITAND 307
#define TK_BITNOT 308
#define TK_BITOR 309
#define TK_BLOCKS 310
#define TK_CHANGE 311
#define TK_COMMA 312
#define TK_CONCAT 313
#define TK_CONFLICT 314
#define TK_COPY 315
#define TK_DEFERRED 316
#define TK_DELIMITERS 317
#define TK_DETACH 318
#define TK_DIVIDE 319
#define TK_DOT 320
#define TK_EACH 321
#define TK_FAIL 322
#define TK_FILE 323
#define TK_FOR 324
#define TK_GLOB 325
#define TK_ID 326
#define TK_IMMEDIATE 327
#define TK_IMPORT 328
#define TK_INITIALLY 329
#define TK_INSTEAD 330
#define TK_ISNULL 331
#define TK_MODULES 332
#define TK_NK_BITNOT 333
#define TK_NK_SEMI 334
#define TK_NOTNULL 335
#define TK_OF 336
#define TK_PLUS 337
#define TK_PRIVILEGE 338
#define TK_RAISE 339
#define TK_RESTRICT 340
#define TK_ROW 341
#define TK_SEMI 342
#define TK_STAR 343
#define TK_STATEMENT 344
#define TK_STRICT 345
#define TK_STRING 346
#define TK_TIMES 347
#define TK_VALUES 348
#define TK_VARIABLE 349
#define TK_WAL 350
|
||||
#define TK_SLIDING 284
|
||||
#define TK_FILL 285
|
||||
#define TK_VALUE 286
|
||||
#define TK_VALUE_F 287
|
||||
#define TK_NONE 288
|
||||
#define TK_PREV 289
|
||||
#define TK_NULL_F 290
|
||||
#define TK_LINEAR 291
|
||||
#define TK_NEXT 292
|
||||
#define TK_HAVING 293
|
||||
#define TK_RANGE 294
|
||||
#define TK_EVERY 295
|
||||
#define TK_ORDER 296
|
||||
#define TK_SLIMIT 297
|
||||
#define TK_SOFFSET 298
|
||||
#define TK_LIMIT 299
|
||||
#define TK_OFFSET 300
|
||||
#define TK_ASC 301
|
||||
#define TK_NULLS 302
|
||||
#define TK_ABORT 303
|
||||
#define TK_AFTER 304
|
||||
#define TK_ATTACH 305
|
||||
#define TK_BEFORE 306
|
||||
#define TK_BEGIN 307
|
||||
#define TK_BITAND 308
|
||||
#define TK_BITNOT 309
|
||||
#define TK_BITOR 310
|
||||
#define TK_BLOCKS 311
|
||||
#define TK_CHANGE 312
|
||||
#define TK_COMMA 313
|
||||
#define TK_CONCAT 314
|
||||
#define TK_CONFLICT 315
|
||||
#define TK_COPY 316
|
||||
#define TK_DEFERRED 317
|
||||
#define TK_DELIMITERS 318
|
||||
#define TK_DETACH 319
|
||||
#define TK_DIVIDE 320
|
||||
#define TK_DOT 321
|
||||
#define TK_EACH 322
|
||||
#define TK_FAIL 323
|
||||
#define TK_FILE 324
|
||||
#define TK_FOR 325
|
||||
#define TK_GLOB 326
|
||||
#define TK_ID 327
|
||||
#define TK_IMMEDIATE 328
|
||||
#define TK_IMPORT 329
|
||||
#define TK_INITIALLY 330
|
||||
#define TK_INSTEAD 331
|
||||
#define TK_ISNULL 332
|
||||
#define TK_MODULES 333
|
||||
#define TK_NK_BITNOT 334
|
||||
#define TK_NK_SEMI 335
|
||||
#define TK_NOTNULL 336
|
||||
#define TK_OF 337
|
||||
#define TK_PLUS 338
|
||||
#define TK_PRIVILEGE 339
|
||||
#define TK_RAISE 340
|
||||
#define TK_RESTRICT 341
|
||||
#define TK_ROW 342
|
||||
#define TK_SEMI 343
|
||||
#define TK_STAR 344
|
||||
#define TK_STATEMENT 345
|
||||
#define TK_STRICT 346
|
||||
#define TK_STRING 347
|
||||
#define TK_TIMES 348
|
||||
#define TK_VALUES 349
|
||||
#define TK_VARIABLE 350
|
||||
#define TK_WAL 351
|
||||
|
||||
|
||||
#define TK_NK_SPACE 600
|
||||
#define TK_NK_COMMENT 601
|
||||
#define TK_NK_ILLEGAL 602
|
||||
|
@ -377,6 +380,7 @@
#define TK_NO_BATCH_SCAN 607
#define TK_SORT_FOR_GROUP 608
#define TK_PARTITION_FIRST 609
#define TK_PARA_TABLES_SORT 610

#define TK_NK_NIL 65535
@ -215,7 +215,6 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo);
int32_t qStreamSourceScanParamForHistoryScanStep1(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow);
int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRange *pVerRange, STimeWindow* pWindow);
int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo);
bool    qStreamScanhistoryFinished(qTaskInfo_t tinfo);
int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo);
void    resetTaskInfo(qTaskInfo_t tinfo);

@ -352,14 +352,19 @@ typedef struct SStateStore {
  int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
  int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
  int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
  int32_t (*streamStateSessionReset)(SStreamState* pState, void* pVal);
  int32_t (*streamStateSessionClear)(SStreamState* pState);
  int32_t (*streamStateSessionGetKVByCur)(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
  int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
                                           state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
  int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
  int32_t (*streamStateCountGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
  int32_t (*streamStateSessionAllocWinBuffByNextPosition)(SStreamState* pState, SStreamStateCur* pCur,
                                                          const SSessionKey* pKey, void** pVal, int32_t* pVLen);

  int32_t (*streamStateCountWinAddIfNotExist)(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** ppVal, int32_t* pVLen);
  int32_t (*streamStateCountWinAdd)(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen);

  SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp);
  TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
  bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);

@ -377,6 +382,7 @@ typedef struct SStateStore {
  int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);

  SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
  SStreamStateCur* (*streamStateCountSeekKeyPrev)(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count);
  SStreamStateCur* (*streamStateSessionSeekKeyCurrentPrev)(SStreamState* pState, const SSessionKey* key);
  SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);
@ -121,6 +121,7 @@ typedef struct SScanLogicNode {
  bool    filesetDelimited;  // returned blocks delimited by fileset
  bool    isCountByTag;      // true if selectstmt hasCountFunc & part by tag/tbname
  SArray* pFuncTypes;        // for last, last_row
  bool    paraTablesSort;    // for table merge scan
} SScanLogicNode;

typedef struct SJoinLogicNode {

@ -244,7 +245,8 @@ typedef enum EWindowType {
  WINDOW_TYPE_INTERVAL = 1,
  WINDOW_TYPE_SESSION,
  WINDOW_TYPE_STATE,
  WINDOW_TYPE_EVENT
  WINDOW_TYPE_EVENT,
  WINDOW_TYPE_COUNT
} EWindowType;

typedef enum EWindowAlgorithm {

@ -282,6 +284,8 @@ typedef struct SWindowLogicNode {
  int8_t           igCheckUpdate;
  EWindowAlgorithm windowAlgo;
  bool             isPartTb;
  int64_t          windowCount;
  int64_t          windowSliding;
} SWindowLogicNode;

typedef struct SFillLogicNode {

@ -440,6 +444,7 @@ typedef struct STableScanPhysiNode {
  int8_t igCheckUpdate;
  bool   filesetDelimited;
  bool   needCountEmptyTable;
  bool   paraTablesSort;
} STableScanPhysiNode;

typedef STableScanPhysiNode STableSeqScanPhysiNode;

@ -631,6 +636,14 @@ typedef struct SEventWinodwPhysiNode {

typedef SEventWinodwPhysiNode SStreamEventWinodwPhysiNode;

typedef struct SCountWinodwPhysiNode {
  SWindowPhysiNode window;
  int64_t          windowCount;
  int64_t          windowSliding;
} SCountWinodwPhysiNode;

typedef SCountWinodwPhysiNode SStreamCountWinodwPhysiNode;

typedef struct SSortPhysiNode {
  SPhysiNode node;
  SNodeList* pExprs;  // these are expression list of order_by_clause and parameter expression of aggregate function
@ -128,6 +128,7 @@ typedef enum EHintOption {
  HINT_BATCH_SCAN,
  HINT_SORT_FOR_GROUP,
  HINT_PARTITION_FIRST,
  HINT_PARA_TABLES_SORT
} EHintOption;

typedef struct SHintNode {

@ -278,6 +279,13 @@ typedef struct SEventWindowNode {
  SNode* pEndCond;
} SEventWindowNode;

typedef struct SCountWindowNode {
  ENodeType type;  // QUERY_NODE_EVENT_WINDOW
  SNode*    pCol;  // timestamp primary key
  int64_t   windowCount;
  int64_t   windowSliding;
} SCountWindowNode;

typedef enum EFillMode {
  FILL_MODE_NONE = 1,
  FILL_MODE_VALUE,
@ -55,13 +55,16 @@ int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key,
int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, void* value, int32_t vLen);
int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key);
int32_t streamStateSessionReset(SStreamState* pState, void* pVal);
int32_t streamStateSessionClear(SStreamState* pState);
int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
int32_t streamStateCountGetKeyByRange(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
int32_t streamStateSessionAllocWinBuffByNextPosition(SStreamState* pState, SStreamStateCur* pCur,
                                                     const SSessionKey* pKey, void** pVal, int32_t* pVLen);

SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateCountSeekKeyPrev(SStreamState* pState, const SSessionKey* pKey, COUNT_TYPE count);
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, const SSessionKey* key);

@ -79,6 +82,10 @@ int32_t streamStateReleaseBuf(SStreamState* pState, void* pVal, bool used);
int32_t streamStateClearBuff(SStreamState* pState, void* pVal);
void    streamStateFreeVal(void* val);

// count window
int32_t streamStateCountWinAddIfNotExist(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** ppVal, int32_t* pVLen);
int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen);

SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);

@ -128,10 +135,6 @@ int sessionRangeKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2);
int sessionWinKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2);
int stateSessionKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2);
int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2);
#if 0
char* streamStateSessionDump(SStreamState* pState);
char* streamStateIntervalDump(SStreamState* pState);
#endif

#ifdef __cplusplus
}
@ -50,21 +50,21 @@ extern "C" {
    (_t)->hTaskInfo.id.streamId = 0; \
  } while (0)

#define STREAM_EXEC_T_EXTRACT_WAL_DATA  (-1)
#define STREAM_EXEC_T_START_ALL_TASKS   (-2)
#define STREAM_EXEC_T_START_ONE_TASK    (-3)
#define STREAM_EXEC_T_RESTART_ALL_TASKS (-4)
#define STREAM_EXEC_T_STOP_ALL_TASKS    (-5)
#define STREAM_EXEC_T_RESUME_TASK       (-6)
#define STREAM_EXEC_T_UPDATE_TASK_EPSET (-7)

typedef struct SStreamTask   SStreamTask;
typedef struct SStreamQueue  SStreamQueue;
typedef struct SStreamTaskSM SStreamTaskSM;

#define SSTREAM_TASK_VER                  3
#define SSTREAM_TASK_INCOMPATIBLE_VER     1
#define SSTREAM_TASK_NEED_CONVERT_VER     2
#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3

enum {
@ -405,8 +405,8 @@ typedef struct SHistoryTaskInfo {
  int32_t tickCount;
  int32_t retryTimes;
  int32_t waitInterval;
  int64_t haltVer;       // offset in wal when halt the stream task
  bool    operatorOpen;  // false by default
} SHistoryTaskInfo;

typedef struct STaskOutputInfo {

@ -463,21 +463,22 @@ struct SStreamTask {
  struct SStreamMeta* pMeta;
  SSHashObj*          pNameMap;
  void*               pBackend;
  char                reserve[256];
  int8_t              subtableWithoutMd5;
  char                reserve[255];
};

typedef int32_t (*startComplete_fn_t)(struct SStreamMeta*);

typedef struct STaskStartInfo {
  int64_t   startTs;
  int64_t   readyTs;
  int32_t   tasksWillRestart;
  int32_t   taskStarting;    // restart flag, sentinel to guard the restart procedure.
  SHashObj* pReadyTaskSet;   // tasks that are all ready for running stream processing
  SHashObj* pFailedTaskSet;  // tasks that are done the check downstream process, may be successful or failed
  int64_t   elapsedTime;
  int32_t   restartCount;    // restart task counter
  startComplete_fn_t completeFn;  // complete callback function
} STaskStartInfo;

typedef struct STaskUpdateInfo {
@ -504,7 +505,7 @@ typedef struct SStreamMeta {
  int32_t vgId;
  int64_t stage;
  int32_t role;
  bool    sendMsgBeforeClosing;  // send hb to mnode before close all tasks when switch to follower.
  STaskStartInfo startInfo;
  TdThreadRwlock lock;
  SScanWalInfo   scanInfo;

@ -532,7 +533,7 @@ int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo)
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);

SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam,
                            SArray* pTaskList, bool hasFillhistory);
                            SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void    tFreeStreamTask(SStreamTask* pTask);

@ -656,7 +657,6 @@ int32_t tEncodeStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckp
int32_t tDecodeStreamCheckpointSourceReq(SDecoder* pDecoder, SStreamCheckpointSourceReq* pReq);

int32_t tEncodeStreamCheckpointSourceRsp(SEncoder* pEncoder, const SStreamCheckpointSourceRsp* pRsp);
int32_t tDecodeStreamCheckpointSourceRsp(SDecoder* pDecoder, SStreamCheckpointSourceRsp* pRsp);

typedef struct {
  SMsgHead msgHead;

@ -678,18 +678,18 @@ typedef struct STaskStatusEntry {
  int32_t statusLastDuration;  // to record the last duration of current status
  int64_t stage;
  int32_t nodeId;
  int64_t verStart;          // start version in WAL, only valid for source task
  int64_t verEnd;            // end version in WAL, only valid for source task
  int64_t processedVer;      // only valid for source task
  int64_t checkpointId;      // current active checkpoint id
  int32_t chkpointTransId;   // checkpoint trans id
  int8_t  checkpointFailed;  // denote if the checkpoint is failed or not
  bool    inputQChanging;    // inputQ is changing or not
  int64_t inputQUnchangeCounter;
  double  inputQUsed;  // in MiB
  double  inputRate;
  double  sinkQuota;     // existed quota size for sink task
  double  sinkDataSize;  // sink to dst data size
} STaskStatusEntry;

typedef struct SStreamHbMsg {
@ -720,23 +720,10 @@ int32_t tEncodeStreamTaskUpdateMsg(SEncoder* pEncoder, const SStreamTaskNodeUpda
int32_t tDecodeStreamTaskUpdateMsg(SDecoder* pDecoder, SStreamTaskNodeUpdateMsg* pMsg);

typedef struct SStreamTaskState {
  ETaskStatus state;
  char*       name;
} SStreamTaskState;

typedef struct {
  int64_t streamId;
  int32_t downstreamTaskId;
  int32_t taskId;
} SStreamRecoverDownstreamReq;

typedef struct {
  int64_t streamId;
  int32_t downstreamTaskId;
  int32_t taskId;
  SArray* checkpointVer;  // SArray<SStreamCheckpointInfo>
} SStreamRecoverDownstreamRsp;

int32_t tEncodeStreamTaskCheckReq(SEncoder* pEncoder, const SStreamTaskCheckReq* pReq);
int32_t tDecodeStreamTaskCheckReq(SDecoder* pDecoder, SStreamTaskCheckReq* pReq);

@ -745,10 +732,12 @@ int32_t tDecodeStreamTaskCheckRsp(SDecoder* pDecoder, SStreamTaskCheckRsp* pRsp)

int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq);
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);

int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void    tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);

int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void    tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);

typedef struct SStreamTaskCheckpointReq {
  int64_t streamId;
  int32_t taskId;

@ -787,21 +776,19 @@ void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
// recover and fill history
void streamTaskCheckDownstream(SStreamTask* pTask);

int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage,
                              int64_t* oldStage);
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamId, int32_t vgId, int64_t stage, int64_t* oldStage);
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
void    streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
bool    streamTaskIsAllUpstreamClosed(SStreamTask* pTask);
bool    streamTaskSetSchedStatusWait(SStreamTask* pTask);
int8_t  streamTaskSetSchedStatusActive(SStreamTask* pTask);
int8_t  streamTaskSetSchedStatusInactive(SStreamTask* pTask);
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, bool metaLock);
int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t clearRelHalt, bool metaLock);

int32_t streamTaskHandleEvent(SStreamTaskSM* pSM, EStreamTaskEvent event);
int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent event);
void    streamTaskRestoreStatus(SStreamTask* pTask);

int32_t streamTaskStop(SStreamTask* pTask);
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
                           SRpcHandleInfo* pRpcInfo, int32_t taskId);
int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp);
@ -839,7 +826,8 @@ SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st);
// stream task meta
void streamMetaInit();
void streamMetaCleanup();
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage, startComplete_fn_t fn);
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage,
                            startComplete_fn_t fn);
void    streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);  // save to stream meta store
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, STaskId* pKey);

@ -858,22 +846,22 @@ void streamMetaNotifyClose(SStreamMeta* pMeta);
void streamMetaStartHb(SStreamMeta* pMeta);
bool streamMetaTaskInTimer(SStreamMeta* pMeta);
int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
                                      int64_t endTs, bool ready);
int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta);

void    streamMetaRLock(SStreamMeta* pMeta);
void    streamMetaRUnLock(SStreamMeta* pMeta);
void    streamMetaWLock(SStreamMeta* pMeta);
void    streamMetaWUnLock(SStreamMeta* pMeta);
void    streamMetaResetStartInfo(STaskStartInfo* pMeta);
SArray* streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta);
void    streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStartAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStopAllTasks(SStreamMeta* pMeta);
int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
bool    streamMetaAllTasksReady(const SStreamMeta* pMeta);
tmr_h   streamTimerGetInstance();

// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);

@ -881,7 +869,7 @@ int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
int32_t streamTaskBuildCheckpoint(SStreamTask* pTask);
void    streamTaskClearCheckInfo(SStreamTask* pTask, bool clearChkpReadyMsg);
int32_t streamAlignTransferState(SStreamTask* pTask);
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId);
int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId, int64_t resetRelHalt);
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
                                        int8_t isSucceed);
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,

@ -892,6 +880,7 @@ void* streamDestroyStateMachine(SStreamTaskSM* pSM);

int32_t broadcastRetrieveMsg(SStreamTask* pTask, SStreamRetrieveReq *req);
void    sendRetrieveRsp(SStreamRetrieveReq *pReq, SRpcMsg* pRsp);

#ifdef __cplusplus
}
#endif
@ -41,6 +41,8 @@ typedef int32_t (*_state_file_remove_fn)(SStreamFileState* pFileState, const voi
typedef int32_t (*_state_file_get_fn)(SStreamFileState* pFileState, void* pKey, void* data, int32_t* pDataLen);
typedef int32_t (*_state_file_clear_fn)(SStreamState* pState);

typedef int32_t (*range_cmpr_fn)(const SSessionKey* pWin1, const SSessionKey* pWin2);

SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
                                      GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId,
                                      int64_t checkpointId, int8_t type);

@ -90,14 +92,19 @@ void sessionWinStateCleanup(void* pBuff);
SStreamStateCur* sessionWinStateSeekKeyCurrentPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const SSessionKey* pWinKey);
SStreamStateCur* countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey, COUNT_TYPE count);
int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
int32_t sessionWinStateMoveToNext(SStreamStateCur* pCur);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey);
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey, range_cmpr_fn cmpFn);

// state window
int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
                              state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);

// count window
int32_t getCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen);
int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen);

#ifdef __cplusplus
}
#endif
@ -119,6 +119,8 @@ int32_t taosSetFileHandlesLimit();
|
|||
|
||||
int32_t taosLinkFile(char *src, char *dst);
|
||||
|
||||
bool lastErrorIsFileNotExist();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -2121,6 +2121,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
    pStart += colLength[i];
  }

  // bool blankFill = *(bool*)p;
  p += sizeof(bool);

  if (convertUcs4) {
    code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength);
  }

@@ -21,8 +21,8 @@
#include "tglobal.h"
#include "tmsgtype.h"

#define LOG_ID_TAG "connId:0x%"PRIx64",reqId:0x%"PRIx64
#define LOG_ID_VALUE *(int64_t*)taos,pRequest->requestId
#define LOG_ID_TAG "connId:0x%" PRIx64 ",reqId:0x%" PRIx64
#define LOG_ID_VALUE *(int64_t*)taos, pRequest->requestId

static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); }
|
||||
|
@ -31,8 +31,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
|
|||
char* string = NULL;
|
||||
cJSON* json = cJSON_CreateObject();
|
||||
if (json == NULL) {
|
||||
uError("create json object failed")
|
||||
return NULL;
|
||||
uError("create json object failed") return NULL;
|
||||
}
|
||||
cJSON* type = cJSON_CreateString("create");
|
||||
cJSON_AddItemToObject(json, "type", type);
|
||||
|
@ -56,7 +55,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
|
|||
cJSON_AddItemToObject(column, "name", cname);
|
||||
cJSON* ctype = cJSON_CreateNumber(s->type);
|
||||
cJSON_AddItemToObject(column, "type", ctype);
|
||||
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY|| s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_VARBINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||
cJSON_AddItemToObject(column, "length", cbytes);
|
||||
|
@ -131,7 +130,8 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
|
|||
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||
cJSON_AddItemToObject(json, "colType", colType);
|
||||
|
||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
|
||||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||
|
@ -156,7 +156,8 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
|
|||
cJSON_AddItemToObject(json, "colName", colName);
|
||||
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||
cJSON_AddItemToObject(json, "colType", colType);
|
||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_VARBINARY ||
|
||||
field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||
|
@ -182,7 +183,7 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
|
|||
}
|
||||
string = cJSON_PrintUnformatted(json);
|
||||
|
||||
end:
|
||||
end:
|
||||
cJSON_Delete(json);
|
||||
tFreeSMAltertbReq(&req);
|
||||
return string;
|
||||
|
@ -295,9 +296,9 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
|||
cJSON* tvalue = NULL;
|
||||
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
|
||||
char* buf = NULL;
|
||||
if(pTagVal->type == TSDB_DATA_TYPE_VARBINARY){
|
||||
buf = taosMemoryCalloc(pTagVal->nData*2 + 2 + 3, 1);
|
||||
}else{
|
||||
if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) {
|
||||
buf = taosMemoryCalloc(pTagVal->nData * 2 + 2 + 3, 1);
|
||||
} else {
|
||||
buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
|
||||
}
|
||||
|
||||
|
@ -315,7 +316,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {
|
|||
cJSON_AddItemToArray(tags, tag);
|
||||
}
|
||||
|
||||
end:
|
||||
end:
|
||||
cJSON_AddItemToObject(json, "tags", tags);
|
||||
taosArrayDestroy(pTagVals);
|
||||
}
|
||||
|
@ -469,7 +470,8 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
|||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
||||
cJSON_AddItemToObject(json, "colType", colType);
|
||||
|
||||
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_VARBINARY ||
|
||||
vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||
|
@ -490,7 +492,8 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
|||
cJSON_AddItemToObject(json, "colName", colName);
|
||||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
|
||||
cJSON_AddItemToObject(json, "colType", colType);
|
||||
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_VARBINARY ||
|
||||
vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
|
||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||
|
@ -527,9 +530,9 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
|||
}
|
||||
buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
|
||||
} else {
|
||||
if(vAlterTbReq.tagType == TSDB_DATA_TYPE_VARBINARY){
|
||||
buf = taosMemoryCalloc(vAlterTbReq.nTagVal*2 + 2 + 3, 1);
|
||||
}else{
|
||||
if (vAlterTbReq.tagType == TSDB_DATA_TYPE_VARBINARY) {
|
||||
buf = taosMemoryCalloc(vAlterTbReq.nTagVal * 2 + 2 + 3, 1);
|
||||
} else {
|
||||
buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 3, 1);
|
||||
}
|
||||
dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
|
||||
|
@ -677,7 +680,7 @@ _exit:
|
|||
}
|
||||
|
||||
static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
if(taos == NULL || meta == NULL) {
|
||||
if (taos == NULL || meta == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
}
|
||||
|
@ -692,7 +695,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
terrno = code;
|
||||
return code;
|
||||
}
|
||||
uDebug(LOG_ID_TAG" create stable, meta:%p, metaLen:%d", LOG_ID_VALUE, meta, metaLen);
|
||||
uDebug(LOG_ID_TAG " create stable, meta:%p, metaLen:%d", LOG_ID_VALUE, meta, metaLen);
|
||||
pRequest->syncQuery = true;
|
||||
if (!pRequest->pDb) {
|
||||
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
|
||||
|
@ -731,8 +734,8 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
pReq.source = TD_REQ_FROM_TAOX;
|
||||
pReq.igExists = true;
|
||||
|
||||
uDebug(LOG_ID_TAG" create stable name:%s suid:%" PRId64 " processSuid:%" PRId64,
|
||||
LOG_ID_VALUE, req.name, req.suid, pReq.suid);
|
||||
uDebug(LOG_ID_TAG " create stable name:%s suid:%" PRId64 " processSuid:%" PRId64, LOG_ID_VALUE, req.name, req.suid,
|
||||
pReq.suid);
|
||||
STscObj* pTscObj = pRequest->pTscObj;
|
||||
SName tableName;
|
||||
tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
|
||||
|
@ -766,7 +769,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
taosMemoryFree(pCmdMsg.pMsg);
|
||||
|
||||
end:
|
||||
uDebug(LOG_ID_TAG" create stable return, msg:%s", LOG_ID_VALUE, tstrerror(code));
|
||||
uDebug(LOG_ID_TAG " create stable return, msg:%s", LOG_ID_VALUE, tstrerror(code));
|
||||
destroyRequest(pRequest);
|
||||
tFreeSMCreateStbReq(&pReq);
|
||||
tDecoderClear(&coder);
|
||||
|
@ -775,7 +778,7 @@ end:
|
|||
}
|
||||
|
||||
static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
if(taos == NULL || meta == NULL) {
|
||||
if (taos == NULL || meta == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
}
|
||||
|
@ -791,7 +794,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
return code;
|
||||
}
|
||||
|
||||
uDebug(LOG_ID_TAG" drop stable, meta:%p, metaLen:%d", LOG_ID_VALUE, meta, metaLen);
|
||||
uDebug(LOG_ID_TAG " drop stable, meta:%p, metaLen:%d", LOG_ID_VALUE, meta, metaLen);
|
||||
pRequest->syncQuery = true;
|
||||
if (!pRequest->pDb) {
|
||||
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
|
||||
|
@ -812,9 +815,9 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
goto end;
|
||||
}
|
||||
SRequestConnInfo conn = {.pTrans = pRequest->pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)};
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)};
|
||||
SName pName = {0};
|
||||
toName(pRequest->pTscObj->acctId, pRequest->pDb, req.name, &pName);
|
||||
STableMeta* pTableMeta = NULL;
|
||||
|
@ -835,8 +838,8 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
pReq.source = TD_REQ_FROM_TAOX;
|
||||
// pReq.suid = processSuid(req.suid, pRequest->pDb);
|
||||
|
||||
uDebug(LOG_ID_TAG" drop stable name:%s suid:%" PRId64 " new suid:%" PRId64,
|
||||
LOG_ID_VALUE, req.name, req.suid, pReq.suid);
|
||||
uDebug(LOG_ID_TAG " drop stable name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, req.name, req.suid,
|
||||
pReq.suid);
|
||||
STscObj* pTscObj = pRequest->pTscObj;
|
||||
SName tableName = {0};
|
||||
tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
|
||||
|
@ -870,7 +873,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
taosMemoryFree(pCmdMsg.pMsg);
|
||||
|
||||
end:
|
||||
uDebug(LOG_ID_TAG" drop stable return, msg:%s", LOG_ID_VALUE, tstrerror(code));
|
||||
uDebug(LOG_ID_TAG " drop stable return, msg:%s", LOG_ID_VALUE, tstrerror(code));
|
||||
destroyRequest(pRequest);
|
||||
tDecoderClear(&coder);
|
||||
terrno = code;
|
||||
|
@ -889,7 +892,7 @@ static void destroyCreateTbReqBatch(void* data) {
|
|||
}
|
||||
|
||||
static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
if(taos == NULL || meta == NULL) {
|
||||
if (taos == NULL || meta == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
}
|
||||
|
@ -939,9 +942,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
|
||||
pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
|
||||
// loop to create table
|
||||
|
@ -1033,7 +1036,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
code = pRequest->code;
|
||||
|
||||
end:
|
||||
uDebug(LOG_ID_TAG" create table return, msg:%s", LOG_ID_VALUE, tstrerror(code));
|
||||
uDebug(LOG_ID_TAG " create table return, msg:%s", LOG_ID_VALUE, tstrerror(code));
|
||||
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
|
||||
pCreateReq = req.pReqs + iReq;
|
||||
taosMemoryFreeClear(pCreateReq->comment);
|
||||
|
@ -1062,7 +1065,7 @@ static void destroyDropTbReqBatch(void* data) {
|
|||
}
|
||||
|
||||
static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
if(taos == NULL || meta == NULL) {
|
||||
if (taos == NULL || meta == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
}
|
||||
|
@ -1111,9 +1114,9 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
|
||||
// loop to create table
|
||||
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
|
||||
|
@ -1142,7 +1145,8 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
tb_uid_t oldSuid = pDropReq->suid;
|
||||
pDropReq->suid = pTableMeta->suid;
|
||||
taosMemoryFreeClear(pTableMeta);
|
||||
uDebug(LOG_ID_TAG" drop table name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, pDropReq->name, oldSuid, pDropReq->suid);
|
||||
uDebug(LOG_ID_TAG " drop table name:%s suid:%" PRId64 " new suid:%" PRId64, LOG_ID_VALUE, pDropReq->name, oldSuid,
|
||||
pDropReq->suid);
|
||||
|
||||
taosArrayPush(pRequest->tableList, &pName);
|
||||
SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
|
||||
|
@ -1185,7 +1189,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
code = pRequest->code;
|
||||
|
||||
end:
|
||||
uDebug(LOG_ID_TAG" drop table return, msg:%s", LOG_ID_VALUE, tstrerror(code));
|
||||
uDebug(LOG_ID_TAG " drop table return, msg:%s", LOG_ID_VALUE, tstrerror(code));
|
||||
taosHashCleanup(pVgroupHashmap);
|
||||
destroyRequest(pRequest);
|
||||
tDecoderClear(&coder);
|
||||
|
@ -1227,16 +1231,16 @@ end:
|
|||
//}
|
||||
|
||||
static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
if(taos == NULL || meta == NULL) {
|
||||
if (taos == NULL || meta == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
}
|
||||
SDeleteRes req = {0};
|
||||
SDecoder coder = {0};
|
||||
SDeleteRes req = {0};
|
||||
SDecoder coder = {0};
|
||||
char sql[256] = {0};
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
uDebug("connId:0x%"PRIx64" delete data, meta:%p, len:%d", *(int64_t*)taos, meta, metaLen);
|
||||
uDebug("connId:0x%" PRIx64 " delete data, meta:%p, len:%d", *(int64_t*)taos, meta, metaLen);
|
||||
|
||||
// decode and process req
|
||||
void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
|
||||
|
@ -1260,14 +1264,14 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
taos_free_result(res);
|
||||
|
||||
end:
|
||||
uDebug("connId:0x%"PRIx64" delete data sql:%s, code:%s", *(int64_t*)taos, sql, tstrerror(code));
|
||||
uDebug("connId:0x%" PRIx64 " delete data sql:%s, code:%s", *(int64_t*)taos, sql, tstrerror(code));
|
||||
tDecoderClear(&coder);
|
||||
terrno = code;
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
|
||||
if(taos == NULL || meta == NULL) {
|
||||
if (taos == NULL || meta == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
}
|
||||
|
@ -1312,9 +1316,9 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
|
|||
}
|
||||
|
||||
SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
.requestId = pRequest->requestId,
|
||||
.requestObjRefId = pRequest->self,
|
||||
.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
|
||||
|
||||
SVgroupInfo pInfo = {0};
|
||||
SName pName = {0};
|
||||
|
@ -1394,8 +1398,8 @@ int taos_write_raw_block_with_fields(TAOS* taos, int rows, char* pData, const ch
|
|||
return taos_write_raw_block_with_fields_with_reqid(taos, rows, pData, tbname, fields, numFields, 0);
|
||||
}
|
||||
|
||||
int taos_write_raw_block_with_fields_with_reqid(TAOS *taos, int rows, char *pData, const char *tbname,
|
||||
TAOS_FIELD *fields, int numFields, int64_t reqid){
|
||||
int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pData, const char* tbname,
|
||||
TAOS_FIELD* fields, int numFields, int64_t reqid) {
|
||||
if (!taos || !pData || !tbname) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
|
@ -1410,8 +1414,8 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS *taos, int rows, char *pDat
|
|||
return terrno;
|
||||
}
|
||||
|
||||
uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d",
|
||||
LOG_ID_VALUE, rows, pData, tbname, fields, numFields);
|
||||
uDebug(LOG_ID_TAG " write raw block with field, rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE,
|
||||
rows, pData, tbname, fields, numFields);
|
||||
|
||||
pRequest->syncQuery = true;
|
||||
if (!pRequest->pDb) {
|
||||
|
@ -1562,12 +1566,12 @@ end:
|
|||
return code;
|
||||
}
|
||||
|
||||
static void* getRawDataFromRes(void *pRetrieve){
|
||||
static void* getRawDataFromRes(void* pRetrieve) {
|
||||
void* rawData = NULL;
|
||||
// deal with compatibility
|
||||
if(*(int64_t*)pRetrieve == 0){
|
||||
if (*(int64_t*)pRetrieve == 0) {
|
||||
rawData = ((SRetrieveTableRsp*)pRetrieve)->data;
|
||||
}else if(*(int64_t*)pRetrieve == 1){
|
||||
} else if (*(int64_t*)pRetrieve == 1) {
|
||||
rawData = ((SRetrieveTableRspForTmq*)pRetrieve)->data;
|
||||
}
|
||||
ASSERT(rawData != NULL);
|
||||
|
@ -1575,7 +1579,7 @@ static void* getRawDataFromRes(void *pRetrieve){
|
|||
}
|
||||
|
||||
static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
||||
if(taos == NULL || data == NULL){
|
||||
if (taos == NULL || data == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
}
|
||||
|
@ -1644,11 +1648,11 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
|||
strcpy(pName.tname, tbName);
|
||||
|
||||
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
|
||||
// if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||
// uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
|
||||
// code = TSDB_CODE_SUCCESS;
|
||||
// continue;
|
||||
// }
|
||||
// if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||
// uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
|
||||
// code = TSDB_CODE_SUCCESS;
|
||||
// continue;
|
||||
// }
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto end;
|
||||
}
|
||||
|
@ -1704,7 +1708,7 @@ end:
|
|||
}
|
||||
|
||||
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
||||
if(taos == NULL || data == NULL){
|
||||
if (taos == NULL || data == NULL) {
|
||||
terrno = TSDB_CODE_INVALID_PARA;
|
||||
return terrno;
|
||||
}
|
||||
|
@ -1757,7 +1761,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
}
|
||||
pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
|
||||
uDebug(LOG_ID_TAG" write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.blockNum);
|
||||
uDebug(LOG_ID_TAG " write raw metadata block num:%d", LOG_ID_VALUE, rspObj.rsp.blockNum);
|
||||
while (++rspObj.resIter < rspObj.rsp.blockNum) {
|
||||
void* pRetrieve = taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
|
||||
if (!rspObj.rsp.withSchema) {
|
||||
|
@ -1770,7 +1774,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
goto end;
|
||||
}
|
||||
|
||||
uDebug(LOG_ID_TAG" write raw metadata block tbname:%s", LOG_ID_VALUE, tbName);
|
||||
uDebug(LOG_ID_TAG " write raw metadata block tbname:%s", LOG_ID_VALUE, tbName);
|
||||
SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
|
||||
strcpy(pName.dbname, pRequest->pDb);
|
||||
strcpy(pName.tname, tbName);
|
||||
|
@ -1817,11 +1821,11 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen)
|
|||
strcpy(pName.tname, pCreateReqDst->ctb.stbName);
|
||||
}
|
||||
code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
|
||||
// if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||
// uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
|
||||
// code = TSDB_CODE_SUCCESS;
|
||||
// continue;
|
||||
// }
|
||||
// if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
|
||||
// uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
|
||||
// code = TSDB_CODE_SUCCESS;
|
||||
// continue;
|
||||
// }
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto end;
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@@ -631,7 +631,6 @@ SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int3
  for (int32_t i = 0; i < numOfCols; ++i) {
    SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
    SColumnInfoData* pDstCol = taosArrayGet(pDst->pDataBlock, i);

    for (int32_t j = startIndex; j < (startIndex + rowCount); ++j) {
      bool isNull = false;
      if (pBlock->pBlockAgg == NULL) {

@@ -866,9 +865,9 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
 * @return
 */
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
  // | version | total length | total rows | total columns | flag seg| block group id | column schema
  // | version | total length | total rows | blankFull | total columns | flag seg| block group id | column schema
  // | each column length |
  return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
  return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(bool) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
         numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}

@@ -1436,6 +1435,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
  pBlock->info.capacity = 0;
  pBlock->info.rowSize = 0;
  pBlock->info.id = pDataBlock->info.id;
  pBlock->info.blankFill = pDataBlock->info.blankFill;

  size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
  for (int32_t i = 0; i < numOfCols; ++i) {

@@ -2282,6 +2282,10 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
    // htonl(colSizes[col]), colSizes[col]);
  }

  bool* blankFill = (bool*)data;
  *blankFill = pBlock->info.blankFill;
  data += sizeof(bool);

  *actualLen = dataLen;
  *groupId = pBlock->info.id.groupId;
  ASSERT(dataLen > 0);

@@ -2375,8 +2379,12 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
    pStart += colLen[i];
  }

  bool blankFill = *(bool*)pStart;
  pStart += sizeof(bool);

  pBlock->info.dataLoad = 1;
  pBlock->info.rows = numOfRows;
  pBlock->info.blankFill = blankFill;
  ASSERT(pStart - pData == dataLen);
  return pStart;
}

@@ -1606,10 +1606,6 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
    tsTempSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
    uInfo("%s set to %" PRId64, name, tsTempSpace.reserved);
    matched = true;
  } else if (strcasecmp("minimalDataDirGB", name) == 0) {
    tsDataSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
    uInfo("%s set to %" PRId64, name, tsDataSpace.reserved);
    matched = true;
  } else if (strcasecmp("minimalLogDirGB", name) == 0) {
    tsLogSpace.reserved = (int64_t)(((double)pItem->fval) * 1024 * 1024 * 1024);
    uInfo("%s set to %" PRId64, name, tsLogSpace.reserved);

@@ -1680,10 +1676,6 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, char *name) {
      return -1;
    }
    matched = true;
  } else if (strcasecmp("telemetryServer", name) == 0) {
    uInfo("%s set from %s to %s", name, pItem->str, tsTelemServer);
    tstrncpy(tsTelemServer, pItem->str, TSDB_FQDN_LEN);
    matched = true;
  }
  break;
}

@@ -699,6 +699,7 @@ typedef struct {
  int64_t checkpointId;

  int32_t indexForMultiAggBalance;
  int8_t subTableWithoutMd5;
  char reserve[256];

} SStreamObj;

@@ -776,8 +777,8 @@ typedef enum {
  GRANT_STATE_REASON_MAX,
} EGrantStateReason;

#define GRANT_STATE_NUM 30
#define GRANT_ACTIVE_NUM 10
#define GRANT_STATE_NUM 30
#define GRANT_ACTIVE_NUM 10
#define GRANT_ACTIVE_HEAD_LEN 30

typedef struct {

@@ -812,7 +813,7 @@ typedef struct {
    int64_t id : 24;
  };
  };
  char machine[TSDB_MACHINE_ID_LEN + 1];
  char machine[TSDB_MACHINE_ID_LEN + 1];
} SGrantMachine;

typedef struct {

@@ -24,7 +24,7 @@ extern "C" {
#endif

#define MND_STREAM_RESERVE_SIZE 64
#define MND_STREAM_VER_NUMBER 4
#define MND_STREAM_VER_NUMBER 5

#define MND_STREAM_CREATE_NAME "stream-create"
#define MND_STREAM_CHECKPOINT_NAME "stream-checkpoint"
@@ -15,20 +15,20 @@

#define _DEFAULT_SOURCE
#include "mndConsumer.h"
#include "mndPrivilege.h"
#include "mndVgroup.h"
#include "mndShow.h"
#include "mndDb.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndSubscribe.h"
#include "mndTopic.h"
#include "mndTrans.h"
#include "mndVgroup.h"
#include "tcompare.h"
#include "tname.h"

#define MND_CONSUMER_VER_NUMBER 2
#define MND_CONSUMER_RESERVE_SIZE 64

#define MND_MAX_GROUP_PER_TOPIC 100
#define MND_MAX_GROUP_PER_TOPIC 100

static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer);
static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer);
@ -56,7 +56,7 @@ int32_t mndInitConsumer(SMnode *pMnode) {
|
|||
mndSetMsgHandle(pMnode, TDMT_MND_TMQ_SUBSCRIBE, mndProcessSubscribeReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_TMQ_HB, mndProcessMqHbReq);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_TMQ_ASK_EP, mndProcessAskEpReq);
|
||||
// mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg);
|
||||
// mndSetMsgHandle(pMnode, TDMT_MND_TMQ_TIMER, mndProcessMqTimerMsg);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_TMQ_CONSUMER_RECOVER, mndProcessConsumerRecoverMsg);
|
||||
mndSetMsgHandle(pMnode, TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, mndProcessConsumerClearMsg);
|
||||
|
||||
|
@ -68,10 +68,11 @@ int32_t mndInitConsumer(SMnode *pMnode) {
|
|||
|
||||
void mndCleanupConsumer(SMnode *pMnode) {}
|
||||
|
||||
void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId, SRpcHandleInfo* info){
|
||||
void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId, SRpcHandleInfo *info) {
|
||||
SMqConsumerClearMsg *pClearMsg = rpcMallocCont(sizeof(SMqConsumerClearMsg));
|
||||
if (pClearMsg == NULL) {
|
||||
mError("consumer:0x%"PRIx64" failed to clear consumer due to out of memory. alloc size:%d", consumerId, (int32_t)sizeof(SMqConsumerClearMsg));
|
||||
mError("consumer:0x%" PRIx64 " failed to clear consumer due to out of memory. alloc size:%d", consumerId,
|
||||
(int32_t)sizeof(SMqConsumerClearMsg));
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -88,13 +89,14 @@ void mndDropConsumerFromSdb(SMnode *pMnode, int64_t consumerId, SRpcHandleInfo*
|
|||
return;
|
||||
}
|
||||
|
||||
static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser, bool enableReplay) {
|
||||
static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *pMnode, const char *pUser,
|
||||
bool enableReplay) {
|
||||
SMqTopicObj *pTopic = NULL;
|
||||
int32_t code = 0;
|
||||
int32_t code = 0;
|
||||
|
||||
int32_t numOfTopics = taosArrayGetSize(pTopicList);
|
||||
for (int32_t i = 0; i < numOfTopics; i++) {
|
||||
char *pOneTopic = taosArrayGetP(pTopicList, i);
|
||||
char *pOneTopic = taosArrayGetP(pTopicList, i);
|
||||
pTopic = mndAcquireTopic(pMnode, pOneTopic);
|
||||
if (pTopic == NULL) { // terrno has been set by callee function
|
||||
code = -1;
|
||||
|
@ -112,11 +114,11 @@ static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *
|
|||
goto FAILED;
|
||||
}
|
||||
|
||||
if(enableReplay){
|
||||
if(pTopic->subType != TOPIC_SUB_TYPE__COLUMN){
|
||||
if (enableReplay) {
|
||||
if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
|
||||
code = TSDB_CODE_TMQ_REPLAY_NOT_SUPPORT;
|
||||
goto FAILED;
|
||||
}else if(pTopic->ntbUid == 0 && pTopic->ctbStbUid == 0) {
|
||||
} else if (pTopic->ntbUid == 0 && pTopic->ctbStbUid == 0) {
|
||||
SDbObj *pDb = mndAcquireDb(pMnode, pTopic->db);
|
||||
if (pDb == NULL) {
|
||||
code = -1;
|
||||
|
@ -132,7 +134,7 @@ static int32_t validateTopics(STrans *pTrans, const SArray *pTopicList, SMnode *
|
|||
}
|
||||
|
||||
mndTransSetDbName(pTrans, pOneTopic, NULL);
|
||||
if(mndTransCheckConflict(pMnode, pTrans) != 0){
|
||||
if (mndTransCheckConflict(pMnode, pTrans) != 0) {
|
||||
code = -1;
|
||||
goto FAILED;
|
||||
}
|
||||
|
@ -172,7 +174,7 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
|
|||
if (pTrans == NULL) {
|
||||
goto FAIL;
|
||||
}
|
||||
if(validateTopics(pTrans, pConsumer->assignedTopics, pMnode, pMsg->info.conn.user, false) != 0){
|
||||
if (validateTopics(pTrans, pConsumer->assignedTopics, pMnode, pMsg->info.conn.user, false) != 0) {
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
|
@ -197,7 +199,7 @@ static int32_t mndProcessConsumerClearMsg(SRpcMsg *pMsg) {
|
|||
|
||||
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, pClearMsg->consumerId);
|
||||
if (pConsumer == NULL) {
|
||||
mError("consumer:0x%"PRIx64" failed to be found to clear it", pClearMsg->consumerId);
|
||||
mError("consumer:0x%" PRIx64 " failed to be found to clear it", pClearMsg->consumerId);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -226,15 +228,15 @@ FAIL:
|
|||
return -1;
|
||||
}
|
||||
|
||||
static int32_t checkPrivilege(SMnode *pMnode, SMqConsumerObj *pConsumer, SMqHbRsp *rsp, char* user){
|
||||
static int32_t checkPrivilege(SMnode *pMnode, SMqConsumerObj *pConsumer, SMqHbRsp *rsp, char *user) {
|
||||
rsp->topicPrivileges = taosArrayInit(taosArrayGetSize(pConsumer->currentTopics), sizeof(STopicPrivilege));
|
||||
if(rsp->topicPrivileges == NULL){
|
||||
if (rsp->topicPrivileges == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
for(int32_t i = 0; i < taosArrayGetSize(pConsumer->currentTopics); i++){
|
||||
char *topic = taosArrayGetP(pConsumer->currentTopics, i);
|
||||
SMqTopicObj* pTopic = mndAcquireTopic(pMnode, topic);
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pConsumer->currentTopics); i++) {
|
||||
char *topic = taosArrayGetP(pConsumer->currentTopics, i);
|
||||
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
|
||||
if (pTopic == NULL) { // terrno has been set by callee function
|
||||
continue;
|
||||
}
|
||||
|
@ -252,10 +254,10 @@ static int32_t checkPrivilege(SMnode *pMnode, SMqConsumerObj *pConsumer, SMqHbR
|
|||
}
|
||||
|
||||
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
||||
int32_t code = 0;
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
SMqHbReq req = {0};
|
||||
SMqHbRsp rsp = {0};
|
||||
int32_t code = 0;
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
SMqHbReq req = {0};
|
||||
SMqHbRsp rsp = {0};
|
||||
SMqConsumerObj *pConsumer = NULL;
|
||||
|
||||
if (tDeserializeSMqHbReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
|
||||
|
@ -264,7 +266,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
|||
goto end;
|
||||
}
|
||||
|
||||
int64_t consumerId = req.consumerId;
|
||||
int64_t consumerId = req.consumerId;
|
||||
pConsumer = mndAcquireConsumer(pMnode, consumerId);
|
||||
if (pConsumer == NULL) {
|
||||
mError("consumer:0x%" PRIx64 " not exist", consumerId);
|
||||
|
@ -273,7 +275,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
|||
goto end;
|
||||
}
|
||||
code = checkPrivilege(pMnode, pConsumer, &rsp, pMsg->info.conn.user);
|
||||
if(code != 0){
|
||||
if (code != 0) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
@ -295,12 +297,12 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
|||
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg);
|
||||
}
|
||||
|
||||
for(int i = 0; i < taosArrayGetSize(req.topics); i++){
|
||||
TopicOffsetRows* data = taosArrayGet(req.topics, i);
|
||||
for (int i = 0; i < taosArrayGetSize(req.topics); i++) {
|
||||
TopicOffsetRows *data = taosArrayGet(req.topics, i);
|
||||
mInfo("heartbeat report offset rows.%s:%s", pConsumer->cgroup, data->topicName);
|
||||
|
||||
SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, data->topicName);
|
||||
if(pSub == NULL){
|
||||
if (pSub == NULL) {
|
||||
#ifdef TMQ_DEBUG
|
||||
ASSERT(0);
|
||||
#endif
|
||||
|
@ -308,7 +310,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
|||
}
|
||||
taosWLockLatch(&pSub->lock);
|
||||
SMqConsumerEp *pConsumerEp = taosHashGet(pSub->consumerHash, &consumerId, sizeof(int64_t));
|
||||
if(pConsumerEp){
|
||||
if (pConsumerEp) {
|
||||
taosArrayDestroy(pConsumerEp->offsetRows);
|
||||
pConsumerEp->offsetRows = data->offsetRows;
|
||||
data->offsetRows = NULL;
|
||||
|
@ -413,7 +415,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
char *topic = taosArrayGetP(pConsumer->currentTopics, i);
|
||||
SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, pConsumer->cgroup, topic);
|
||||
// txn guarantees pSub is created
|
||||
if(pSub == NULL) {
|
||||
if (pSub == NULL) {
|
||||
#ifdef TMQ_DEBUG
|
||||
ASSERT(0);
|
||||
#endif
|
||||
|
@ -426,7 +428,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
|
||||
// 2.1 fetch topic schema
|
||||
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
|
||||
if(pTopic == NULL) {
|
||||
if (pTopic == NULL) {
|
||||
#ifdef TMQ_DEBUG
|
||||
ASSERT(0);
|
||||
#endif
|
||||
|
@ -460,12 +462,12 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
|
||||
for (int32_t j = 0; j < vgNum; j++) {
|
||||
SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, j);
|
||||
// char offsetKey[TSDB_PARTITION_KEY_LEN];
|
||||
// mndMakePartitionKey(offsetKey, pConsumer->cgroup, topic, pVgEp->vgId);
|
||||
// char offsetKey[TSDB_PARTITION_KEY_LEN];
|
||||
// mndMakePartitionKey(offsetKey, pConsumer->cgroup, topic, pVgEp->vgId);
|
||||
|
||||
if(epoch == -1){
|
||||
if (epoch == -1) {
|
||||
SVgObj *pVgroup = mndAcquireVgroup(pMnode, pVgEp->vgId);
|
||||
if(pVgroup){
|
||||
if (pVgroup) {
|
||||
pVgEp->epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
||||
mndReleaseVgroup(pMnode, pVgroup);
|
||||
}
|
||||
|
@ -495,7 +497,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
goto FAIL;
|
||||
}
|
||||
|
||||
SMqRspHead* pHead = buf;
|
||||
SMqRspHead *pHead = buf;
|
||||
|
||||
pHead->mqMsgType = TMQ_MSG_TYPE__EP_RSP;
|
||||
pHead->epoch = serverEpoch;
|
||||
|
@ -503,7 +505,6 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
pHead->walsver = 0;
|
||||
pHead->walever = 0;
|
||||
|
||||
|
||||
void *abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
|
||||
tEncodeSMqAskEpRsp(&abuf, &rsp);
|
||||
|
||||
|
@ -552,29 +553,23 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
|
|||
char *msgStr = pMsg->pCont;
|
||||
int32_t code = -1;
|
||||
|
||||
if ((terrno = grantCheck(TSDB_GRANT_SUBSCRIPTION)) < 0) {
|
||||
code = terrno;
|
||||
return code;
|
||||
}
|
||||
|
||||
SCMSubscribeReq subscribe = {0};
|
||||
tDeserializeSCMSubscribeReq(msgStr, &subscribe);
|
||||
|
||||
int64_t consumerId = subscribe.consumerId;
|
||||
int64_t consumerId = subscribe.consumerId;
|
||||
char *cgroup = subscribe.cgroup;
|
||||
SMqConsumerObj *pExistedConsumer = NULL;
|
||||
SMqConsumerObj *pConsumerNew = NULL;
|
||||
STrans *pTrans = NULL;
|
||||
STrans *pTrans = NULL;
|
||||
|
||||
|
||||
SArray *pTopicList = subscribe.topicNames;
|
||||
taosArraySort(pTopicList, taosArrayCompareString);
|
||||
taosArrayRemoveDuplicate(pTopicList, taosArrayCompareString, freeItem);
|
||||
|
||||
int32_t newTopicNum = taosArrayGetSize(pTopicList);
|
||||
for(int i = 0; i < newTopicNum; i++){
|
||||
int32_t gNum = mndGetGroupNumByTopic(pMnode, (const char*)taosArrayGetP(pTopicList, i));
|
||||
if(gNum >= MND_MAX_GROUP_PER_TOPIC){
|
||||
for (int i = 0; i < newTopicNum; i++) {
|
||||
int32_t gNum = mndGetGroupNumByTopic(pMnode, (const char *)taosArrayGetP(pTopicList, i));
|
||||
if (gNum >= MND_MAX_GROUP_PER_TOPIC) {
|
||||
terrno = TSDB_CODE_TMQ_GROUP_OUT_OF_RANGE;
|
||||
code = terrno;
|
||||
goto _over;
|
||||
|
@ -605,7 +600,7 @@ int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
|
|||
pConsumerNew->autoCommitInterval = subscribe.autoCommitInterval;
|
||||
pConsumerNew->resetOffsetCfg = subscribe.resetOffsetCfg;
|
||||
|
||||
// pConsumerNew->updateType = CONSUMER_UPDATE_SUB; // use insert logic
|
||||
// pConsumerNew->updateType = CONSUMER_UPDATE_SUB; // use insert logic
|
||||
taosArrayDestroy(pConsumerNew->assignedTopics);
|
||||
pConsumerNew->assignedTopics = taosArrayDup(pTopicList, topicNameDup);
|
||||
|
||||
|
@ -792,16 +787,15 @@ CM_DECODE_OVER:
|
|||
}
|
||||
|
||||
static int32_t mndConsumerActionInsert(SSdb *pSdb, SMqConsumerObj *pConsumer) {
|
||||
mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d",
|
||||
pConsumer->consumerId, pConsumer->cgroup, pConsumer->status, mndConsumerStatusName(pConsumer->status),
|
||||
pConsumer->epoch);
|
||||
mInfo("consumer:0x%" PRIx64 " sub insert, cgroup:%s status:%d(%s) epoch:%d", pConsumer->consumerId, pConsumer->cgroup,
|
||||
pConsumer->status, mndConsumerStatusName(pConsumer->status), pConsumer->epoch);
|
||||
pConsumer->subscribeTime = pConsumer->createTime;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mndConsumerActionDelete(SSdb *pSdb, SMqConsumerObj *pConsumer) {
|
||||
mInfo("consumer:0x%" PRIx64 " perform delete action, status:(%d)%s", pConsumer->consumerId, pConsumer->status,
|
||||
mndConsumerStatusName(pConsumer->status));
|
||||
mndConsumerStatusName(pConsumer->status));
|
||||
tDeleteSMqConsumerObj(pConsumer, false);
|
||||
return 0;
|
||||
}
|
||||
|
@ -828,7 +822,7 @@ static void removeFromNewTopicList(SMqConsumerObj *pConsumer, const char *pTopic
|
|||
taosMemoryFree(p);
|
||||
|
||||
mInfo("consumer:0x%" PRIx64 " remove new topic:%s in the topic list, remain newTopics:%d", pConsumer->consumerId,
|
||||
pTopic, (int)taosArrayGetSize(pConsumer->rebNewTopics));
|
||||
pTopic, (int)taosArrayGetSize(pConsumer->rebNewTopics));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -844,7 +838,7 @@ static void removeFromRemoveTopicList(SMqConsumerObj *pConsumer, const char *pTo
|
|||
taosMemoryFree(p);
|
||||
|
||||
mInfo("consumer:0x%" PRIx64 " remove topic:%s in the removed topic list, remain removedTopics:%d",
|
||||
pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebRemovedTopics));
|
||||
pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->rebRemovedTopics));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -859,13 +853,13 @@ static void removeFromCurrentTopicList(SMqConsumerObj *pConsumer, const char *pT
|
|||
taosMemoryFree(topic);
|
||||
|
||||
mInfo("consumer:0x%" PRIx64 " remove topic:%s in the current topic list, remain currentTopics:%d",
|
||||
pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->currentTopics));
|
||||
pConsumer->consumerId, pTopic, (int)taosArrayGetSize(pConsumer->currentTopics));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool existInCurrentTopicList(const SMqConsumerObj* pConsumer, const char* pTopic) {
|
||||
static bool existInCurrentTopicList(const SMqConsumerObj *pConsumer, const char *pTopic) {
|
||||
bool existing = false;
|
||||
int32_t size = taosArrayGetSize(pConsumer->currentTopics);
|
||||
for (int32_t i = 0; i < size; i++) {
|
||||
|
@ -882,7 +876,7 @@ static bool existInCurrentTopicList(const SMqConsumerObj* pConsumer, const char*
|
|||
|
||||
static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer, SMqConsumerObj *pNewConsumer) {
|
||||
mInfo("consumer:0x%" PRIx64 " perform update action, update type:%d, subscribe-time:%" PRId64 ", createTime:%" PRId64,
|
||||
pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->createTime);
|
||||
pOldConsumer->consumerId, pNewConsumer->updateType, pOldConsumer->subscribeTime, pOldConsumer->createTime);
|
||||
|
||||
taosWLockLatch(&pOldConsumer->lock);
|
||||
|
||||
|
@ -893,19 +887,21 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
|
|||
|
||||
pOldConsumer->subscribeTime = taosGetTimestampMs();
|
||||
pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE;
|
||||
mInfo("consumer:0x%" PRIx64 " sub update, modify existed consumer",pOldConsumer->consumerId);
|
||||
// } else if (pNewConsumer->updateType == CONSUMER_UPDATE_TIMER_LOST) {
|
||||
// int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics);
|
||||
// for (int32_t i = 0; i < sz; i++) {
|
||||
// char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i));
|
||||
// taosArrayPush(pOldConsumer->rebRemovedTopics, &topic);
|
||||
// }
|
||||
//
|
||||
// int32_t prevStatus = pOldConsumer->status;
|
||||
// pOldConsumer->status = MQ_CONSUMER_STATUS_LOST;
|
||||
// mInfo("consumer:0x%" PRIx64 " timer update, timer lost. state %s -> %s, reb-time:%" PRId64 ", reb-removed-topics:%d",
|
||||
// pOldConsumer->consumerId, mndConsumerStatusName(prevStatus), mndConsumerStatusName(pOldConsumer->status),
|
||||
// pOldConsumer->rebalanceTime, (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics));
|
||||
mInfo("consumer:0x%" PRIx64 " sub update, modify existed consumer", pOldConsumer->consumerId);
|
||||
// } else if (pNewConsumer->updateType == CONSUMER_UPDATE_TIMER_LOST) {
|
||||
// int32_t sz = taosArrayGetSize(pOldConsumer->currentTopics);
|
||||
// for (int32_t i = 0; i < sz; i++) {
|
||||
// char *topic = taosStrdup(taosArrayGetP(pOldConsumer->currentTopics, i));
|
||||
// taosArrayPush(pOldConsumer->rebRemovedTopics, &topic);
|
||||
// }
|
||||
//
|
||||
// int32_t prevStatus = pOldConsumer->status;
|
||||
// pOldConsumer->status = MQ_CONSUMER_STATUS_LOST;
|
||||
// mInfo("consumer:0x%" PRIx64 " timer update, timer lost. state %s -> %s, reb-time:%" PRId64 ",
|
||||
// reb-removed-topics:%d",
|
||||
// pOldConsumer->consumerId, mndConsumerStatusName(prevStatus),
|
||||
// mndConsumerStatusName(pOldConsumer->status), pOldConsumer->rebalanceTime,
|
||||
// (int)taosArrayGetSize(pOldConsumer->rebRemovedTopics));
|
||||
} else if (pNewConsumer->updateType == CONSUMER_UPDATE_REC) {
|
||||
int32_t sz = taosArrayGetSize(pOldConsumer->assignedTopics);
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
|
@ -914,7 +910,7 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
|
|||
}
|
||||
|
||||
pOldConsumer->status = MQ_CONSUMER_STATUS_REBALANCE;
|
||||
mInfo("consumer:0x%" PRIx64 " timer update, timer recover",pOldConsumer->consumerId);
|
||||
mInfo("consumer:0x%" PRIx64 " timer update, timer recover", pOldConsumer->consumerId);
|
||||
} else if (pNewConsumer->updateType == CONSUMER_UPDATE_REB) {
|
||||
atomic_add_fetch_32(&pOldConsumer->epoch, 1);
|
||||
|
||||
|
@ -945,11 +941,11 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
|
|||
|
||||
atomic_add_fetch_32(&pOldConsumer->epoch, 1);
|
||||
mInfo("consumer:0x%" PRIx64 " reb update add, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64
|
||||
", current topics:%d, newTopics:%d, removeTopics:%d",
|
||||
pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status,
|
||||
mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime,
|
||||
(int)taosArrayGetSize(pOldConsumer->currentTopics), (int)taosArrayGetSize(pOldConsumer->rebNewTopics),
|
||||
(int)taosArrayGetSize(pOldConsumer->rebRemovedTopics));
|
||||
", current topics:%d, newTopics:%d, removeTopics:%d",
|
||||
pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status,
|
||||
mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime,
|
||||
(int)taosArrayGetSize(pOldConsumer->currentTopics), (int)taosArrayGetSize(pOldConsumer->rebNewTopics),
|
||||
(int)taosArrayGetSize(pOldConsumer->rebRemovedTopics));
|
||||
|
||||
} else if (pNewConsumer->updateType == CONSUMER_REMOVE_REB) {
|
||||
char *removedTopic = taosArrayGetP(pNewConsumer->rebRemovedTopics, 0);
|
||||
|
@ -968,11 +964,11 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
|
|||
atomic_add_fetch_32(&pOldConsumer->epoch, 1);
|
||||
|
||||
mInfo("consumer:0x%" PRIx64 " reb update remove, state (%d)%s -> (%d)%s, new epoch:%d, reb-time:%" PRId64
|
||||
", current topics:%d, newTopics:%d, removeTopics:%d",
|
||||
pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status,
|
||||
mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime,
|
||||
(int)taosArrayGetSize(pOldConsumer->currentTopics), (int)taosArrayGetSize(pOldConsumer->rebNewTopics),
|
||||
(int)taosArrayGetSize(pOldConsumer->rebRemovedTopics));
|
||||
", current topics:%d, newTopics:%d, removeTopics:%d",
|
||||
pOldConsumer->consumerId, status, mndConsumerStatusName(status), pOldConsumer->status,
|
||||
mndConsumerStatusName(pOldConsumer->status), pOldConsumer->epoch, pOldConsumer->rebalanceTime,
|
||||
(int)taosArrayGetSize(pOldConsumer->currentTopics), (int)taosArrayGetSize(pOldConsumer->rebNewTopics),
|
||||
(int)taosArrayGetSize(pOldConsumer->rebRemovedTopics));
|
||||
}
|
||||
|
||||
taosWUnLockLatch(&pOldConsumer->lock);
|
||||
|
@ -1030,8 +1026,8 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
|
|||
int32_t cols = 0;
|
||||
|
||||
// consumer id
|
||||
char consumerIdHex[32] = {0};
|
||||
sprintf(varDataVal(consumerIdHex), "0x%"PRIx64, pConsumer->consumerId);
|
||||
char consumerIdHex[32] = {0};
|
||||
sprintf(varDataVal(consumerIdHex), "0x%" PRIx64, pConsumer->consumerId);
|
||||
varDataSetLen(consumerIdHex, strlen(varDataVal(consumerIdHex)));
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
|
@ -1086,12 +1082,13 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *
|
|||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataSetVal(pColInfo, numOfRows, (const char *)&pConsumer->rebalanceTime, pConsumer->rebalanceTime == 0);
|
||||
|
||||
char buf[TSDB_OFFSET_LEN] = {0};
|
||||
char buf[TSDB_OFFSET_LEN] = {0};
|
||||
STqOffsetVal pVal = {.type = pConsumer->resetOffsetCfg};
|
||||
tFormatOffset(buf, TSDB_OFFSET_LEN, &pVal);
|
||||
|
||||
char parasStr[64 + TSDB_OFFSET_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%dms,reset:%s", pConsumer->withTbName, pConsumer->autoCommit, pConsumer->autoCommitInterval, buf);
|
||||
sprintf(varDataVal(parasStr), "tbname:%d,commit:%d,interval:%dms,reset:%s", pConsumer->withTbName,
|
||||
pConsumer->autoCommit, pConsumer->autoCommitInterval, buf);
|
||||
varDataSetLen(parasStr, strlen(varDataVal(parasStr)));
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
|
|
|
@ -85,6 +85,7 @@ int32_t tEncodeSStreamObj(SEncoder *pEncoder, const SStreamObj *pObj) {
|
|||
|
||||
// 3.0.50 ver = 3
|
||||
if (tEncodeI64(pEncoder, pObj->checkpointId) < 0) return -1;
|
||||
if (tEncodeI8(pEncoder, pObj->subTableWithoutMd5) < 0) return -1;
|
||||
|
||||
if (tEncodeCStrWithLen(pEncoder, pObj->reserve, sizeof(pObj->reserve) - 1) < 0) return -1;
|
||||
|
||||
|
@ -168,6 +169,10 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) {
|
|||
if (sver >= 3) {
|
||||
if (tDecodeI64(pDecoder, &pObj->checkpointId) < 0) return -1;
|
||||
}
|
||||
|
||||
if (sver >= 5) {
|
||||
if (tDecodeI8(pDecoder, &pObj->subTableWithoutMd5) < 0) return -1;
|
||||
}
|
||||
if (tDecodeCStrTo(pDecoder, pObj->reserve) < 0) return -1;
|
||||
|
||||
tEndDecode(pDecoder);
|
||||
|
|
|
@@ -14,19 +14,41 @@
 */

#include "mndScheduler.h"
#include "tmisce.h"
#include "mndMnode.h"
#include "mndDb.h"
#include "mndMnode.h"
#include "mndSnode.h"
#include "mndVgroup.h"
#include "parser.h"
#include "tcompare.h"
#include "tmisce.h"
#include "tname.h"
#include "tuuid.h"

#define SINK_NODE_LEVEL (0)
extern bool tsDeployOnSnode;

static bool hasCountWindowNode(SPhysiNode* pNode) {
  if (nodeType(pNode) == QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT) {
    return true;
  } else {
    size_t size = LIST_LENGTH(pNode->pChildren);

    for (int32_t i = 0; i < size; ++i) {
      SPhysiNode* pChild = (SPhysiNode*)nodesListGetNode(pNode->pChildren, i);
      if (hasCountWindowNode(pChild)) {
        return true;
      }
    }

    return false;
  }
}

static bool countWindowStreamTask(SSubplan* pPlan) {
  SPhysiNode* pNode = pPlan->pNode;
  return hasCountWindowNode(pNode);
}

int32_t mndConvertRsmaTask(char** pDst, int32_t* pDstLen, const char* ast, int64_t uid, int8_t triggerType,
                           int64_t watermark, int64_t deleteMark) {
  SNode* pAst = NULL;
@ -189,7 +211,7 @@ SVgObj* mndSchedFetchOneVg(SMnode* pMnode, SStreamObj* pStream) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if(pStream->indexForMultiAggBalance == -1){
|
||||
if (pStream->indexForMultiAggBalance == -1) {
|
||||
taosSeedRand(taosSafeRand());
|
||||
pStream->indexForMultiAggBalance = taosRand() % pDbObj->cfg.numOfVgroups;
|
||||
}
|
||||
|
@ -204,7 +226,7 @@ SVgObj* mndSchedFetchOneVg(SMnode* pMnode, SStreamObj* pStream) {
|
|||
sdbRelease(pMnode->pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
if (index++ == pStream->indexForMultiAggBalance){
|
||||
if (index++ == pStream->indexForMultiAggBalance) {
|
||||
pStream->indexForMultiAggBalance++;
|
||||
pStream->indexForMultiAggBalance %= pDbObj->cfg.numOfVgroups;
|
||||
sdbCancelFetch(pMnode->pSdb, pIter);
|
||||
|
@ -217,12 +239,12 @@ SVgObj* mndSchedFetchOneVg(SMnode* pMnode, SStreamObj* pStream) {
|
|||
return pVgroup;
|
||||
}
|
||||
|
||||
static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgroup,
|
||||
SEpSet* pEpset, bool isFillhistory) {
|
||||
int64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid;
|
||||
static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgroup, SEpSet* pEpset, bool isFillhistory) {
|
||||
int64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid;
|
||||
SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);
|
||||
|
||||
SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SINK, pEpset, isFillhistory, 0, *pTaskList, pStream->conf.fillHistory);
|
||||
SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SINK, pEpset, isFillhistory, 0, *pTaskList,
|
||||
pStream->conf.fillHistory, pStream->subTableWithoutMd5);
|
||||
if (pTask == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
|
@ -235,12 +257,12 @@ static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgrou
  return mndSetSinkTaskInfo(pStream, pTask);
}

static int32_t doAddSinkTaskToVg(SMnode* pMnode, SStreamObj* pStream, SEpSet* pEpset, SVgObj* vgObj){
static int32_t doAddSinkTaskToVg(SMnode* pMnode, SStreamObj* pStream, SEpSet* pEpset, SVgObj* vgObj) {
  int32_t code = doAddSinkTask(pStream, pMnode, vgObj, pEpset, false);
  if (code != 0) {
    return code;
  }
  if(pStream->conf.fillHistory){
  if (pStream->conf.fillHistory) {
    code = doAddSinkTask(pStream, pMnode, vgObj, pEpset, true);
    if (code != 0) {
      return code;
@ -267,7 +289,7 @@ static int32_t doAddShuffleSinkTask(SMnode* pMnode, SStreamObj* pStream, SEpSet*
    }

    int32_t code = doAddSinkTaskToVg(pMnode, pStream, pEpset, pVgroup);
    if(code != 0){
    if (code != 0) {
      sdbRelease(pSdb, pVgroup);
      return code;
    }
@ -279,7 +301,7 @@ static int32_t doAddShuffleSinkTask(SMnode* pMnode, SStreamObj* pStream, SEpSet*
}

static int64_t getVgroupLastVer(const SArray* pList, int32_t vgId) {
  for(int32_t i = 0; i < taosArrayGetSize(pList); ++i) {
  for (int32_t i = 0; i < taosArrayGetSize(pList); ++i) {
    SVgroupVer* pVer = taosArrayGet(pList, i);
    if (pVer->vgId == vgId) {
      return pVer->ver;
@ -315,19 +337,29 @@ static void streamTaskSetDataRange(SStreamTask* pTask, int64_t skey, SArray* pVe
    pRange->range.minVer = latestVer + 1;
    pRange->range.maxVer = INT64_MAX;

    mDebug("add source task 0x%x timeWindow:%" PRId64 "-%" PRId64 " verRange:%" PRId64 "-%" PRId64,
           pTask->id.taskId, pWindow->skey, pWindow->ekey, pRange->range.minVer, pRange->range.maxVer);
    mDebug("add source task 0x%x timeWindow:%" PRId64 "-%" PRId64 " verRange:%" PRId64 "-%" PRId64, pTask->id.taskId,
           pWindow->skey, pWindow->ekey, pRange->range.minVer, pRange->range.maxVer);
  }
}

static SStreamTask* buildSourceTask(SStreamObj* pStream, SEpSet* pEpset,
                                    bool isFillhistory, bool useTriggerParam) {
static void haltInitialTaskStatus(SStreamTask* pTask, SSubplan* pPlan) {
  bool hasCountWindowNode = countWindowStreamTask(pPlan);
  bool isRelStreamTask = (pTask->hTaskInfo.id.taskId != 0);
  if (hasCountWindowNode && isRelStreamTask) {
    SStreamStatus* pStatus = &pTask->status;
    mDebug("s-task:0x%x status is set to %s from %s for count window agg task with fill-history option set",
           pTask->id.taskId, streamTaskGetStatusStr(pStatus->taskStatus), streamTaskGetStatusStr(TASK_STATUS__HALT));
    pStatus->taskStatus = TASK_STATUS__HALT;
  }
}

static SStreamTask* buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillhistory, bool useTriggerParam) {
  uint64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid;
  SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);

  SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset,
                                      isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0,
                                      *pTaskList, pStream->conf.fillHistory);
  SStreamTask* pTask =
      tNewStreamTask(uid, TASK_LEVEL__SOURCE, pEpset, isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0,
                     *pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5);
  if (pTask == NULL) {
    return NULL;
  }
@ -335,7 +367,7 @@ static SStreamTask* buildSourceTask(SStreamObj* pStream, SEpSet* pEpset,
  return pTask;
}

static void addNewTaskList(SStreamObj* pStream){
static void addNewTaskList(SStreamObj* pStream) {
  SArray* pTaskList = taosArrayInit(0, POINTER_BYTES);
  taosArrayPush(pStream->tasks, &pTaskList);
  if (pStream->conf.fillHistory) {
@ -364,28 +396,30 @@ static void setHTasksId(SStreamObj* pStream) {
  }
}

static int32_t doAddSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset,
                               int64_t skey, SArray* pVerList, SVgObj* pVgroup, bool isFillhistory, bool useTriggerParam ){
static int32_t doAddSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset, int64_t skey,
                               SArray* pVerList, SVgObj* pVgroup, bool isFillhistory, bool useTriggerParam) {
  // new stream task
  SStreamTask* pTask = buildSourceTask(pStream, pEpset, isFillhistory, useTriggerParam);
  if(pTask == NULL){
  if (pTask == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    return terrno;
  }
  mDebug("doAddSourceTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory);

  haltInitialTaskStatus(pTask, plan);

  streamTaskSetDataRange(pTask, skey, pVerList, pVgroup->vgId);

  int32_t code = mndAssignStreamTaskToVgroup(pMnode, pTask, plan, pVgroup);
  if(code != 0){
  if (code != 0) {
    terrno = code;
    return terrno;
  }
  return TDB_CODE_SUCCESS;
}

static SSubplan* getScanSubPlan(const SQueryPlan* pPlan){
  int32_t numOfPlanLevel = LIST_LENGTH(pPlan->pSubplans);
static SSubplan* getScanSubPlan(const SQueryPlan* pPlan) {
  int32_t numOfPlanLevel = LIST_LENGTH(pPlan->pSubplans);
  SNodeListNode* inner = (SNodeListNode*)nodesListGetNode(pPlan->pSubplans, numOfPlanLevel - 1);
  if (LIST_LENGTH(inner->pNodeList) != 1) {
    terrno = TSDB_CODE_QRY_INVALID_INPUT;
@ -400,7 +434,7 @@ static SSubplan* getScanSubPlan(const SQueryPlan* pPlan){
  return plan;
}

static SSubplan* getAggSubPlan(const SQueryPlan* pPlan, int index){
static SSubplan* getAggSubPlan(const SQueryPlan* pPlan, int index) {
  SNodeListNode* inner = (SNodeListNode*)nodesListGetNode(pPlan->pSubplans, index);
  if (LIST_LENGTH(inner->pNodeList) != 1) {
    terrno = TSDB_CODE_QRY_INVALID_INPUT;
@ -415,8 +449,8 @@ static SSubplan* getAggSubPlan(const SQueryPlan* pPlan, int index){
  return plan;
}

static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream,
                             SEpSet* pEpset, int64_t nextWindowSkey, SArray* pVerList, bool useTriggerParam) {
static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream, SEpSet* pEpset,
                             int64_t nextWindowSkey, SArray* pVerList, bool useTriggerParam) {
  addNewTaskList(pStream);

  void* pIter = NULL;
@ -433,15 +467,16 @@ static int32_t addSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStream
      continue;
    }

    int code = doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam);
    if(code != 0){
    int code =
        doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, false, useTriggerParam);
    if (code != 0) {
      sdbRelease(pSdb, pVgroup);
      return code;
    }

    if (pStream->conf.fillHistory) {
      code = doAddSourceTask(pMnode, plan, pStream, pEpset, nextWindowSkey, pVerList, pVgroup, true, useTriggerParam);
      if(code != 0){
      if (code != 0) {
        sdbRelease(pSdb, pVgroup);
        return code;
      }
@ -461,9 +496,9 @@ static SStreamTask* buildAggTask(SStreamObj* pStream, SEpSet* pEpset, bool isFil
|
|||
uint64_t uid = (isFillhistory) ? pStream->hTaskUid : pStream->uid;
|
||||
SArray** pTaskList = (isFillhistory) ? taosArrayGetLast(pStream->pHTasksList) : taosArrayGetLast(pStream->tasks);
|
||||
|
||||
SStreamTask* pAggTask = tNewStreamTask(uid, TASK_LEVEL__AGG, pEpset, isFillhistory,
|
||||
useTriggerParam ? pStream->conf.triggerParam : 0,
|
||||
*pTaskList, pStream->conf.fillHistory);
|
||||
SStreamTask* pAggTask =
|
||||
tNewStreamTask(uid, TASK_LEVEL__AGG, pEpset, isFillhistory, useTriggerParam ? pStream->conf.triggerParam : 0,
|
||||
*pTaskList, pStream->conf.fillHistory, pStream->subTableWithoutMd5);
|
||||
if (pAggTask == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
|
@ -472,8 +507,8 @@ static SStreamTask* buildAggTask(SStreamObj* pStream, SEpSet* pEpset, bool isFil
|
|||
return pAggTask;
|
||||
}
|
||||
|
||||
static int32_t doAddAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, SEpSet* pEpset,
|
||||
SVgObj* pVgroup, SSnodeObj* pSnode, bool isFillhistory, bool useTriggerParam){
|
||||
static int32_t doAddAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, SEpSet* pEpset, SVgObj* pVgroup,
|
||||
SSnodeObj* pSnode, bool isFillhistory, bool useTriggerParam) {
|
||||
int32_t code = 0;
|
||||
SStreamTask* pTask = buildAggTask(pStream, pEpset, isFillhistory, useTriggerParam);
|
||||
if (pTask == NULL) {
|
||||
|
@ -490,7 +525,7 @@ static int32_t doAddAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan,
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, SEpSet* pEpset, bool useTriggerParam){
|
||||
static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, SEpSet* pEpset, bool useTriggerParam) {
|
||||
SVgObj* pVgroup = NULL;
|
||||
SSnodeObj* pSnode = NULL;
|
||||
int32_t code = 0;
|
||||
|
@ -504,20 +539,20 @@ static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, S
|
|||
}
|
||||
|
||||
code = doAddAggTask(pStream, pMnode, plan, pEpset, pVgroup, pSnode, false, useTriggerParam);
|
||||
if(code != 0){
|
||||
if (code != 0) {
|
||||
goto END;
|
||||
}
|
||||
|
||||
if (pStream->conf.fillHistory) {
|
||||
code = doAddAggTask(pStream, pMnode, plan, pEpset, pVgroup, pSnode, true, useTriggerParam);
|
||||
if(code != 0){
|
||||
if (code != 0) {
|
||||
goto END;
|
||||
}
|
||||
|
||||
setHTasksId(pStream);
|
||||
}
|
||||
|
||||
END:
|
||||
END:
|
||||
if (pSnode != NULL) {
|
||||
sdbRelease(pMnode->pSdb, pSnode);
|
||||
} else {
|
||||
|
@ -526,7 +561,7 @@ static int32_t addAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, S
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t addSinkTask(SMnode* pMnode, SStreamObj* pStream, SEpSet* pEpset){
|
||||
static int32_t addSinkTask(SMnode* pMnode, SStreamObj* pStream, SEpSet* pEpset) {
|
||||
int32_t code = 0;
|
||||
addNewTaskList(pStream);
|
||||
|
||||
|
@ -548,9 +583,9 @@ static int32_t addSinkTask(SMnode* pMnode, SStreamObj* pStream, SEpSet* pEpset){
|
|||
return TDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void bindTaskToSinkTask(SStreamObj* pStream, SMnode* pMnode, SArray* pSinkTaskList, SStreamTask* task){
|
||||
static void bindTaskToSinkTask(SStreamObj* pStream, SMnode* pMnode, SArray* pSinkTaskList, SStreamTask* task) {
|
||||
mndAddDispatcherForInternalTask(pMnode, pStream, pSinkTaskList, task);
|
||||
for(int32_t k = 0; k < taosArrayGetSize(pSinkTaskList); k++) {
|
||||
for (int32_t k = 0; k < taosArrayGetSize(pSinkTaskList); k++) {
|
||||
SStreamTask* pSinkTask = taosArrayGetP(pSinkTaskList, k);
|
||||
streamTaskSetUpstreamInfo(pSinkTask, task);
|
||||
}
|
||||
|
@ -558,10 +593,10 @@ static void bindTaskToSinkTask(SStreamObj* pStream, SMnode* pMnode, SArray* pSin
|
|||
}
|
||||
|
||||
static void bindAggSink(SStreamObj* pStream, SMnode* pMnode, SArray* tasks) {
|
||||
SArray* pSinkTaskList = taosArrayGetP(tasks, SINK_NODE_LEVEL);
|
||||
SArray* pSinkTaskList = taosArrayGetP(tasks, SINK_NODE_LEVEL);
|
||||
SArray** pAggTaskList = taosArrayGetLast(tasks);
|
||||
|
||||
for(int i = 0; i < taosArrayGetSize(*pAggTaskList); i++){
|
||||
for (int i = 0; i < taosArrayGetSize(*pAggTaskList); i++) {
|
||||
SStreamTask* pAggTask = taosArrayGetP(*pAggTaskList, i);
|
||||
bindTaskToSinkTask(pStream, pMnode, pSinkTaskList, pAggTask);
|
||||
mDebug("bindAggSink taskId:%s to sink task list", pAggTask->id.idStr);
|
||||
|
@ -572,7 +607,7 @@ static void bindSourceSink(SStreamObj* pStream, SMnode* pMnode, SArray* tasks, b
|
|||
SArray* pSinkTaskList = taosArrayGetP(tasks, SINK_NODE_LEVEL);
|
||||
SArray* pSourceTaskList = taosArrayGetP(tasks, hasExtraSink ? SINK_NODE_LEVEL + 1 : SINK_NODE_LEVEL);
|
||||
|
||||
for(int i = 0; i < taosArrayGetSize(pSourceTaskList); i++){
|
||||
for (int i = 0; i < taosArrayGetSize(pSourceTaskList); i++) {
|
||||
SStreamTask* pSourceTask = taosArrayGetP(pSourceTaskList, i);
|
||||
mDebug("bindSourceSink taskId:%s to sink task list", pSourceTask->id.idStr);
|
||||
|
||||
|
@ -591,8 +626,8 @@ static void bindTwoLevel(SArray* tasks, int32_t begin, int32_t end) {
|
|||
SArray* pUpTaskList = taosArrayGetP(tasks, size - 2);
|
||||
|
||||
SStreamTask** pDownTask = taosArrayGetLast(pDownTaskList);
|
||||
end = end > taosArrayGetSize(pUpTaskList) ? taosArrayGetSize(pUpTaskList): end;
|
||||
for(int i = begin; i < end; i++){
|
||||
end = end > taosArrayGetSize(pUpTaskList) ? taosArrayGetSize(pUpTaskList) : end;
|
||||
for (int i = begin; i < end; i++) {
|
||||
SStreamTask* pUpTask = taosArrayGetP(pUpTaskList, i);
|
||||
pUpTask->info.selfChildId = i - begin;
|
||||
streamTaskSetFixedDownstreamInfo(pUpTask, *pDownTask);
|
||||
|
@ -616,8 +651,8 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
|
|||
bool multiTarget = (pDbObj->cfg.numOfVgroups > 1);
|
||||
sdbRelease(pSdb, pDbObj);
|
||||
|
||||
mDebug("doScheduleStream numOfPlanLevel:%d, exDb:%d, multiTarget:%d, fix vgId:%d, physicalPlan:%s",
|
||||
numOfPlanLevel, externalTargetDB, multiTarget, pStream->fixedSinkVgId, pStream->physicalPlan);
|
||||
mDebug("doScheduleStream numOfPlanLevel:%d, exDb:%d, multiTarget:%d, fix vgId:%d, physicalPlan:%s", numOfPlanLevel,
|
||||
externalTargetDB, multiTarget, pStream->fixedSinkVgId, pStream->physicalPlan);
|
||||
pStream->tasks = taosArrayInit(numOfPlanLevel + 1, POINTER_BYTES);
|
||||
pStream->pHTasksList = taosArrayInit(numOfPlanLevel + 1, POINTER_BYTES);
|
||||
|
||||
|
@ -632,7 +667,7 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
|
|||
|
||||
pStream->totalLevel = numOfPlanLevel + hasExtraSink;
|
||||
|
||||
SSubplan* plan = getScanSubPlan(pPlan); // source plan
|
||||
SSubplan* plan = getScanSubPlan(pPlan); // source plan
|
||||
if (plan == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
@ -649,32 +684,32 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
|
|||
return TDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if(numOfPlanLevel == 3){
|
||||
if (numOfPlanLevel == 3) {
|
||||
plan = getAggSubPlan(pPlan, 1); // middle agg plan
|
||||
if (plan == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
do{
|
||||
do {
|
||||
SArray** list = taosArrayGetLast(pStream->tasks);
|
||||
float size = (float)taosArrayGetSize(*list);
|
||||
size_t cnt = (size_t)ceil(size/tsStreamAggCnt);
|
||||
if(cnt <= 1) break;
|
||||
float size = (float)taosArrayGetSize(*list);
|
||||
size_t cnt = (size_t)ceil(size / tsStreamAggCnt);
|
||||
if (cnt <= 1) break;
|
||||
|
||||
mDebug("doScheduleStream add middle agg, size:%d, cnt:%d", (int)size, (int)cnt);
|
||||
addNewTaskList(pStream);
|
||||
|
||||
for(int j = 0; j < cnt; j++){
|
||||
for (int j = 0; j < cnt; j++) {
|
||||
code = addAggTask(pStream, pMnode, plan, pEpset, false);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
||||
bindTwoLevel(pStream->tasks, j*tsStreamAggCnt, (j+1)*tsStreamAggCnt);
|
||||
bindTwoLevel(pStream->tasks, j * tsStreamAggCnt, (j + 1) * tsStreamAggCnt);
|
||||
if (pStream->conf.fillHistory) {
|
||||
bindTwoLevel(pStream->pHTasksList, j*tsStreamAggCnt, (j+1)*tsStreamAggCnt);
|
||||
bindTwoLevel(pStream->pHTasksList, j * tsStreamAggCnt, (j + 1) * tsStreamAggCnt);
|
||||
}
|
||||
}
|
||||
}while(1);
|
||||
} while (1);
|
||||
}
|
||||
|
||||
plan = getAggSubPlan(pPlan, 0);
|
||||
|
@ -684,7 +719,7 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan*
|
|||
|
||||
mDebug("doScheduleStream add final agg");
|
||||
SArray** list = taosArrayGetLast(pStream->tasks);
|
||||
size_t size = taosArrayGetSize(*list);
|
||||
size_t size = taosArrayGetSize(*list);
|
||||
addNewTaskList(pStream);
|
||||
code = addAggTask(pStream, pMnode, plan, pEpset, true);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
@ -567,6 +567,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
  streamObj.conf.triggerParam = pCreate->maxDelay;
  streamObj.ast = taosStrdup(smaObj.ast);
  streamObj.indexForMultiAggBalance = -1;
  streamObj.subTableWithoutMd5 = 1;

  // check the maxDelay
  if (streamObj.conf.triggerParam < TSDB_MIN_ROLLUP_MAX_DELAY) {
@ -898,11 +899,11 @@ _OVER:
}

int32_t mndDropSmasByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
  SSdb *pSdb = pMnode->pSdb;
  SSmaObj *pSma = NULL;
  void *pIter = NULL;
  SVgObj *pVgroup = NULL;
  int32_t code = -1;
  SSdb *pSdb = pMnode->pSdb;
  SSmaObj *pSma = NULL;
  void *pIter = NULL;
  SVgObj *pVgroup = NULL;
  int32_t code = -1;

  while (1) {
    pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma);
@ -27,7 +27,7 @@
#include "tmisce.h"
#include "tname.h"

#define MND_STREAM_MAX_NUM 60
#define MND_STREAM_MAX_NUM 60

typedef struct SMStreamNodeCheckMsg {
  int8_t placeHolder;  // // to fix windows compile error, define place holder
@ -152,7 +152,7 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw) {
    goto STREAM_DECODE_OVER;
  }

  if (sver != MND_STREAM_VER_NUMBER) {
  if (sver < 1 || sver > MND_STREAM_VER_NUMBER) {
    terrno = 0;
    mError("stream read invalid ver, data ver: %d, curr ver: %d", sver, MND_STREAM_VER_NUMBER);
    goto STREAM_DECODE_OVER;
@ -192,7 +192,7 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw) {
STREAM_DECODE_OVER:
  taosMemoryFreeClear(buf);
  if (terrno != TSDB_CODE_SUCCESS) {
    char* p = (pStream == NULL) ? "null" : pStream->name;
    char *p = (pStream == NULL) ? "null" : pStream->name;
    mError("stream:%s, failed to decode from raw:%p since %s", p, pRaw, terrstr());
    taosMemoryFreeClear(pRow);
    return NULL;
@ -634,7 +634,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
  terrno = TSDB_CODE_SUCCESS;

  if ((terrno = grantCheck(TSDB_GRANT_STREAMS)) < 0) {
    goto _OVER;
    return terrno;
  }

  SCMCreateStreamReq createReq = {0};
|
@ -802,8 +802,7 @@ static int32_t mndProcessStreamCheckpointTmr(SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
static int32_t mndBuildStreamCheckpointSourceReq(void **pBuf, int32_t *pLen, int32_t nodeId, int64_t checkpointId,
|
||||
int64_t streamId, int32_t taskId, int32_t transId,
|
||||
int8_t mndTrigger) {
|
||||
int64_t streamId, int32_t taskId, int32_t transId, int8_t mndTrigger) {
|
||||
SStreamCheckpointSourceReq req = {0};
|
||||
req.checkpointId = checkpointId;
|
||||
req.nodeId = nodeId;
|
||||
|
@ -878,11 +877,10 @@ static int32_t mndProcessStreamCheckpointTrans(SMnode *pMnode, SStreamObj *pStre
|
|||
int32_t code = -1;
|
||||
int64_t ts = taosGetTimestampMs();
|
||||
if (mndTrigger == 1 && (ts - pStream->checkpointFreq < tsStreamCheckpointInterval * 1000)) {
|
||||
// mWarn("checkpoint interval less than the threshold, ignore it");
|
||||
// mWarn("checkpoint interval less than the threshold, ignore it");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
bool conflict = mndStreamTransConflictCheck(pMnode, pStream->uid, MND_STREAM_CHECKPOINT_NAME, lock);
|
||||
if (conflict) {
|
||||
mndAddtoCheckpointWaitingList(pStream, checkpointId);
|
||||
|
@ -1144,7 +1142,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
STrans* pTrans = doCreateTrans(pMnode, pStream, pReq, MND_STREAM_DROP_NAME, "drop stream");
|
||||
STrans *pTrans = doCreateTrans(pMnode, pStream, pReq, MND_STREAM_DROP_NAME, "drop stream");
|
||||
if (pTrans == NULL) {
|
||||
mError("stream:%s, failed to drop since %s", dropReq.name, terrstr());
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
|
@ -1570,7 +1568,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
STrans* pTrans = doCreateTrans(pMnode, pStream, pReq, MND_STREAM_PAUSE_NAME, "pause the stream");
|
||||
STrans *pTrans = doCreateTrans(pMnode, pStream, pReq, MND_STREAM_PAUSE_NAME, "pause the stream");
|
||||
if (pTrans == NULL) {
|
||||
mError("stream:%s failed to pause stream since %s", pauseReq.name, terrstr());
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
|
@ -1590,7 +1588,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
|
|||
// pause stream
|
||||
taosWLockLatch(&pStream->lock);
|
||||
pStream->status = STREAM_STATUS__PAUSE;
|
||||
if (mndPersistTransLog(pStream, pTrans,SDB_STATUS_READY) < 0) {
|
||||
if (mndPersistTransLog(pStream, pTrans, SDB_STATUS_READY) < 0) {
|
||||
taosWUnLockLatch(&pStream->lock);
|
||||
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
|
@ -1617,7 +1615,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
|
|||
SMnode *pMnode = pReq->info.node;
|
||||
SStreamObj *pStream = NULL;
|
||||
|
||||
if(grantCheckExpire(TSDB_GRANT_STREAMS) < 0){
|
||||
if (grantCheckExpire(TSDB_GRANT_STREAMS) < 0) {
|
||||
terrno = TSDB_CODE_GRANT_EXPIRED;
|
||||
return -1;
|
||||
}
|
||||
|
@ -1659,7 +1657,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
STrans* pTrans = doCreateTrans(pMnode, pStream, pReq, MND_STREAM_RESUME_NAME, "resume the stream");
|
||||
STrans *pTrans = doCreateTrans(pMnode, pStream, pReq, MND_STREAM_RESUME_NAME, "resume the stream");
|
||||
if (pTrans == NULL) {
|
||||
mError("stream:%s, failed to resume stream since %s", resumeReq.name, terrstr());
|
||||
sdbRelease(pMnode->pSdb, pStream);
|
||||
|
@ -2106,10 +2104,10 @@ void removeStreamTasksInBuf(SStreamObj *pStream, SStreamExecInfo *pExecNode) {
|
|||
ASSERT(taosHashGetSize(pExecNode->pTaskMap) == taosArrayGetSize(pExecNode->pTaskList));
|
||||
}
|
||||
|
||||
static void doAddTaskId(SArray* pList, int32_t taskId, int64_t uid, int32_t numOfTotal) {
|
||||
static void doAddTaskId(SArray *pList, int32_t taskId, int64_t uid, int32_t numOfTotal) {
|
||||
int32_t num = taosArrayGetSize(pList);
|
||||
for(int32_t i = 0; i < num; ++i) {
|
||||
int32_t* pId = taosArrayGet(pList, i);
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
int32_t *pId = taosArrayGet(pList, i);
|
||||
if (taskId == *pId) {
|
||||
return;
|
||||
}
|
||||
|
@ -2122,7 +2120,7 @@ static void doAddTaskId(SArray* pList, int32_t taskId, int64_t uid, int32_t numO
|
|||
}
|
||||
|
||||
int32_t mndProcessStreamReqCheckpoint(SRpcMsg *pReq) {
|
||||
SMnode *pMnode = pReq->info.node;
|
||||
SMnode *pMnode = pReq->info.node;
|
||||
SStreamTaskCheckpointReq req = {0};
|
||||
|
||||
SDecoder decoder = {0};
|
||||
|
@ -2143,7 +2141,7 @@ int32_t mndProcessStreamReqCheckpoint(SRpcMsg *pReq) {
|
|||
|
||||
SStreamObj *pStream = mndGetStreamObj(pMnode, req.streamId);
|
||||
if (pStream == NULL) {
|
||||
mError("failed to find the stream:0x%"PRIx64" not handle the checkpoint req", req.streamId);
|
||||
mError("failed to find the stream:0x%" PRIx64 " not handle the checkpoint req", req.streamId);
|
||||
terrno = TSDB_CODE_MND_STREAM_NOT_EXIST;
|
||||
taosThreadMutexUnlock(&execInfo.lock);
|
||||
|
||||
|
@ -2151,13 +2149,13 @@ int32_t mndProcessStreamReqCheckpoint(SRpcMsg *pReq) {
|
|||
}
|
||||
|
||||
int32_t numOfTasks = mndGetNumOfStreamTasks(pStream);
|
||||
SArray **pReqTaskList = (SArray**)taosHashGet(execInfo.pTransferStateStreams, &req.streamId, sizeof(req.streamId));
|
||||
SArray **pReqTaskList = (SArray **)taosHashGet(execInfo.pTransferStateStreams, &req.streamId, sizeof(req.streamId));
|
||||
if (pReqTaskList == NULL) {
|
||||
SArray *pList = taosArrayInit(4, sizeof(int32_t));
|
||||
doAddTaskId(pList, req.taskId, pStream->uid, numOfTasks);
|
||||
taosHashPut(execInfo.pTransferStateStreams, &req.streamId, sizeof(int64_t), &pList, sizeof(void *));
|
||||
|
||||
pReqTaskList = (SArray**)taosHashGet(execInfo.pTransferStateStreams, &req.streamId, sizeof(req.streamId));
|
||||
pReqTaskList = (SArray **)taosHashGet(execInfo.pTransferStateStreams, &req.streamId, sizeof(req.streamId));
|
||||
} else {
|
||||
doAddTaskId(*pReqTaskList, req.taskId, pStream->uid, numOfTasks);
|
||||
}
|
||||
@ -564,6 +564,10 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
    return code;
  }

  if ((terrno = grantCheck(TSDB_GRANT_SUBSCRIPTION)) < 0) {
    return code;
  }

  if (tDeserializeSCMCreateTopicReq(pReq->pCont, pReq->contLen, &createTopicReq) != 0) {
    terrno = TSDB_CODE_INVALID_MSG;
    goto _OVER;
@ -85,8 +85,9 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer

  SCheckpointInfo *pChkInfo = &pTask->chkInfo;
  // checkpoint ver is the kept version, handled data should be the next version.
  if (pTask->chkInfo.checkpointId != 0) {
    pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1;
  if (pChkInfo->checkpointId != 0) {
    pChkInfo->nextProcessVer = pChkInfo->checkpointVer + 1;
    pChkInfo->processedVer = pChkInfo->checkpointVer;
    sndInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " nextProcessVer:%" PRId64,
            pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
  }
@ -67,12 +67,17 @@ void initStateStoreAPI(SStateStore* pStore) {
|
|||
pStore->streamStateSessionPut = streamStateSessionPut;
|
||||
pStore->streamStateSessionGet = streamStateSessionGet;
|
||||
pStore->streamStateSessionDel = streamStateSessionDel;
|
||||
pStore->streamStateSessionReset = streamStateSessionReset;
|
||||
pStore->streamStateSessionClear = streamStateSessionClear;
|
||||
pStore->streamStateSessionGetKVByCur = streamStateSessionGetKVByCur;
|
||||
pStore->streamStateStateAddIfNotExist = streamStateStateAddIfNotExist;
|
||||
pStore->streamStateSessionGetKeyByRange = streamStateSessionGetKeyByRange;
|
||||
pStore->streamStateCountGetKeyByRange = streamStateCountGetKeyByRange;
|
||||
pStore->streamStateSessionAllocWinBuffByNextPosition = streamStateSessionAllocWinBuffByNextPosition;
|
||||
|
||||
pStore->streamStateCountWinAddIfNotExist = streamStateCountWinAddIfNotExist;
|
||||
pStore->streamStateCountWinAdd = streamStateCountWinAdd;
|
||||
|
||||
pStore->updateInfoInit = updateInfoInit;
|
||||
pStore->updateInfoFillBlockData = updateInfoFillBlockData;
|
||||
pStore->updateInfoIsUpdated = updateInfoIsUpdated;
|
||||
|
@ -89,6 +94,7 @@ void initStateStoreAPI(SStateStore* pStore) {
|
|||
pStore->updateInfoDeserialize = updateInfoDeserialize;
|
||||
|
||||
pStore->streamStateSessionSeekKeyNext = streamStateSessionSeekKeyNext;
|
||||
pStore->streamStateCountSeekKeyPrev = streamStateCountSeekKeyPrev;
|
||||
pStore->streamStateSessionSeekKeyCurrentPrev = streamStateSessionSeekKeyCurrentPrev;
|
||||
pStore->streamStateSessionSeekKeyCurrentNext = streamStateSessionSeekKeyCurrentNext;
|
||||
|
||||
|
|
|
@ -214,6 +214,13 @@ int32_t tsdbBegin(STsdb* pTsdb);
|
|||
// int32_t tsdbPrepareCommit(STsdb* pTsdb);
|
||||
// int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo);
|
||||
int32_t tsdbCacheCommit(STsdb* pTsdb);
|
||||
int32_t tsdbCacheNewTable(STsdb* pTsdb, int64_t uid, tb_uid_t suid, SSchemaWrapper* pSchemaRow);
|
||||
int32_t tsdbCacheDropTable(STsdb* pTsdb, int64_t uid, tb_uid_t suid, SSchemaWrapper* pSchemaRow);
|
||||
int32_t tsdbCacheDropSubTables(STsdb* pTsdb, SArray* uids, tb_uid_t suid);
|
||||
int32_t tsdbCacheNewSTableColumn(STsdb* pTsdb, SArray* uids, int16_t cid, int8_t col_type);
|
||||
int32_t tsdbCacheDropSTableColumn(STsdb* pTsdb, SArray* uids, int16_t cid, int8_t col_type);
|
||||
int32_t tsdbCacheNewNTableColumn(STsdb* pTsdb, int64_t uid, int16_t cid, int8_t col_type);
|
||||
int32_t tsdbCacheDropNTableColumn(STsdb* pTsdb, int64_t uid, int16_t cid, int8_t col_type);
|
||||
int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo);
|
||||
int32_t tsdbRetention(STsdb* tsdb, int64_t now, int32_t sync);
|
||||
int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq2* pMsg);
|
||||
|
|
|
@ -96,29 +96,29 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (!pData || !nData) {
|
||||
metaError("meta/snap: invalide nData: %" PRId32 " meta snap read failed.", nData);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
*ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + nData);
|
||||
if (*ppData == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
|
||||
pHdr->type = SNAP_DATA_META;
|
||||
pHdr->size = nData;
|
||||
memcpy(pHdr->data, pData, nData);
|
||||
|
||||
metaDebug("vgId:%d, vnode snapshot meta read data, version:%" PRId64 " uid:%" PRId64 " blockLen:%d",
|
||||
TD_VID(pReader->pMeta->pVnode), key.version, key.uid, nData);
|
||||
|
||||
tdbTbcMoveToNext(pReader->pTbc);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pData || !nData) {
|
||||
metaError("meta/snap: invalide nData: %" PRId32 " meta snap read failed.", nData);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
*ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + nData);
|
||||
if (*ppData == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
|
||||
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
|
||||
pHdr->type = SNAP_DATA_META;
|
||||
pHdr->size = nData;
|
||||
memcpy(pHdr->data, pData, nData);
|
||||
|
||||
metaDebug("vgId:%d, vnode snapshot meta read data, version:%" PRId64 " uid:%" PRId64 " blockLen:%d",
|
||||
TD_VID(pReader->pMeta->pVnode), key.version, key.uid, nData);
|
||||
|
||||
_exit:
|
||||
return code;
|
||||
|
||||
|
@ -619,7 +619,8 @@ SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext* ctx) {
|
|||
|
||||
int32_t ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
|
||||
if (ret != 0) {
|
||||
metaDebug("tmqsnap getMetaTableInfoFromSnapshot not exist uid:%" PRIi64 " version:%" PRIi64, *uidTmp, idInfo->version);
|
||||
metaDebug("tmqsnap getMetaTableInfoFromSnapshot not exist uid:%" PRIi64 " version:%" PRIi64, *uidTmp,
|
||||
idInfo->version);
|
||||
continue;
|
||||
}
|
||||
tdbTbcGet((TBC*)ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
|
||||
|
|
|
@ -303,6 +303,8 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq, SArray *tb
|
|||
|
||||
tdbTbcClose(pCtbIdxc);
|
||||
|
||||
(void)tsdbCacheDropSubTables(pMeta->pVnode->pTsdb, tbUidList, pReq->suid);
|
||||
|
||||
metaWLock(pMeta);
|
||||
|
||||
for (int32_t iChild = 0; iChild < taosArrayGetSize(tbUidList); iChild++) {
|
||||
|
@ -334,6 +336,40 @@ _exit:
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void metaGetSubtables(SMeta *pMeta, int64_t suid, SArray *uids) {
|
||||
if (!uids) return;
|
||||
|
||||
int c = 0;
|
||||
void *pKey = NULL;
|
||||
int nKey = 0;
|
||||
TBC *pCtbIdxc = NULL;
|
||||
|
||||
tdbTbcOpen(pMeta->pCtbIdx, &pCtbIdxc, NULL);
|
||||
int rc = tdbTbcMoveTo(pCtbIdxc, &(SCtbIdxKey){.suid = suid, .uid = INT64_MIN}, sizeof(SCtbIdxKey), &c);
|
||||
if (rc < 0) {
|
||||
tdbTbcClose(pCtbIdxc);
|
||||
metaWLock(pMeta);
|
||||
return;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
rc = tdbTbcNext(pCtbIdxc, &pKey, &nKey, NULL, NULL);
|
||||
if (rc < 0) break;
|
||||
|
||||
if (((SCtbIdxKey *)pKey)->suid < suid) {
|
||||
continue;
|
||||
} else if (((SCtbIdxKey *)pKey)->suid > suid) {
|
||||
break;
|
||||
}
|
||||
|
||||
taosArrayPush(uids, &(((SCtbIdxKey *)pKey)->uid));
|
||||
}
|
||||
|
||||
tdbFree(pKey);
|
||||
|
||||
tdbTbcClose(pCtbIdxc);
|
||||
}
|
||||
|
||||
int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
|
||||
SMetaEntry oStbEntry = {0};
|
||||
SMetaEntry nStbEntry = {0};
|
||||
|
@ -397,9 +433,39 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
|
|||
nStbEntry.stbEntry.schemaRow = pReq->schemaRow;
|
||||
nStbEntry.stbEntry.schemaTag = pReq->schemaTag;
|
||||
|
||||
int32_t deltaCol = pReq->schemaRow.nCols - oStbEntry.stbEntry.schemaRow.nCols;
|
||||
int nCols = pReq->schemaRow.nCols;
|
||||
int onCols = oStbEntry.stbEntry.schemaRow.nCols;
|
||||
int32_t deltaCol = nCols - onCols;
|
||||
bool updStat = deltaCol != 0 && !metaTbInFilterCache(pMeta, pReq->name, 1);
|
||||
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
STsdb *pTsdb = pMeta->pVnode->pTsdb;
|
||||
SArray *uids = taosArrayInit(8, sizeof(int64_t));
|
||||
if (deltaCol == 1) {
|
||||
int16_t cid = pReq->schemaRow.pSchema[nCols - 1].colId;
|
||||
int8_t col_type = pReq->schemaRow.pSchema[nCols - 1].type;
|
||||
|
||||
metaGetSubtables(pMeta, pReq->suid, uids);
|
||||
tsdbCacheNewSTableColumn(pTsdb, uids, cid, col_type);
|
||||
} else if (deltaCol == -1) {
|
||||
int16_t cid = -1;
|
||||
int8_t col_type = -1;
|
||||
for (int i = 0, j = 0; i < nCols && j < onCols; ++i, ++j) {
|
||||
if (pReq->schemaRow.pSchema[i].colId != oStbEntry.stbEntry.schemaRow.pSchema[j].colId) {
|
||||
cid = oStbEntry.stbEntry.schemaRow.pSchema[j].colId;
|
||||
col_type = oStbEntry.stbEntry.schemaRow.pSchema[j].type;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (cid != -1) {
|
||||
metaGetSubtables(pMeta, pReq->suid, uids);
|
||||
tsdbCacheDropSTableColumn(pTsdb, uids, cid, col_type);
|
||||
}
|
||||
}
|
||||
if (uids) taosArrayDestroy(uids);
|
||||
}
|
||||
|
||||
metaWLock(pMeta);
|
||||
// compare two entry
|
||||
if (oStbEntry.stbEntry.schemaRow.version != pReq->schemaRow.version) {
|
||||
|
@ -822,6 +888,10 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs
|
|||
metaUidCacheClear(pMeta, me.ctbEntry.suid);
|
||||
metaTbGroupCacheClear(pMeta, me.ctbEntry.suid);
|
||||
metaULock(pMeta);
|
||||
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
tsdbCacheNewTable(pMeta->pVnode->pTsdb, me.uid, me.ctbEntry.suid, NULL);
|
||||
}
|
||||
} else {
|
||||
me.ntbEntry.btime = pReq->btime;
|
||||
me.ntbEntry.ttlDays = pReq->ttl;
|
||||
|
@ -832,6 +902,10 @@ int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRs
|
|||
|
||||
++pStats->numOfNTables;
|
||||
pStats->numOfNTimeSeries += me.ntbEntry.schemaRow.nCols - 1;
|
||||
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
tsdbCacheNewTable(pMeta->pVnode->pTsdb, me.uid, -1, &me.ntbEntry.schemaRow);
|
||||
}
|
||||
}
|
||||
|
||||
if (metaHandleEntry(pMeta, &me) < 0) goto _err;
|
||||
|
@ -896,6 +970,10 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
|
|||
|
||||
if ((type == TSDB_CHILD_TABLE || type == TSDB_NORMAL_TABLE) && tbUids) {
|
||||
taosArrayPush(tbUids, &uid);
|
||||
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
tsdbCacheDropTable(pMeta->pVnode->pTsdb, uid, suid, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
if ((type == TSDB_CHILD_TABLE) && tbUid) {
|
||||
|
@ -930,6 +1008,11 @@ void metaDropTables(SMeta *pMeta, SArray *tbUids) {
|
|||
}
|
||||
tSimpleHashPut(suidHash, &suid, sizeof(tb_uid_t), &nCtbDropped, sizeof(int64_t));
|
||||
}
|
||||
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
tsdbCacheDropTable(pMeta->pVnode->pTsdb, uid, suid, NULL);
|
||||
}
|
||||
|
||||
metaDebug("batch drop table:%" PRId64, uid);
|
||||
}
|
||||
metaULock(pMeta);
|
||||
|
@ -1172,11 +1255,22 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type, tb_uid_t *p
|
|||
metaUpdateStbStats(pMeta, e.ctbEntry.suid, -1, 0);
|
||||
metaUidCacheClear(pMeta, e.ctbEntry.suid);
|
||||
metaTbGroupCacheClear(pMeta, e.ctbEntry.suid);
|
||||
/*
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
tsdbCacheDropTable(pMeta->pVnode->pTsdb, e.uid, e.ctbEntry.suid, NULL);
|
||||
}
|
||||
*/
|
||||
} else if (e.type == TSDB_NORMAL_TABLE) {
|
||||
// drop schema.db (todo)
|
||||
|
||||
--pMeta->pVnode->config.vndStats.numOfNTables;
|
||||
pMeta->pVnode->config.vndStats.numOfNTimeSeries -= e.ntbEntry.schemaRow.nCols - 1;
|
||||
|
||||
/*
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
tsdbCacheDropTable(pMeta->pVnode->pTsdb, e.uid, -1, &e.ntbEntry.schemaRow);
|
||||
}
|
||||
*/
|
||||
} else if (e.type == TSDB_SUPER_TABLE) {
|
||||
tdbTbDelete(pMeta->pSuidIdx, &e.uid, sizeof(tb_uid_t), pMeta->txn);
|
||||
// drop schema.db (todo)
|
||||
|
@ -1364,6 +1458,12 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
|
|||
|
||||
++pMeta->pVnode->config.vndStats.numOfNTimeSeries;
|
||||
metaTimeSeriesNotifyCheck(pMeta);
|
||||
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
int16_t cid = pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].colId;
|
||||
int8_t col_type = pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].type;
|
||||
(void)tsdbCacheNewNTableColumn(pMeta->pVnode->pTsdb, entry.uid, cid, col_type);
|
||||
}
|
||||
break;
|
||||
case TSDB_ALTER_TABLE_DROP_COLUMN:
|
||||
if (pColumn == NULL) {
|
||||
|
@ -1386,6 +1486,13 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
|
|||
pSchema->nCols--;
|
||||
|
||||
--pMeta->pVnode->config.vndStats.numOfNTimeSeries;
|
||||
|
||||
if (!TSDB_CACHE_NO(pMeta->pVnode->config)) {
|
||||
int16_t cid = pColumn->colId;
|
||||
int8_t col_type = pColumn->type;
|
||||
|
||||
(void)tsdbCacheDropNTableColumn(pMeta->pVnode->pTsdb, entry.uid, cid, col_type);
|
||||
}
|
||||
break;
|
||||
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
|
||||
if (pColumn == NULL) {
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
*/
|
||||
|
||||
#include "tq.h"
|
||||
#include "vnd.h"
|
||||
#include "tqCommon.h"
|
||||
#include "vnd.h"
|
||||
|
||||
// 0: not init
|
||||
// 1: already inited
|
||||
|
@ -835,36 +835,37 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
|
|||
// checkpoint ver is the kept version, handled data should be the next version.
|
||||
if (pChkInfo->checkpointId != 0) {
|
||||
pChkInfo->nextProcessVer = pChkInfo->checkpointVer + 1;
|
||||
pChkInfo->processedVer = pChkInfo->checkpointVer;
|
||||
tqInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " currentVer:%" PRId64, pTask->id.idStr,
|
||||
pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
|
||||
}
|
||||
|
||||
char* p = streamTaskGetStatus(pTask)->name;
|
||||
const char* pNext = streamTaskGetStatusStr(pTask->status.taskStatus);
|
||||
|
||||
if (pTask->info.fillHistory) {
|
||||
tqInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
" nextProcessVer:%" PRId64
|
||||
" child id:%d, level:%d, status:%s fill-history:%d, related stream task:0x%x trigger:%" PRId64
|
||||
" ms, inputVer:%" PRId64,
|
||||
" child id:%d, level:%d, cur-status:%s, next-status:%s fill-history:%d, related stream task:0x%x "
|
||||
"trigger:%" PRId64 " ms, inputVer:%" PRId64,
|
||||
vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory,
|
||||
pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory,
|
||||
(int32_t)pTask->streamTaskId.taskId, pTask->info.triggerParam, nextProcessVer);
|
||||
} else {
|
||||
tqInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
" nextProcessVer:%" PRId64
|
||||
" child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64
|
||||
" ms, inputVer:%" PRId64,
|
||||
vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory,
|
||||
(int32_t)pTask->hTaskInfo.id.taskId, pTask->info.triggerParam, nextProcessVer);
|
||||
tqInfo(
|
||||
"vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
|
||||
" nextProcessVer:%" PRId64
|
||||
" child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64
|
||||
" ms, inputVer:%" PRId64,
|
||||
vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
|
||||
pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory,
|
||||
(int32_t)pTask->hTaskInfo.id.taskId, pTask->info.triggerParam, nextProcessVer);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||
return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg);
|
||||
}
|
||||
int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg); }
|
||||
|
||||
int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) {
|
||||
return tqStreamTaskProcessCheckRsp(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode));
|
||||
|
@ -988,7 +989,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
|||
streamReExecScanHistoryFuture(pTask, retInfo.idleTime);
|
||||
} else {
|
||||
SStreamTaskState* p = streamTaskGetStatus(pTask);
|
||||
ETaskStatus s = p->state;
|
||||
ETaskStatus s = p->state;
|
||||
|
||||
if (s == TASK_STATUS__PAUSE) {
|
||||
tqDebug("s-task:%s is paused in the step1, elapsed time:%.2fs total:%.2fs, sched-status:%d", pTask->id.idStr,
|
||||
|
@ -1015,8 +1016,8 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
|
|||
tqError("failed to find s-task:0x%" PRIx64 ", it may have been destroyed, drop related fill-history task:%s",
|
||||
pTask->streamTaskId.taskId, pTask->id.idStr);
|
||||
|
||||
tqDebug("s-task:%s fill-history task set status to be dropping", id);
|
||||
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
|
||||
tqDebug("s-task:%s fill-history task set status to be dropping and drop it", id);
|
||||
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id, 0);
|
||||
|
||||
atomic_store_32(&pTask->status.inScanHistorySentinel, 0);
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
|
@ -1062,7 +1063,7 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
|
|||
}
|
||||
|
||||
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg) {
|
||||
return tqStreamTaskProcessDispatchReq(pTq->pStreamMeta, pMsg);
|
||||
return tqStreamTaskProcessDispatchReq(pTq->pStreamMeta, pMsg);
|
||||
}
|
||||
|
||||
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
|
||||
|
@ -1101,7 +1102,7 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
|
|||
pRsp->info.handle = NULL;
|
||||
|
||||
SStreamCheckpointSourceReq req = {0};
|
||||
SDecoder decoder;
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, (uint8_t*)msg, len);
|
||||
if (tDecodeStreamCheckpointSourceReq(&decoder, &req) < 0) {
|
||||
code = TSDB_CODE_MSG_DECODE_ERROR;
|
||||
|
@ -1192,8 +1193,8 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
|
|||
streamProcessCheckpointSourceReq(pTask, &req);
|
||||
taosThreadMutexUnlock(&pTask->lock);
|
||||
|
||||
qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", transId:%d",
|
||||
pTask->id.idStr, vgId, pTask->info.taskLevel, req.checkpointId, req.transId);
|
||||
qInfo("s-task:%s (vgId:%d) level:%d receive checkpoint-source msg chkpt:%" PRId64 ", transId:%d", pTask->id.idStr,
|
||||
vgId, pTask->info.taskLevel, req.checkpointId, req.transId);
|
||||
|
||||
code = streamAddCheckpointSourceRspMsg(&req, &pMsg->info, pTask, 1);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
|
|
@ -74,11 +74,11 @@ int32_t tqSnapReaderClose(STqSnapReader** ppReader) {
|
|||
}
|
||||
|
||||
int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData) {
|
||||
int32_t code = 0;
|
||||
void* pKey = NULL;
|
||||
void* pVal = NULL;
|
||||
int32_t kLen = 0;
|
||||
int32_t vLen = 0;
|
||||
int32_t code = 0;
|
||||
void* pKey = NULL;
|
||||
void* pVal = NULL;
|
||||
int32_t kLen = 0;
|
||||
int32_t vLen = 0;
|
||||
|
||||
if (tdbTbcNext(pReader->pCur, &pKey, &kLen, &pVal, &vLen)) {
|
||||
goto _exit;
|
||||
|
|
|
@ -207,8 +207,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t
|
|||
goto END;
|
||||
}
|
||||
|
||||
tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64" 0x%"PRIx64, vgId,
|
||||
pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId, id);
|
||||
tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64 " 0x%" PRIx64,
|
||||
vgId, pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId, id);
|
||||
|
||||
if (pHandle->pWalReader->pHead->head.msgType == TDMT_VND_SUBMIT) {
|
||||
code = walFetchBody(pHandle->pWalReader);
|
||||
|
@ -303,7 +303,7 @@ int32_t tqReaderSeek(STqReader* pReader, int64_t ver, const char* id) {
|
|||
int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, const char* id) {
|
||||
int32_t code = 0;
|
||||
|
||||
while(1) {
|
||||
while (1) {
|
||||
code = walNextValidMsg(pReader);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
@ -322,7 +322,8 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
|
|||
|
||||
void* data = taosMemoryMalloc(len);
|
||||
if (data == NULL) {
|
||||
// todo: for all stream in this vnode, keep this offset in the offset files, and wait for a moment, and then retry
|
||||
// todo: for all stream in this vnode, keep this offset in the offset files, and wait for a moment, and then
|
||||
// retry
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
terrno = code;
|
||||
|
||||
|
@ -369,7 +370,7 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
|
|||
|
||||
// todo ignore the error in wal?
|
||||
bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) {
|
||||
SWalReader* pWalReader = pReader->pWalReader;
|
||||
SWalReader* pWalReader = pReader->pWalReader;
|
||||
SSDataBlock* pDataBlock = NULL;
|
||||
|
||||
uint64_t st = taosGetTimestampMs();
|
||||
|
@ -387,22 +388,22 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) {
|
|||
pReader->nextBlk = 0;
|
||||
int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
|
||||
while (pReader->nextBlk < numOfBlocks) {
|
||||
tqTrace("tq reader next data block %d/%d, len:%d %" PRId64, pReader->nextBlk,
|
||||
numOfBlocks, pReader->msg.msgLen, pReader->msg.ver);
|
||||
tqTrace("tq reader next data block %d/%d, len:%d %" PRId64, pReader->nextBlk, numOfBlocks, pReader->msg.msgLen,
|
||||
pReader->msg.ver);
|
||||
|
||||
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
|
||||
if ((pSubmitTbData->source & sourceExcluded) != 0){
|
||||
if ((pSubmitTbData->source & sourceExcluded) != 0) {
|
||||
pReader->nextBlk += 1;
|
||||
continue;
|
||||
}
|
||||
if (pReader->tbIdHash == NULL || taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t)) != NULL) {
|
||||
tqTrace("tq reader return submit block, uid:%" PRId64, pSubmitTbData->uid);
|
||||
SSDataBlock* pRes = NULL;
|
||||
int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL);
|
||||
int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL);
|
||||
if (code == TSDB_CODE_SUCCESS && pRes->info.rows > 0) {
|
||||
if(pDataBlock == NULL){
|
||||
if (pDataBlock == NULL) {
|
||||
pDataBlock = createOneDataBlock(pRes, true);
|
||||
}else{
|
||||
} else {
|
||||
blockDataMerge(pDataBlock, pRes);
|
||||
}
|
||||
}
|
||||
|
@ -414,16 +415,16 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id, int sourceExcluded) {
|
|||
tDestroySubmitReq(&pReader->submit, TSDB_MSG_FLG_DECODE);
|
||||
pReader->msg.msgStr = NULL;
|
||||
|
||||
if(pDataBlock != NULL){
|
||||
if (pDataBlock != NULL) {
|
||||
blockDataCleanup(pReader->pResBlock);
|
||||
copyDataBlock(pReader->pResBlock, pDataBlock);
|
||||
blockDataDestroy(pDataBlock);
|
||||
return true;
|
||||
}else{
|
||||
} else {
|
||||
qTrace("stream scan return empty, all %d submit blocks consumed, %s", numOfBlocks, id);
|
||||
}
|
||||
|
||||
if(taosGetTimestampMs() - st > 1000){
|
||||
if (taosGetTimestampMs() - st > 1000) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -448,17 +449,11 @@ int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, i
  return 0;
}

SWalReader* tqGetWalReader(STqReader* pReader) {
  return pReader->pWalReader;
}
SWalReader* tqGetWalReader(STqReader* pReader) { return pReader->pWalReader; }

SSDataBlock* tqGetResultBlock (STqReader* pReader) {
  return pReader->pResBlock;
}
SSDataBlock* tqGetResultBlock(STqReader* pReader) { return pReader->pResBlock; }

int64_t tqGetResultBlockTime(STqReader *pReader){
  return pReader->lastTs;
}
int64_t tqGetResultBlockTime(STqReader* pReader) { return pReader->lastTs; }

bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
  if (pReader->msg.msgStr == NULL) {
@ -599,7 +594,7 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol
|
|||
|
||||
if (IS_STR_DATA_TYPE(pColVal->type)) {
|
||||
char val[65535 + 2] = {0};
|
||||
if(COL_VAL_IS_VALUE(pColVal)){
|
||||
if (COL_VAL_IS_VALUE(pColVal)) {
|
||||
if (pColVal->value.pData != NULL) {
|
||||
memcpy(varDataVal(val), pColVal->value.pData, pColVal->value.nData);
|
||||
}
|
||||
|
@ -874,7 +869,7 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
|
|||
sourceIdx++;
|
||||
} else if (pCol->cid == pColData->info.colId) {
|
||||
tColDataGetValue(pCol, i, &colVal);
|
||||
if(doSetVal(pColData, curRow - lastRow, &colVal) != TDB_CODE_SUCCESS){
|
||||
if (doSetVal(pColData, curRow - lastRow, &colVal) != TDB_CODE_SUCCESS) {
|
||||
goto FAIL;
|
||||
}
|
||||
sourceIdx++;
|
||||
|
@ -958,10 +953,10 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
|
|||
if (colVal.cid < pColData->info.colId) {
|
||||
sourceIdx++;
|
||||
} else if (colVal.cid == pColData->info.colId) {
|
||||
if(doSetVal(pColData, curRow - lastRow, &colVal) != TDB_CODE_SUCCESS){
|
||||
if (doSetVal(pColData, curRow - lastRow, &colVal) != TDB_CODE_SUCCESS) {
|
||||
goto FAIL;
|
||||
}
|
||||
sourceIdx++;
|
||||
sourceIdx++;
|
||||
targetIdx++;
|
||||
}
|
||||
}
|
||||
|
@ -1001,7 +996,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList, const char
|
|||
taosHashPut(pReader->tbIdHash, pKey, sizeof(int64_t), NULL, 0);
|
||||
}
|
||||
|
||||
tqDebug("s-task:%s %d tables are set to be queried target table", id, (int32_t) taosArrayGetSize(tbUidList));
|
||||
tqDebug("s-task:%s %d tables are set to be queried target table", id, (int32_t)taosArrayGetSize(tbUidList));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1027,9 +1022,7 @@ bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid) {
|
|||
return taosHashGet(pReader->tbIdHash, &uid, sizeof(uint64_t));
|
||||
}
|
||||
|
||||
bool tqCurrentBlockConsumed(const STqReader* pReader) {
|
||||
return pReader->msg.msgStr == NULL;
|
||||
}
|
||||
bool tqCurrentBlockConsumed(const STqReader* pReader) { return pReader->msg.msgStr == NULL; }
|
||||
|
||||
int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) {
|
||||
for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
|
||||
|
@ -1071,9 +1064,11 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
|
|||
} else if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
|
||||
if (isAdd) {
|
||||
SArray* list = NULL;
|
||||
int ret = qGetTableList(pTqHandle->execHandle.execTb.suid, pTq->pVnode, pTqHandle->execHandle.execTb.node, &list, pTqHandle->execHandle.task);
|
||||
if(ret != TDB_CODE_SUCCESS) {
|
||||
tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey, pTqHandle->consumerId);
|
||||
int ret = qGetTableList(pTqHandle->execHandle.execTb.suid, pTq->pVnode, pTqHandle->execHandle.execTb.node,
|
||||
&list, pTqHandle->execHandle.task);
|
||||
if (ret != TDB_CODE_SUCCESS) {
|
||||
tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey,
|
||||
pTqHandle->consumerId);
|
||||
taosArrayDestroy(list);
|
||||
taosHashCancelIterate(pTq->pHandle, pIter);
|
||||
taosWUnLockLatch(&pTq->lock);
|
||||
|
|
|
@ -63,8 +63,8 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, STaosxRsp* pRsp, in
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t getDataBlock(qTaskInfo_t task, const STqHandle* pHandle, int32_t vgId, SSDataBlock** res){
|
||||
uint64_t ts = 0;
|
||||
int32_t getDataBlock(qTaskInfo_t task, const STqHandle* pHandle, int32_t vgId, SSDataBlock** res) {
|
||||
uint64_t ts = 0;
|
||||
qStreamSetOpen(task);
|
||||
|
||||
tqDebug("consumer:0x%" PRIx64 " vgId:%d, tmq one task start execute", pHandle->consumerId, vgId);
|
||||
|
@ -97,27 +97,27 @@ int32_t tqScanData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal*
|
|||
while (1) {
|
||||
SSDataBlock* pDataBlock = NULL;
|
||||
code = getDataBlock(task, pHandle, vgId, &pDataBlock);
|
||||
if (code != 0){
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
|
||||
if(pRequest->enableReplay){
|
||||
if(IS_OFFSET_RESET_TYPE(pRequest->reqOffset.type) && pHandle->block != NULL){
|
||||
if (pRequest->enableReplay) {
|
||||
if (IS_OFFSET_RESET_TYPE(pRequest->reqOffset.type) && pHandle->block != NULL) {
|
||||
blockDataDestroy(pHandle->block);
|
||||
pHandle->block = NULL;
|
||||
}
|
||||
if(pHandle->block == NULL){
|
||||
if (pHandle->block == NULL) {
|
||||
if (pDataBlock == NULL) {
|
||||
break;
|
||||
}
|
||||
STqOffsetVal offset = {0};
|
||||
qStreamExtractOffset(task, &offset);
|
||||
pHandle->block = createOneDataBlock(pDataBlock, true);
|
||||
// pHandle->block = createDataBlock();
|
||||
// copyDataBlock(pHandle->block, pDataBlock);
|
||||
// pHandle->block = createDataBlock();
|
||||
// copyDataBlock(pHandle->block, pDataBlock);
|
||||
pHandle->blockTime = offset.ts;
|
||||
code = getDataBlock(task, pHandle, vgId, &pDataBlock);
|
||||
if (code != 0){
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
@ -132,7 +132,7 @@ int32_t tqScanData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal*
|
|||
if (pDataBlock == NULL) {
|
||||
blockDataDestroy(pHandle->block);
|
||||
pHandle->block = NULL;
|
||||
}else{
|
||||
} else {
|
||||
copyDataBlock(pHandle->block, pDataBlock);
|
||||
|
||||
STqOffsetVal offset = {0};
|
||||
|
@ -141,7 +141,7 @@ int32_t tqScanData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal*
|
|||
pHandle->blockTime = offset.ts;
|
||||
}
|
||||
break;
|
||||
}else{
|
||||
} else {
|
||||
if (pDataBlock == NULL) {
|
||||
break;
|
||||
}
|
||||
|
@ -250,7 +250,8 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded) {
|
||||
int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp, int32_t* totalRows,
|
||||
int8_t sourceExcluded) {
|
||||
STqExecHandle* pExec = &pHandle->execHandle;
|
||||
SArray* pBlocks = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
SArray* pSchemas = taosArrayInit(0, sizeof(void*));
|
||||
|
@ -266,7 +267,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
|
|||
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) goto loop_table;
|
||||
}
|
||||
|
||||
if ((pSubmitTbDataRet->source & sourceExcluded) != 0){
|
||||
if ((pSubmitTbDataRet->source & sourceExcluded) != 0) {
|
||||
goto loop_table;
|
||||
}
|
||||
if (pRsp->withTbName) {
|
||||
|
@ -303,7 +304,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
|
|||
|
||||
tEncoderClear(&encoder);
|
||||
}
|
||||
if (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL){
|
||||
if (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL) {
|
||||
goto loop_table;
|
||||
}
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pBlocks); i++) {
|
||||
|
@ -334,7 +335,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
|
|||
if (terrno == TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND) goto loop_db;
|
||||
}
|
||||
|
||||
if ((pSubmitTbDataRet->source & sourceExcluded) != 0){
|
||||
if ((pSubmitTbDataRet->source & sourceExcluded) != 0) {
|
||||
goto loop_db;
|
||||
}
|
||||
if (pRsp->withTbName) {
|
||||
|
@ -371,7 +372,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxR
|
|||
|
||||
tEncoderClear(&encoder);
|
||||
}
|
||||
if (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL){
|
||||
if (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL) {
|
||||
goto loop_db;
|
||||
}
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pBlocks); i++) {
|
||||
|
|
|
@ -33,15 +33,18 @@ static int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSData
|
|||
int64_t suid);
|
||||
static int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2* pReq, int32_t numOfBlocks);
|
||||
static int32_t buildSubmitMsgImpl(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, int32_t* msgLen);
|
||||
static int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema, SSDataBlock* pDataBlock, const char* id);
|
||||
static int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema, SSDataBlock* pDataBlock,
|
||||
const char* id);
|
||||
static int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo,
|
||||
const char* dstTableName, int64_t* uid);
|
||||
static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id);
|
||||
static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId,
|
||||
const char* id);
|
||||
static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid);
|
||||
static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName, int32_t numOfTags);
|
||||
static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName,
|
||||
int32_t numOfTags);
|
||||
static SArray* createDefaultTagColName();
|
||||
static void setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
|
||||
int64_t gid, bool newSubTableRule);
|
||||
static void setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
|
||||
int64_t gid, bool newSubTableRule);
|
||||
|
||||
int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
|
||||
const char* pIdStr, bool newSubTableRule) {
|
||||
|
@ -68,10 +71,7 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p
|
|||
if (varTbName != NULL && varTbName != (void*)-1) {
|
||||
name = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN);
|
||||
memcpy(name, varDataVal(varTbName), varDataLen(varTbName));
|
||||
if(newSubTableRule &&
|
||||
!isAutoTableName(name) &&
|
||||
!alreadyAddGroupId(name) &&
|
||||
groupId != 0) {
|
||||
if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name) && groupId != 0) {
|
||||
buildCtbNameAddGruopId(name, groupId);
|
||||
}
|
||||
} else if (stbFullName) {
|
||||
|
@ -134,7 +134,7 @@ end:
return ret;
}

static bool tqGetTableInfo(SSHashObj* pTableInfoMap,uint64_t groupId, STableSinkInfo** pInfo) {
static bool tqGetTableInfo(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) {
void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
if (pVal) {
*pInfo = *(STableSinkInfo**)pVal;
@ -149,7 +149,7 @@ static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
int32_t tlen = 0;
encodeCreateChildTableForRPC(pReqs, TD_VID(pVnode), &buf, &tlen);

SRpcMsg msg = { .msgType = TDMT_VND_CREATE_TABLE, .pCont = buf, .contLen = tlen };
SRpcMsg msg = {.msgType = TDMT_VND_CREATE_TABLE, .pCont = buf, .contLen = tlen};
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
tqError("failed to put into write-queue since %s", terrstr());
}
@ -181,14 +181,12 @@ SArray* createDefaultTagColName() {
void setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
int64_t gid, bool newSubTableRule) {
if (pDataBlock->info.parTbName[0]) {
if(newSubTableRule &&
!isAutoTableName(pDataBlock->info.parTbName) &&
!alreadyAddGroupId(pDataBlock->info.parTbName) &&
gid != 0) {
if (newSubTableRule && !isAutoTableName(pDataBlock->info.parTbName) &&
!alreadyAddGroupId(pDataBlock->info.parTbName) && gid != 0) {
pCreateTableReq->name = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN);
strcpy(pCreateTableReq->name, pDataBlock->info.parTbName);
buildCtbNameAddGruopId(pCreateTableReq->name, gid);
}else{
} else {
pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName);
}
} else {
@ -196,17 +194,18 @@ void setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDa
}
}

static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
int64_t suid) {
static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock,
SStreamTask* pTask, int64_t suid) {
tqDebug("s-task:%s build create table msg", pTask->id.idStr);

STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema;
int32_t rows = pDataBlock->info.rows;
SArray* tagArray = taosArrayInit(4, sizeof(STagVal));;
int32_t code = 0;
SArray* tagArray = taosArrayInit(4, sizeof(STagVal));
;
int32_t code = 0;

SVCreateTbBatchReq reqs = {0};
SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
if (NULL == reqs.pArray) {
tqError("s-task:%s failed to init create table msg, code:%s", pTask->id.idStr, tstrerror(terrno));
goto _end;
@ -262,7 +261,8 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
ASSERT(gid == *(int64_t*)pGpIdData);
}

setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER);
setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid,
pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);

taosArrayPush(reqs.pArray, pCreateTbReq);
tqDebug("s-task:%s build create table:%s msg complete", pTask->id.idStr, pCreateTbReq->name);

@ -274,7 +274,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
tqError("s-task:%s failed to send create table msg", pTask->id.idStr);
}

_end:
_end:
taosArrayDestroy(tagArray);
taosArrayDestroyEx(crTblArray, (FDelete)tdDestroySVCreateTbReq);
return code;
@ -361,7 +361,8 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c
pExisted->aRowP = pFinal;

tqTrace("s-task:%s rows merged, final rows:%d, uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id,
(int32_t)taosArrayGetSize(pFinal), pExisted->uid, (pExisted->pCreateTbReq != NULL), (pNew->pCreateTbReq != NULL));
(int32_t)taosArrayGetSize(pFinal), pExisted->uid, (pExisted->pCreateTbReq != NULL),
(pNew->pCreateTbReq != NULL));

tdDestroySVCreateTbReq(pNew->pCreateTbReq);
taosMemoryFree(pNew->pCreateTbReq);
@ -373,7 +374,7 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock*
SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))};

int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr,
pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER);
pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -416,8 +417,8 @@ bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbNam
}

if (pReader->me.ctbEntry.suid != suid) {
tqError("vgId:%d, failed to write into %s, since suid mismatch, expect suid:%" PRId64 ", actual:%" PRId64,
vgId, ctbName, suid, pReader->me.ctbEntry.suid);
tqError("vgId:%d, failed to write into %s, since suid mismatch, expect suid:%" PRId64 ", actual:%" PRId64, vgId,
ctbName, suid, pReader->me.ctbEntry.suid);
terrno = TSDB_CODE_TDB_TABLE_IN_OTHER_STABLE;
return false;
}
@ -437,10 +438,10 @@ SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, in
taosArrayClear(pTagArray);
initCreateTableMsg(pCreateTbReq, suid, stbFullName, 1);

STagVal tagVal = { .cid = numOfCols, .type = TSDB_DATA_TYPE_UBIGINT, .i64 = pDataBlock->info.id.groupId};
STagVal tagVal = {.cid = numOfCols, .type = TSDB_DATA_TYPE_UBIGINT, .i64 = pDataBlock->info.id.groupId};
taosArrayPush(pTagArray, &tagVal);

tTagNew(pTagArray, 1, false, (STag**) &pCreateTbReq->ctb.pTag);
tTagNew(pTagArray, 1, false, (STag**)&pCreateTbReq->ctb.pTag);

if (pCreateTbReq->ctb.pTag == NULL) {
tdDestroySVCreateTbReq(pCreateTbReq);
@ -513,13 +514,13 @@ int32_t buildSubmitMsgImpl(SSubmitReq2* pSubmitReq, int32_t vgId, void** pMsg, i
}

int32_t tsAscendingSortFn(const void* p1, const void* p2) {
SRow* pRow1 = *(SRow**) p1;
SRow* pRow2 = *(SRow**) p2;
SRow* pRow1 = *(SRow**)p1;
SRow* pRow2 = *(SRow**)p2;

if (pRow1->ts == pRow2->ts) {
return 0;
} else {
return pRow1->ts > pRow2->ts? 1:-1;
return pRow1->ts > pRow2->ts ? 1 : -1;
}
}

@ -563,7 +564,7 @@ int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema, SSDat
void* colData = colDataGetData(pColData, j);
if (IS_STR_DATA_TYPE(pCol->type)) {
// address copy, no value
SValue sv = (SValue){.nData = varDataLen(colData), .pData = (uint8_t*) varDataVal(colData)};
SValue sv = (SValue){.nData = varDataLen(colData), .pData = (uint8_t*)varDataVal(colData)};
SColVal cv = COL_VAL_VALUE(pCol->colId, pCol->type, sv);
taosArrayPush(pVals, &cv);
} else {
@ -666,11 +667,9 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
if (dstTableName[0] == 0) {
memset(dstTableName, 0, TSDB_TABLE_NAME_LEN);
buildCtbNameByGroupIdImpl(stbFullName, groupId, dstTableName);
}else{
if(pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER &&
!isAutoTableName(dstTableName) &&
!alreadyAddGroupId(dstTableName) &&
groupId != 0) {
} else {
if (pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1 &&
!isAutoTableName(dstTableName) && !alreadyAddGroupId(dstTableName) && groupId != 0) {
buildCtbNameAddGruopId(dstTableName, groupId);
}
}
@ -693,7 +692,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
tqTrace("s-task:%s cached tableInfo uid is invalid, acquire it from meta", id);
return doWaitForDstTableCreated(pVnode, pTask, pTableSinkInfo, dstTableName, &pTableData->uid);
} else {
tqTrace("s-task:%s set the dstTable uid from cache:%"PRId64, id, pTableData->uid);
tqTrace("s-task:%s set the dstTable uid from cache:%" PRId64, id, pTableData->uid);
}
} else {
// The auto-create option will always set to be open for those submit messages, which arrive during the period

@ -714,7 +713,8 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat

pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
pTableData->pCreateTbReq =
buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray, pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER);
buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray,
pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);
taosArrayDestroy(pTagArray);

if (pTableData->pCreateTbReq == NULL) {
@ -746,12 +746,12 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
return TDB_CODE_SUCCESS;
}

int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock,
SSubmitTbData* pTableData, const char* id) {
int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock,
SSubmitTbData* pTableData, const char* id) {
int32_t numOfRows = pDataBlock->info.rows;

tqDebug("s-task:%s sink data pipeline, build submit msg from %dth resBlock, including %d rows, dst suid:%" PRId64,
id, blockIndex + 1, numOfRows, suid);
tqDebug("s-task:%s sink data pipeline, build submit msg from %dth resBlock, including %d rows, dst suid:%" PRId64, id,
blockIndex + 1, numOfRows, suid);
char* dstTableName = pDataBlock->info.parTbName;

// convert all rows
@ -767,14 +767,14 @@ int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_
}

bool hasOnlySubmitData(const SArray* pBlocks, int32_t numOfBlocks) {
for(int32_t i = 0; i < numOfBlocks; ++i) {
for (int32_t i = 0; i < numOfBlocks; ++i) {
SSDataBlock* p = taosArrayGet(pBlocks, i);
if (p->info.type == STREAM_DELETE_RESULT || p->info.type == STREAM_CREATE_CHILD_TABLE) {
return false;
}
}

return true;
return true;
}

void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
@ -793,7 +793,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
numOfBlocks);

for(int32_t i = 0; i < numOfBlocks; ++i) {
for (int32_t i = 0; i < numOfBlocks; ++i) {
if (streamTaskShouldStop(pTask)) {
return;
}

@ -832,7 +832,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
}
} else {
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, merge submit msg", vgId, id, numOfBlocks);
SHashObj* pTableIndexMap = taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
SHashObj* pTableIndexMap =
taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);

SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
if (submitReq.aSubmitTbData == NULL) {

@ -72,10 +72,11 @@ static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, STqOffsetVal pOffset) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, bool* pBlockReturned) {
|
||||
uint64_t consumerId = pRequest->consumerId;
|
||||
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey);
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
|
||||
SRpcMsg* pMsg, bool* pBlockReturned) {
|
||||
uint64_t consumerId = pRequest->consumerId;
|
||||
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey);
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
|
||||
*pBlockReturned = false;
|
||||
// In this vnode, data has been polled by consumer for this topic, so let's continue from the last offset value.
|
||||
|
@ -132,7 +133,7 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
SRpcMsg* pMsg, STqOffsetVal* pOffset) {
|
||||
uint64_t consumerId = pRequest->consumerId;
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
terrno = 0;
|
||||
terrno = 0;
|
||||
|
||||
SMqDataRsp dataRsp = {0};
|
||||
tqInitDataRsp(&dataRsp, *pOffset);
|
||||
|
@ -156,7 +157,7 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
taosWUnLockLatch(&pTq->lock);
|
||||
}
|
||||
|
||||
dataRsp.reqOffset = *pOffset; // reqOffset represents the current date offset, may be changed if wal not exists
|
||||
dataRsp.reqOffset = *pOffset; // reqOffset represents the current date offset, may be changed if wal not exists
|
||||
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
|
||||
end : {
|
||||
|
@ -167,15 +168,15 @@ end : {
|
|||
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
|
||||
tDeleteMqDataRsp(&dataRsp);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
|
||||
SRpcMsg* pMsg, STqOffsetVal* offset) {
|
||||
int code = 0;
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
SMqMetaRsp metaRsp = {0};
|
||||
STaosxRsp taosxRsp = {0};
|
||||
int code = 0;
|
||||
int32_t vgId = TD_VID(pTq->pVnode);
|
||||
SMqMetaRsp metaRsp = {0};
|
||||
STaosxRsp taosxRsp = {0};
|
||||
tqInitTaosxRsp(&taosxRsp, *offset);
|
||||
|
||||
if (offset->type != TMQ_OFFSET__LOG) {
|
||||
|
@ -211,14 +212,16 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
int64_t fetchVer = offset->version;
|
||||
|
||||
uint64_t st = taosGetTimestampMs();
|
||||
int totalRows = 0;
|
||||
int totalRows = 0;
|
||||
while (1) {
|
||||
int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
|
||||
ASSERT(savedEpoch <= pRequest->epoch);
|
||||
|
||||
if (tqFetchLog(pTq, pHandle, &fetchVer, pRequest->reqId) < 0) {
|
||||
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
|
||||
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
code = tqSendDataRsp(
|
||||
pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp,
|
||||
taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
@ -230,7 +233,9 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
|
|||
if (pHead->msgType != TDMT_VND_SUBMIT) {
|
||||
if (totalRows > 0) {
|
||||
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
|
||||
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
code = tqSendDataRsp(
|
||||
pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp,
|
||||
taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
@ -257,9 +262,11 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle,
goto end;
}

if (totalRows >= 4096 || taosxRsp.createTableNum > 0 || (taosGetTimestampMs() - st > 1000)) {
if (totalRows >= 4096 || (taosGetTimestampMs() - st > 1000)) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer + 1);
code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
code = tqSendDataRsp(
pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp,
taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId);
goto end;
} else {
fetchVer++;
@ -279,7 +286,7 @@ int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequ
// 1. reset the offset if needed
if (IS_OFFSET_RESET_TYPE(pRequest->reqOffset.type)) {
// handle the reset offset cases, according to the consumer's choice.
bool blockReturned = false;
bool blockReturned = false;
int32_t code = extractResetOffsetVal(&reqOffset, pTq, pHandle, pRequest, pMsg, &blockReturned);
if (code != 0) {
return code;

@ -289,7 +296,7 @@ int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequ
if (blockReturned) {
return 0;
}
} else if(reqOffset.type == 0){ // use the consumer specified offset
} else if (reqOffset.type == 0) { // use the consumer specified offset
uError("req offset type is 0");
return TSDB_CODE_TMQ_INVALID_MSG;
}
@ -452,8 +459,8 @@ int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void*
|
|||
|
||||
int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, bool* fhFinished) {
|
||||
SStreamMeta* pMeta = pVnode->pTq->pStreamMeta;
|
||||
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
if (pDelay != NULL) {
|
||||
*pDelay = 0;
|
||||
|
|
|
@ -317,14 +317,14 @@ int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
|
|||
if (pTask == NULL) {
|
||||
tqError("vgId:%d process retrieve req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
|
||||
req.dstTaskId);
|
||||
taosMemoryFree(req.pRetrieve);
|
||||
tDeleteStreamRetrieveReq(&req);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t code = 0;
|
||||
if(pTask->info.taskLevel == TASK_LEVEL__SOURCE){
|
||||
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
|
||||
code = streamProcessRetrieveReq(pTask, &req);
|
||||
}else{
|
||||
} else {
|
||||
req.srcNodeId = pTask->info.nodeId;
|
||||
req.srcTaskId = pTask->id.taskId;
|
||||
code = broadcastRetrieveMsg(pTask, &req);
|
||||
|
@ -334,7 +334,7 @@ int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
|
|||
sendRetrieveRsp(&req, &rsp);
|
||||
|
||||
streamMetaReleaseTask(pMeta, pTask);
|
||||
taosMemoryFree(req.pRetrieve);
|
||||
tDeleteStreamRetrieveReq(&req);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -612,6 +612,10 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
|
|||
streamMetaReleaseTask(pMeta, pTask);
|
||||
}
|
||||
|
||||
streamMetaWLock(pMeta);
|
||||
streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt, false);
|
||||
streamMetaWUnLock(pMeta);
|
||||
|
||||
// drop the stream task now
|
||||
streamMetaUnregisterTask(pMeta, pReq->streamId, pReq->taskId);
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
|
||||
#define ROCKS_BATCH_SIZE (4096)
|
||||
|
||||
#if 0
|
||||
static int32_t tsdbOpenBICache(STsdb *pTsdb) {
|
||||
int32_t code = 0;
|
||||
SLRUCache *pCache = taosLRUCacheInit(10 * 1024 * 1024, 0, .5);
|
||||
|
@ -52,6 +53,7 @@ static void tsdbCloseBICache(STsdb *pTsdb) {
|
|||
taosThreadMutexDestroy(&pTsdb->biMutex);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static int32_t tsdbOpenBCache(STsdb *pTsdb) {
|
||||
int32_t code = 0;
|
||||
|
@ -431,25 +433,6 @@ int32_t tsdbCacheCommit(STsdb *pTsdb) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) {
|
||||
SLastCol *pLastCol = NULL;
|
||||
|
||||
char *err = NULL;
|
||||
size_t vlen = 0;
|
||||
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
|
||||
size_t klen = ROCKS_KEY_LEN;
|
||||
char *value = NULL;
|
||||
value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, (char *)key, klen, &vlen, &err);
|
||||
if (NULL != err) {
|
||||
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
|
||||
rocksdb_free(err);
|
||||
}
|
||||
|
||||
pLastCol = tsdbCacheDeserialize(value);
|
||||
|
||||
return pLastCol;
|
||||
}
|
||||
|
||||
static void reallocVarData(SColVal *pColVal) {
|
||||
if (IS_VAR_DATA_TYPE(pColVal->type)) {
|
||||
uint8_t *pVal = pColVal->value.pData;
|
||||
|
@ -476,6 +459,355 @@ static void tsdbCacheDeleter(const void *key, size_t klen, void *value, void *ud
|
|||
taosMemoryFree(value);
|
||||
}
|
||||
|
||||
static int32_t tsdbCacheNewTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, int8_t col_type, int8_t ltype) {
|
||||
int32_t code = 0;
|
||||
|
||||
SLRUCache *pCache = pTsdb->lruCache;
|
||||
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
|
||||
SLastCol noneCol = {.ts = TSKEY_MIN, .colVal = COL_VAL_NONE(cid, col_type), .dirty = 1};
|
||||
SLastCol *pLastCol = &noneCol;
|
||||
|
||||
SLastCol *pTmpLastCol = taosMemoryCalloc(1, sizeof(SLastCol));
|
||||
*pTmpLastCol = *pLastCol;
|
||||
pLastCol = pTmpLastCol;
|
||||
|
||||
reallocVarData(&pLastCol->colVal);
|
||||
size_t charge = sizeof(*pLastCol);
|
||||
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
|
||||
charge += pLastCol->colVal.value.nData;
|
||||
}
|
||||
|
||||
SLastKey *pLastKey = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
|
||||
LRUStatus status = taosLRUCacheInsert(pCache, pLastKey, ROCKS_KEY_LEN, pLastCol, charge, tsdbCacheDeleter, NULL,
|
||||
TAOS_LRU_PRIORITY_LOW, &pTsdb->flushState);
|
||||
if (status != TAOS_LRU_STATUS_OK) {
|
||||
code = -1;
|
||||
}
|
||||
/*
|
||||
// store result back to rocks cache
|
||||
char *value = NULL;
|
||||
size_t vlen = 0;
|
||||
tsdbCacheSerialize(pLastCol, &value, &vlen);
|
||||
|
||||
SLastKey *key = pLastKey;
|
||||
size_t klen = ROCKS_KEY_LEN;
|
||||
rocksdb_writebatch_put(wb, (char *)key, klen, value, vlen);
|
||||
taosMemoryFree(value);
|
||||
*/
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheCommitNoLock(STsdb *pTsdb) {
|
||||
int32_t code = 0;
|
||||
char *err = NULL;
|
||||
|
||||
SLRUCache *pCache = pTsdb->lruCache;
|
||||
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
|
||||
|
||||
taosLRUCacheApply(pCache, tsdbCacheFlushDirty, &pTsdb->flushState);
|
||||
|
||||
rocksMayWrite(pTsdb, true, false, false);
|
||||
rocksMayWrite(pTsdb, true, true, false);
|
||||
rocksdb_flush(pTsdb->rCache.db, pTsdb->rCache.flushoptions, &err);
|
||||
|
||||
if (NULL != err) {
|
||||
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
|
||||
rocksdb_free(err);
|
||||
code = -1;
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, int8_t col_type, int8_t ltype) {
|
||||
int32_t code = 0;
|
||||
|
||||
// build keys & multi get from rocks
|
||||
char **keys_list = taosMemoryCalloc(2, sizeof(char *));
|
||||
size_t *keys_list_sizes = taosMemoryCalloc(2, sizeof(size_t));
|
||||
const size_t klen = ROCKS_KEY_LEN;
|
||||
|
||||
char *keys = taosMemoryCalloc(2, sizeof(SLastKey));
|
||||
((SLastKey *)keys)[0] = (SLastKey){.ltype = 1, .uid = uid, .cid = cid};
|
||||
((SLastKey *)keys)[1] = (SLastKey){.ltype = 0, .uid = uid, .cid = cid};
|
||||
|
||||
keys_list[0] = keys;
|
||||
keys_list[1] = keys + sizeof(SLastKey);
|
||||
keys_list_sizes[0] = klen;
|
||||
keys_list_sizes[1] = klen;
|
||||
|
||||
char **values_list = taosMemoryCalloc(2, sizeof(char *));
|
||||
size_t *values_list_sizes = taosMemoryCalloc(2, sizeof(size_t));
|
||||
char **errs = taosMemoryCalloc(2, sizeof(char *));
|
||||
|
||||
// rocksMayWrite(pTsdb, true, false, false);
|
||||
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, 2, (const char *const *)keys_list, keys_list_sizes,
|
||||
values_list, values_list_sizes, errs);
|
||||
|
||||
for (int i = 0; i < 2; ++i) {
|
||||
if (errs[i]) {
|
||||
rocksdb_free(errs[i]);
|
||||
}
|
||||
}
|
||||
taosMemoryFree(errs);
|
||||
|
||||
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
|
||||
{
|
||||
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[0]);
|
||||
if (NULL != pLastCol) {
|
||||
rocksdb_writebatch_delete(wb, keys_list[0], klen);
|
||||
}
|
||||
pLastCol = tsdbCacheDeserialize(values_list[1]);
|
||||
if (NULL != pLastCol) {
|
||||
rocksdb_writebatch_delete(wb, keys_list[1], klen);
|
||||
}
|
||||
|
||||
rocksdb_free(values_list[0]);
|
||||
rocksdb_free(values_list[1]);
|
||||
|
||||
bool erase = false;
|
||||
LRUHandle *h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[0], klen);
|
||||
if (h) {
|
||||
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pTsdb->lruCache, h);
|
||||
erase = true;
|
||||
|
||||
taosLRUCacheRelease(pTsdb->lruCache, h, erase);
|
||||
}
|
||||
if (erase) {
|
||||
taosLRUCacheErase(pTsdb->lruCache, keys_list[0], klen);
|
||||
}
|
||||
|
||||
erase = false;
|
||||
h = taosLRUCacheLookup(pTsdb->lruCache, keys_list[1], klen);
|
||||
if (h) {
|
||||
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pTsdb->lruCache, h);
|
||||
erase = true;
|
||||
|
||||
taosLRUCacheRelease(pTsdb->lruCache, h, erase);
|
||||
}
|
||||
if (erase) {
|
||||
taosLRUCacheErase(pTsdb->lruCache, keys_list[1], klen);
|
||||
}
|
||||
}
|
||||
|
||||
taosMemoryFree(keys_list[0]);
|
||||
|
||||
taosMemoryFree(keys_list);
|
||||
taosMemoryFree(keys_list_sizes);
|
||||
taosMemoryFree(values_list);
|
||||
taosMemoryFree(values_list_sizes);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheNewTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWrapper *pSchemaRow) {
|
||||
int32_t code = 0;
|
||||
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
|
||||
if (suid < 0) {
|
||||
int nCols = pSchemaRow->nCols;
|
||||
for (int i = 0; i < nCols; ++i) {
|
||||
int16_t cid = pSchemaRow->pSchema[i].colId;
|
||||
int8_t col_type = pSchemaRow->pSchema[i].type;
|
||||
|
||||
(void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
}
|
||||
} else {
|
||||
STSchema *pTSchema = NULL;
|
||||
code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, -1, &pTSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int nCols = pTSchema->numOfCols;
|
||||
for (int i = 0; i < nCols; ++i) {
|
||||
int16_t cid = pTSchema->columns[i].colId;
|
||||
int8_t col_type = pTSchema->columns[i].type;
|
||||
|
||||
(void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
}
|
||||
|
||||
taosMemoryFree(pTSchema);
|
||||
}
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheDropTable(STsdb *pTsdb, tb_uid_t uid, tb_uid_t suid, SSchemaWrapper *pSchemaRow) {
|
||||
int32_t code = 0;
|
||||
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
|
||||
(void)tsdbCacheCommitNoLock(pTsdb);
|
||||
|
||||
if (suid < 0) {
|
||||
int nCols = pSchemaRow->nCols;
|
||||
for (int i = 0; i < nCols; ++i) {
|
||||
int16_t cid = pSchemaRow->pSchema[i].colId;
|
||||
int8_t col_type = pSchemaRow->pSchema[i].type;
|
||||
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
}
|
||||
} else {
|
||||
STSchema *pTSchema = NULL;
|
||||
code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, uid, -1, &pTSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int nCols = pTSchema->numOfCols;
|
||||
for (int i = 0; i < nCols; ++i) {
|
||||
int16_t cid = pTSchema->columns[i].colId;
|
||||
int8_t col_type = pTSchema->columns[i].type;
|
||||
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
}
|
||||
|
||||
taosMemoryFree(pTSchema);
|
||||
}
|
||||
|
||||
rocksMayWrite(pTsdb, true, false, false);
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheDropSubTables(STsdb *pTsdb, SArray *uids, tb_uid_t suid) {
|
||||
int32_t code = 0;
|
||||
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
|
||||
(void)tsdbCacheCommitNoLock(pTsdb);
|
||||
|
||||
STSchema *pTSchema = NULL;
|
||||
code = metaGetTbTSchemaEx(pTsdb->pVnode->pMeta, suid, suid, -1, &pTSchema);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = code;
|
||||
return -1;
|
||||
}
|
||||
for (int i = 0; i < TARRAY_SIZE(uids); ++i) {
|
||||
int64_t uid = ((tb_uid_t *)TARRAY_DATA(uids))[i];
|
||||
|
||||
int nCols = pTSchema->numOfCols;
|
||||
for (int i = 0; i < nCols; ++i) {
|
||||
int16_t cid = pTSchema->columns[i].colId;
|
||||
int8_t col_type = pTSchema->columns[i].type;
|
||||
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
}
|
||||
}
|
||||
|
||||
taosMemoryFree(pTSchema);
|
||||
|
||||
rocksMayWrite(pTsdb, true, false, false);
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheNewNTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, int8_t col_type) {
|
||||
int32_t code = 0;
|
||||
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
|
||||
(void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
|
||||
// rocksMayWrite(pTsdb, true, false, false);
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
//(void)tsdbCacheCommit(pTsdb);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheDropNTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, int8_t col_type) {
|
||||
int32_t code = 0;
|
||||
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
|
||||
(void)tsdbCacheCommitNoLock(pTsdb);
|
||||
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
|
||||
rocksMayWrite(pTsdb, true, false, true);
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheNewSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, int8_t col_type) {
|
||||
int32_t code = 0;
|
||||
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
|
||||
for (int i = 0; i < TARRAY_SIZE(uids); ++i) {
|
||||
tb_uid_t uid = ((tb_uid_t *)TARRAY_DATA(uids))[i];
|
||||
|
||||
(void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheNewTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
}
|
||||
|
||||
// rocksMayWrite(pTsdb, true, false, false);
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
//(void)tsdbCacheCommit(pTsdb);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tsdbCacheDropSTableColumn(STsdb *pTsdb, SArray *uids, int16_t cid, int8_t col_type) {
|
||||
int32_t code = 0;
|
||||
|
||||
taosThreadMutexLock(&pTsdb->lruMutex);
|
||||
|
||||
(void)tsdbCacheCommitNoLock(pTsdb);
|
||||
|
||||
for (int i = 0; i < TARRAY_SIZE(uids); ++i) {
|
||||
int64_t uid = ((tb_uid_t *)TARRAY_DATA(uids))[i];
|
||||
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 0);
|
||||
(void)tsdbCacheDropTableColumn(pTsdb, uid, cid, col_type, 1);
|
||||
}
|
||||
|
||||
rocksMayWrite(pTsdb, true, false, true);
|
||||
|
||||
taosThreadMutexUnlock(&pTsdb->lruMutex);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static SLastCol *tsdbCacheLookup(STsdb *pTsdb, tb_uid_t uid, int16_t cid, int8_t ltype) {
|
||||
SLastCol *pLastCol = NULL;
|
||||
|
||||
char *err = NULL;
|
||||
size_t vlen = 0;
|
||||
SLastKey *key = &(SLastKey){.ltype = ltype, .uid = uid, .cid = cid};
|
||||
size_t klen = ROCKS_KEY_LEN;
|
||||
char *value = NULL;
|
||||
value = rocksdb_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, (char *)key, klen, &vlen, &err);
|
||||
if (NULL != err) {
|
||||
tsdbError("vgId:%d, %s failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, err);
|
||||
rocksdb_free(err);
|
||||
}
|
||||
|
||||
pLastCol = tsdbCacheDeserialize(value);
|
||||
|
||||
return pLastCol;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
int idx;
|
||||
SLastKey key;
|
||||
|
@ -1297,11 +1629,13 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
|
|||
goto _err;
|
||||
}
|
||||
|
||||
#if 0
|
||||
code = tsdbOpenBICache(pTsdb);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
#endif
|
||||
|
||||
code = tsdbOpenBCache(pTsdb);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
@ -1343,7 +1677,9 @@ void tsdbCloseCache(STsdb *pTsdb) {
|
|||
taosThreadMutexDestroy(&pTsdb->lruMutex);
|
||||
}
|
||||
|
||||
#if 0
|
||||
tsdbCloseBICache(pTsdb);
|
||||
#endif
|
||||
tsdbCloseBCache(pTsdb);
|
||||
tsdbClosePgCache(pTsdb);
|
||||
tsdbCloseRocksCache(pTsdb);
|
||||
|
@ -3117,6 +3453,7 @@ int32_t tsdbCacheGetElems(SVnode *pVnode) {
|
|||
return elems;
|
||||
}
|
||||
|
||||
#if 0
|
||||
static void getBICacheKey(int32_t fid, int64_t commitID, char *key, int *len) {
|
||||
struct {
|
||||
int32_t fid;
|
||||
|
@ -3193,7 +3530,6 @@ int32_t tsdbCacheGetBlockIdx(SLRUCache *pCache, SDataFReader *pFileReader, LRUHa
|
|||
return code;
|
||||
}
|
||||
|
||||
#ifdef BUILD_NO_CALL
|
||||
int32_t tsdbBICacheRelease(SLRUCache *pCache, LRUHandle *h) {
|
||||
int32_t code = 0;
|
||||
|
||||
|
|
|
@ -433,10 +433,12 @@ static int32_t vnodeAsyncLaunchWorker(SVAsync *async) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef BUILD_NO_CALL
|
||||
int32_t vnodeAsync(SVAsync *async, EVAPriority priority, int32_t (*execute)(void *), void (*complete)(void *),
|
||||
void *arg, int64_t *taskId) {
|
||||
return vnodeAsyncC(async, 0, priority, execute, complete, arg, taskId);
|
||||
}
|
||||
#endif
|
||||
|
||||
int32_t vnodeAsyncC(SVAsync *async, int64_t channelId, EVAPriority priority, int32_t (*execute)(void *),
|
||||
void (*complete)(void *), void *arg, int64_t *taskId) {
|
||||
|
|
|
@ -181,12 +181,17 @@ void initStateStoreAPI(SStateStore* pStore) {
|
|||
pStore->streamStateSessionPut = streamStateSessionPut;
|
||||
pStore->streamStateSessionGet = streamStateSessionGet;
|
||||
pStore->streamStateSessionDel = streamStateSessionDel;
|
||||
pStore->streamStateSessionReset = streamStateSessionReset;
|
||||
pStore->streamStateSessionClear = streamStateSessionClear;
|
||||
pStore->streamStateSessionGetKVByCur = streamStateSessionGetKVByCur;
|
||||
pStore->streamStateStateAddIfNotExist = streamStateStateAddIfNotExist;
|
||||
pStore->streamStateSessionGetKeyByRange = streamStateSessionGetKeyByRange;
|
||||
pStore->streamStateCountGetKeyByRange = streamStateCountGetKeyByRange;
|
||||
pStore->streamStateSessionAllocWinBuffByNextPosition = streamStateSessionAllocWinBuffByNextPosition;
|
||||
|
||||
pStore->streamStateCountWinAddIfNotExist = streamStateCountWinAddIfNotExist;
|
||||
pStore->streamStateCountWinAdd = streamStateCountWinAdd;
|
||||
|
||||
pStore->updateInfoInit = updateInfoInit;
|
||||
pStore->updateInfoFillBlockData = updateInfoFillBlockData;
|
||||
pStore->updateInfoIsUpdated = updateInfoIsUpdated;
|
||||
|
@ -203,6 +208,7 @@ void initStateStoreAPI(SStateStore* pStore) {
|
|||
pStore->updateInfoDeserialize = updateInfoDeserialize;
|
||||
|
||||
pStore->streamStateSessionSeekKeyNext = streamStateSessionSeekKeyNext;
|
||||
pStore->streamStateCountSeekKeyPrev = streamStateCountSeekKeyPrev;
|
||||
pStore->streamStateSessionSeekKeyCurrentPrev = streamStateSessionSeekKeyCurrentPrev;
|
||||
pStore->streamStateSessionSeekKeyCurrentNext = streamStateSessionSeekKeyCurrentNext;
|
||||
|
||||
|
|
|
@ -848,6 +848,7 @@ typedef struct SCtgCacheItemInfo {
|
|||
do { \
|
||||
CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock); \
|
||||
CTG_API_DEBUG("CTG API leave %s", __FUNCTION__); \
|
||||
return; \
|
||||
} while (0)
|
||||
|
||||
#define CTG_API_ENTER() \
|
||||
|
|
|
@ -2851,7 +2851,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
|
||||
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, tbMeta->tableType, dbFName);
|
||||
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, pTableMeta->tableType, dbFName);
|
||||
|
||||
res.pRes = pTableMeta;
|
||||
taosArrayPush(ctx->pResList, &res);
|
||||
|
@ -2868,7 +2868,7 @@ int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMe
|
|||
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
|
||||
taosHashRelease(dbCache->tbCache, pCache);
|
||||
|
||||
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, tbMeta->tableType, dbFName);
|
||||
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, pTableMeta->tableType, dbFName);
|
||||
|
||||
res.pRes = pTableMeta;
|
||||
taosArrayPush(ctx->pResList, &res);
|
||||
|
|
|
@ -69,6 +69,10 @@ extern "C" {
|
|||
#define EXPLAIN_EVENT_END_FORMAT "End Cond: "
|
||||
#define EXPLAIN_GROUP_CACHE_FORMAT "Group Cache"
|
||||
#define EXPLAIN_DYN_QRY_CTRL_FORMAT "Dynamic Query Control for %s"
|
||||
#define EXPLAIN_COUNT_FORMAT "Count"
|
||||
#define EXPLAIN_COUNT_INFO_FORMAT "Window Count Info"
|
||||
#define EXPLAIN_COUNT_NUM_FORMAT "Window Count=%" PRId64
|
||||
#define EXPLAIN_COUNT_SLIDING_FORMAT "Window Sliding=%" PRId64
|
||||
|
||||
#define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms"
|
||||
#define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms"
|
||||
|
|
|
@ -1735,6 +1735,31 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
|
|||
}
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT: {
|
||||
SCountWinodwPhysiNode *pCountNode = (SCountWinodwPhysiNode *)pNode;
|
||||
EXPLAIN_ROW_NEW(level, EXPLAIN_COUNT_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
|
||||
if (pResNode->pExecInfo) {
|
||||
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
}
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_FUNCTIONS_FORMAT, pCountNode->window.pFuncs->length);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pCountNode->window.node.pOutputDataBlockDesc->totalRowSize);
|
||||
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
|
||||
|
||||
if (verbose) {
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_COUNT_NUM_FORMAT, pCountNode->windowCount);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_COUNT_SLIDING_FORMAT, pCountNode->windowSliding);
|
||||
EXPLAIN_ROW_END();
|
||||
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
default:
|
||||
qError("not supported physical node type %d", pNode->type);
|
||||
return TSDB_CODE_APP_ERROR;
|
||||
|
|
|
@ -283,6 +283,42 @@ typedef struct STableScanInfo {
|
|||
bool needCountEmptyTable;
|
||||
} STableScanInfo;
|
||||
|
||||
typedef enum ESubTableInputType {
|
||||
SUB_TABLE_MEM_BLOCK,
|
||||
SUB_TABLE_EXT_PAGES,
|
||||
} ESubTableInputType;
|
||||
|
||||
typedef struct STmsSubTableInput {
|
||||
STsdbReader* pReader;
|
||||
SQueryTableDataCond tblCond;
|
||||
STableKeyInfo* pKeyInfo;
|
||||
bool bInMemReader;
|
||||
ESubTableInputType type;
|
||||
SSDataBlock* pReaderBlock;
|
||||
|
||||
SArray* aBlockPages;
|
||||
SSDataBlock* pPageBlock;
|
||||
int32_t pageIdx;
|
||||
|
||||
int32_t rowIdx;
|
||||
int64_t* aTs;
|
||||
} STmsSubTableInput;
|
||||
|
||||
typedef struct SBlockOrderInfo SBlockOrderInfo;
|
||||
typedef struct STmsSubTablesMergeInfo {
|
||||
SBlockOrderInfo* pOrderInfo;
|
||||
|
||||
int32_t numSubTables;
|
||||
STmsSubTableInput* aInputs;
|
||||
SMultiwayMergeTreeInfo* pTree;
|
||||
int32_t numSubTablesCompleted;
|
||||
|
||||
int32_t numTableBlocksInMem;
|
||||
SDiskbasedBuf* pBlocksBuf;
|
||||
|
||||
int32_t numInMemReaders;
|
||||
} STmsSubTablesMergeInfo;
|
||||
|
||||
typedef struct STableMergeScanInfo {
|
||||
int32_t tableStartIndex;
|
||||
int32_t tableEndIndex;
|
||||
|
@ -296,7 +332,6 @@ typedef struct STableMergeScanInfo {
|
|||
SSDataBlock* pSortInputBlock;
|
||||
SSDataBlock* pReaderBlock;
|
||||
int64_t startTs; // sort start time
|
||||
SArray* sortSourceParams;
|
||||
SLimitInfo limitInfo;
|
||||
int64_t numOfRows;
|
||||
SScanInfo scanInfo;
|
||||
|
@ -317,6 +352,8 @@ typedef struct STableMergeScanInfo {
|
|||
SSDataBlock* nextDurationBlocks[2];
|
||||
bool rtnNextDurationBlocks;
|
||||
int32_t nextDurationBlocksIdx;
|
||||
|
||||
STmsSubTablesMergeInfo* pSubTablesMergeInfo;
|
||||
} STableMergeScanInfo;
|
||||
|
||||
typedef struct STagScanFilterContext {
|
||||
|
@ -371,6 +408,8 @@ typedef struct SStreamAggSupporter {
|
|||
STimeWindow winRange;
|
||||
SStorageAPI* pSessionAPI;
|
||||
struct SUpdateInfo* pUpdateInfo;
|
||||
int32_t windowCount;
|
||||
int32_t windowSliding;
|
||||
} SStreamAggSupporter;
|
||||
|
||||
typedef struct SWindowSupporter {
|
||||
|
@ -663,6 +702,27 @@ typedef struct SStreamEventAggOperatorInfo {
|
|||
SFilterInfo* pEndCondInfo;
|
||||
} SStreamEventAggOperatorInfo;
|
||||
|
||||
typedef struct SStreamCountAggOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
SStreamAggSupporter streamAggSup;
|
||||
SExprSupp scalarSupp; // supporter for perform scalar function
|
||||
SGroupResInfo groupResInfo;
|
||||
int32_t primaryTsIndex; // primary timestamp slot id
|
||||
STimeWindowAggSupp twAggSup;
|
||||
SSDataBlock* pDelRes;
|
||||
SSHashObj* pStDeleted;
|
||||
void* pDelIterator;
|
||||
bool ignoreExpiredData;
|
||||
bool ignoreExpiredDataSaved;
|
||||
SArray* pUpdated;
|
||||
SSHashObj* pStUpdated;
|
||||
int64_t dataVersion;
|
||||
SArray* historyWins;
|
||||
bool reCkBlock;
|
||||
bool recvGetAll;
|
||||
SSDataBlock* pCheckpointRes;
|
||||
} SStreamCountAggOperatorInfo;
|
||||
|
||||
typedef struct SStreamPartitionOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
SPartitionBySupporter partitionSup;
|
||||
|
@ -799,6 +859,9 @@ bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, void* pState, ST
|
|||
SStateStore* pStore);
|
||||
void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid,
|
||||
uint64_t* pGp, void* pTbName);
|
||||
void appendAllColumnToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, TSKEY* pCalStartTs,
|
||||
TSKEY* pCalEndTs, uint64_t* pUid, uint64_t* pGp, void* pTbName);
|
||||
|
||||
uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
|
||||
|
||||
int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup,
|
||||
|
@ -863,7 +926,7 @@ void resetWinRange(STimeWindow* winRange);
|
|||
bool checkExpiredData(SStateStore* pAPI, SUpdateInfo* pUpdateInfo, STimeWindowAggSupp* pTwSup, uint64_t tableId, TSKEY ts);
|
||||
int64_t getDeleteMark(SWindowPhysiNode* pWinPhyNode, int64_t interval);
|
||||
void resetUnCloseSessionWinInfo(SSHashObj* winMap);
|
||||
void setStreamOperatorCompleted(struct SOperatorInfo* pOperator);
|
||||
void setStreamOperatorCompleted(struct SOperatorInfo* pOperator);
|
||||
void reloadAggSupFromDownStream(struct SOperatorInfo* downstream, SStreamAggSupporter* pAggSup);
|
||||
|
||||
int32_t encodeSSessionKey(void** buf, SSessionKey* key);
|
||||
|
@ -880,11 +943,18 @@ void freeExchangeGetBasicOperatorParam(void* pParam);
|
|||
void freeOperatorParam(SOperatorParam* pParam, SOperatorParamType type);
|
||||
void freeResetOperatorParams(struct SOperatorInfo* pOperator, SOperatorParamType type, bool allFree);
|
||||
SSDataBlock* getNextBlockFromDownstreamImpl(struct SOperatorInfo* pOperator, int32_t idx, bool clearParam);
|
||||
void getCountWinRange(SStreamAggSupporter* pAggSup, const SSessionKey* pKey, EStreamType mode, SSessionKey* pDelRange);
|
||||
bool doDeleteSessionWindow(SStreamAggSupporter* pAggSup, SSessionKey* pKey);
|
||||
|
||||
void saveDeleteInfo(SArray* pWins, SSessionKey key);
|
||||
void removeSessionResults(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SArray* pWins);
|
||||
void copyDeleteWindowInfo(SArray* pResWins, SSHashObj* pStDeleted);
|
||||
|
||||
bool inSlidingWindow(SInterval* pInterval, STimeWindow* pWin, SDataBlockInfo* pBlockInfo);
|
||||
bool inCalSlidingWindow(SInterval* pInterval, STimeWindow* pWin, TSKEY calStart, TSKEY calEnd, EStreamType blockType);
|
||||
bool compareVal(const char* v, const SStateKeys* pKey);
|
||||
bool inWinRange(STimeWindow* range, STimeWindow* cur);
|
||||
void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* result);
|
||||
|
||||
int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBlockInfo* pDataBlockInfo,
|
||||
TSKEY* primaryKeys, int32_t prevPosition, int32_t order);
|
||||
|
|
|
@ -154,10 +154,14 @@ SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFi
|
|||
|
||||
SOperatorInfo* createStreamEventAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle);
|
||||
|
||||
SOperatorInfo* createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle);
|
||||
|
||||
SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createCountwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createGroupCacheOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SGroupCachePhysiNode* pPhyciNode, SExecTaskInfo* pTaskInfo);
|
||||
|
||||
SOperatorInfo* createDynQueryCtrlOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SDynQueryCtrlPhysiNode* pPhyciNode, SExecTaskInfo* pTaskInfo);
|
||||
|
|
|
@ -64,8 +64,6 @@ typedef struct {
|
|||
SSchemaWrapper* schema;
|
||||
char tbName[TSDB_TABLE_NAME_LEN]; // this is the current scan table: todo refactor
|
||||
int8_t recoverStep;
|
||||
// bool recoverStep1Finished;
|
||||
// bool recoverStep2Finished;
|
||||
int8_t recoverScanFinished;
|
||||
SQueryTableDataCond tableCond;
|
||||
SVersionRange fillHistoryVer;
|
||||
|
@ -84,7 +82,6 @@ struct SExecTaskInfo {
|
|||
int64_t version; // used for stream to record wal version, why not move to sschemainfo
|
||||
SStreamTaskInfo streamInfo;
|
||||
SArray* schemaInfos;
|
||||
SSchemaInfo schemaInfo;
|
||||
const char* sql; // query sql string
|
||||
jmp_buf env; // jump to this position when error happens.
|
||||
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
|
||||
|
|
|
@ -0,0 +1,271 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "executorInt.h"
|
||||
#include "filter.h"
|
||||
#include "function.h"
|
||||
#include "functionMgt.h"
|
||||
#include "operator.h"
|
||||
#include "querytask.h"
|
||||
#include "tcommon.h"
|
||||
#include "tcompare.h"
|
||||
#include "tdatablock.h"
|
||||
#include "ttime.h"
|
||||
|
||||
typedef struct SCountWindowResult {
|
||||
int32_t winRows;
|
||||
SResultRow row;
|
||||
} SCountWindowResult;
|
||||
|
||||
typedef struct SCountWindowSupp {
|
||||
SArray* pWinStates;
|
||||
int32_t stateIndex;
|
||||
} SCountWindowSupp;
|
||||
|
||||
typedef struct SCountWindowOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
SAggSupporter aggSup;
|
||||
SExprSupp scalarSup;
|
||||
int32_t tsSlotId; // primary timestamp column slot id
|
||||
STimeWindowAggSupp twAggSup;
|
||||
uint64_t groupId; // current group id, used to identify the data block from different groups
|
||||
SResultRow* pRow;
|
||||
int32_t windowCount;
|
||||
int32_t windowSliding;
|
||||
SCountWindowSupp countSup;
|
||||
} SCountWindowOperatorInfo;
|
||||
|
||||
void destroyCountWindowOperatorInfo(void* param) {
|
||||
SCountWindowOperatorInfo* pInfo = (SCountWindowOperatorInfo*)param;
|
||||
if (pInfo == NULL) {
|
||||
return;
|
||||
}
|
||||
cleanupBasicInfo(&pInfo->binfo);
|
||||
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
||||
|
||||
cleanupAggSup(&pInfo->aggSup);
|
||||
cleanupExprSupp(&pInfo->scalarSup);
|
||||
taosArrayDestroy(pInfo->countSup.pWinStates);
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
static void clearWinStateBuff(SCountWindowResult* pBuff) {
|
||||
pBuff->winRows = 0;
|
||||
}
|
||||
|
||||
static SCountWindowResult* getCountWinStateInfo(SCountWindowSupp* pCountSup) {
|
||||
SCountWindowResult* pBuffInfo = taosArrayGet(pCountSup->pWinStates, pCountSup->stateIndex);
|
||||
pCountSup->stateIndex = (pCountSup->stateIndex + 1) % taosArrayGetSize(pCountSup->pWinStates);
|
||||
return pBuffInfo;
|
||||
}
|
||||
|
||||
static SCountWindowResult* setCountWindowOutputBuff(SExprSupp* pExprSup, SCountWindowSupp* pCountSup, SResultRow** pResult) {
|
||||
SCountWindowResult* pBuff = getCountWinStateInfo(pCountSup);
|
||||
(*pResult) = &pBuff->row;
|
||||
setResultRowInitCtx(*pResult, pExprSup->pCtx, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset);
|
||||
return pBuff;
|
||||
}
|
||||
|
||||
static int32_t updateCountWindowInfo(int32_t start, int32_t blockRows, int32_t countWinRows, int32_t* pCurrentRows) {
|
||||
int32_t rows = TMIN(countWinRows - (*pCurrentRows), blockRows - start);
|
||||
(*pCurrentRows) += rows;
|
||||
return rows;
|
||||
}
|
||||
|
||||
int32_t doCountWindowAggImpl(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
SExprSupp* pExprSup = &pOperator->exprSupp;
|
||||
SCountWindowOperatorInfo* pInfo = pOperator->info;
|
||||
SSDataBlock* pRes = pInfo->binfo.pRes;
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->tsSlotId);
|
||||
TSKEY* tsCols = (TSKEY*)pColInfoData->pData;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
for (int32_t i = 0; i < pBlock->info.rows;) {
|
||||
int32_t step = pInfo->windowSliding;
|
||||
SCountWindowResult* pBuffInfo = setCountWindowOutputBuff(pExprSup, &pInfo->countSup, &pInfo->pRow);
|
||||
int32_t prevRows = pBuffInfo->winRows;
|
||||
int32_t num = updateCountWindowInfo(i, pBlock->info.rows, pInfo->windowCount, &pBuffInfo->winRows);
|
||||
if (prevRows == 0) {
|
||||
pInfo->pRow->win.skey = tsCols[i];
|
||||
}
|
||||
pInfo->pRow->win.ekey = tsCols[num + i - 1];
|
||||
|
||||
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->pRow->win, 0);
|
||||
applyAggFunctionOnPartialTuples(pTaskInfo, pExprSup->pCtx, &pInfo->twAggSup.timeWindowData, i, num,
|
||||
pBlock->info.rows, pExprSup->numOfExprs);
|
||||
if (pBuffInfo->winRows == pInfo->windowCount) {
|
||||
doUpdateNumOfRows(pExprSup->pCtx, pInfo->pRow, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset);
|
||||
copyResultrowToDataBlock(pExprSup->pExprInfo, pExprSup->numOfExprs, pInfo->pRow, pExprSup->pCtx, pRes,
|
||||
pExprSup->rowEntryInfoOffset, pTaskInfo);
|
||||
pRes->info.rows += pInfo->pRow->numOfRows;
|
||||
clearWinStateBuff(pBuffInfo);
|
||||
clearResultRowInitFlag(pExprSup->pCtx, pExprSup->numOfExprs);
|
||||
}
|
||||
if (pInfo->windowCount != pInfo->windowSliding) {
|
||||
if (prevRows <= pInfo->windowSliding) {
|
||||
if (pBuffInfo->winRows > pInfo->windowSliding) {
|
||||
step = pInfo->windowSliding - prevRows;
|
||||
}
|
||||
} else {
|
||||
step = 0;
|
||||
}
|
||||
}
|
||||
i += step;
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static void buildCountResult(SExprSupp* pExprSup, SCountWindowSupp* pCountSup, SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock) {
  SResultRow* pResultRow = NULL;
  for (int32_t i = 0; i < taosArrayGetSize(pCountSup->pWinStates); i++) {
    SCountWindowResult* pBuff = setCountWindowOutputBuff(pExprSup, pCountSup, &pResultRow);
    if (pBuff->winRows == 0) {
      continue;
    }
    doUpdateNumOfRows(pExprSup->pCtx, pResultRow, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset);
    copyResultrowToDataBlock(pExprSup->pExprInfo, pExprSup->numOfExprs, pResultRow, pExprSup->pCtx, pBlock,
                             pExprSup->rowEntryInfoOffset, pTaskInfo);
    pBlock->info.rows += pResultRow->numOfRows;
    clearWinStateBuff(pBuff);
    clearResultRowInitFlag(pExprSup->pCtx, pExprSup->numOfExprs);
  }
}

static SSDataBlock* countWindowAggregate(SOperatorInfo* pOperator) {
  SCountWindowOperatorInfo* pInfo = pOperator->info;
  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
  SExprSupp* pExprSup = &pOperator->exprSupp;
  int32_t order = pInfo->binfo.inputTsOrder;
  SSDataBlock* pRes = pInfo->binfo.pRes;
  SOperatorInfo* downstream = pOperator->pDownstream[0];

  blockDataCleanup(pRes);

  while (1) {
    SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0);
    if (pBlock == NULL) {
      break;
    }

    pRes->info.scanFlag = pBlock->info.scanFlag;
    setInputDataBlock(pExprSup, pBlock, order, MAIN_SCAN, true);
    blockDataUpdateTsWindow(pBlock, pInfo->tsSlotId);

    // there is a scalar expression that needs to be calculated right before applying the group aggregation.
    if (pInfo->scalarSup.pExprInfo != NULL) {
      pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,
                                              pInfo->scalarSup.numOfExprs, NULL);
      if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
        T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
      }
    }

    if (pInfo->groupId == 0) {
      pInfo->groupId = pBlock->info.id.groupId;
    } else if (pInfo->groupId != pBlock->info.id.groupId) {
      buildCountResult(pExprSup, &pInfo->countSup, pTaskInfo, pRes);
      pInfo->groupId = pBlock->info.id.groupId;
    }

    doCountWindowAggImpl(pOperator, pBlock);
    if (pRes->info.rows >= pOperator->resultInfo.threshold) {
      return pRes;
    }
  }

  buildCountResult(pExprSup, &pInfo->countSup, pTaskInfo, pRes);
  return pRes->info.rows == 0 ? NULL : pRes;
}

SOperatorInfo* createCountwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode,
                                             SExecTaskInfo* pTaskInfo) {
  SCountWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SCountWindowOperatorInfo));
  SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
  if (pInfo == NULL || pOperator == NULL) {
    goto _error;
  }

  int32_t code = TSDB_CODE_SUCCESS;
  SCountWinodwPhysiNode* pCountWindowNode = (SCountWinodwPhysiNode*)physiNode;

  pInfo->tsSlotId = ((SColumnNode*)pCountWindowNode->window.pTspk)->slotId;

  if (pCountWindowNode->window.pExprs != NULL) {
    int32_t numOfScalarExpr = 0;
    SExprInfo* pScalarExprInfo = createExprInfo(pCountWindowNode->window.pExprs, NULL, &numOfScalarExpr);
    code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore);
    if (code != TSDB_CODE_SUCCESS) {
      goto _error;
    }
  }

  size_t keyBufSize = 0;
  int32_t num = 0;
  SExprInfo* pExprInfo = createExprInfo(pCountWindowNode->window.pFuncs, NULL, &num);
  initResultSizeInfo(&pOperator->resultInfo, 4096);

  code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str,
                    pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  SSDataBlock* pResBlock = createDataBlockFromDescNode(pCountWindowNode->window.node.pOutputDataBlockDesc);
  blockDataEnsureCapacity(pResBlock, pOperator->resultInfo.capacity);

  initBasicInfo(&pInfo->binfo, pResBlock);
  initResultRowInfo(&pInfo->binfo.resultRowInfo);
  pInfo->binfo.inputTsOrder = physiNode->inputTsOrder;
  pInfo->binfo.outputTsOrder = physiNode->outputTsOrder;
  pInfo->windowCount = pCountWindowNode->windowCount;
  pInfo->windowSliding = pCountWindowNode->windowSliding;
  // sizeof(SCountWindowResult)
  int32_t itemSize = sizeof(int32_t) + pInfo->aggSup.resultRowSize;
  int32_t numOfItem = 1;
  if (pInfo->windowCount != pInfo->windowSliding) {
    numOfItem = pInfo->windowCount / pInfo->windowSliding + 1;
  }
  pInfo->countSup.pWinStates = taosArrayInit_s(itemSize, numOfItem);
  if (!pInfo->countSup.pWinStates) {
    goto _error;
  }

  pInfo->countSup.stateIndex = 0;

  initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);

  setOperatorInfo(pOperator, "CountWindowOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT, true, OP_NOT_OPENED, pInfo,
                  pTaskInfo);
  pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, countWindowAggregate, NULL, destroyCountWindowOperatorInfo,
                                         optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);

  code = appendDownstream(pOperator, &downstream, 1);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  return pOperator;

_error:
  if (pInfo != NULL) {
    destroyCountWindowOperatorInfo(pInfo);
  }

  taosMemoryFreeClear(pOperator);
  pTaskInfo->code = code;
  return NULL;
}
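For context on the `numOfItem` sizing above: with a sliding count window (`windowSliding < windowCount`) several windows can be open at the same time, and the operator pre-allocates one state slot per possibly-open window. A minimal standalone sketch of that arithmetic, using a hypothetical helper name rather than any TDengine API:

```c
#include <stdio.h>

// Hypothetical helper mirroring the numOfItem computation above: how many
// count windows can be open at once for a given count/sliding setting.
static int maxOpenCountWindows(int windowCount, int windowSliding) {
  if (windowCount == windowSliding) {
    return 1;                              // tumbling count window
  }
  return windowCount / windowSliding + 1;  // overlapping (sliding) windows
}

int main(void) {
  printf("%d\n", maxOpenCountWindows(10, 10));  // 1
  printf("%d\n", maxOpenCountWindows(10, 3));   // 4
  return 0;
}
```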
@ -2245,6 +2245,8 @@ char* getStreamOpName(uint16_t opType) {
      return "stream partitionby";
    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
      return "stream event";
    case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
      return "stream count";
  }
  return "";
}
@ -1027,52 +1027,6 @@ int32_t qSetStreamOperatorOptionForScanHistory(qTaskInfo_t tinfo) {
  return 0;
}

int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo) {
  SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
  const char* id = GET_TASKID(pTaskInfo);
  SOperatorInfo* pOperator = pTaskInfo->pRoot;

  while (1) {
    uint16_t type = pOperator->operatorType;
    if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
        type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_MID_INTERVAL) {
      SStreamIntervalOperatorInfo* pInfo = pOperator->info;
      pInfo->twAggSup.calTrigger = pInfo->twAggSup.calTriggerSaved;
      pInfo->twAggSup.deleteMark = pInfo->twAggSup.deleteMarkSaved;
      pInfo->ignoreExpiredData = pInfo->ignoreExpiredDataSaved;
      qInfo("%s restore stream agg executors param for interval: %d, %" PRId64, id, pInfo->twAggSup.calTrigger,
            pInfo->twAggSup.deleteMark);
    } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
               type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
               type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
      SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
      pInfo->twAggSup.calTrigger = pInfo->twAggSup.calTriggerSaved;
      pInfo->twAggSup.deleteMark = pInfo->twAggSup.deleteMarkSaved;
      pInfo->ignoreExpiredData = pInfo->ignoreExpiredDataSaved;
      qInfo("%s restore stream agg executor param for session: %d, %" PRId64, id, pInfo->twAggSup.calTrigger,
            pInfo->twAggSup.deleteMark);
    } else if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
      SStreamStateAggOperatorInfo* pInfo = pOperator->info;
      pInfo->twAggSup.calTrigger = pInfo->twAggSup.calTriggerSaved;
      pInfo->twAggSup.deleteMark = pInfo->twAggSup.deleteMarkSaved;
      pInfo->ignoreExpiredData = pInfo->ignoreExpiredDataSaved;
      qInfo("%s restore stream agg executor param for state: %d, %" PRId64, id, pInfo->twAggSup.calTrigger,
            pInfo->twAggSup.deleteMark);
    }

    // iterate operator tree
    if (pOperator->numOfDownstream != 1 || pOperator->pDownstream[0] == NULL) {
      if (pOperator->numOfDownstream > 1) {
        qError("unexpected stream, multiple downstream");
        return -1;
      }
      return 0;
    } else {
      pOperator = pOperator->pDownstream[0];
    }
  }
}

bool qStreamScanhistoryFinished(qTaskInfo_t tinfo) {
  SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
  return pTaskInfo->streamInfo.recoverScanFinished;
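The restore path shown above mirrors a corresponding save path: when the task enters scan-history mode, the trigger/expiry settings are overridden and the user-configured values are parked in the `*Saved` fields, then copied back afterwards. A simplified, self-contained sketch of that save/restore pattern (the struct, override values, and function names here are illustrative, not the real TDengine operator structs):

```c
#include <stdbool.h>
#include <stdint.h>

// Hypothetical, simplified version of the saved-vs-active parameter pattern.
typedef struct {
  int32_t calTrigger, calTriggerSaved;
  int64_t deleteMark, deleteMarkSaved;
  bool    ignoreExpiredData, ignoreExpiredDataSaved;
} SScanHistoryParams;

static void enterScanHistory(SScanHistoryParams* p) {
  p->calTriggerSaved = p->calTrigger;           // remember user settings
  p->deleteMarkSaved = p->deleteMark;
  p->ignoreExpiredDataSaved = p->ignoreExpiredData;
  p->calTrigger = 0;                            // placeholder override values
  p->deleteMark = INT64_MAX;
  p->ignoreExpiredData = false;
}

static void restoreAfterScanHistory(SScanHistoryParams* p) {
  p->calTrigger = p->calTriggerSaved;           // same assignments as the hunk above
  p->deleteMark = p->deleteMarkSaved;
  p->ignoreExpiredData = p->ignoreExpiredDataSaved;
}
```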
@ -542,6 +542,10 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR
    pOptr = createGroupCacheOperatorInfo(ops, size, (SGroupCachePhysiNode*)pPhyNode, pTaskInfo);
  } else if (QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL == type) {
    pOptr = createDynQueryCtrlOperatorInfo(ops, size, (SDynQueryCtrlPhysiNode*)pPhyNode, pTaskInfo);
  } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT == type) {
    pOptr = createStreamCountAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, pHandle);
  } else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT == type) {
    pOptr = createCountwindowOperatorInfo(ops[0], pPhyNode, pTaskInfo);
  } else {
    terrno = TSDB_CODE_INVALID_PARA;
    pTaskInfo->code = terrno;
@ -1284,6 +1284,14 @@ static bool isSlidingWindow(SStreamScanInfo* pInfo) {
  return isIntervalWindow(pInfo) && pInfo->interval.interval != pInfo->interval.sliding;
}

static bool isCountSlidingWindow(SStreamScanInfo* pInfo) {
  return pInfo->windowSup.pStreamAggSup && (pInfo->windowSup.pStreamAggSup->windowCount != pInfo->windowSup.pStreamAggSup->windowSliding);
}

static bool isCountWindow(SStreamScanInfo* pInfo) {
  return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT;
}

static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) {
  SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
  uint64_t* groupCol = (uint64_t*)pColInfo->pData;

@ -1394,7 +1402,7 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
  TSKEY* calEndData = (TSKEY*)pCalEndTsCol->pData;

  setGroupId(pInfo, pBlock, GROUPID_COLUMN_INDEX, *pRowIndex);
  if (isSlidingWindow(pInfo)) {
  if (isSlidingWindow(pInfo) || isCountSlidingWindow(pInfo)) {
    pInfo->updateWin.skey = calStartData[*pRowIndex];
    pInfo->updateWin.ekey = calEndData[*pRowIndex];
  }

@ -1588,6 +1596,47 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
  return TSDB_CODE_SUCCESS;
}

static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock, EStreamType mode) {
  blockDataCleanup(pDestBlock);
  if (pSrcBlock->info.rows == 0) {
    return TSDB_CODE_SUCCESS;
  }
  int32_t code = blockDataEnsureCapacity(pDestBlock, pSrcBlock->info.rows);
  if (code != TSDB_CODE_SUCCESS) {
    return code;
  }
  ASSERT(taosArrayGetSize(pSrcBlock->pDataBlock) >= 3);
  SColumnInfoData* pStartTsCol = taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
  TSKEY* startData = (TSKEY*)pStartTsCol->pData;
  SColumnInfoData* pEndTsCol = taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
  TSKEY* endData = (TSKEY*)pEndTsCol->pData;
  SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
  uint64_t* uidCol = (uint64_t*)pUidCol->pData;

  SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
  SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
  SColumnInfoData* pDestUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX);
  SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
  SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
  SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
  int64_t ver = pSrcBlock->info.version - 1;
  for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
    uint64_t groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], ver);
    SSessionKey startWin = {.win.skey = startData[i], .win.ekey = endData[i], .groupId = groupId};
    SSessionKey range = {0};
    getCountWinRange(pInfo->windowSup.pStreamAggSup, &startWin, mode, &range);
    colDataSetVal(pDestStartCol, i, (const char*)&range.win.skey, false);
    colDataSetVal(pDestEndCol, i, (const char*)&range.win.ekey, false);

    colDataSetNULL(pDestUidCol, i);
    colDataSetVal(pDestGpCol, i, (const char*)&groupId, false);
    colDataSetVal(pDestCalStartTsCol, i, (const char*)&range.win.skey, false);
    colDataSetVal(pDestCalEndTsCol, i, (const char*)&range.win.ekey, false);
    pDestBlock->info.rows++;
  }
  return TSDB_CODE_SUCCESS;
}

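To make the column layout that `generateCountScanRange` fills easier to follow, here is a simplified, flattened model of one row of the "stream special block": start/end timestamps of the affected data, uid, group id, and the calculation range to recompute. The struct and helper below are illustrative stand-ins, not the TDengine column-wise block API.

```c
#include <stdint.h>

// Simplified row model mirroring the column order used above:
// START_TS / END_TS / UID / GROUPID / CALCULATE_START_TS / CALCULATE_END_TS.
typedef struct {
  int64_t  startTs;
  int64_t  endTs;
  uint64_t uid;        // left 0 for count windows (the uid column is set to NULL)
  uint64_t groupId;
  int64_t  calStartTs;
  int64_t  calEndTs;
} SSpecialRowSketch;

typedef struct {
  SSpecialRowSketch* rows;
  int32_t numRows;
  int32_t cap;
} SSpecialBlockSketch;

// For count windows the calculation range equals the expanded window range
// returned by the state lookup (getCountWinRange above).
static int appendRangeSketch(SSpecialBlockSketch* b, int64_t skey, int64_t ekey, uint64_t gid) {
  if (b->numRows >= b->cap) return -1;
  b->rows[b->numRows++] = (SSpecialRowSketch){skey, ekey, 0, gid, skey, ekey};
  return 0;
}
```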
static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) {
  blockDataCleanup(pDestBlock);
  int32_t rows = pSrcBlock->info.rows;

@ -1724,12 +1773,14 @@ static int32_t generateDeleteResultBlock(SStreamScanInfo* pInfo, SSDataBlock* pS
  return TSDB_CODE_SUCCESS;
}

static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) {
static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock, EStreamType type) {
  int32_t code = TSDB_CODE_SUCCESS;
  if (isIntervalWindow(pInfo)) {
    code = generateIntervalScanRange(pInfo, pSrcBlock, pDestBlock);
  } else if (isSessionWindow(pInfo) || isStateWindow(pInfo)) {
    code = generateSessionScanRange(pInfo, pSrcBlock, pDestBlock);
  } else if (isCountWindow(pInfo)) {
    code = generateCountScanRange(pInfo, pSrcBlock, pDestBlock, type);
  } else {
    code = generateDeleteResultBlock(pInfo, pSrcBlock, pDestBlock);
  }

@ -1742,6 +1793,11 @@ static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock,

void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid,
                                      uint64_t* pGp, void* pTbName) {
  appendAllColumnToStreamSpecialBlock(pBlock, pStartTs, pEndTs, pStartTs, pEndTs, pUid, pGp, pTbName);
}

void appendAllColumnToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, TSKEY* pCalStartTs,
                                         TSKEY* pCalEndTs, uint64_t* pUid, uint64_t* pGp, void* pTbName) {
  SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
  SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
  SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);

@ -1753,8 +1809,8 @@ void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKE
  colDataSetVal(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false);
  colDataSetVal(pUidCol, pBlock->info.rows, (const char*)pUid, false);
  colDataSetVal(pGpCol, pBlock->info.rows, (const char*)pGp, false);
  colDataSetVal(pCalStartCol, pBlock->info.rows, (const char*)pStartTs, false);
  colDataSetVal(pCalEndCol, pBlock->info.rows, (const char*)pEndTs, false);
  colDataSetVal(pCalStartCol, pBlock->info.rows, (const char*)pCalStartTs, false);
  colDataSetVal(pCalEndCol, pBlock->info.rows, (const char*)pCalEndTs, false);
  colDataSetVal(pTableCol, pBlock->info.rows, (const char*)pTbName, pTbName == NULL);
  pBlock->info.rows++;
}

@ -2159,6 +2215,14 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
    taosMemoryFree(pUpInfo);
  }
}
static bool hasScanRange(SStreamScanInfo* pInfo) {
  SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup;
  return pSup && pSup->pScanBlock->info.rows > 0 && (isStateWindow(pInfo) || isCountWindow(pInfo));
}

static bool isStreamWindow(SStreamScanInfo* pInfo) {
  return isIntervalWindow(pInfo) || isSessionWindow(pInfo) || isStateWindow(pInfo) || isCountWindow(pInfo);
}

static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
  // NOTE: this operator does never check if current status is done or not

@ -2307,7 +2371,7 @@ FETCH_NEXT_BLOCK:
        goto FETCH_NEXT_BLOCK;
      }

      if (!isIntervalWindow(pInfo) && !isSessionWindow(pInfo) && !isStateWindow(pInfo)) {
      if (!isStreamWindow(pInfo)) {
        generateDeleteResultBlock(pInfo, pDelBlock, pInfo->pDeleteDataRes);
        pInfo->pDeleteDataRes->info.type = STREAM_DELETE_RESULT;
        printSpecDataBlock(pDelBlock, getStreamOpName(pOperator->operatorType), "delete result", GET_TASKID(pTaskInfo));

@ -2321,7 +2385,7 @@ FETCH_NEXT_BLOCK:
      } else {
        pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
        pInfo->updateResIndex = 0;
        generateScanRange(pInfo, pDelBlock, pInfo->pUpdateRes);
        generateScanRange(pInfo, pDelBlock, pInfo->pUpdateRes, STREAM_DELETE_DATA);
        prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
        copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
        pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA;

@ -2359,7 +2423,7 @@ FETCH_NEXT_BLOCK:
      }
    } break;
    case STREAM_SCAN_FROM_DELETE_DATA: {
      generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
      generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes, STREAM_DELETE_DATA);
      prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
      pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
      copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);

@ -2367,7 +2431,7 @@ FETCH_NEXT_BLOCK:
      return pInfo->pDeleteDataRes;
    } break;
    case STREAM_SCAN_FROM_UPDATERES: {
      generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
      generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes, STREAM_CLEAR);
      prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
      pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
      return pInfo->pUpdateRes;

@ -2392,10 +2456,10 @@ FETCH_NEXT_BLOCK:
      break;
    }

    SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup;
    if (isStateWindow(pInfo) && pSup->pScanBlock->info.rows > 0) {
    if (hasScanRange(pInfo)) {
      pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
      pInfo->updateResIndex = 0;
      SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup;
      copyDataBlock(pInfo->pUpdateRes, pSup->pScanBlock);
      blockDataCleanup(pSup->pScanBlock);
      prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
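The `EStreamType` argument threaded through `generateScanRange` above distinguishes recalculation caused by updates (`STREAM_CLEAR`) from recalculation caused by deletes (`STREAM_DELETE_DATA`); for count windows a delete additionally pulls the following windows of the group into the range, because removing rows shifts every later count boundary. A tiny illustrative restatement of that distinction (simplified enum and logic, not the real implementation):

```c
// Illustrative only: whether later windows in the same group must be
// re-expanded into the scan range, depending on the event kind.
typedef enum { MODE_CLEAR, MODE_DELETE_DATA } EModeSketch;

static int expandFollowingWindows(EModeSketch mode) {
  // Deletes shift subsequent count-window boundaries; plain updates do not.
  return mode == MODE_DELETE_DATA;
}
```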
@ -3357,6 +3421,420 @@ _error:
  return NULL;
}

// table merge scan operator

// table merge scan operator
// TODO: limit / duration optimization
// TODO: get block from tsdReader function, with task killed, func_data all filter out, skip, finish
// TODO: error processing, memory freeing
// TODO: add log for error and perf
// TODO: tsdb reader open/close dynamically
// TODO: blockdata deep cleanup

static int32_t subTblRowCompareFn(const void* pLeft, const void* pRight, void* param) {
  int32_t left = *(int32_t*)pLeft;
  int32_t right = *(int32_t*)pRight;
  STmsSubTablesMergeInfo* pInfo = (STmsSubTablesMergeInfo*)param;

  int32_t leftIdx = pInfo->aInputs[left].rowIdx;
  int32_t rightIdx = pInfo->aInputs[right].rowIdx;

  if (leftIdx == -1) {
    return 1;
  } else if (rightIdx == -1) {
    return -1;
  }

  int64_t leftTs = pInfo->aInputs[left].aTs[leftIdx];
  int64_t rightTs = pInfo->aInputs[right].aTs[rightIdx];
  int32_t ret = leftTs > rightTs ? 1 : ((leftTs < rightTs) ? -1 : 0);
  if (pInfo->pOrderInfo->order == TSDB_ORDER_DESC) {
    ret = -1 * ret;
  }
  return ret;
}

int32_t dumpQueryTableCond(const SQueryTableDataCond* src, SQueryTableDataCond* dst) {
  memcpy((void*)dst, (void*)src, sizeof(SQueryTableDataCond));
  dst->colList = taosMemoryCalloc(src->numOfCols, sizeof(SColumnInfo));
  for (int i = 0; i < src->numOfCols; i++) {
    dst->colList[i] = src->colList[i];
  }
  return 0;
}
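The comparator above orders the per-sub-table inputs for the merge: inputs whose `rowIdx` is `-1` are exhausted and sort after everything else, the rest are ordered by their current timestamp, and the sign is flipped for descending scans. A standalone sketch of the same idea over a plain array (simplified types, `qsort` instead of the merge tree):

```c
#include <stdio.h>
#include <stdlib.h>

// Entries with ts == -1 model exhausted inputs and sort last; others by ts.
typedef struct { long long ts; } InputSketch;

static int cmpAscSketch(const void* a, const void* b) {
  const InputSketch* l = a;
  const InputSketch* r = b;
  if (l->ts == -1) return 1;
  if (r->ts == -1) return -1;
  return (l->ts > r->ts) - (l->ts < r->ts);
}

int main(void) {
  InputSketch in[4] = {{5}, {-1}, {3}, {9}};
  qsort(in, 4, sizeof(InputSketch), cmpAscSketch);
  for (int i = 0; i < 4; i++) printf("%lld ", in[i].ts);  // 3 5 9 -1
  printf("\n");
  return 0;
}
```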
static int32_t fetchNextSubTableBlockFromReader(SOperatorInfo* pOperator, STmsSubTableInput* pInput, bool* pSubTableHasBlock) {
  int32_t code = 0;

  STableMergeScanInfo* pInfo = pOperator->info;
  SReadHandle* pHandle = &pInfo->base.readHandle;
  STmsSubTablesMergeInfo* pSubTblsInfo = pInfo->pSubTablesMergeInfo;
  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
  const SStorageAPI* pAPI = &pTaskInfo->storageAPI;

  blockDataCleanup(pInput->pReaderBlock);
  if (!pInput->bInMemReader) {
    code = pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, &pInput->tblCond, pInput->pKeyInfo, 1, pInput->pReaderBlock,
                                         (void**)&pInput->pReader, GET_TASKID(pTaskInfo), NULL);
    if (code != 0) {
      T_LONG_JMP(pTaskInfo->env, code);
    }
  }

  pInfo->base.dataReader = pInput->pReader;

  while (true) {
    bool hasNext = false;
    int32_t code = pAPI->tsdReader.tsdNextDataBlock(pInfo->base.dataReader, &hasNext);
    if (code != 0) {
      pAPI->tsdReader.tsdReaderReleaseDataBlock(pInfo->base.dataReader);
      pInfo->base.dataReader = NULL;
      T_LONG_JMP(pTaskInfo->env, code);
    }
    if (!hasNext || isTaskKilled(pTaskInfo)) {
      if (isTaskKilled(pTaskInfo)) {
        pAPI->tsdReader.tsdReaderReleaseDataBlock(pInfo->base.dataReader);
        pInfo->base.dataReader = NULL;
        T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
      }
      *pSubTableHasBlock = false;
      break;
    }

    if (pInput->tblCond.order == TSDB_ORDER_ASC) {
      pInput->tblCond.twindows.skey = pInput->pReaderBlock->info.window.ekey + 1;
    } else {
      pInput->tblCond.twindows.ekey = pInput->pReaderBlock->info.window.skey - 1;
    }

    uint32_t status = 0;
    code = loadDataBlock(pOperator, &pInfo->base, pInput->pReaderBlock, &status);
    if (code != 0) {
      pInfo->base.dataReader = NULL;
      T_LONG_JMP(pTaskInfo->env, code);
    }
    if (status == FUNC_DATA_REQUIRED_ALL_FILTEROUT) {
      *pSubTableHasBlock = false;
      break;
    }
    if (status == FUNC_DATA_REQUIRED_FILTEROUT || pInput->pReaderBlock->info.rows == 0) {
      continue;
    }

    *pSubTableHasBlock = true;
    break;
  }

  if (*pSubTableHasBlock) {
    pInput->pReaderBlock->info.id.groupId = tableListGetTableGroupId(pInfo->base.pTableListInfo, pInput->pReaderBlock->info.id.uid);
    pOperator->resultInfo.totalRows += pInput->pReaderBlock->info.rows;
  }
  if (!pInput->bInMemReader || !*pSubTableHasBlock) {
    pAPI->tsdReader.tsdReaderClose(pInput->pReader);
    pInput->pReader = NULL;
  }

  pInfo->base.dataReader = NULL;
  return TSDB_CODE_SUCCESS;
}
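One detail worth calling out in the function above: after each block is consumed, the per-sub-table query condition's time window is shrunk past that block (`skey = ekey + 1` for ascending scans), so a reader that was closed to save memory can be reopened later and resume exactly where it left off. A minimal sketch of that resume rule, assuming a plain `[skey, ekey]` window struct rather than the real `SQueryTableDataCond`:

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct { int64_t skey, ekey; } TimeWindowSketch;

// After consuming a block that covered [blockSkey, blockEkey], shrink the
// remaining query window so a reopened reader starts right after it (ASC)
// or right before it (DESC).
static void advancePastBlock(TimeWindowSketch* cond, int64_t blockSkey, int64_t blockEkey, bool asc) {
  if (asc) {
    cond->skey = blockEkey + 1;
  } else {
    cond->ekey = blockSkey - 1;
  }
}
```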
static void setGroupStartEndIndex(STableMergeScanInfo* pInfo) {
  pInfo->bGroupProcessed = false;

  size_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
  int32_t i = pInfo->tableStartIndex + 1;
  for (; i < numOfTables; ++i) {
    STableKeyInfo* tableKeyInfo = tableListGetInfo(pInfo->base.pTableListInfo, i);
    if (tableKeyInfo->groupId != pInfo->groupId) {
      break;
    }
  }
  pInfo->tableEndIndex = i - 1;
}

static int32_t openSubTablesMergeSort(STmsSubTablesMergeInfo* pSubTblsInfo) {
  for (int32_t i = 0; i < pSubTblsInfo->numSubTables; ++i) {
    STmsSubTableInput* pInput = pSubTblsInfo->aInputs + i;
    if (pInput->rowIdx == -1) {
      continue;
    }
    if (pInput->type == SUB_TABLE_MEM_BLOCK) {
      pInput->rowIdx = 0;
      pInput->pageIdx = -1;
    }
    SSDataBlock* pInputBlock = (pInput->type == SUB_TABLE_MEM_BLOCK) ? pInput->pReaderBlock : pInput->pPageBlock;
    SColumnInfoData* col = taosArrayGet(pInputBlock->pDataBlock, pSubTblsInfo->pOrderInfo->slotId);
    pInput->aTs = (int64_t*)col->pData;
  }
  tMergeTreeCreate(&pSubTblsInfo->pTree, pSubTblsInfo->numSubTables, pSubTblsInfo, subTblRowCompareFn);
  return TSDB_CODE_SUCCESS;
}

static int32_t initSubTablesMergeInfo(STableMergeScanInfo* pInfo) {
  setGroupStartEndIndex(pInfo);
  STmsSubTablesMergeInfo* pSubTblsInfo = taosMemoryCalloc(1, sizeof(STmsSubTablesMergeInfo));
  if (pSubTblsInfo == NULL) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  pSubTblsInfo->pOrderInfo = taosArrayGet(pInfo->pSortInfo, 0);
  pSubTblsInfo->numSubTables = pInfo->tableEndIndex - pInfo->tableStartIndex + 1;
  pSubTblsInfo->aInputs = taosMemoryCalloc(pSubTblsInfo->numSubTables, sizeof(STmsSubTableInput));
  if (pSubTblsInfo->aInputs == NULL) {
    taosMemoryFree(pSubTblsInfo);
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  int32_t bufPageSize = pInfo->bufPageSize;
  int32_t inMemSize = (pSubTblsInfo->numSubTables - pSubTblsInfo->numTableBlocksInMem) * bufPageSize;
  int32_t code =
      createDiskbasedBuf(&pSubTblsInfo->pBlocksBuf, pInfo->bufPageSize, inMemSize, "blocksExternalBuf", tsTempDir);
  if (code != TSDB_CODE_SUCCESS) {
    taosMemoryFree(pSubTblsInfo->aInputs);
    taosMemoryFree(pSubTblsInfo);
    return code;
  }
  pSubTblsInfo->numTableBlocksInMem = pSubTblsInfo->numSubTables;
  pSubTblsInfo->numInMemReaders = pSubTblsInfo->numSubTables;

  pInfo->pSubTablesMergeInfo = pSubTblsInfo;
  return TSDB_CODE_SUCCESS;
}
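On the buffer sizing above: the spillable block buffer is budgeted at one page per sub-table block that should stay resident, and since `pSubTblsInfo` comes from `taosMemoryCalloc`, `numTableBlocksInMem` is still zero at this point, so the in-memory budget works out to `numSubTables * bufPageSize`. A small sketch of that arithmetic (helper name is illustrative):

```c
#include <stdint.h>

// Rough model of the in-memory budget for the spillable block buffer above.
static int64_t inMemBudget(int32_t numSubTables, int32_t residentBlocks, int32_t pageSize) {
  return (int64_t)(numSubTables - residentBlocks) * pageSize;
}
// With residentBlocks == 0 (freshly calloc'ed, as in the code above) the
// budget is simply numSubTables * pageSize bytes.
```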
static int32_t initSubTableInputs(SOperatorInfo* pOperator, STableMergeScanInfo* pInfo) {
  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
  SReadHandle* pHandle = &pInfo->base.readHandle;
  SStorageAPI* pAPI = &pTaskInfo->storageAPI;

  STmsSubTablesMergeInfo* pSubTblsInfo = pInfo->pSubTablesMergeInfo;

  for (int32_t i = 0; i < pSubTblsInfo->numSubTables; ++i) {
    STmsSubTableInput* pInput = pSubTblsInfo->aInputs + i;
    pInput->type = SUB_TABLE_MEM_BLOCK;
    dumpQueryTableCond(&pInfo->base.cond, &pInput->tblCond);
    pInput->pReaderBlock = createOneDataBlock(pInfo->pResBlock, false);
    pInput->pPageBlock = createOneDataBlock(pInfo->pResBlock, false);
    STableKeyInfo* keyInfo = tableListGetInfo(pInfo->base.pTableListInfo, i + pInfo->tableStartIndex);
    pInput->pKeyInfo = keyInfo;

    if (isTaskKilled(pTaskInfo)) {
      T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
    }

    if (i + 1 < pSubTblsInfo->numInMemReaders) {
      pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, &pInput->tblCond, keyInfo, 1, pInput->pReaderBlock,
                                    (void**)&pInput->pReader, GET_TASKID(pTaskInfo), NULL);
      pInput->bInMemReader = true;
    } else {
      pInput->pReader = NULL;
      pInput->bInMemReader = false;
    }
    bool hasNext = true;
    fetchNextSubTableBlockFromReader(pOperator, pInput, &hasNext);
    if (!hasNext) {
      pInput->rowIdx = -1;
      ++pSubTblsInfo->numSubTablesCompleted;
      continue;
    } else {
      pInput->rowIdx = 0;
      pInput->pageIdx = -1;
    }
  }
  return TSDB_CODE_SUCCESS;
}

static int32_t adjustSubTableFromMemBlock(SOperatorInfo* pOperatorInfo, STmsSubTablesMergeInfo* pSubTblsInfo) {
  STableMergeScanInfo* pInfo = pOperatorInfo->info;
  STmsSubTableInput* pInput = pSubTblsInfo->aInputs + tMergeTreeGetChosenIndex(pSubTblsInfo->pTree);
  bool hasNext = true;
  fetchNextSubTableBlockFromReader(pOperatorInfo, pInput, &hasNext);
  if (!hasNext) {
    pInput->rowIdx = -1;
    ++pSubTblsInfo->numSubTablesCompleted;
  } else {
    pInput->rowIdx = 0;
  }

  return TSDB_CODE_SUCCESS;
}
static int32_t adjustSubTableForNextRow(SOperatorInfo* pOperatorInfo, STmsSubTablesMergeInfo* pSubTblsInfo) {
  STableMergeScanInfo* pInfo = pOperatorInfo->info;
  STmsSubTableInput* pInput = pSubTblsInfo->aInputs + tMergeTreeGetChosenIndex(pSubTblsInfo->pTree);

  SSDataBlock* pInputBlock = (pInput->type == SUB_TABLE_MEM_BLOCK) ? pInput->pReaderBlock : pInput->pPageBlock;
  if (pInput->rowIdx < pInputBlock->info.rows - 1) {
    ++pInput->rowIdx;
  } else if (pInput->rowIdx == pInputBlock->info.rows - 1) {
    if (pInput->type == SUB_TABLE_MEM_BLOCK) {
      adjustSubTableFromMemBlock(pOperatorInfo, pSubTblsInfo);
    }
    if (pInput->rowIdx != -1) {
      SColumnInfoData* col = taosArrayGet(pInputBlock->pDataBlock, pSubTblsInfo->pOrderInfo->slotId);
      pInput->aTs = (int64_t*)col->pData;
    }
  }

  tMergeTreeAdjust(pSubTblsInfo->pTree, tMergeTreeGetAdjustIndex(pSubTblsInfo->pTree));

  return TSDB_CODE_SUCCESS;
}

static int32_t appendChosenRowToDataBlock(STmsSubTablesMergeInfo* pSubTblsInfo, SSDataBlock* pBlock) {
  STmsSubTableInput* pInput = pSubTblsInfo->aInputs + tMergeTreeGetChosenIndex(pSubTblsInfo->pTree);
  SSDataBlock* pInputBlock = (pInput->type == SUB_TABLE_MEM_BLOCK) ? pInput->pReaderBlock : pInput->pPageBlock;

  for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); ++i) {
    SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, i);

    SColumnInfoData* pSrcColInfo = taosArrayGet(pInputBlock->pDataBlock, i);
    bool isNull = colDataIsNull(pSrcColInfo, pInputBlock->info.rows, pInput->rowIdx, NULL);

    if (isNull) {
      colDataSetVal(pColInfo, pBlock->info.rows, NULL, true);
    } else {
      if (pSrcColInfo->pData != NULL) {
        char* pData = colDataGetData(pSrcColInfo, pInput->rowIdx);
        colDataSetVal(pColInfo, pBlock->info.rows, pData, false);
      }
    }
  }
  pBlock->info.dataLoad = 1;
  pBlock->info.scanFlag = pInputBlock->info.scanFlag;
  pBlock->info.rows += 1;
  return TSDB_CODE_SUCCESS;
}
static SSDataBlock* getSubTablesSortedBlock(SOperatorInfo* pOperator, SSDataBlock* pResBlock, int32_t capacity) {
  STableMergeScanInfo* pInfo = pOperator->info;
  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
  STmsSubTablesMergeInfo* pSubTblsInfo = pInfo->pSubTablesMergeInfo;

  blockDataCleanup(pResBlock);
  bool finished = false;
  while (true) {
    while (1) {
      if (pSubTblsInfo->numSubTablesCompleted >= pSubTblsInfo->numSubTables) {
        finished = true;
        break;
      }

      appendChosenRowToDataBlock(pSubTblsInfo, pResBlock);
      adjustSubTableForNextRow(pOperator, pSubTblsInfo);

      if (pResBlock->info.rows >= capacity) {
        break;
      }
    }

    if (isTaskKilled(pTaskInfo)) {
      T_LONG_JMP(pOperator->pTaskInfo->env, pTaskInfo->code);
    }

    bool limitReached = applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo);
    if (finished || limitReached || pResBlock->info.rows > 0) {
      break;
    }
  }
  return (pResBlock->info.rows > 0) ? pResBlock : NULL;
}

static int32_t startSubTablesTableMergeScan(SOperatorInfo* pOperator) {
  STableMergeScanInfo* pInfo = pOperator->info;

  initSubTablesMergeInfo(pInfo);

  initSubTableInputs(pOperator, pInfo);

  openSubTablesMergeSort(pInfo->pSubTablesMergeInfo);

  return TSDB_CODE_SUCCESS;
}

static int32_t stopSubTablesTableMergeScan(STableMergeScanInfo* pInfo) {
  STmsSubTablesMergeInfo* pSubTblsInfo = pInfo->pSubTablesMergeInfo;
  if (pSubTblsInfo != NULL) {
    tMergeTreeDestroy(&pSubTblsInfo->pTree);

    for (int32_t i = 0; i < pSubTblsInfo->numSubTables; ++i) {
      STmsSubTableInput* pInput = pSubTblsInfo->aInputs + i;
      taosMemoryFree(pInput->tblCond.colList);
      blockDataDestroy(pInput->pReaderBlock);
      blockDataDestroy(pInput->pPageBlock);
      taosArrayDestroy(pInput->aBlockPages);
      pInfo->base.readerAPI.tsdReaderClose(pInput->pReader);
      pInput->pReader = NULL;
    }

    destroyDiskbasedBuf(pSubTblsInfo->pBlocksBuf);
    taosMemoryFree(pSubTblsInfo->aInputs);

    taosMemoryFree(pSubTblsInfo);
    pInfo->pSubTablesMergeInfo = NULL;
  }
  taosMemoryTrim(0);
  return TSDB_CODE_SUCCESS;
}
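The start/stop pair above gives each table group its own merge state: `start` builds the merge info, opens the per-sub-table inputs and the merge tree; `stop` tears all of that down before the scan moves on to the next group. A compact sketch of the per-group loop this enables (names and types simplified; not the operator's real control flow):

```c
#include <stdbool.h>

// Illustrative per-group driver: open merge state, drain it, close it,
// then advance to the first table of the next group.
typedef struct { int tableStart, tableEnd, numTables; } GroupCursorSketch;

static bool advanceToNextGroup(GroupCursorSketch* c) {
  if (c->tableEnd >= c->numTables - 1) return false;  // last group finished
  c->tableStart = c->tableEnd + 1;                    // next group begins here
  return true;
}
```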
SSDataBlock* doTableMergeScanParaSubTables(SOperatorInfo* pOperator) {
  if (pOperator->status == OP_EXEC_DONE) {
    return NULL;
  }

  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
  STableMergeScanInfo* pInfo = pOperator->info;

  int32_t code = pOperator->fpSet._openFn(pOperator);
  if (code != TSDB_CODE_SUCCESS) {
    T_LONG_JMP(pTaskInfo->env, code);
  }

  size_t tableListSize = tableListGetSize(pInfo->base.pTableListInfo);
  if (!pInfo->hasGroupId) {
    pInfo->hasGroupId = true;

    if (tableListSize == 0) {
      setOperatorCompleted(pOperator);
      return NULL;
    }
    pInfo->tableStartIndex = 0;
    pInfo->groupId = ((STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableStartIndex))->groupId;
    startSubTablesTableMergeScan(pOperator);
  }

  SSDataBlock* pBlock = NULL;
  while (pInfo->tableStartIndex < tableListSize) {
    if (isTaskKilled(pTaskInfo)) {
      T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
    }

    pBlock = getSubTablesSortedBlock(pOperator, pInfo->pResBlock, pOperator->resultInfo.capacity);
    if (pBlock == NULL && !pInfo->bGroupProcessed && pInfo->needCountEmptyTable) {
      STableKeyInfo* tbInfo = tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableStartIndex);
      pBlock = getOneRowResultBlock(pTaskInfo, &pInfo->base, pInfo->pResBlock, tbInfo);
    }
    if (pBlock != NULL) {
      pBlock->info.id.groupId = pInfo->groupId;
      pOperator->resultInfo.totalRows += pBlock->info.rows;
      pInfo->bGroupProcessed = true;
      return pBlock;
    } else {
      // Data of this group are all dumped, let's try the next group
      stopSubTablesTableMergeScan(pInfo);
      if (pInfo->tableEndIndex >= tableListSize - 1) {
        setOperatorCompleted(pOperator);
        break;
      }

      pInfo->tableStartIndex = pInfo->tableEndIndex + 1;
      pInfo->groupId = tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableStartIndex)->groupId;
      startSubTablesTableMergeScan(pOperator);
      resetLimitInfoForNextGroup(&pInfo->limitInfo);
    }
  }

  return pBlock;
}


static void tableMergeScanDoSkipTable(uint64_t uid, void* pTableMergeScanInfo) {
  STableMergeScanInfo* pInfo = pTableMergeScanInfo;
  if (pInfo->mSkipTables == NULL) {
@ -3511,15 +3989,6 @@ SArray* generateSortByTsInfo(SArray* colMatchInfo, int32_t order) {
  return pList;
}

int32_t dumpQueryTableCond(const SQueryTableDataCond* src, SQueryTableDataCond* dst) {
  memcpy((void*)dst, (void*)src, sizeof(SQueryTableDataCond));
  dst->colList = taosMemoryCalloc(src->numOfCols, sizeof(SColumnInfo));
  for (int i = 0; i < src->numOfCols; i++) {
    dst->colList[i] = src->colList[i];
  }
  return 0;
}

void tableMergeScanTsdbNotifyCb(ETsdReaderNotifyType type, STsdReaderNotifyInfo* info, void* param) {
  STableMergeScanInfo* pTmsInfo = param;
  if (type == TSD_READER_NOTIFY_DURATION_START) {

@ -3607,8 +4076,6 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
  int32_t tableStartIdx = pInfo->tableStartIndex;
  int32_t tableEndIdx = pInfo->tableEndIndex;

  tSimpleHashClear(pInfo->mTableNumRows);

  int32_t numOfTable = tableEndIdx - tableStartIdx + 1;
  STableKeyInfo* startKeyInfo = tableListGetInfo(pInfo->base.pTableListInfo, tableStartIdx);
  pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, &pInfo->base.cond, startKeyInfo, numOfTable, pInfo->pReaderBlock,

@ -3759,10 +4226,8 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {

void destroyTableMergeScanOperatorInfo(void* param) {
  STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
  cleanupQueryTableDataCond(&pTableScanInfo->base.cond);

  int32_t numOfTable = taosArrayGetSize(pTableScanInfo->sortSourceParams);

  // start one reader variable
  pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader);
  pTableScanInfo->base.dataReader = NULL;

@ -3773,18 +4238,22 @@ void destroyTableMergeScanOperatorInfo(void* param) {
    }
  }

  taosArrayDestroy(pTableScanInfo->sortSourceParams);
  tsortDestroySortHandle(pTableScanInfo->pSortHandle);
  pTableScanInfo->pSortHandle = NULL;
  taosHashCleanup(pTableScanInfo->mSkipTables);
  pTableScanInfo->mSkipTables = NULL;
  pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock);
  // end one reader variable

  cleanupQueryTableDataCond(&pTableScanInfo->base.cond);
  destroyTableScanBase(&pTableScanInfo->base, &pTableScanInfo->base.readerAPI);

  pTableScanInfo->pResBlock = blockDataDestroy(pTableScanInfo->pResBlock);
  pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock);
  pTableScanInfo->pReaderBlock = blockDataDestroy(pTableScanInfo->pReaderBlock);

  taosArrayDestroy(pTableScanInfo->pSortInfo);

  stopSubTablesTableMergeScan(pTableScanInfo);

  taosMemoryFreeClear(param);
}

@ -3858,14 +4327,6 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
    goto _error;
  }

  initResultSizeInfo(&pOperator->resultInfo, 1024);
  pInfo->pResBlock = createDataBlockFromDescNode(pDescNode);
  blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);

  pInfo->sortSourceParams = taosArrayInit(64, sizeof(STableMergeScanSortSourceParam));

  pInfo->pSortInfo = generateSortByTsInfo(pInfo->base.matchInfo.pList, pInfo->base.cond.order);
  pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false);
  initLimitInfo(pTableScanNode->scan.node.pLimit, pTableScanNode->scan.node.pSlimit, &pInfo->limitInfo);

  pInfo->mergeLimit = -1;

@ -3874,24 +4335,37 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
    pInfo->mergeLimit = pInfo->limitInfo.limit.limit + pInfo->limitInfo.limit.offset;
    pInfo->mSkipTables = NULL;
  }

  initResultSizeInfo(&pOperator->resultInfo, 1024);
  pInfo->pResBlock = createDataBlockFromDescNode(pDescNode);
  blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);

  pInfo->pSortInfo = generateSortByTsInfo(pInfo->base.matchInfo.pList, pInfo->base.cond.order);
  pInfo->pReaderBlock = createOneDataBlock(pInfo->pResBlock, false);

  pInfo->needCountEmptyTable = tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable;

  int32_t rowSize = pInfo->pResBlock->info.rowSize;
  uint32_t nCols = taosArrayGetSize(pInfo->pResBlock->pDataBlock);
  pInfo->bufPageSize = getProperSortPageSize(rowSize, nCols);

  // start one reader variable
  pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false);

  if (!tsExperimental) {
    pInfo->filesetDelimited = false;
  } else {
    pInfo->filesetDelimited = pTableScanNode->filesetDelimited;
  }
  pInfo->needCountEmptyTable = tsCountAlwaysReturnValue && pTableScanNode->needCountEmptyTable;

  // end one reader variable
  setOperatorInfo(pOperator, "TableMergeScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, false, OP_NOT_OPENED,
                  pInfo, pTaskInfo);
  pOperator->exprSupp.numOfExprs = numOfCols;

  pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doTableMergeScan, NULL, destroyTableMergeScanOperatorInfo,
                                         optrDefaultBufFn, getTableMergeScanExplainExecInfo, optrDefaultGetNextExtFn, NULL);
  pOperator->fpSet = createOperatorFpSet(
      optrDummyOpenFn, pTableScanNode->paraTablesSort ? doTableMergeScanParaSubTables : doTableMergeScan, NULL,
      destroyTableMergeScanOperatorInfo, optrDefaultBufFn, getTableMergeScanExplainExecInfo, optrDefaultGetNextExtFn,
      NULL);
  pOperator->cost.openCost = 0;
  return pOperator;
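The last hunk above switches the operator's next-function based on the plan's `paraTablesSort` flag: the classic sort-handle path or the new per-sub-table merge path. A minimal sketch of that dispatch pattern (stand-in function types; the real callbacks take an `SOperatorInfo*` and return a block):

```c
#include <stdbool.h>
#include <stddef.h>

typedef int (*ScanFnSketch)(void* op);

static int scanSorted(void* op)   { (void)op; return 0; }  // classic sort-based path
static int scanParallel(void* op) { (void)op; return 0; }  // per-sub-table merge path

// Mirrors the createOperatorFpSet change above: the "next" callback is picked
// from a plan flag (a plain bool standing in for paraTablesSort here).
static ScanFnSketch pickScanFn(bool paraTablesSort) {
  return paraTablesSort ? scanParallel : scanSorted;
}
```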
@ -0,0 +1,714 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "executorInt.h"
#include "function.h"
#include "functionMgt.h"
#include "operator.h"
#include "querytask.h"
#include "tchecksum.h"
#include "tcommon.h"
#include "tdatablock.h"
#include "tglobal.h"
#include "tlog.h"
#include "ttime.h"

#define IS_FINAL_COUNT_OP(op) ((op)->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_COUNT)
#define STREAM_COUNT_OP_STATE_NAME "StreamCountHistoryState"
#define STREAM_COUNT_OP_CHECKPOINT_NAME "StreamCountOperator_Checkpoint"

typedef struct SCountWindowInfo {
  SResultWindowInfo winInfo;
  COUNT_TYPE* pWindowCount;
} SCountWindowInfo;

typedef enum {
  NONE_WINDOW = 0,
  CREATE_NEW_WINDOW,
  MOVE_NEXT_WINDOW,
} BuffOp;
typedef struct SBuffInfo {
  bool rebuildWindow;
  BuffOp winBuffOp;
  SStreamStateCur* pCur;
} SBuffInfo;
void destroyStreamCountAggOperatorInfo(void* param) {
  SStreamCountAggOperatorInfo* pInfo = (SStreamCountAggOperatorInfo*)param;
  cleanupBasicInfo(&pInfo->binfo);
  destroyStreamAggSupporter(&pInfo->streamAggSup);
  cleanupExprSupp(&pInfo->scalarSupp);
  clearGroupResInfo(&pInfo->groupResInfo);

  colDataDestroy(&pInfo->twAggSup.timeWindowData);
  blockDataDestroy(pInfo->pDelRes);
  tSimpleHashCleanup(pInfo->pStUpdated);
  tSimpleHashCleanup(pInfo->pStDeleted);
  pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated);
  cleanupGroupResInfo(&pInfo->groupResInfo);

  taosArrayDestroy(pInfo->historyWins);
  blockDataDestroy(pInfo->pCheckpointRes);

  taosMemoryFreeClear(param);
}

bool isSlidingCountWindow(SStreamAggSupporter* pAggSup) {
  return pAggSup->windowCount != pAggSup->windowSliding;
}

void setCountOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId, SCountWindowInfo* pCurWin,
                       SBuffInfo* pBuffInfo) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t size = pAggSup->resultRowSize;
  pCurWin->winInfo.sessionWin.groupId = groupId;
  pCurWin->winInfo.sessionWin.win.skey = ts;
  pCurWin->winInfo.sessionWin.win.ekey = ts;

  if (isSlidingCountWindow(pAggSup)) {
    if (pBuffInfo->winBuffOp == CREATE_NEW_WINDOW) {
      pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin,
                                                 (void**)&pCurWin->winInfo.pStatePos, &size);
      code = TSDB_CODE_FAILED;
    } else if (pBuffInfo->winBuffOp == MOVE_NEXT_WINDOW) {
      ASSERT(pBuffInfo->pCur);
      pAggSup->stateStore.streamStateCurNext(pAggSup->pState, pBuffInfo->pCur);
      code = pAggSup->stateStore.streamStateSessionGetKVByCur(pBuffInfo->pCur, &pCurWin->winInfo.sessionWin,
                                                              (void**)&pCurWin->winInfo.pStatePos, &size);
      if (code == TSDB_CODE_FAILED) {
        pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin,
                                                   (void**)&pCurWin->winInfo.pStatePos, &size);
      }
    } else {
      pBuffInfo->pCur = pAggSup->stateStore.streamStateCountSeekKeyPrev(pAggSup->pState, &pCurWin->winInfo.sessionWin, pAggSup->windowCount);
      code = pAggSup->stateStore.streamStateSessionGetKVByCur(pBuffInfo->pCur, &pCurWin->winInfo.sessionWin,
                                                              (void**)&pCurWin->winInfo.pStatePos, &size);
      if (code == TSDB_CODE_FAILED) {
        pAggSup->stateStore.streamStateCountWinAdd(pAggSup->pState, &pCurWin->winInfo.sessionWin,
                                                   (void**)&pCurWin->winInfo.pStatePos, &size);
      }
    }
  } else {
    code = pAggSup->stateStore.streamStateCountWinAddIfNotExist(
        pAggSup->pState, &pCurWin->winInfo.sessionWin, pAggSup->windowCount, (void**)&pCurWin->winInfo.pStatePos, &size);
  }

  if (code == TSDB_CODE_SUCCESS) {
    pCurWin->winInfo.isOutput = true;
  }
  pCurWin->pWindowCount =
      (COUNT_TYPE*)((char*)pCurWin->winInfo.pStatePos->pRowBuff + (pAggSup->resultRowSize - sizeof(COUNT_TYPE)));

  if (*pCurWin->pWindowCount == pAggSup->windowCount) {
    pBuffInfo->rebuildWindow = true;
  }
}
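As the last lines of `setCountOutputBuf` show, the per-window row counter lives in the final `sizeof(COUNT_TYPE)` bytes of the window's result-row buffer. A standalone model of that tail-counter layout, assuming for illustration that `COUNT_TYPE` is a 64-bit integer (its real definition is not shown in this diff):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef int64_t COUNT_TYPE_SKETCH;  // assumed width, for illustration only

int main(void) {
  size_t resultRowSize = 64;  // illustrative aggregate-row size
  char* rowBuff = calloc(1, resultRowSize);
  // Counter stored in the last bytes of the opaque result-row buffer.
  COUNT_TYPE_SKETCH* pCount =
      (COUNT_TYPE_SKETCH*)(rowBuff + resultRowSize - sizeof(COUNT_TYPE_SKETCH));
  *pCount += 10;  // rows absorbed by this window so far
  printf("window rows: %lld\n", (long long)*pCount);
  free(rowBuff);
  return 0;
}
```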
static int32_t updateCountWindowInfo(SStreamAggSupporter* pAggSup, SCountWindowInfo* pWinInfo, TSKEY* pTs, int32_t start, int32_t rows, int32_t maxRows,
                                     SSHashObj* pStDeleted, bool* pRebuild) {
  SSessionKey sWinKey = pWinInfo->winInfo.sessionWin;
  int32_t num = 0;
  for (int32_t i = start; i < rows; i++) {
    if (pTs[i] < pWinInfo->winInfo.sessionWin.win.ekey) {
      num++;
    } else {
      break;
    }
  }
  int32_t maxNum = TMIN(maxRows - *pWinInfo->pWindowCount, rows - start);
  if (num > maxNum) {
    *pRebuild = true;
  }
  *pWinInfo->pWindowCount += maxNum;
  bool needDelState = false;
  if (pWinInfo->winInfo.sessionWin.win.skey > pTs[start]) {
    needDelState = true;
    if (pStDeleted && pWinInfo->winInfo.isOutput) {
      saveDeleteRes(pStDeleted, pWinInfo->winInfo.sessionWin);
    }

    pWinInfo->winInfo.sessionWin.win.skey = pTs[start];
  }

  if (pWinInfo->winInfo.sessionWin.win.ekey < pTs[maxNum + start - 1]) {
    needDelState = true;
    pWinInfo->winInfo.sessionWin.win.ekey = pTs[maxNum + start - 1];
  }

  if (needDelState) {
    memcpy(pWinInfo->winInfo.pStatePos->pKey, &pWinInfo->winInfo.sessionWin, sizeof(SSessionKey));
    if (pWinInfo->winInfo.pStatePos->needFree) {
      pAggSup->stateStore.streamStateSessionDel(pAggSup->pState, &sWinKey);
    }
  }

  return maxNum;
}
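The admission rule above is `maxNum = min(maxRows - currentCount, rows - start)`: a window can absorb at most the rows it still has room for, and at most the rows remaining in the block; if even more rows fall inside the window's time range, the window is flagged for rebuild. A small worked example of that arithmetic:

```c
#include <stdio.h>

#define TMIN_SKETCH(a, b) ((a) < (b) ? (a) : (b))

// Worked example: windowCount (maxRows) = 10, a window already holding 7 rows,
// and 8 candidate rows left in the block starting at offset 0. Only 3 rows are
// absorbed; the remaining 5 spill into the following window(s).
int main(void) {
  int maxRows = 10, currentCount = 7, rows = 8, start = 0;
  int maxNum = TMIN_SKETCH(maxRows - currentCount, rows - start);
  printf("absorbed: %d, left over: %d\n", maxNum, rows - start - maxNum);  // 3, 5
  return 0;
}
```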
void getCountWinRange(SStreamAggSupporter* pAggSup, const SSessionKey* pKey, EStreamType mode, SSessionKey* pDelRange) {
  *pDelRange = *pKey;
  SStreamStateCur* pCur = NULL;
  if (isSlidingCountWindow(pAggSup)) {
    pCur = pAggSup->stateStore.streamStateCountSeekKeyPrev(pAggSup->pState, pKey, pAggSup->windowCount);
  } else {
    pCur = pAggSup->stateStore.streamStateSessionSeekKeyCurrentNext(pAggSup->pState, pKey);
  }
  SSessionKey tmpKey = {0};
  int32_t code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &tmpKey, NULL, 0);
  if (code != TSDB_CODE_SUCCESS) {
    pAggSup->stateStore.streamStateFreeCur(pCur);
    return;
  }
  pDelRange->win = tmpKey.win;
  while (mode == STREAM_DELETE_DATA) {
    pAggSup->stateStore.streamStateCurNext(pAggSup->pState, pCur);
    code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &tmpKey, NULL, 0);
    if (code != TSDB_CODE_SUCCESS) {
      break;
    }
    pDelRange->win.ekey = TMAX(pDelRange->win.ekey, tmpKey.win.ekey);
  }
  pAggSup->stateStore.streamStateFreeCur(pCur);
}

static void destroySBuffInfo(SStreamAggSupporter* pAggSup, SBuffInfo* pBuffInfo) {
  pAggSup->stateStore.streamStateFreeCur(pBuffInfo->pCur);
}

bool inCountCalSlidingWindow(SStreamAggSupporter* pAggSup, STimeWindow* pWin, TSKEY sKey, TSKEY eKey) {
  if (pAggSup->windowCount == pAggSup->windowSliding) {
    return true;
  }
  if (sKey <= pWin->skey && pWin->ekey <= eKey) {
    return true;
  }
  return false;
}

bool inCountSlidingWindow(SStreamAggSupporter* pAggSup, STimeWindow* pWin, SDataBlockInfo* pBlockInfo) {
  return inCountCalSlidingWindow(pAggSup, pWin, pBlockInfo->calWin.skey, pBlockInfo->calWin.ekey);
}
static void doStreamCountAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pStUpdated,
                                 SSHashObj* pStDeleted) {
  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
  SStreamCountAggOperatorInfo* pInfo = pOperator->info;
  int32_t numOfOutput = pOperator->exprSupp.numOfExprs;
  uint64_t groupId = pSDataBlock->info.id.groupId;
  int64_t code = TSDB_CODE_SUCCESS;
  SResultRow* pResult = NULL;
  int32_t rows = pSDataBlock->info.rows;
  int32_t winRows = 0;
  SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
  SBuffInfo buffInfo = {.rebuildWindow = false, .winBuffOp = NONE_WINDOW, .pCur = NULL};

  pInfo->dataVersion = TMAX(pInfo->dataVersion, pSDataBlock->info.version);
  pAggSup->winRange = pTaskInfo->streamInfo.fillHistoryWindow;
  if (pAggSup->winRange.ekey <= 0) {
    pAggSup->winRange.ekey = INT64_MAX;
  }

  SColumnInfoData* pStartTsCol = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
  TSKEY* startTsCols = (int64_t*)pStartTsCol->pData;
  blockDataEnsureCapacity(pAggSup->pScanBlock, rows * 2);
  SStreamStateCur* pCur = NULL;
  COUNT_TYPE slidingRows = 0;

  for (int32_t i = 0; i < rows;) {
    if (pInfo->ignoreExpiredData &&
        checkExpiredData(&pInfo->streamAggSup.stateStore, pInfo->streamAggSup.pUpdateInfo, &pInfo->twAggSup,
                         pSDataBlock->info.id.uid, startTsCols[i])) {
      i++;
      continue;
    }
    SCountWindowInfo curWin = {0};
    buffInfo.rebuildWindow = false;
    setCountOutputBuf(pAggSup, startTsCols[i], groupId, &curWin, &buffInfo);
    if (!inCountSlidingWindow(pAggSup, &curWin.winInfo.sessionWin.win, &pSDataBlock->info)) {
      buffInfo.winBuffOp = MOVE_NEXT_WINDOW;
      continue;
    }
    setSessionWinOutputInfo(pStUpdated, &curWin.winInfo);
    slidingRows = *curWin.pWindowCount;
    if (!buffInfo.rebuildWindow) {
      winRows = updateCountWindowInfo(pAggSup, &curWin, startTsCols, i, rows, pAggSup->windowCount, pStDeleted, &buffInfo.rebuildWindow);
    }
    if (buffInfo.rebuildWindow) {
      SSessionKey range = {0};
      if (isSlidingCountWindow(pAggSup)) {
        curWin.winInfo.sessionWin.win.skey = startTsCols[i];
        curWin.winInfo.sessionWin.win.ekey = startTsCols[i];
      }
      getCountWinRange(pAggSup, &curWin.winInfo.sessionWin, STREAM_DELETE_DATA, &range);
      range.win.skey = TMIN(startTsCols[i], range.win.skey);
      range.win.ekey = TMAX(startTsCols[rows - 1], range.win.ekey);
      uint64_t uid = 0;
      appendOneRowToStreamSpecialBlock(pAggSup->pScanBlock, &range.win.skey, &range.win.ekey, &uid, &range.groupId, NULL);
      break;
    }
    code = doOneWindowAggImpl(&pInfo->twAggSup.timeWindowData, &curWin.winInfo, &pResult, i, winRows, rows, numOfOutput,
                              pOperator, 0);
    if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
      qError("%s do stream count aggregate impl error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
      T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
    }
    saveSessionOutputBuf(pAggSup, &curWin.winInfo);

    if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) {
      code = saveResult(curWin.winInfo, pStUpdated);
      if (code != TSDB_CODE_SUCCESS) {
        qError("%s do stream count aggregate impl, set result error, code %s", GET_TASKID(pTaskInfo),
               tstrerror(code));
        T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
      }
    }
    if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
      SSessionKey key = {0};
      getSessionHashKey(&curWin.winInfo.sessionWin, &key);
      tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curWin.winInfo, sizeof(SResultWindowInfo));
    }

    if (isSlidingCountWindow(pAggSup)) {
      if (slidingRows <= pAggSup->windowSliding) {
        if (slidingRows + winRows > pAggSup->windowSliding) {
          buffInfo.winBuffOp = CREATE_NEW_WINDOW;
          winRows = pAggSup->windowSliding - slidingRows;
          ASSERT(i >= 0);
        }
      } else {
        buffInfo.winBuffOp = MOVE_NEXT_WINDOW;
        winRows = 0;
      }
    }
    i += winRows;
  }
  destroySBuffInfo(pAggSup, &buffInfo);
}
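The sliding-count branch at the end of the loop above decides what to do with the window buffer on the next iteration: if the current window has already passed its slide boundary the cursor just moves on, and if this batch crosses the boundary a new window is created and `winRows` is clipped to the slide size. A compact restatement of that decision, with simplified names (it is not the operator's real state machine):

```c
#include <stdio.h>

typedef enum { NONE_OP_SKETCH = 0, CREATE_NEW_SKETCH, MOVE_NEXT_SKETCH } BuffOpSketch;

// slidingRows: rows already counted toward the current slide;
// winRows: rows absorbed in this batch; windowSliding: slide size in rows.
static BuffOpSketch nextBuffOp(long long slidingRows, long long winRows, long long windowSliding) {
  if (slidingRows > windowSliding) return MOVE_NEXT_SKETCH;            // already past the slide
  if (slidingRows + winRows > windowSliding) return CREATE_NEW_SKETCH;  // boundary crossed now
  return NONE_OP_SKETCH;
}

int main(void) {
  printf("%d\n", nextBuffOp(2, 3, 4));  // 1 == CREATE_NEW_SKETCH
  printf("%d\n", nextBuffOp(5, 0, 4));  // 2 == MOVE_NEXT_SKETCH
  return 0;
}
```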
static SSDataBlock* buildCountResult(SOperatorInfo* pOperator) {
  SStreamCountAggOperatorInfo* pInfo = pOperator->info;
  SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
  SOptrBasicInfo* pBInfo = &pInfo->binfo;
  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
  doBuildDeleteDataBlock(pOperator, pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
  if (pInfo->pDelRes->info.rows > 0) {
    printDataBlock(pInfo->pDelRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
    return pInfo->pDelRes;
  }

  doBuildSessionResult(pOperator, pAggSup->pState, &pInfo->groupResInfo, pBInfo->pRes);
  if (pBInfo->pRes->info.rows > 0) {
    printDataBlock(pBInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
    return pBInfo->pRes;
  }
  return NULL;
}

int32_t doStreamCountEncodeOpState(void** buf, int32_t len, SOperatorInfo* pOperator, bool isParent) {
  SStreamCountAggOperatorInfo* pInfo = pOperator->info;
  if (!pInfo) {
    return 0;
  }

  void* pData = (buf == NULL) ? NULL : *buf;

  // 1.streamAggSup.pResultRows
  int32_t tlen = 0;
  int32_t mapSize = tSimpleHashGetSize(pInfo->streamAggSup.pResultRows);
  tlen += taosEncodeFixedI32(buf, mapSize);
  void* pIte = NULL;
  size_t keyLen = 0;
  int32_t iter = 0;
  while ((pIte = tSimpleHashIterate(pInfo->streamAggSup.pResultRows, pIte, &iter)) != NULL) {
    void* key = taosHashGetKey(pIte, &keyLen);
    tlen += encodeSSessionKey(buf, key);
    tlen += encodeSResultWindowInfo(buf, pIte, pInfo->streamAggSup.resultRowSize);
  }

  // 2.twAggSup
  tlen += encodeSTimeWindowAggSupp(buf, &pInfo->twAggSup);

  // 3.dataVersion
  tlen += taosEncodeFixedI32(buf, pInfo->dataVersion);

  // 4.checksum
  if (isParent) {
    if (buf) {
      uint32_t cksum = taosCalcChecksum(0, pData, len - sizeof(uint32_t));
      tlen += taosEncodeFixedU32(buf, cksum);
    } else {
      tlen += sizeof(uint32_t);
    }
  }

  return tlen;
}

void* doStreamCountDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOperator, bool isParent) {
  SStreamCountAggOperatorInfo* pInfo = pOperator->info;
  if (!pInfo) {
    return buf;
  }

  // 4.checksum
  if (isParent) {
    int32_t dataLen = len - sizeof(uint32_t);
    void* pCksum = POINTER_SHIFT(buf, dataLen);
    if (taosCheckChecksum(buf, dataLen, *(uint32_t*)pCksum) != TSDB_CODE_SUCCESS) {
      qError("stream count state is invalid");
      return buf;
    }
  }

  // 1.streamAggSup.pResultRows
  int32_t mapSize = 0;
  buf = taosDecodeFixedI32(buf, &mapSize);
  for (int32_t i = 0; i < mapSize; i++) {
    SSessionKey key = {0};
    SResultWindowInfo winfo = {0};
    buf = decodeSSessionKey(buf, &key);
    buf = decodeSResultWindowInfo(buf, &winfo, pInfo->streamAggSup.resultRowSize);
    tSimpleHashPut(pInfo->streamAggSup.pResultRows, &key, sizeof(SSessionKey), &winfo, sizeof(SResultWindowInfo));
  }

  // 2.twAggSup
  buf = decodeSTimeWindowAggSupp(buf, &pInfo->twAggSup);

  // 3.dataVersion
  buf = taosDecodeFixedI64(buf, &pInfo->dataVersion);
  return buf;
}
void doStreamCountSaveCheckpoint(SOperatorInfo* pOperator) {
  SStreamCountAggOperatorInfo* pInfo = pOperator->info;
  int32_t len = doStreamCountEncodeOpState(NULL, 0, pOperator, true);
  void* buf = taosMemoryCalloc(1, len);
  void* pBuf = buf;
  len = doStreamCountEncodeOpState(&pBuf, len, pOperator, true);
  pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_COUNT_OP_CHECKPOINT_NAME,
                                                     strlen(STREAM_COUNT_OP_CHECKPOINT_NAME), buf, len);
  taosMemoryFree(buf);
}
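`doStreamCountSaveCheckpoint` uses the common two-pass serialization pattern: call the encoder with a NULL buffer to measure the length, allocate, then encode again to fill the buffer before persisting it. A generic sketch of that pattern with stand-in function types (the encoder and sink below are illustrative, not the TDengine state-store API):

```c
#include <stdint.h>
#include <stdlib.h>

// Encoder contract assumed here: with buf == NULL it only returns the length,
// otherwise it writes into *buf and advances it.
typedef int32_t (*EncodeFnSketch)(void** buf, int32_t len, void* ctx);

static void saveWithTwoPassEncode(EncodeFnSketch enc, void* ctx,
                                  void (*sink)(const void* data, int32_t len)) {
  int32_t len = enc(NULL, 0, ctx);  // pass 1: measure only
  void* buf = calloc(1, len);
  void* p = buf;
  len = enc(&p, len, ctx);          // pass 2: fill the buffer
  sink(buf, len);                   // persist the serialized state
  free(buf);
}
```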
void doResetCountWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock) {
|
||||
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
TSKEY* startDatas = (TSKEY*)pStartTsCol->pData;
|
||||
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
|
||||
TSKEY* endDatas = (TSKEY*)pEndTsCol->pData;
|
||||
SColumnInfoData* pCalStartTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
|
||||
TSKEY* calStartDatas = (TSKEY*)pStartTsCol->pData;
|
||||
SColumnInfoData* pCalEndTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
|
||||
TSKEY* calEndDatas = (TSKEY*)pEndTsCol->pData;
|
||||
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
|
||||
uint64_t* gpDatas = (uint64_t*)pGroupCol->pData;
|
||||
|
||||
SRowBuffPos* pPos = NULL;
|
||||
int32_t size = 0;
|
||||
for (int32_t i = 0; i < pBlock->info.rows; i++) {
|
||||
SSessionKey key = {.groupId = gpDatas[i], .win.skey = startDatas[i], .win.ekey = endDatas[i]};
|
||||
SStreamStateCur* pCur = NULL;
|
||||
if (isSlidingCountWindow(pAggSup)) {
|
||||
pCur = pAggSup->stateStore.streamStateCountSeekKeyPrev(pAggSup->pState, &key, pAggSup->windowCount);
|
||||
} else {
|
||||
pCur = pAggSup->stateStore.streamStateSessionSeekKeyCurrentNext(pAggSup->pState, &key);
|
||||
}
|
||||
while (1) {
|
||||
SSessionKey tmpKey = {0};
|
||||
int32_t code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &tmpKey, (void **)&pPos, &size);
|
||||
if (code != TSDB_CODE_SUCCESS || tmpKey.win.skey > endDatas[i]) {
|
||||
pAggSup->stateStore.streamStateFreeCur(pCur);
|
||||
break;
|
||||
}
|
||||
if (!inCountCalSlidingWindow(pAggSup, &tmpKey.win, calStartDatas[i], calEndDatas[i])) {
|
||||
pAggSup->stateStore.streamStateCurNext(pAggSup->pState, pCur);
|
||||
continue;
|
||||
}
|
||||
pAggSup->stateStore.streamStateSessionReset(pAggSup->pState, pPos->pRowBuff);
|
||||
pAggSup->stateStore.streamStateCurNext(pAggSup->pState, pCur);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void doDeleteCountWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* result) {
|
||||
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
|
||||
TSKEY* startDatas = (TSKEY*)pStartTsCol->pData;
|
||||
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
|
||||
TSKEY* endDatas = (TSKEY*)pEndTsCol->pData;
|
||||
SColumnInfoData* pCalStartTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
|
||||
TSKEY* calStartDatas = (TSKEY*)pStartTsCol->pData;
|
||||
SColumnInfoData* pCalEndTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
|
||||
TSKEY* calEndDatas = (TSKEY*)pEndTsCol->pData;
|
||||
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
|
||||
uint64_t* gpDatas = (uint64_t*)pGroupCol->pData;
|
||||
for (int32_t i = 0; i < pBlock->info.rows; i++) {
|
||||
SSessionKey key = {.win.skey = startDatas[i], .win.ekey = endDatas[i], .groupId = gpDatas[i]};
|
||||
while (1) {
|
||||
SSessionKey curWin = {0};
|
||||
int32_t code = pAggSup->stateStore.streamStateCountGetKeyByRange(pAggSup->pState, &key, &curWin);
|
||||
if (code == TSDB_CODE_FAILED) {
|
||||
break;
|
||||
}
|
||||
doDeleteSessionWindow(pAggSup, &curWin);
|
||||
if (result) {
|
||||
saveDeleteInfo(result, curWin);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void deleteCountWinState(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SSHashObj* pMapUpdate,
|
||||
SSHashObj* pMapDelete) {
|
||||
SArray* pWins = taosArrayInit(16, sizeof(SSessionKey));
|
||||
if (isSlidingCountWindow(pAggSup)) {
|
||||
doDeleteCountWindows(pAggSup, pBlock, pWins);
|
||||
} else {
|
||||
doDeleteTimeWindows(pAggSup, pBlock, pWins);
|
||||
}
|
||||
removeSessionResults(pAggSup, pMapUpdate, pWins);
|
||||
copyDeleteWindowInfo(pWins, pMapDelete);
|
||||
taosArrayDestroy(pWins);
|
||||
}
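// Presumably the split above is intentional: sliding count windows have to be located with a
// count-range lookup (doDeleteCountWindows), while non-sliding count windows can reuse the
// session-style doDeleteTimeWindows path; both collect the removed keys into pWins so cached
// results and the delete map can be updated afterwards.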
static SSDataBlock* doStreamCountAgg(SOperatorInfo* pOperator) {
  SExprSupp*                   pSup = &pOperator->exprSupp;
  SStreamCountAggOperatorInfo* pInfo = pOperator->info;
  SOptrBasicInfo*              pBInfo = &pInfo->binfo;
  SStreamAggSupporter*         pAggSup = &pInfo->streamAggSup;
  SExecTaskInfo*               pTaskInfo = pOperator->pTaskInfo;
  qDebug("stask:%s %s status: %d", GET_TASKID(pTaskInfo), getStreamOpName(pOperator->operatorType), pOperator->status);
  if (pOperator->status == OP_EXEC_DONE) {
    return NULL;
  } else if (pOperator->status == OP_RES_TO_RETURN) {
    SSDataBlock* opRes = buildCountResult(pOperator);
    if (opRes) {
      return opRes;
    }

    if (pInfo->recvGetAll) {
      pInfo->recvGetAll = false;
      resetUnCloseSessionWinInfo(pInfo->streamAggSup.pResultRows);
    }

    if (pInfo->reCkBlock) {
      pInfo->reCkBlock = false;
      printDataBlock(pInfo->pCheckpointRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
      return pInfo->pCheckpointRes;
    }

    setStreamOperatorCompleted(pOperator);
    return NULL;
  }

  SOperatorInfo* downstream = pOperator->pDownstream[0];
  if (!pInfo->pUpdated) {
    pInfo->pUpdated = taosArrayInit(16, sizeof(SResultWindowInfo));
  }
  if (!pInfo->pStUpdated) {
    _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
    pInfo->pStUpdated = tSimpleHashInit(64, hashFn);
  }
  while (1) {
    SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
    if (pBlock == NULL) {
      break;
    }
    printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "recv", GET_TASKID(pTaskInfo));

    if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
      deleteCountWinState(&pInfo->streamAggSup, pBlock, pInfo->pStUpdated, pInfo->pStDeleted);
      continue;
    } else if (pBlock->info.type == STREAM_CLEAR) {
      doResetCountWindows(&pInfo->streamAggSup, pBlock);
      continue;
    } else if (pBlock->info.type == STREAM_GET_ALL) {
      pInfo->recvGetAll = true;
      getAllSessionWindow(pAggSup->pResultRows, pInfo->pStUpdated);
      continue;
    } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
      return pBlock;
    } else if (pBlock->info.type == STREAM_CHECKPOINT) {
      pAggSup->stateStore.streamStateCommit(pAggSup->pState);
      doStreamCountSaveCheckpoint(pOperator);
      copyDataBlock(pInfo->pCheckpointRes, pBlock);
      continue;
    } else {
      ASSERTS(pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_INVALID, "invalid SSDataBlock type");
    }

    if (pInfo->scalarSupp.pExprInfo != NULL) {
      SExprSupp* pExprSup = &pInfo->scalarSupp;
      projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
    }
    // the pDataBlock are always the same one, no need to call this again
    setInputDataBlock(pSup, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
    doStreamCountAggImpl(pOperator, pBlock, pInfo->pStUpdated, pInfo->pStDeleted);
    pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
    pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.watermark);
  }
  // restore the value
  pOperator->status = OP_RES_TO_RETURN;

  closeSessionWindow(pAggSup->pResultRows, &pInfo->twAggSup, pInfo->pStUpdated);
  copyUpdateResult(&pInfo->pStUpdated, pInfo->pUpdated, sessionKeyCompareAsc);
  removeSessionDeleteResults(pInfo->pStDeleted, pInfo->pUpdated);
  initGroupResInfoFromArrayList(&pInfo->groupResInfo, pInfo->pUpdated);
  pInfo->pUpdated = NULL;
  blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);

  SSDataBlock* opRes = buildCountResult(pOperator);
  if (opRes) {
    return opRes;
  }

  setStreamOperatorCompleted(pOperator);
  return NULL;
}
void streamCountReleaseState(SOperatorInfo* pOperator) {
  SStreamCountAggOperatorInfo* pInfo = pOperator->info;
  int32_t                      resSize = sizeof(TSKEY);
  char*                        pBuff = taosMemoryCalloc(1, resSize);
  memcpy(pBuff, &pInfo->twAggSup.maxTs, sizeof(TSKEY));
  qDebug("===stream=== count window operator release state. ");
  pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_COUNT_OP_STATE_NAME,
                                                     strlen(STREAM_COUNT_OP_STATE_NAME), pBuff, resSize);
  pInfo->streamAggSup.stateStore.streamStateCommit(pInfo->streamAggSup.pState);
  taosMemoryFreeClear(pBuff);
  SOperatorInfo* downstream = pOperator->pDownstream[0];
  if (downstream->fpSet.releaseStreamStateFn) {
    downstream->fpSet.releaseStreamStateFn(downstream);
  }
}

void streamCountReloadState(SOperatorInfo* pOperator) {
  SStreamCountAggOperatorInfo* pInfo = pOperator->info;
  SStreamAggSupporter*         pAggSup = &pInfo->streamAggSup;
  int32_t                      size = 0;
  void*                        pBuf = NULL;

  int32_t code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_COUNT_OP_STATE_NAME,
                                                        strlen(STREAM_COUNT_OP_STATE_NAME), &pBuf, &size);
  TSKEY ts = *(TSKEY*)pBuf;
  pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, ts);
  taosMemoryFree(pBuf);

  SOperatorInfo* downstream = pOperator->pDownstream[0];
  if (downstream->fpSet.reloadStreamStateFn) {
    downstream->fpSet.reloadStreamStateFn(downstream);
  }
  reloadAggSupFromDownStream(downstream, &pInfo->streamAggSup);
}
SOperatorInfo* createStreamCountAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
                                                SExecTaskInfo* pTaskInfo, SReadHandle* pHandle) {
  SCountWinodwPhysiNode*       pCountNode = (SCountWinodwPhysiNode*)pPhyNode;
  int32_t                      numOfCols = 0;
  int32_t                      code = TSDB_CODE_OUT_OF_MEMORY;
  SStreamCountAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamCountAggOperatorInfo));
  SOperatorInfo*               pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
  if (pInfo == NULL || pOperator == NULL) {
    goto _error;
  }

  pOperator->pTaskInfo = pTaskInfo;

  initResultSizeInfo(&pOperator->resultInfo, 4096);
  if (pCountNode->window.pExprs != NULL) {
    int32_t    numOfScalar = 0;
    SExprInfo* pScalarExprInfo = createExprInfo(pCountNode->window.pExprs, NULL, &numOfScalar);
    code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore);
    if (code != TSDB_CODE_SUCCESS) {
      goto _error;
    }
  }
  SExprSupp* pExpSup = &pOperator->exprSupp;

  SExprInfo*   pExprInfo = createExprInfo(pCountNode->window.pFuncs, NULL, &numOfCols);
  SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc);
  code = initBasicInfoEx(&pInfo->binfo, pExpSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }

  code = initStreamAggSupporter(&pInfo->streamAggSup, pExpSup, numOfCols, 0,
                                pTaskInfo->streamInfo.pState, sizeof(COUNT_TYPE), 0, &pTaskInfo->storageAPI.stateStore,
                                pHandle, &pInfo->twAggSup, GET_TASKID(pTaskInfo), &pTaskInfo->storageAPI);
  if (code != TSDB_CODE_SUCCESS) {
    goto _error;
  }
  pInfo->streamAggSup.windowCount = pCountNode->windowCount;
  pInfo->streamAggSup.windowSliding = pCountNode->windowSliding;

  pInfo->twAggSup = (STimeWindowAggSupp){
      .waterMark = pCountNode->window.watermark,
      .calTrigger = pCountNode->window.triggerType,
      .maxTs = INT64_MIN,
      .minTs = INT64_MAX,
  };

  initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);

  pInfo->primaryTsIndex = ((SColumnNode*)pCountNode->window.pTspk)->slotId;

  pInfo->binfo.pRes = pResBlock;
  _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
  pInfo->pStDeleted = tSimpleHashInit(64, hashFn);
  pInfo->pDelIterator = NULL;
  pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
  pInfo->ignoreExpiredData = pCountNode->window.igExpired;
  pInfo->ignoreExpiredDataSaved = false;
  pInfo->pUpdated = NULL;
  pInfo->pStUpdated = NULL;
  pInfo->dataVersion = 0;
  pInfo->historyWins = taosArrayInit(4, sizeof(SSessionKey));
  if (!pInfo->historyWins) {
    goto _error;
  }

  pInfo->pCheckpointRes = createSpecialDataBlock(STREAM_CHECKPOINT);
  pInfo->recvGetAll = false;

  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT;
  // for stream
  void*   buff = NULL;
  int32_t len = 0;
  int32_t res =
      pInfo->streamAggSup.stateStore.streamStateGetInfo(pInfo->streamAggSup.pState, STREAM_COUNT_OP_CHECKPOINT_NAME,
                                                        strlen(STREAM_COUNT_OP_CHECKPOINT_NAME), &buff, &len);
  if (res == TSDB_CODE_SUCCESS) {
    doStreamCountDecodeOpState(buff, len, pOperator, true);
    taosMemoryFree(buff);
  }
  setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT, true,
                  OP_NOT_OPENED, pInfo, pTaskInfo);
  pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamCountAgg, NULL, destroyStreamCountAggOperatorInfo,
                                         optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
  setOperatorStreamStateFn(pOperator, streamCountReleaseState, streamCountReloadState);

  if (downstream) {
    initDownStream(downstream, &pInfo->streamAggSup, pOperator->operatorType, pInfo->primaryTsIndex, &pInfo->twAggSup);
    code = appendDownstream(pOperator, &downstream, 1);
  }
  return pOperator;

_error:
  if (pInfo != NULL) {
    destroyStreamCountAggOperatorInfo(pInfo);
  }

  taosMemoryFreeClear(pOperator);
  pTaskInfo->code = code;
  return NULL;
}
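// Note: on creation the operator also tries to restore a checkpoint previously saved under
// STREAM_COUNT_OP_CHECKPOINT_NAME via doStreamCountDecodeOpState, so a restarted stream task
// resumes from its last committed count-window state.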
@ -1945,7 +1945,7 @@ int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo*
  return TSDB_CODE_SUCCESS;
}

- static bool doDeleteSessionWindow(SStreamAggSupporter* pAggSup, SSessionKey* pKey) {
+ bool doDeleteSessionWindow(SStreamAggSupporter* pAggSup, SSessionKey* pKey) {
  pAggSup->stateStore.streamStateSessionDel(pAggSup->pState, pKey);
  SSessionKey hashKey = {0};
  getSessionHashKey(pKey, &hashKey);

@ -2343,7 +2343,7 @@ int32_t getAllSessionWindow(SSHashObj* pHashMap, SSHashObj* pStUpdated) {
  return TSDB_CODE_SUCCESS;
}

- static void copyDeleteWindowInfo(SArray* pResWins, SSHashObj* pStDeleted) {
+ void copyDeleteWindowInfo(SArray* pResWins, SSHashObj* pStDeleted) {
  int32_t size = taosArrayGetSize(pResWins);
  for (int32_t i = 0; i < size; i++) {
    SSessionKey* pWinKey = taosArrayGet(pResWins, i);
@ -1043,7 +1043,7 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO
  return 0;
}

- static SSDataBlock* getRowsBlockWithinMergeLimit(const SSortHandle* pHandle, SSHashObj* mTableNumRows, SSDataBlock* pOrigBlk, bool* pExtractedBlock) {
+ static SSDataBlock* getRowsBlockWithinMergeLimit(const SSortHandle* pHandle, SSHashObj* mTableNumRows, SSDataBlock* pOrigBlk, bool* pExtractedBlock, bool *pSkipBlock) {
  int64_t nRows = 0;
  int64_t prevRows = 0;
  void*   pNum = tSimpleHashGet(mTableNumRows, &pOrigBlk->info.id.uid, sizeof(pOrigBlk->info.id.uid));

@ -1062,9 +1062,15 @@ static SSDataBlock* getRowsBlockWithinMergeLimit(const SSortHandle* pHandle, SSH
  if (pHandle->mergeLimitReachedFn) {
    pHandle->mergeLimitReachedFn(pOrigBlk->info.id.uid, pHandle->mergeLimitReachedParam);
  }
-   keepRows = pHandle->mergeLimit - prevRows;
+   keepRows = pHandle->mergeLimit > prevRows ? (pHandle->mergeLimit - prevRows) : 0;
  }

+ if (keepRows == 0) {
+   *pSkipBlock = true;
+   return pOrigBlk;
+ }

+ *pSkipBlock = false;
  SSDataBlock* pBlock = NULL;
  if (keepRows != pOrigBlk->info.rows) {
    pBlock = blockDataExtractBlock(pOrigBlk, 0, keepRows);

@ -1106,8 +1112,12 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {

  int64_t p = taosGetTimestampUs();
  bool bExtractedBlock = false;
+ bool bSkipBlock = false;
  if (pBlk != NULL && pHandle->mergeLimit > 0) {
-   pBlk = getRowsBlockWithinMergeLimit(pHandle, mTableNumRows, pBlk, &bExtractedBlock);
+   pBlk = getRowsBlockWithinMergeLimit(pHandle, mTableNumRows, pBlk, &bExtractedBlock, &bSkipBlock);
+   if (bSkipBlock) {
+     continue;
+   }
  }

  if (pBlk != NULL) {

@ -1121,7 +1131,6 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {

  if (pBlk != NULL) {
    szSort += blockDataGetSize(pBlk);

    void* ppBlk = tSimpleHashGet(mUidBlk, &pBlk->info.id.uid, sizeof(pBlk->info.id.uid));
    if (ppBlk != NULL) {
      SSDataBlock* tBlk = *(SSDataBlock**)(ppBlk);

@ -1138,7 +1147,6 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {

  if ((pBlk != NULL && szSort > maxBufSize) || (pBlk == NULL && szSort > 0)) {
    tSimpleHashClear(mUidBlk);

    code = sortBlocksToExtSource(pHandle, aBlkSort, pOrder, aExtSrc);
    if (code != TSDB_CODE_SUCCESS) {
      tSimpleHashCleanup(mUidBlk);
@ -459,7 +459,7 @@ static void idxInterRsltDestroy(SArray* results) {

static int idxMergeFinalResults(SArray* in, EIndexOperatorType oType, SArray* out) {
  // refactor, merge interResults into fResults by oType
- for (int i = 0; i < taosArrayGetSize(in); i--) {
+ for (int i = 0; i < taosArrayGetSize(in); i++) {
    SArray* t = taosArrayGetP(in, i);
    taosArraySort(t, uidCompare);
    taosArrayRemoveDuplicate(t, uidCompare, NULL);
|
|
@ -17,6 +17,7 @@
|
|||
#include "index.h"
|
||||
#include "indexComm.h"
|
||||
#include "indexInt.h"
|
||||
#include "indexUtil.h"
|
||||
#include "nodes.h"
|
||||
#include "querynodes.h"
|
||||
#include "scalar.h"
|
||||
|
@ -77,15 +78,15 @@ typedef struct SIFParam {
|
|||
char dbName[TSDB_DB_NAME_LEN];
|
||||
char colName[TSDB_COL_NAME_LEN * 2 + 4];
|
||||
|
||||
SIndexMetaArg arg;
|
||||
SIndexMetaArg arg;
|
||||
SMetaDataFilterAPI api;
|
||||
} SIFParam;
|
||||
|
||||
typedef struct SIFCtx {
|
||||
int32_t code;
|
||||
SHashObj *pRes; /* element is SIFParam */
|
||||
bool noExec; // true: just iterate condition tree, and add hint to executor plan
|
||||
SIndexMetaArg arg;
|
||||
int32_t code;
|
||||
SHashObj *pRes; /* element is SIFParam */
|
||||
bool noExec; // true: just iterate condition tree, and add hint to executor plan
|
||||
SIndexMetaArg arg;
|
||||
SMetaDataFilterAPI *pAPI;
|
||||
} SIFCtx;
|
||||
|
||||
|
@ -669,6 +670,10 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
|
|||
if (sifSetFltParam(left, right, &typedata, ¶m) != 0) return -1;
|
||||
}
|
||||
ret = left->api.metaFilterTableIds(arg->metaEx, ¶m, output->result);
|
||||
if (ret == 0) {
|
||||
taosArraySort(output->result, uidCompare);
|
||||
taosArrayRemoveDuplicate(output->result, uidCompare, NULL);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -875,8 +880,8 @@ static int32_t sifExecLogic(SLogicConditionNode *node, SIFCtx *ctx, SIFParam *ou
|
|||
} else if (node->condType == LOGIC_COND_TYPE_NOT) {
|
||||
// taosArrayAddAll(output->result, params[m].result);
|
||||
}
|
||||
taosArraySort(output->result, idxUidCompare);
|
||||
taosArrayRemoveDuplicate(output->result, idxUidCompare, NULL);
|
||||
taosArraySort(output->result, uidCompare);
|
||||
taosArrayRemoveDuplicate(output->result, uidCompare, NULL);
|
||||
}
|
||||
} else {
|
||||
for (int32_t m = 0; m < node->pParameterList->length; m++) {
|
||||
|
@ -1016,7 +1021,7 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status, SMetaDataFilterAPI* pAPI) {
|
||||
static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status, SMetaDataFilterAPI *pAPI) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (pNode == NULL) {
|
||||
return TSDB_CODE_QRY_INVALID_INPUT;
|
||||
|
@ -1054,7 +1059,8 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status, SMetaDataFilte
|
|||
return code;
|
||||
}
|
||||
|
||||
int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result, SIdxFltStatus *status, SMetaDataFilterAPI* pAPI) {
|
||||
int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result, SIdxFltStatus *status,
|
||||
SMetaDataFilterAPI *pAPI) {
|
||||
SIdxFltStatus st = idxGetFltStatus(pFilterNode, pAPI);
|
||||
if (st == SFLT_NOT_INDEX) {
|
||||
*status = st;
|
||||
|
@ -1081,7 +1087,7 @@ int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result,
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SIdxFltStatus idxGetFltStatus(SNode *pFilterNode, SMetaDataFilterAPI* pAPI) {
|
||||
SIdxFltStatus idxGetFltStatus(SNode *pFilterNode, SMetaDataFilterAPI *pAPI) {
|
||||
SIdxFltStatus st = SFLT_NOT_INDEX;
|
||||
if (pFilterNode == NULL) {
|
||||
return SFLT_NOT_INDEX;
|
||||
|
|
|
@ -329,6 +329,13 @@ static int32_t eventWindowNodeCopy(const SEventWindowNode* pSrc, SEventWindowNod
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t countWindowNodeCopy(const SCountWindowNode* pSrc, SCountWindowNode* pDst) {
|
||||
CLONE_NODE_FIELD(pCol);
|
||||
COPY_SCALAR_FIELD(windowCount);
|
||||
COPY_SCALAR_FIELD(windowSliding);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t sessionWindowNodeCopy(const SSessionWindowNode* pSrc, SSessionWindowNode* pDst) {
|
||||
CLONE_NODE_FIELD_EX(pCol, SColumnNode*);
|
||||
CLONE_NODE_FIELD_EX(pGap, SValueNode*);
|
||||
|
@ -449,6 +456,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
|
|||
COPY_SCALAR_FIELD(filesetDelimited);
|
||||
COPY_SCALAR_FIELD(isCountByTag);
|
||||
CLONE_OBJECT_FIELD(pFuncTypes, functParamClone);
|
||||
COPY_SCALAR_FIELD(paraTablesSort);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -551,6 +559,8 @@ static int32_t logicWindowCopy(const SWindowLogicNode* pSrc, SWindowLogicNode* p
|
|||
COPY_SCALAR_FIELD(igExpired);
|
||||
COPY_SCALAR_FIELD(igCheckUpdate);
|
||||
COPY_SCALAR_FIELD(windowAlgo);
|
||||
COPY_SCALAR_FIELD(windowCount);
|
||||
COPY_SCALAR_FIELD(windowSliding);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -679,6 +689,7 @@ static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhy
|
|||
COPY_SCALAR_FIELD(igExpired);
|
||||
COPY_SCALAR_FIELD(filesetDelimited);
|
||||
COPY_SCALAR_FIELD(needCountEmptyTable);
|
||||
COPY_SCALAR_FIELD(paraTablesSort);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -853,6 +864,9 @@ SNode* nodesCloneNode(const SNode* pNode) {
|
|||
case QUERY_NODE_EVENT_WINDOW:
|
||||
code = eventWindowNodeCopy((const SEventWindowNode*)pNode, (SEventWindowNode*)pDst);
|
||||
break;
|
||||
case QUERY_NODE_COUNT_WINDOW:
|
||||
code = countWindowNodeCopy((const SCountWindowNode*)pNode, (SCountWindowNode*)pDst);
|
||||
break;
|
||||
case QUERY_NODE_SESSION_WINDOW:
|
||||
code = sessionWindowNodeCopy((const SSessionWindowNode*)pNode, (SSessionWindowNode*)pDst);
|
||||
break;
|
||||
|
|
|
@ -91,6 +91,8 @@ const char* nodesNodeName(ENodeType type) {
|
|||
return "CaseWhen";
|
||||
case QUERY_NODE_EVENT_WINDOW:
|
||||
return "EventWindow";
|
||||
case QUERY_NODE_COUNT_WINDOW:
|
||||
return "CountWindow";
|
||||
case QUERY_NODE_SET_OPERATOR:
|
||||
return "SetOperator";
|
||||
case QUERY_NODE_SELECT_STMT:
|
||||
|
@ -343,6 +345,10 @@ const char* nodesNodeName(ENodeType type) {
|
|||
return "PhysiMergeEventWindow";
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
|
||||
return "PhysiStreamEventWindow";
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT:
|
||||
return "PhysiMergeCountWindow";
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
|
||||
return "PhysiStreamCountWindow";
|
||||
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
|
||||
return "PhysiProject";
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
|
||||
|
@ -692,6 +698,7 @@ static const char* jkScanLogicPlanTagCond = "TagCond";
static const char* jkScanLogicPlanGroupTags = "GroupTags";
static const char* jkScanLogicPlanOnlyMetaCtbIdx = "OnlyMetaCtbIdx";
static const char* jkScanLogicPlanFilesetDelimited = "FilesetDelimited";
static const char* jkScanLogicPlanparaTablesSort = "paraTablesSort";

static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) {
  const SScanLogicNode* pNode = (const SScanLogicNode*)pObj;

@ -739,6 +746,9 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddBoolToObject(pJson, jkScanLogicPlanFilesetDelimited, pNode->filesetDelimited);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddBoolToObject(pJson, jkScanLogicPlanparaTablesSort, pNode->paraTablesSort);
  }
  return code;
}

@ -789,6 +799,9 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBoolValue(pJson, jkScanLogicPlanFilesetDelimited, &pNode->filesetDelimited);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBoolValue(pJson, jkScanLogicPlanparaTablesSort, &pNode->paraTablesSort);
  }
  return code;
}
|
||||
|
||||
|
@ -1882,6 +1895,7 @@ static const char* jkTableScanPhysiPlanAssignBlockUid = "AssignBlockUid";
|
|||
static const char* jkTableScanPhysiPlanIgnoreUpdate = "IgnoreUpdate";
|
||||
static const char* jkTableScanPhysiPlanFilesetDelimited = "FilesetDelimited";
|
||||
static const char* jkTableScanPhysiPlanNeedCountEmptyTable = "NeedCountEmptyTable";
|
||||
static const char* jkTableScanPhysiPlanparaTablesSort = "paraTablesSort";
|
||||
|
||||
static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
|
||||
const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
|
||||
|
@ -1956,6 +1970,9 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanNeedCountEmptyTable, pNode->needCountEmptyTable);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanparaTablesSort, pNode->paraTablesSort);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -2032,6 +2049,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanNeedCountEmptyTable, &pNode->needCountEmptyTable);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanparaTablesSort, &pNode->paraTablesSort);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -2745,6 +2765,36 @@ static int32_t jsonToPhysiEventWindowNode(const SJson* pJson, void* pObj) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static const char* jkCountWindowPhysiPlanWindowCount = "WindowCount";
|
||||
static const char* jkCountWindowPhysiPlanWindowSliding = "WindowSliding";
|
||||
|
||||
static int32_t physiCountWindowNodeToJson(const void* pObj, SJson* pJson) {
|
||||
const SCountWinodwPhysiNode* pNode = (const SCountWinodwPhysiNode*)pObj;
|
||||
|
||||
int32_t code = physiWindowNodeToJson(pObj, pJson);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddIntegerToObject(pJson, jkCountWindowPhysiPlanWindowCount, pNode->windowCount);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddIntegerToObject(pJson, jkCountWindowPhysiPlanWindowSliding, pNode->windowSliding);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t jsonToPhysiCountWindowNode(const SJson* pJson, void* pObj) {
|
||||
SCountWinodwPhysiNode* pNode = (SCountWinodwPhysiNode*)pObj;
|
||||
|
||||
int32_t code = jsonToPhysiWindowNode(pJson, pObj);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetBigIntValue(pJson, jkCountWindowPhysiPlanWindowCount, &pNode->windowCount);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetBigIntValue(pJson, jkCountWindowPhysiPlanWindowSliding, &pNode->windowSliding);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static const char* jkPartitionPhysiPlanExprs = "Exprs";
|
||||
static const char* jkPartitionPhysiPlanPartitionKeys = "PartitionKeys";
|
||||
static const char* jkPartitionPhysiPlanTargets = "Targets";
|
||||
|
@ -4408,6 +4458,36 @@ static int32_t jsonToEventWindowNode(const SJson* pJson, void* pObj) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static const char* jkCountWindowTsPrimaryKey = "CountTsPrimaryKey";
|
||||
static const char* jkCountWindowCount = "CountWindowCount";
|
||||
static const char* jkCountWindowSliding = "CountWindowSliding";
|
||||
|
||||
static int32_t countWindowNodeToJson(const void* pObj, SJson* pJson) {
|
||||
const SCountWindowNode* pNode = (const SCountWindowNode*)pObj;
|
||||
|
||||
int32_t code = tjsonAddObject(pJson, jkCountWindowTsPrimaryKey, nodeToJson, pNode->pCol);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddIntegerToObject(pJson, jkCountWindowCount, pNode->windowCount);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddIntegerToObject(pJson, jkCountWindowSliding, pNode->windowSliding);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t jsonToCountWindowNode(const SJson* pJson, void* pObj) {
|
||||
SCountWindowNode* pNode = (SCountWindowNode*)pObj;
|
||||
|
||||
int32_t code = jsonToNodeObject(pJson, jkCountWindowTsPrimaryKey, &pNode->pCol);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetBigIntValue(pJson, jkCountWindowCount, &pNode->windowCount);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetBigIntValue(pJson, jkCountWindowSliding, &pNode->windowSliding);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
static const char* jkIntervalWindowInterval = "Interval";
|
||||
static const char* jkIntervalWindowOffset = "Offset";
|
||||
static const char* jkIntervalWindowSliding = "Sliding";
|
||||
|
@ -6991,6 +7071,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
|
|||
return caseWhenNodeToJson(pObj, pJson);
|
||||
case QUERY_NODE_EVENT_WINDOW:
|
||||
return eventWindowNodeToJson(pObj, pJson);
|
||||
case QUERY_NODE_COUNT_WINDOW:
|
||||
return countWindowNodeToJson(pObj, pJson);
|
||||
case QUERY_NODE_SET_OPERATOR:
|
||||
return setOperatorToJson(pObj, pJson);
|
||||
case QUERY_NODE_SELECT_STMT:
|
||||
|
@ -7230,6 +7312,9 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
|
|||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
|
||||
return physiEventWindowNodeToJson(pObj, pJson);
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT:
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
|
||||
return physiCountWindowNodeToJson(pObj, pJson);
|
||||
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
|
||||
return physiPartitionNodeToJson(pObj, pJson);
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
|
||||
|
@ -7323,6 +7408,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
|
|||
return jsonToCaseWhenNode(pJson, pObj);
|
||||
case QUERY_NODE_EVENT_WINDOW:
|
||||
return jsonToEventWindowNode(pJson, pObj);
|
||||
case QUERY_NODE_COUNT_WINDOW:
|
||||
return jsonToCountWindowNode(pJson, pObj);
|
||||
case QUERY_NODE_SET_OPERATOR:
|
||||
return jsonToSetOperator(pJson, pObj);
|
||||
case QUERY_NODE_SELECT_STMT:
|
||||
|
@ -7570,6 +7657,9 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
|
|||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
|
||||
return jsonToPhysiEventWindowNode(pJson, pObj);
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT:
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
|
||||
return jsonToPhysiCountWindowNode(pJson, pObj);
|
||||
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
|
||||
return jsonToPhysiPartitionNode(pJson, pObj);
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
|
||||
|
|
|
@ -2185,6 +2185,9 @@ static int32_t physiTableScanNodeInlineToMsg(const void* pObj, STlvEncoder* pEnc
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeValueBool(pEncoder, pNode->needCountEmptyTable);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeValueBool(pEncoder, pNode->paraTablesSort);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -2269,6 +2272,9 @@ static int32_t msgToPhysiTableScanNodeInline(STlvDecoder* pDecoder, void* pObj)
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvDecodeValueBool(pDecoder, &pNode->needCountEmptyTable);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvDecodeValueBool(pDecoder, &pNode->paraTablesSort);
|
||||
}
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -3210,6 +3216,46 @@ static int32_t msgToPhysiEventWindowNode(STlvDecoder* pDecoder, void* pObj) {
|
|||
return code;
|
||||
}
|
||||
|
||||
enum { PHY_COUNT_CODE_WINDOW = 1, PHY_COUNT_CODE_WINDOW_COUNT, PHY_COUNT_CODE_WINDOW_SLIDING };
|
||||
|
||||
static int32_t physiCountWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
||||
const SCountWinodwPhysiNode* pNode = (const SCountWinodwPhysiNode*)pObj;
|
||||
|
||||
int32_t code = tlvEncodeObj(pEncoder, PHY_COUNT_CODE_WINDOW, physiWindowNodeToMsg, &pNode->window);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeI64(pEncoder, PHY_COUNT_CODE_WINDOW_COUNT, pNode->windowCount);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tlvEncodeI64(pEncoder, PHY_COUNT_CODE_WINDOW_SLIDING, pNode->windowSliding);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t msgToPhysiCountWindowNode(STlvDecoder* pDecoder, void* pObj) {
|
||||
SCountWinodwPhysiNode* pNode = (SCountWinodwPhysiNode*)pObj;
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
STlv* pTlv = NULL;
|
||||
tlvForEach(pDecoder, pTlv, code) {
|
||||
switch (pTlv->type) {
|
||||
case PHY_COUNT_CODE_WINDOW:
|
||||
code = tlvDecodeObjFromTlv(pTlv, msgToPhysiWindowNode, &pNode->window);
|
||||
break;
|
||||
case PHY_COUNT_CODE_WINDOW_COUNT:
|
||||
code = tlvDecodeI64(pTlv, &pNode->windowCount);
|
||||
break;
|
||||
case PHY_COUNT_CODE_WINDOW_SLIDING:
|
||||
code = tlvDecodeI64(pTlv, &pNode->windowSliding);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
enum {
|
||||
PHY_PARTITION_CODE_BASE_NODE = 1,
|
||||
PHY_PARTITION_CODE_EXPR,
|
||||
|
@ -4200,6 +4246,10 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
|
|||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
|
||||
code = physiEventWindowNodeToMsg(pObj, pEncoder);
|
||||
break;
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT:
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
|
||||
code = physiCountWindowNodeToMsg(pObj, pEncoder);
|
||||
break;
|
||||
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
|
||||
code = physiPartitionNodeToMsg(pObj, pEncoder);
|
||||
break;
|
||||
|
@ -4355,6 +4405,10 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) {
|
|||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
|
||||
code = msgToPhysiEventWindowNode(pDecoder, pObj);
|
||||
break;
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT:
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
|
||||
code = msgToPhysiCountWindowNode(pDecoder, pObj);
|
||||
break;
|
||||
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
|
||||
code = msgToPhysiPartitionNode(pDecoder, pObj);
|
||||
break;
|
||||
|
|
|
@ -176,6 +176,11 @@ static EDealRes dispatchExpr(SNode* pNode, ETraversalOrder order, FNodeWalker wa
|
|||
}
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_COUNT_WINDOW: {
|
||||
SCountWindowNode* pEvent = (SCountWindowNode*)pNode;
|
||||
res = walkExpr(pEvent->pCol, order, walker, pContext);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -367,6 +372,11 @@ static EDealRes rewriteExpr(SNode** pRawNode, ETraversalOrder order, FNodeRewrit
|
|||
}
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_COUNT_WINDOW: {
|
||||
SCountWindowNode* pEvent = (SCountWindowNode*)pNode;
|
||||
res = rewriteExpr(&pEvent->pCol, order, rewriter, pContext);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -302,6 +302,8 @@ SNode* nodesMakeNode(ENodeType type) {
|
|||
return makeNode(type, sizeof(SCaseWhenNode));
|
||||
case QUERY_NODE_EVENT_WINDOW:
|
||||
return makeNode(type, sizeof(SEventWindowNode));
|
||||
case QUERY_NODE_COUNT_WINDOW:
|
||||
return makeNode(type, sizeof(SCountWindowNode));
|
||||
case QUERY_NODE_HINT:
|
||||
return makeNode(type, sizeof(SHintNode));
|
||||
case QUERY_NODE_VIEW:
|
||||
|
@ -585,6 +587,10 @@ SNode* nodesMakeNode(ENodeType type) {
|
|||
return makeNode(type, sizeof(SEventWinodwPhysiNode));
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
|
||||
return makeNode(type, sizeof(SStreamEventWinodwPhysiNode));
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT:
|
||||
return makeNode(type, sizeof(SCountWinodwPhysiNode));
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT:
|
||||
return makeNode(type, sizeof(SStreamCountWinodwPhysiNode));
|
||||
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
|
||||
return makeNode(type, sizeof(SPartitionPhysiNode));
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
|
||||
|
@ -850,6 +856,11 @@ void nodesDestroyNode(SNode* pNode) {
|
|||
nodesDestroyNode(pEvent->pEndCond);
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_COUNT_WINDOW: {
|
||||
SCountWindowNode* pEvent = (SCountWindowNode*)pNode;
|
||||
nodesDestroyNode(pEvent->pCol);
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_HINT: {
|
||||
SHintNode* pHint = (SHintNode*)pNode;
|
||||
destroyHintValue(pHint->option, pHint->value);
|
||||
|
@ -1427,6 +1438,12 @@ void nodesDestroyNode(SNode* pNode) {
|
|||
nodesDestroyNode(pPhyNode->pEndCond);
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT:
|
||||
case QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT: {
|
||||
SCountWinodwPhysiNode* pPhyNode = (SCountWinodwPhysiNode*)pNode;
|
||||
destroyWinodwPhysiNode((SWindowPhysiNode*)pPhyNode);
|
||||
break;
|
||||
}
|
||||
case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
|
||||
destroyPartitionPhysiNode((SPartitionPhysiNode*)pNode);
|
||||
break;
|
||||
|
|
|
@ -131,6 +131,7 @@ SNode* createOrderByExprNode(SAstCreateContext* pCxt, SNode* pExpr, EOrder order
|
|||
SNode* createSessionWindowNode(SAstCreateContext* pCxt, SNode* pCol, SNode* pGap);
|
||||
SNode* createStateWindowNode(SAstCreateContext* pCxt, SNode* pExpr);
|
||||
SNode* createEventWindowNode(SAstCreateContext* pCxt, SNode* pStartCond, SNode* pEndCond);
|
||||
SNode* createCountWindowNode(SAstCreateContext* pCxt, const SToken* pCountToken, const SToken* pSlidingToken);
|
||||
SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode* pOffset, SNode* pSliding,
|
||||
SNode* pFill);
|
||||
SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues);
|
||||
|
|
|
@ -517,7 +517,7 @@ cmd ::= SHOW VNODES.
|
|||
// show alive
|
||||
cmd ::= SHOW db_name_cond_opt(A) ALIVE. { pCxt->pRootNode = createShowAliveStmt(pCxt, A, QUERY_NODE_SHOW_DB_ALIVE_STMT); }
|
||||
cmd ::= SHOW CLUSTER ALIVE. { pCxt->pRootNode = createShowAliveStmt(pCxt, NULL, QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT); }
|
||||
cmd ::= SHOW db_name_cond_opt(A) VIEWS. { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VIEWS_STMT, A, NULL, OP_TYPE_LIKE); }
|
||||
cmd ::= SHOW db_name_cond_opt(A) VIEWS like_pattern_opt(B). { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VIEWS_STMT, A, B, OP_TYPE_LIKE); }
|
||||
cmd ::= SHOW CREATE VIEW full_table_name(A). { pCxt->pRootNode = createShowCreateViewStmt(pCxt, QUERY_NODE_SHOW_CREATE_VIEW_STMT, A); }
|
||||
cmd ::= SHOW COMPACTS. { pCxt->pRootNode = createShowCompactsStmt(pCxt, QUERY_NODE_SHOW_COMPACTS_STMT); }
|
||||
cmd ::= SHOW COMPACT NK_INTEGER(A). { pCxt->pRootNode = createShowCompactDetailsStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &A)); }
|
||||
|
@ -1160,6 +1160,10 @@ twindow_clause_opt(A) ::=
  sliding_opt(D) fill_opt(E).                                            { A = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C), D, E); }
twindow_clause_opt(A) ::=
  EVENT_WINDOW START WITH search_condition(B) END WITH search_condition(C).  { A = createEventWindowNode(pCxt, B, C); }
twindow_clause_opt(A) ::=
  COUNT_WINDOW NK_LP NK_INTEGER(B) NK_RP.                                { A = createCountWindowNode(pCxt, &B, &B); }
twindow_clause_opt(A) ::=
  COUNT_WINDOW NK_LP NK_INTEGER(B) NK_COMMA NK_INTEGER(C) NK_RP.         { A = createCountWindowNode(pCxt, &B, &C); }
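// Illustrative usage of the two COUNT_WINDOW productions above (assumed syntax, placeholder
// table/column names):
//   SELECT _wstart, COUNT(*) FROM tb COUNT_WINDOW(10, 2);   -- window of 10 rows, sliding by 2
//   SELECT _wstart, COUNT(*) FROM tb COUNT_WINDOW(10);      -- same token reused: count = sliding = 10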
|
||||
|
||||
sliding_opt(A) ::= . { A = NULL; }
|
||||
sliding_opt(A) ::= SLIDING NK_LP interval_sliding_duration_literal(B) NK_RP. { A = releaseRawExprNode(pCxt, B); }
|
||||
|
|
|
@ -401,6 +401,9 @@ bool addHintNodeToList(SAstCreateContext* pCxt, SNodeList** ppHintList, EHintOpt
|
|||
case HINT_PARTITION_FIRST:
|
||||
if (paramNum > 0 || hasHint(*ppHintList, HINT_SORT_FOR_GROUP)) return true;
|
||||
break;
|
||||
case HINT_PARA_TABLES_SORT:
|
||||
if (paramNum > 0 || hasHint(*ppHintList, HINT_PARA_TABLES_SORT)) return true;
|
||||
break;
|
||||
default:
|
||||
return true;
|
||||
}
|
||||
|
@ -479,6 +482,14 @@ SNodeList* createHintNodeList(SAstCreateContext* pCxt, const SToken* pLiteral) {
|
|||
}
|
||||
opt = HINT_PARTITION_FIRST;
|
||||
break;
|
||||
case TK_PARA_TABLES_SORT:
|
||||
lastComma = false;
|
||||
if (0 != opt || inParamList) {
|
||||
quit = true;
|
||||
break;
|
||||
}
|
||||
opt = HINT_PARA_TABLES_SORT;
|
||||
break;
|
||||
case TK_NK_LP:
|
||||
lastComma = false;
|
||||
if (0 == opt || inParamList) {
|
||||
|
@ -883,6 +894,20 @@ SNode* createEventWindowNode(SAstCreateContext* pCxt, SNode* pStartCond, SNode*
  return (SNode*)pEvent;
}

SNode* createCountWindowNode(SAstCreateContext* pCxt, const SToken* pCountToken, const SToken* pSlidingToken) {
  CHECK_PARSER_STATUS(pCxt);
  SCountWindowNode* pCount = (SCountWindowNode*)nodesMakeNode(QUERY_NODE_COUNT_WINDOW);
  CHECK_OUT_OF_MEM(pCount);
  pCount->pCol = createPrimaryKeyCol(pCxt, NULL);
  if (NULL == pCount->pCol) {
    nodesDestroyNode((SNode*)pCount);
    CHECK_OUT_OF_MEM(NULL);
  }
  pCount->windowCount = taosStr2Int64(pCountToken->z, NULL, 10);
  pCount->windowSliding = taosStr2Int64(pSlidingToken->z, NULL, 10);
  return (SNode*)pCount;
}
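// Note: when the single-argument COUNT_WINDOW(N) form is used, the grammar passes the same token
// for pCountToken and pSlidingToken, so windowCount and windowSliding come out equal.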
|
||||
|
||||
SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode* pOffset, SNode* pSliding,
|
||||
SNode* pFill) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
|
|
|
@ -663,7 +663,7 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
|
|||
}
|
||||
|
||||
char* p = (char*)data;
|
||||
// | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
|
||||
// | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each column
|
||||
// length |
|
||||
int32_t version = *(int32_t*)data;
|
||||
p += sizeof(int32_t);
|
||||
|
|
|
@ -72,6 +72,7 @@ static SKeyword keywordTable[] = {
|
|||
{"CONSUMERS", TK_CONSUMERS},
|
||||
{"CONTAINS", TK_CONTAINS},
|
||||
{"COUNT", TK_COUNT},
|
||||
{"COUNT_WINDOW", TK_COUNT_WINDOW},
|
||||
{"CREATE", TK_CREATE},
|
||||
{"CURRENT_USER", TK_CURRENT_USER},
|
||||
{"DATABASE", TK_DATABASE},
|
||||
|
@ -173,6 +174,7 @@ static SKeyword keywordTable[] = {
|
|||
{"OUTPUTTYPE", TK_OUTPUTTYPE},
|
||||
{"PAGES", TK_PAGES},
|
||||
{"PAGESIZE", TK_PAGESIZE},
|
||||
{"PARA_TABLES_SORT", TK_PARA_TABLES_SORT},
|
||||
{"PARTITION", TK_PARTITION},
|
||||
{"PARTITION_FIRST", TK_PARTITION_FIRST},
|
||||
{"PASS", TK_PASS},
|
||||
|
|
|
@ -4002,6 +4002,15 @@ static int32_t translateEventWindow(STranslateContext* pCxt, SSelectStmt* pSelec
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateCountWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||
if (QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
|
||||
!isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TIMELINE_QUERY,
|
||||
"COUNT_WINDOW requires valid time series input");
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateSpecificWindow(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||
switch (nodeType(pSelect->pWindow)) {
|
||||
case QUERY_NODE_STATE_WINDOW:
|
||||
|
@ -4012,6 +4021,8 @@ static int32_t translateSpecificWindow(STranslateContext* pCxt, SSelectStmt* pSe
|
|||
return translateIntervalWindow(pCxt, pSelect);
|
||||
case QUERY_NODE_EVENT_WINDOW:
|
||||
return translateEventWindow(pCxt, pSelect);
|
||||
case QUERY_NODE_COUNT_WINDOW:
|
||||
return translateCountWindow(pCxt, pSelect);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -7799,6 +7810,53 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm
    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                   "The trigger mode of non window query can only be AT_ONCE");
  }

  if (pSelect->pWindow != NULL && pSelect->pWindow->type == QUERY_NODE_COUNT_WINDOW) {
    if ((SRealTableNode*)pSelect->pFromTable && ((SRealTableNode*)pSelect->pFromTable)->pMeta &&
        TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType &&
        !hasPartitionByTbname(pSelect->pPartitionByList)) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                     "Count window for stream on super table must be partitioned by table name");
    }

    int64_t watermark = 0;
    if (pStmt->pOptions->pWatermark) {
      translateValue(pCxt, (SValueNode*)pStmt->pOptions->pWatermark);
      watermark = ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i;
    }
    if (watermark <= 0) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                     "Watermark of Count window must exceed 0.");
    }

    if (pStmt->pOptions->ignoreExpired != 1) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                     "Ignore expired data of Count window must be 1.");
    }

    SCountWindowNode* pCountWin = (SCountWindowNode*)pSelect->pWindow;
    if (pCountWin->windowCount <= 1) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                     "Size of Count window must exceed 1.");
    }

    if (pCountWin->windowSliding <= 0) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                     "Sliding value of Count window must exceed 0.");
    }

    if (pCountWin->windowSliding > pCountWin->windowCount) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                     "Sliding value of Count window must not exceed the count value.");
    }

    if (pCountWin->windowCount > INT32_MAX) {
      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
                                     "Size of Count window must be less than 2147483647(INT32_MAX).");
    }
  }
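// Illustrative only (stream, table and column names are placeholders): a stream definition that
// passes the count-window checks above could look like
//   CREATE STREAM s_cnt TRIGGER AT_ONCE WATERMARK 10s IGNORE EXPIRED 1 INTO dst AS
//     SELECT _wstart, COUNT(*) FROM st PARTITION BY TBNAME COUNT_WINDOW(10, 2);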
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -9411,7 +9469,20 @@ static int32_t createOperatorNode(EOperatorType opType, const char* pColName, SN
|
|||
}
|
||||
|
||||
static const char* getTbNameColName(ENodeType type) {
|
||||
return (QUERY_NODE_SHOW_STABLES_STMT == type ? "stable_name" : "table_name");
|
||||
const char* colName;
|
||||
switch (type)
|
||||
{
|
||||
case QUERY_NODE_SHOW_VIEWS_STMT:
|
||||
colName = "view_name";
|
||||
break;
|
||||
case QUERY_NODE_SHOW_STABLES_STMT:
|
||||
colName = "stable_name";
|
||||
break;
|
||||
default:
|
||||
colName = "table_name";
|
||||
break;
|
||||
}
|
||||
return colName;
|
||||
}
|
||||
|
||||
static int32_t createLogicCondNode(SNode* pCond1, SNode* pCond2, SNode** pCond, ELogicConditionType logicCondType) {
|
||||
|
|
File diff suppressed because it is too large
|
@ -47,6 +47,7 @@ int32_t validateQueryPlan(SPlanContext* pCxt, SQueryPlan* pPlan);
|
|||
|
||||
bool getBatchScanOptionFromHint(SNodeList* pList);
|
||||
bool getSortForGroupOptHint(SNodeList* pList);
|
||||
bool getparaTablesSortOptHint(SNodeList* pList);
|
||||
bool getOptHint(SNodeList* pList, EHintOption hint);
|
||||
SLogicNode* getLogicNodeRootNode(SLogicNode* pCurr);
|
||||
int32_t collectTableAliasFromNodes(SNode* pNode, SSHashObj** ppRes);
|
||||
|
|
|
@ -501,7 +501,7 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
|
|||
} else {
|
||||
nodesDestroyNode((SNode*)pScan);
|
||||
}
|
||||
|
||||
pScan->paraTablesSort = getparaTablesSortOptHint(pSelect->pHint);
|
||||
pCxt->hasScan = true;
|
||||
|
||||
return code;
|
||||
|
@ -1008,6 +1008,29 @@ static int32_t createWindowLogicNodeByEvent(SLogicPlanContext* pCxt, SEventWindo
|
|||
return createWindowLogicNodeFinalize(pCxt, pSelect, pWindow, pLogicNode);
|
||||
}
|
||||
|
||||
static int32_t createWindowLogicNodeByCount(SLogicPlanContext* pCxt, SCountWindowNode* pCount, SSelectStmt* pSelect,
                                            SLogicNode** pLogicNode) {
  SWindowLogicNode* pWindow = (SWindowLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_WINDOW);
  if (NULL == pWindow) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  pWindow->winType = WINDOW_TYPE_COUNT;
  pWindow->node.groupAction = getGroupAction(pCxt, pSelect);
  pWindow->node.requireDataOrder =
      pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_IN_BLOCK : getRequireDataOrder(true, pSelect);
  pWindow->node.resultDataOrder =
      pCxt->pPlanCxt->streamQuery ? DATA_ORDER_LEVEL_GLOBAL : pWindow->node.requireDataOrder;
  pWindow->windowCount = pCount->windowCount;
  pWindow->windowSliding = pCount->windowSliding;
  pWindow->pTspk = nodesCloneNode(pCount->pCol);
  if (NULL == pWindow->pTspk) {
    nodesDestroyNode((SNode*)pWindow);
    return TSDB_CODE_OUT_OF_MEMORY;
  }
  return createWindowLogicNodeFinalize(pCxt, pSelect, pWindow, pLogicNode);
}
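// Note: for stream queries the count-window logic node only requires DATA_ORDER_LEVEL_IN_BLOCK
// from its child while declaring DATA_ORDER_LEVEL_GLOBAL results; presumably the stream count
// operator re-orders windows itself as it buffers and emits them, whereas batch queries take the
// order computed by getRequireDataOrder() for both fields.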
|
||||
|
||||
static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) {
|
||||
if (NULL == pSelect->pWindow) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -1021,6 +1044,8 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele
|
|||
return createWindowLogicNodeByInterval(pCxt, (SIntervalWindowNode*)pSelect->pWindow, pSelect, pLogicNode);
|
||||
case QUERY_NODE_EVENT_WINDOW:
|
||||
return createWindowLogicNodeByEvent(pCxt, (SEventWindowNode*)pSelect->pWindow, pSelect, pLogicNode);
|
||||
case QUERY_NODE_COUNT_WINDOW:
|
||||
return createWindowLogicNodeByCount(pCxt, (SCountWindowNode*)pSelect->pWindow, pSelect, pLogicNode);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -651,6 +651,7 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
|
|||
pTableScan->assignBlockUid = pCxt->pPlanCxt->rSmaQuery ? true : false;
|
||||
pTableScan->filesetDelimited = pScanLogicNode->filesetDelimited;
|
||||
pTableScan->needCountEmptyTable = pScanLogicNode->isCountByTag;
|
||||
pTableScan->paraTablesSort = pScanLogicNode->paraTablesSort;
|
||||
|
||||
int32_t code = createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
|
@ -1751,6 +1752,27 @@ static int32_t createEventWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pC
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t createCountWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
|
||||
SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
|
||||
SCountWinodwPhysiNode* pCount = (SCountWinodwPhysiNode*)makePhysiNode(
|
||||
pCxt, (SLogicNode*)pWindowLogicNode,
|
||||
(pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_COUNT : QUERY_NODE_PHYSICAL_PLAN_MERGE_COUNT));
|
||||
if (NULL == pCount) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
pCount->windowCount = pWindowLogicNode->windowCount;
|
||||
pCount->windowSliding = pWindowLogicNode->windowSliding;
|
||||
|
||||
int32_t code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pCount->window, pWindowLogicNode);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
*pPhyNode = (SPhysiNode*)pCount;
|
||||
} else {
|
||||
nodesDestroyNode((SNode*)pCount);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t createWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode,
|
||||
SPhysiNode** pPhyNode) {
|
||||
switch (pWindowLogicNode->winType) {
|
||||
|
@ -1762,6 +1784,8 @@ static int32_t createWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildr
|
|||
return createStateWindowPhysiNode(pCxt, pChildren, pWindowLogicNode, pPhyNode);
|
||||
case WINDOW_TYPE_EVENT:
|
||||
return createEventWindowPhysiNode(pCxt, pChildren, pWindowLogicNode, pPhyNode);
|
||||
case WINDOW_TYPE_COUNT:
|
||||
return createCountWindowPhysiNode(pCxt, pChildren, pWindowLogicNode, pPhyNode);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -872,6 +872,18 @@ static int32_t stbSplSplitEvent(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t stbSplSplitCountForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
|
||||
return TSDB_CODE_PLAN_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
static int32_t stbSplSplitCount(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
|
||||
if (pCxt->pPlanCxt->streamQuery) {
|
||||
return stbSplSplitCountForStream(pCxt, pInfo);
|
||||
} else {
|
||||
return stbSplSplitSessionOrStateForBatch(pCxt, pInfo);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t stbSplSplitWindowForCrossTable(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
|
||||
switch (((SWindowLogicNode*)pInfo->pSplitNode)->winType) {
|
||||
case WINDOW_TYPE_INTERVAL:
|
||||
|
@ -882,6 +894,8 @@ static int32_t stbSplSplitWindowForCrossTable(SSplitContext* pCxt, SStableSplitI
|
|||
return stbSplSplitState(pCxt, pInfo);
|
||||
case WINDOW_TYPE_EVENT:
|
||||
return stbSplSplitEvent(pCxt, pInfo);
|
||||
case WINDOW_TYPE_COUNT:
|
||||
return stbSplSplitCount(pCxt, pInfo);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -237,6 +237,15 @@ static int32_t adjustEventDataRequirement(SWindowLogicNode* pWindow, EDataOrderL
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t adjustCountDataRequirement(SWindowLogicNode* pWindow, EDataOrderLevel requirement) {
|
||||
if (requirement <= pWindow->node.resultDataOrder) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
pWindow->node.resultDataOrder = requirement;
|
||||
pWindow->node.requireDataOrder = requirement;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t adjustWindowDataRequirement(SWindowLogicNode* pWindow, EDataOrderLevel requirement) {
|
||||
switch (pWindow->winType) {
|
||||
case WINDOW_TYPE_INTERVAL:
|
||||
|
@ -247,6 +256,8 @@ static int32_t adjustWindowDataRequirement(SWindowLogicNode* pWindow, EDataOrder
|
|||
return adjustStateDataRequirement(pWindow, requirement);
|
||||
case WINDOW_TYPE_EVENT:
|
||||
return adjustEventDataRequirement(pWindow, requirement);
|
||||
case WINDOW_TYPE_COUNT:
|
||||
return adjustCountDataRequirement(pWindow, requirement);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -455,6 +466,18 @@ bool getOptHint(SNodeList* pList, EHintOption hint) {
|
|||
return false;
|
||||
}
|
||||
|
||||
bool getparaTablesSortOptHint(SNodeList* pList) {
|
||||
if (!pList) return false;
|
||||
SNode* pNode;
|
||||
FOREACH(pNode, pList) {
|
||||
SHintNode* pHint = (SHintNode*)pNode;
|
||||
if (pHint->option == HINT_PARA_TABLES_SORT) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int32_t collectTableAliasFromNodes(SNode* pNode, SSHashObj** ppRes) {
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SLogicNode* pCurr = (SLogicNode*)pNode;
|
||||
|
|
|
@ -66,7 +66,7 @@ FORCE_INLINE bool schJobNeedToStop(SSchJob *pJob, int8_t *pStatus) {
    return true;
  }

- if ((*pJob->chkKillFp)(pJob->chkKillParam)) {
+ if (pJob->chkKillFp && (*pJob->chkKillFp)(pJob->chkKillParam)) {
    schUpdateJobErrCode(pJob, TSDB_CODE_TSC_QUERY_KILLED);
    return true;
  }
|
||||
|
|
|
@ -263,6 +263,7 @@ void schCloseJobRef(void) {
|
|||
|
||||
uint64_t schGenTaskId(void) { return atomic_add_fetch_64(&schMgmt.taskId, 1); }
|
||||
|
||||
#ifdef BUILD_NO_CALL
|
||||
uint64_t schGenUUID(void) {
|
||||
static uint64_t hashId = 0;
|
||||
static int32_t requestSerialId = 0;
|
||||
|
@ -284,6 +285,7 @@ uint64_t schGenUUID(void) {
|
|||
uint64_t id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
|
||||
return id;
|
||||
}
|
||||
#endif
|
||||
|
||||
void schFreeRpcCtxVal(const void *arg) {
|
||||
if (NULL == arg) {
|
||||
|
|
|
@ -25,4 +25,9 @@ IF(NOT TD_DARWIN)
|
|||
PUBLIC "${TD_SOURCE_DIR}/include/libs/scheduler/"
|
||||
PRIVATE "${TD_SOURCE_DIR}/source/libs/scheduler/inc"
|
||||
)
|
||||
add_test(
|
||||
NAME schedulerTest
|
||||
COMMAND schedulerTest
|
||||
)
|
||||
|
||||
ENDIF()
|
|
@@ -54,20 +54,20 @@

namespace {

extern "C" int32_t schHandleResponseMsg(SSchJob *job, SSchTask *task, int32_t msgType, char *msg, int32_t msgSize,
                                        int32_t rspCode);
extern "C" int32_t schHandleCallback(void *param, const SDataBuf *pMsg, int32_t msgType, int32_t rspCode);
extern "C" int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDataBuf *pMsg, int32_t rspCode);
extern "C" int32_t schHandleCallback(void *param, const SDataBuf *pMsg, int32_t rspCode);

int64_t insertJobRefId = 0;
int64_t queryJobRefId = 0;

bool schtJobDone = false;
uint64_t schtMergeTemplateId = 0x4;
uint64_t schtFetchTaskId = 0;
uint64_t schtQueryId = 1;

bool schtTestStop = false;
bool schtTestDeadLoop = false;
int32_t schtTestMTRunSec = 10;
int32_t schtTestMTRunSec = 1;
int32_t schtTestPrintNum = 1000;
int32_t schtStartFetch = 0;

@@ -85,10 +85,69 @@ void schtInitLogFile() {
}

void schtQueryCb(SExecResult *pResult, void *param, int32_t code) {
  assert(TSDB_CODE_SUCCESS == code);
  *(int32_t *)param = 1;
}

int32_t schtBuildQueryRspMsg(uint32_t *msize, void** rspMsg) {
  SQueryTableRsp rsp = {0};
  rsp.code = 0;
  rsp.affectedRows = 0;
  rsp.tbVerInfo = NULL;

  int32_t msgSize = tSerializeSQueryTableRsp(NULL, 0, &rsp);
  if (msgSize < 0) {
    qError("tSerializeSQueryTableRsp failed");
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  void *pRsp = taosMemoryCalloc(msgSize, 1);
  if (NULL == pRsp) {
    qError("rpcMallocCont %d failed", msgSize);
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  if (tSerializeSQueryTableRsp(pRsp, msgSize, &rsp) < 0) {
    qError("tSerializeSQueryTableRsp %d failed", msgSize);
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  *rspMsg = pRsp;
  *msize = msgSize;

  return TSDB_CODE_SUCCESS;
}


int32_t schtBuildFetchRspMsg(uint32_t *msize, void** rspMsg) {
  SRetrieveTableRsp* rsp = (SRetrieveTableRsp*)taosMemoryCalloc(sizeof(SRetrieveTableRsp), 1);
  rsp->completed = 1;
  rsp->numOfRows = 10;
  rsp->compLen = 0;

  *rspMsg = rsp;
  *msize = sizeof(SRetrieveTableRsp);

  return TSDB_CODE_SUCCESS;
}

int32_t schtBuildSubmitRspMsg(uint32_t *msize, void** rspMsg) {
  SSubmitRsp2 submitRsp = {0};
  int32_t msgSize = 0, ret = 0;
  SEncoder ec = {0};

  tEncodeSize(tEncodeSSubmitRsp2, &submitRsp, msgSize, ret);
  void* msg = taosMemoryCalloc(1, msgSize);
  tEncoderInit(&ec, (uint8_t*)msg, msgSize);
  tEncodeSSubmitRsp2(&ec, &submitRsp);
  tEncoderClear(&ec);

  *rspMsg = msg;
  *msize = msgSize;

  return TSDB_CODE_SUCCESS;
}


void schtBuildQueryDag(SQueryPlan *dag) {
  uint64_t qId = schtQueryId;

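schtBuildQueryRspMsg above follows the size-then-fill serialization idiom: the first tSerializeSQueryTableRsp call with a NULL buffer only reports the encoded size, the second call writes into the allocated buffer. A self-contained sketch of the same idiom with a toy payload; tSerializeToy and buildToyRspMsg are stand-ins, not engine APIs:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int32_t code; int32_t affectedRows; } ToyRsp;

// Stand-in serializer: with buf == NULL it only reports the required size,
// otherwise it writes the encoded bytes and returns the number written.
static int32_t tSerializeToy(void *buf, int32_t bufLen, const ToyRsp *pRsp) {
  int32_t need = (int32_t)(2 * sizeof(int32_t));
  if (buf == NULL) return need;
  if (bufLen < need) return -1;
  memcpy(buf, &pRsp->code, sizeof(int32_t));
  memcpy((char *)buf + sizeof(int32_t), &pRsp->affectedRows, sizeof(int32_t));
  return need;
}

// Same shape as schtBuildQueryRspMsg: size query, allocate, then encode.
static int32_t buildToyRspMsg(uint32_t *msize, void **rspMsg) {
  ToyRsp rsp = {.code = 0, .affectedRows = 0};

  int32_t msgSize = tSerializeToy(NULL, 0, &rsp);
  if (msgSize < 0) return -1;

  void *pRsp = calloc(1, (size_t)msgSize);
  if (pRsp == NULL) return -1;

  if (tSerializeToy(pRsp, msgSize, &rsp) < 0) {
    free(pRsp);
    return -1;
  }

  *rspMsg = pRsp;
  *msize = (uint32_t)msgSize;
  return 0;
}

int main(void) {
  uint32_t len = 0;
  void *msg = NULL;
  if (buildToyRspMsg(&len, &msg) == 0) {
    printf("built %u bytes\n", len);
    free(msg);
  }
  return 0;
}
```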
@ -98,8 +157,8 @@ void schtBuildQueryDag(SQueryPlan *dag) {
|
|||
SNodeListNode *scan = (SNodeListNode *)nodesMakeNode(QUERY_NODE_NODE_LIST);
|
||||
SNodeListNode *merge = (SNodeListNode *)nodesMakeNode(QUERY_NODE_NODE_LIST);
|
||||
|
||||
SSubplan *scanPlan = (SSubplan *)taosMemoryCalloc(1, sizeof(SSubplan));
|
||||
SSubplan *mergePlan = (SSubplan *)taosMemoryCalloc(1, sizeof(SSubplan));
|
||||
SSubplan *scanPlan = (SSubplan*)nodesMakeNode(QUERY_NODE_PHYSICAL_SUBPLAN);
|
||||
SSubplan *mergePlan = (SSubplan*)nodesMakeNode(QUERY_NODE_PHYSICAL_SUBPLAN);
|
||||
|
||||
scanPlan->id.queryId = qId;
|
||||
scanPlan->id.groupId = 0x0000000000000002;
|
||||
|
@ -113,7 +172,7 @@ void schtBuildQueryDag(SQueryPlan *dag) {
|
|||
scanPlan->pChildren = NULL;
|
||||
scanPlan->level = 1;
|
||||
scanPlan->pParents = nodesMakeList();
|
||||
scanPlan->pNode = (SPhysiNode *)taosMemoryCalloc(1, sizeof(SPhysiNode));
|
||||
scanPlan->pNode = (SPhysiNode *)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN);
|
||||
scanPlan->msgType = TDMT_SCH_QUERY;
|
||||
|
||||
mergePlan->id.queryId = qId;
|
||||
|
@ -125,7 +184,7 @@ void schtBuildQueryDag(SQueryPlan *dag) {
|
|||
|
||||
mergePlan->pChildren = nodesMakeList();
|
||||
mergePlan->pParents = NULL;
|
||||
mergePlan->pNode = (SPhysiNode *)taosMemoryCalloc(1, sizeof(SPhysiNode));
|
||||
mergePlan->pNode = (SPhysiNode *)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_MERGE);
|
||||
mergePlan->msgType = TDMT_SCH_QUERY;
|
||||
|
||||
merge->pNodeList = nodesMakeList();
|
||||
|
@ -151,8 +210,7 @@ void schtBuildQueryFlowCtrlDag(SQueryPlan *dag) {
|
|||
SNodeListNode *scan = (SNodeListNode *)nodesMakeNode(QUERY_NODE_NODE_LIST);
|
||||
SNodeListNode *merge = (SNodeListNode *)nodesMakeNode(QUERY_NODE_NODE_LIST);
|
||||
|
||||
SSubplan *scanPlan = (SSubplan *)taosMemoryCalloc(scanPlanNum, sizeof(SSubplan));
|
||||
SSubplan *mergePlan = (SSubplan *)taosMemoryCalloc(1, sizeof(SSubplan));
|
||||
SSubplan *mergePlan = (SSubplan*)nodesMakeNode(QUERY_NODE_PHYSICAL_SUBPLAN);
|
||||
|
||||
merge->pNodeList = nodesMakeList();
|
||||
scan->pNodeList = nodesMakeList();
|
||||
|
@ -160,29 +218,30 @@ void schtBuildQueryFlowCtrlDag(SQueryPlan *dag) {
|
|||
mergePlan->pChildren = nodesMakeList();
|
||||
|
||||
for (int32_t i = 0; i < scanPlanNum; ++i) {
|
||||
scanPlan[i].id.queryId = qId;
|
||||
scanPlan[i].id.groupId = 0x0000000000000002;
|
||||
scanPlan[i].id.subplanId = 0x0000000000000003 + i;
|
||||
scanPlan[i].subplanType = SUBPLAN_TYPE_SCAN;
|
||||
SSubplan *scanPlan = (SSubplan*)nodesMakeNode(QUERY_NODE_PHYSICAL_SUBPLAN);
|
||||
scanPlan->id.queryId = qId;
|
||||
scanPlan->id.groupId = 0x0000000000000002;
|
||||
scanPlan->id.subplanId = 0x0000000000000003 + i;
|
||||
scanPlan->subplanType = SUBPLAN_TYPE_SCAN;
|
||||
|
||||
scanPlan[i].execNode.nodeId = 1 + i;
|
||||
scanPlan[i].execNode.epSet.inUse = 0;
|
||||
scanPlan[i].execNodeStat.tableNum = taosRand() % 30;
|
||||
addEpIntoEpSet(&scanPlan[i].execNode.epSet, "ep0", 6030);
|
||||
addEpIntoEpSet(&scanPlan[i].execNode.epSet, "ep1", 6030);
|
||||
addEpIntoEpSet(&scanPlan[i].execNode.epSet, "ep2", 6030);
|
||||
scanPlan[i].execNode.epSet.inUse = taosRand() % 3;
|
||||
scanPlan->execNode.nodeId = 1 + i;
|
||||
scanPlan->execNode.epSet.inUse = 0;
|
||||
scanPlan->execNodeStat.tableNum = taosRand() % 30;
|
||||
addEpIntoEpSet(&scanPlan->execNode.epSet, "ep0", 6030);
|
||||
addEpIntoEpSet(&scanPlan->execNode.epSet, "ep1", 6030);
|
||||
addEpIntoEpSet(&scanPlan->execNode.epSet, "ep2", 6030);
|
||||
scanPlan->execNode.epSet.inUse = taosRand() % 3;
|
||||
|
||||
scanPlan[i].pChildren = NULL;
|
||||
scanPlan[i].level = 1;
|
||||
scanPlan[i].pParents = nodesMakeList();
|
||||
scanPlan[i].pNode = (SPhysiNode *)taosMemoryCalloc(1, sizeof(SPhysiNode));
|
||||
scanPlan[i].msgType = TDMT_SCH_QUERY;
|
||||
scanPlan->pChildren = NULL;
|
||||
scanPlan->level = 1;
|
||||
scanPlan->pParents = nodesMakeList();
|
||||
scanPlan->pNode = (SPhysiNode *)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN);
|
||||
scanPlan->msgType = TDMT_SCH_QUERY;
|
||||
|
||||
nodesListAppend(scanPlan[i].pParents, (SNode *)mergePlan);
|
||||
nodesListAppend(mergePlan->pChildren, (SNode *)(scanPlan + i));
|
||||
nodesListAppend(scanPlan->pParents, (SNode *)mergePlan);
|
||||
nodesListAppend(mergePlan->pChildren, (SNode *)scanPlan);
|
||||
|
||||
nodesListAppend(scan->pNodeList, (SNode *)(scanPlan + i));
|
||||
nodesListAppend(scan->pNodeList, (SNode *)scanPlan);
|
||||
}
|
||||
|
||||
mergePlan->id.queryId = qId;
|
||||
|
@ -193,7 +252,7 @@ void schtBuildQueryFlowCtrlDag(SQueryPlan *dag) {
|
|||
mergePlan->execNode.epSet.numOfEps = 0;
|
||||
|
||||
mergePlan->pParents = NULL;
|
||||
mergePlan->pNode = (SPhysiNode *)taosMemoryCalloc(1, sizeof(SPhysiNode));
|
||||
mergePlan->pNode = (SPhysiNode *)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_MERGE);
|
||||
mergePlan->msgType = TDMT_SCH_QUERY;
|
||||
|
||||
nodesListAppend(merge->pNodeList, (SNode *)mergePlan);
|
||||
|
@ -211,45 +270,50 @@ void schtBuildInsertDag(SQueryPlan *dag) {
|
|||
dag->numOfSubplans = 2;
|
||||
dag->pSubplans = nodesMakeList();
|
||||
SNodeListNode *inserta = (SNodeListNode *)nodesMakeNode(QUERY_NODE_NODE_LIST);
|
||||
|
||||
SSubplan *insertPlan = (SSubplan *)taosMemoryCalloc(2, sizeof(SSubplan));
|
||||
|
||||
insertPlan[0].id.queryId = qId;
|
||||
insertPlan[0].id.groupId = 0x0000000000000003;
|
||||
insertPlan[0].id.subplanId = 0x0000000000000004;
|
||||
insertPlan[0].subplanType = SUBPLAN_TYPE_MODIFY;
|
||||
insertPlan[0].level = 0;
|
||||
|
||||
insertPlan[0].execNode.nodeId = 1;
|
||||
insertPlan[0].execNode.epSet.inUse = 0;
|
||||
addEpIntoEpSet(&insertPlan[0].execNode.epSet, "ep0", 6030);
|
||||
|
||||
insertPlan[0].pChildren = NULL;
|
||||
insertPlan[0].pParents = NULL;
|
||||
insertPlan[0].pNode = NULL;
|
||||
insertPlan[0].pDataSink = (SDataSinkNode *)taosMemoryCalloc(1, sizeof(SDataSinkNode));
|
||||
insertPlan[0].msgType = TDMT_VND_SUBMIT;
|
||||
|
||||
insertPlan[1].id.queryId = qId;
|
||||
insertPlan[1].id.groupId = 0x0000000000000003;
|
||||
insertPlan[1].id.subplanId = 0x0000000000000005;
|
||||
insertPlan[1].subplanType = SUBPLAN_TYPE_MODIFY;
|
||||
insertPlan[1].level = 0;
|
||||
|
||||
insertPlan[1].execNode.nodeId = 1;
|
||||
insertPlan[1].execNode.epSet.inUse = 0;
|
||||
addEpIntoEpSet(&insertPlan[1].execNode.epSet, "ep0", 6030);
|
||||
|
||||
insertPlan[1].pChildren = NULL;
|
||||
insertPlan[1].pParents = NULL;
|
||||
insertPlan[1].pNode = NULL;
|
||||
insertPlan[1].pDataSink = (SDataSinkNode *)taosMemoryCalloc(1, sizeof(SDataSinkNode));
|
||||
insertPlan[1].msgType = TDMT_VND_SUBMIT;
|
||||
|
||||
inserta->pNodeList = nodesMakeList();
|
||||
|
||||
SSubplan *insertPlan = (SSubplan*)nodesMakeNode(QUERY_NODE_PHYSICAL_SUBPLAN);
|
||||
|
||||
insertPlan->id.queryId = qId;
|
||||
insertPlan->id.groupId = 0x0000000000000003;
|
||||
insertPlan->id.subplanId = 0x0000000000000004;
|
||||
insertPlan->subplanType = SUBPLAN_TYPE_MODIFY;
|
||||
insertPlan->level = 0;
|
||||
|
||||
insertPlan->execNode.nodeId = 1;
|
||||
insertPlan->execNode.epSet.inUse = 0;
|
||||
addEpIntoEpSet(&insertPlan->execNode.epSet, "ep0", 6030);
|
||||
|
||||
insertPlan->pChildren = NULL;
|
||||
insertPlan->pParents = NULL;
|
||||
insertPlan->pNode = NULL;
|
||||
insertPlan->pDataSink = (SDataSinkNode*)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_INSERT);
|
||||
((SDataInserterNode*)insertPlan->pDataSink)->size = 1;
|
||||
((SDataInserterNode*)insertPlan->pDataSink)->pData = taosMemoryCalloc(1, 1);
|
||||
insertPlan->msgType = TDMT_VND_SUBMIT;
|
||||
|
||||
nodesListAppend(inserta->pNodeList, (SNode *)insertPlan);
|
||||
insertPlan += 1;
|
||||
|
||||
insertPlan = (SSubplan*)nodesMakeNode(QUERY_NODE_PHYSICAL_SUBPLAN);
|
||||
|
||||
insertPlan->id.queryId = qId;
|
||||
insertPlan->id.groupId = 0x0000000000000003;
|
||||
insertPlan->id.subplanId = 0x0000000000000005;
|
||||
insertPlan->subplanType = SUBPLAN_TYPE_MODIFY;
|
||||
insertPlan->level = 0;
|
||||
|
||||
insertPlan->execNode.nodeId = 1;
|
||||
insertPlan->execNode.epSet.inUse = 0;
|
||||
addEpIntoEpSet(&insertPlan->execNode.epSet, "ep0", 6030);
|
||||
|
||||
insertPlan->pChildren = NULL;
|
||||
insertPlan->pParents = NULL;
|
||||
insertPlan->pNode = NULL;
|
||||
insertPlan->pDataSink = (SDataSinkNode*)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN_INSERT);
|
||||
((SDataInserterNode*)insertPlan->pDataSink)->size = 1;
|
||||
((SDataInserterNode*)insertPlan->pDataSink)->pData = taosMemoryCalloc(1, 1);
|
||||
insertPlan->msgType = TDMT_VND_SUBMIT;
|
||||
|
||||
nodesListAppend(inserta->pNodeList, (SNode *)insertPlan);
|
||||
|
||||
nodesListAppend(dag->pSubplans, (SNode *)inserta);
|
||||
|
@ -325,7 +389,7 @@ void schtSetRpcSendRequest() {
|
|||
}
|
||||
}
|
||||
|
||||
int32_t schtAsyncSendMsgToServer(void *pTransporter, SEpSet *epSet, int64_t *pTransporterId, SMsgSendInfo *pInfo) {
|
||||
int32_t schtAsyncSendMsgToServer(void *pTransporter, SEpSet *epSet, int64_t *pTransporterId, SMsgSendInfo *pInfo, bool persistHandle, void* rpcCtx) {
|
||||
if (pInfo) {
|
||||
taosMemoryFreeClear(pInfo->param);
|
||||
taosMemoryFreeClear(pInfo->msgInfo.pData);
|
||||
|
@ -336,17 +400,17 @@ int32_t schtAsyncSendMsgToServer(void *pTransporter, SEpSet *epSet, int64_t *pTr
|
|||
|
||||
void schtSetAsyncSendMsgToServer() {
|
||||
static Stub stub;
|
||||
stub.set(asyncSendMsgToServer, schtAsyncSendMsgToServer);
|
||||
stub.set(asyncSendMsgToServerExt, schtAsyncSendMsgToServer);
|
||||
{
|
||||
#ifdef WINDOWS
|
||||
AddrAny any;
|
||||
std::map<std::string, void *> result;
|
||||
any.get_func_addr("asyncSendMsgToServer", result);
|
||||
any.get_func_addr("asyncSendMsgToServerExt", result);
|
||||
#endif
|
||||
#ifdef LINUX
|
||||
AddrAny any("libtransport.so");
|
||||
std::map<std::string, void *> result;
|
||||
any.get_global_func_addr_dynsym("^asyncSendMsgToServer$", result);
|
||||
any.get_global_func_addr_dynsym("^asyncSendMsgToServerExt$", result);
|
||||
#endif
|
||||
for (const auto &f : result) {
|
||||
stub.set(f.second, schtAsyncSendMsgToServer);
|
||||
|
@ -374,15 +438,21 @@ void *schtSendRsp(void *param) {
|
|||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SSubmitRsp rsp = {0};
|
||||
rsp.affectedRows = 10;
|
||||
schHandleResponseMsg(pJob, task, TDMT_VND_SUBMIT_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildSubmitRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_VND_SUBMIT_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
schHandleResponseMsg(pJob, task, task->execId, &msg, 0);
|
||||
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
schReleaseJob(job);
|
||||
|
||||
schtJobDone = true;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -393,11 +463,13 @@ void *schtCreateFetchRspThread(void *param) {
|
|||
taosSsleep(1);
|
||||
|
||||
int32_t code = 0;
|
||||
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, sizeof(SRetrieveTableRsp));
|
||||
rsp->completed = 1;
|
||||
rsp->numOfRows = 10;
|
||||
|
||||
code = schHandleResponseMsg(pJob, pJob->fetchTask, TDMT_SCH_FETCH_RSP, (char *)rsp, sizeof(*rsp), 0);
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildFetchRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_SCH_MERGE_FETCH_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
code = schHandleResponseMsg(pJob, pJob->fetchTask, pJob->fetchTask->execId, &msg, 0);
|
||||
|
||||
schReleaseJob(job);
|
||||
|
||||
|
@ -414,7 +486,7 @@ void *schtFetchRspThread(void *aa) {
|
|||
continue;
|
||||
}
|
||||
|
||||
taosUsleep(1);
|
||||
taosUsleep(100);
|
||||
|
||||
param = (SSchTaskCallbackParam *)taosMemoryCalloc(1, sizeof(*param));
|
||||
|
||||
|
@ -426,10 +498,11 @@ void *schtFetchRspThread(void *aa) {
|
|||
rsp->completed = 1;
|
||||
rsp->numOfRows = 10;
|
||||
|
||||
dataBuf.msgType = TDMT_SCH_FETCH_RSP;
|
||||
dataBuf.pData = rsp;
|
||||
dataBuf.len = sizeof(*rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_SCH_FETCH_RSP, 0);
|
||||
code = schHandleCallback(param, &dataBuf, 0);
|
||||
|
||||
assert(code == 0 || code);
|
||||
}
|
||||
|
@ -456,7 +529,7 @@ void *schtRunJobThread(void *aa) {
|
|||
char *dbname = "1.db1";
|
||||
char *tablename = "table1";
|
||||
SVgroupInfo vgInfo = {0};
|
||||
SQueryPlan dag;
|
||||
SQueryPlan* dag = (SQueryPlan*)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN);
|
||||
|
||||
schtInitLogFile();
|
||||
|
||||
|
@ -470,19 +543,19 @@ void *schtRunJobThread(void *aa) {
|
|||
SSchJob *pJob = NULL;
|
||||
SSchTaskCallbackParam *param = NULL;
|
||||
SHashObj *execTasks = NULL;
|
||||
SDataBuf dataBuf = {0};
|
||||
uint32_t jobFinished = 0;
|
||||
int32_t queryDone = 0;
|
||||
|
||||
while (!schtTestStop) {
|
||||
schtBuildQueryDag(&dag);
|
||||
schtBuildQueryDag(dag);
|
||||
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SEp));
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SQueryNodeLoad));
|
||||
|
||||
SEp qnodeAddr = {0};
|
||||
strcpy(qnodeAddr.fqdn, "qnode0.ep");
|
||||
qnodeAddr.port = 6031;
|
||||
taosArrayPush(qnodeList, &qnodeAddr);
|
||||
SQueryNodeLoad load = {0};
|
||||
load.addr.epSet.numOfEps = 1;
|
||||
strcpy(load.addr.epSet.eps[0].fqdn, "qnode0.ep");
|
||||
load.addr.epSet.eps[0].port = 6031;
|
||||
taosArrayPush(qnodeList, &load);
|
||||
|
||||
queryDone = 0;
|
||||
|
||||
|
@ -492,7 +565,7 @@ void *schtRunJobThread(void *aa) {
|
|||
req.syncReq = false;
|
||||
req.pConn = &conn;
|
||||
req.pNodeList = qnodeList;
|
||||
req.pDag = &dag;
|
||||
req.pDag = dag;
|
||||
req.sql = "select * from tb";
|
||||
req.execFp = schtQueryCb;
|
||||
req.cbParam = &queryDone;
|
||||
|
@ -503,7 +576,7 @@ void *schtRunJobThread(void *aa) {
|
|||
pJob = schAcquireJob(queryJobRefId);
|
||||
if (NULL == pJob) {
|
||||
taosArrayDestroy(qnodeList);
|
||||
schtFreeQueryDag(&dag);
|
||||
schtFreeQueryDag(dag);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -526,11 +599,14 @@ void *schtRunJobThread(void *aa) {
|
|||
SSchTask *task = (SSchTask *)pIter;
|
||||
|
||||
param->taskId = task->taskId;
|
||||
SQueryTableRsp rsp = {0};
|
||||
dataBuf.pData = &rsp;
|
||||
dataBuf.len = sizeof(rsp);
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_SCH_QUERY_RSP, 0);
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildQueryRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_SCH_QUERY_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
code = schHandleCallback(param, &msg, 0);
|
||||
assert(code == 0 || code);
|
||||
|
||||
pIter = taosHashIterate(execTasks, pIter);
|
||||
|
@ -545,11 +621,13 @@ void *schtRunJobThread(void *aa) {
|
|||
SSchTask *task = (SSchTask *)pIter;
|
||||
|
||||
param->taskId = task->taskId - 1;
|
||||
SQueryTableRsp rsp = {0};
|
||||
dataBuf.pData = &rsp;
|
||||
dataBuf.len = sizeof(rsp);
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildQueryRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_SCH_QUERY_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
code = schHandleCallback(param, &dataBuf, TDMT_SCH_QUERY_RSP, 0);
|
||||
code = schHandleCallback(param, &msg, 0);
|
||||
assert(code == 0 || code);
|
||||
|
||||
pIter = taosHashIterate(execTasks, pIter);
|
||||
|
@ -575,7 +653,6 @@ void *schtRunJobThread(void *aa) {
|
|||
if (0 == code) {
|
||||
SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)data;
|
||||
assert(pRsp->completed == 1);
|
||||
assert(pRsp->numOfRows == 10);
|
||||
}
|
||||
|
||||
data = NULL;
|
||||
|
@ -587,7 +664,7 @@ void *schtRunJobThread(void *aa) {
|
|||
taosHashCleanup(execTasks);
|
||||
taosArrayDestroy(qnodeList);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
schtFreeQueryDag(dag);
|
||||
|
||||
if (++jobFinished % schtTestPrintNum == 0) {
|
||||
printf("jobFinished:%d\n", jobFinished);
|
||||
|
@ -609,6 +686,7 @@ void *schtFreeJobThread(void *aa) {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST(queryTest, normalCase) {
|
||||
|
@ -618,21 +696,20 @@ TEST(queryTest, normalCase) {
|
|||
char *tablename = "table1";
|
||||
SVgroupInfo vgInfo = {0};
|
||||
int64_t job = 0;
|
||||
SQueryPlan dag;
|
||||
SQueryPlan* dag = (SQueryPlan*)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN);
|
||||
|
||||
memset(&dag, 0, sizeof(dag));
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SQueryNodeLoad));
|
||||
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SEp));
|
||||
|
||||
SEp qnodeAddr = {0};
|
||||
strcpy(qnodeAddr.fqdn, "qnode0.ep");
|
||||
qnodeAddr.port = 6031;
|
||||
taosArrayPush(qnodeList, &qnodeAddr);
|
||||
SQueryNodeLoad load = {0};
|
||||
load.addr.epSet.numOfEps = 1;
|
||||
strcpy(load.addr.epSet.eps[0].fqdn, "qnode0.ep");
|
||||
load.addr.epSet.eps[0].port = 6031;
|
||||
taosArrayPush(qnodeList, &load);
|
||||
|
||||
int32_t code = schedulerInit();
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
schtBuildQueryDag(&dag);
|
||||
schtBuildQueryDag(dag);
|
||||
|
||||
schtSetPlanToString();
|
||||
schtSetExecNode();
|
||||
|
@ -645,7 +722,7 @@ TEST(queryTest, normalCase) {
|
|||
SSchedulerReq req = {0};
|
||||
req.pConn = &conn;
|
||||
req.pNodeList = qnodeList;
|
||||
req.pDag = &dag;
|
||||
req.pDag = dag;
|
||||
req.sql = "select * from tb";
|
||||
req.execFp = schtQueryCb;
|
||||
req.cbParam = &queryDone;
|
||||
|
@ -659,9 +736,14 @@ TEST(queryTest, normalCase) {
|
|||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SQueryTableRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_SCH_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildQueryRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_SCH_QUERY_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
code = schHandleResponseMsg(pJob, task, task->execId, &msg, 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
@ -669,11 +751,18 @@ TEST(queryTest, normalCase) {
|
|||
pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
if (JOB_TASK_STATUS_EXEC == task->status) {
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildQueryRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_SCH_QUERY_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
code = schHandleResponseMsg(pJob, task, task->execId, &msg, 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
}
|
||||
|
||||
SQueryTableRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_SCH_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
|
@ -703,18 +792,12 @@ TEST(queryTest, normalCase) {
|
|||
ASSERT_EQ(pRsp->numOfRows, 10);
|
||||
taosMemoryFreeClear(data);
|
||||
|
||||
data = NULL;
|
||||
code = schedulerFetchRows(job, &req);
|
||||
ASSERT_EQ(code, 0);
|
||||
ASSERT_TRUE(data == NULL);
|
||||
|
||||
schReleaseJob(job);
|
||||
|
||||
schedulerDestroy();
|
||||
|
||||
schedulerFreeJob(&job, 0);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
|
||||
schedulerDestroy();
|
||||
}
|
||||
|
||||
TEST(queryTest, readyFirstCase) {
|
||||
|
@ -724,21 +807,20 @@ TEST(queryTest, readyFirstCase) {
|
|||
char *tablename = "table1";
|
||||
SVgroupInfo vgInfo = {0};
|
||||
int64_t job = 0;
|
||||
SQueryPlan dag;
|
||||
SQueryPlan* dag = (SQueryPlan*)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN);
|
||||
|
||||
memset(&dag, 0, sizeof(dag));
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SQueryNodeLoad));
|
||||
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SEp));
|
||||
|
||||
SEp qnodeAddr = {0};
|
||||
strcpy(qnodeAddr.fqdn, "qnode0.ep");
|
||||
qnodeAddr.port = 6031;
|
||||
taosArrayPush(qnodeList, &qnodeAddr);
|
||||
SQueryNodeLoad load = {0};
|
||||
load.addr.epSet.numOfEps = 1;
|
||||
strcpy(load.addr.epSet.eps[0].fqdn, "qnode0.ep");
|
||||
load.addr.epSet.eps[0].port = 6031;
|
||||
taosArrayPush(qnodeList, &load);
|
||||
|
||||
int32_t code = schedulerInit();
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
schtBuildQueryDag(&dag);
|
||||
schtBuildQueryDag(dag);
|
||||
|
||||
schtSetPlanToString();
|
||||
schtSetExecNode();
|
||||
|
@ -751,7 +833,7 @@ TEST(queryTest, readyFirstCase) {
|
|||
SSchedulerReq req = {0};
|
||||
req.pConn = &conn;
|
||||
req.pNodeList = qnodeList;
|
||||
req.pDag = &dag;
|
||||
req.pDag = dag;
|
||||
req.sql = "select * from tb";
|
||||
req.execFp = schtQueryCb;
|
||||
req.cbParam = &queryDone;
|
||||
|
@ -764,8 +846,13 @@ TEST(queryTest, readyFirstCase) {
|
|||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SQueryTableRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_SCH_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildQueryRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_SCH_QUERY_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
code = schHandleResponseMsg(pJob, task, task->execId, &msg, 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
|
@ -775,10 +862,18 @@ TEST(queryTest, readyFirstCase) {
|
|||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
SQueryTableRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_SCH_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
if (JOB_TASK_STATUS_EXEC == task->status) {
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildQueryRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_SCH_QUERY_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
code = schHandleResponseMsg(pJob, task, task->execId, &msg, 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
}
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
|
||||
|
@ -807,18 +902,11 @@ TEST(queryTest, readyFirstCase) {
|
|||
ASSERT_EQ(pRsp->numOfRows, 10);
|
||||
taosMemoryFreeClear(data);
|
||||
|
||||
data = NULL;
|
||||
code = schedulerFetchRows(job, &req);
|
||||
ASSERT_EQ(code, 0);
|
||||
ASSERT_TRUE(data == NULL);
|
||||
|
||||
schReleaseJob(job);
|
||||
|
||||
schedulerFreeJob(&job, 0);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
|
||||
schedulerDestroy();
|
||||
|
||||
schedulerFreeJob(&job, 0);
|
||||
}
|
||||
|
||||
TEST(queryTest, flowCtrlCase) {
|
||||
|
@ -828,35 +916,39 @@ TEST(queryTest, flowCtrlCase) {
|
|||
char *tablename = "table1";
|
||||
SVgroupInfo vgInfo = {0};
|
||||
int64_t job = 0;
|
||||
SQueryPlan dag;
|
||||
SQueryPlan* dag = (SQueryPlan*)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN);
|
||||
|
||||
schtInitLogFile();
|
||||
|
||||
taosSeedRand(taosGetTimestampSec());
|
||||
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SEp));
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SQueryNodeLoad));
|
||||
|
||||
SQueryNodeLoad load = {0};
|
||||
load.addr.epSet.numOfEps = 1;
|
||||
strcpy(load.addr.epSet.eps[0].fqdn, "qnode0.ep");
|
||||
load.addr.epSet.eps[0].port = 6031;
|
||||
taosArrayPush(qnodeList, &load);
|
||||
|
||||
SEp qnodeAddr = {0};
|
||||
strcpy(qnodeAddr.fqdn, "qnode0.ep");
|
||||
qnodeAddr.port = 6031;
|
||||
taosArrayPush(qnodeList, &qnodeAddr);
|
||||
|
||||
int32_t code = schedulerInit();
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
schtBuildQueryFlowCtrlDag(&dag);
|
||||
schtBuildQueryFlowCtrlDag(dag);
|
||||
|
||||
schtSetPlanToString();
|
||||
schtSetExecNode();
|
||||
schtSetAsyncSendMsgToServer();
|
||||
|
||||
initTaskQueue();
|
||||
|
||||
int32_t queryDone = 0;
|
||||
SRequestConnInfo conn = {0};
|
||||
conn.pTrans = mockPointer;
|
||||
SSchedulerReq req = {0};
|
||||
req.pConn = &conn;
|
||||
req.pNodeList = qnodeList;
|
||||
req.pDag = &dag;
|
||||
req.pDag = dag;
|
||||
req.sql = "select * from tb";
|
||||
req.execFp = schtQueryCb;
|
||||
req.cbParam = &queryDone;
|
||||
|
@ -866,41 +958,27 @@ TEST(queryTest, flowCtrlCase) {
|
|||
|
||||
SSchJob *pJob = schAcquireJob(job);
|
||||
|
||||
bool qDone = false;
|
||||
|
||||
while (!qDone) {
|
||||
while (!queryDone) {
|
||||
void *pIter = taosHashIterate(pJob->execTasks, NULL);
|
||||
if (NULL == pIter) {
|
||||
break;
|
||||
}
|
||||
|
||||
while (pIter) {
|
||||
SSchTask *task = *(SSchTask **)pIter;
|
||||
|
||||
taosHashCancelIterate(pJob->execTasks, pIter);
|
||||
|
||||
if (task->lastMsgType == TDMT_SCH_QUERY) {
|
||||
SQueryTableRsp rsp = {0};
|
||||
code = schHandleResponseMsg(pJob, task, TDMT_SCH_QUERY_RSP, (char *)&rsp, sizeof(rsp), 0);
|
||||
|
||||
if (JOB_TASK_STATUS_EXEC == task->status && 0 != task->lastMsgType) {
|
||||
SDataBuf msg = {0};
|
||||
void* rmsg = NULL;
|
||||
schtBuildQueryRspMsg(&msg.len, &rmsg);
|
||||
msg.msgType = TDMT_SCH_QUERY_RSP;
|
||||
msg.pData = rmsg;
|
||||
|
||||
code = schHandleResponseMsg(pJob, task, task->execId, &msg, 0);
|
||||
|
||||
ASSERT_EQ(code, 0);
|
||||
} else {
|
||||
qDone = true;
|
||||
break;
|
||||
}
|
||||
|
||||
pIter = NULL;
|
||||
pIter = taosHashIterate(pJob->execTasks, pIter);
|
||||
}
|
||||
}
|
||||
|
||||
while (true) {
|
||||
if (queryDone) {
|
||||
break;
|
||||
}
|
||||
|
||||
taosUsleep(10000);
|
||||
}
|
||||
|
||||
TdThreadAttr thattr;
|
||||
taosThreadAttrInit(&thattr);
|
||||
|
||||
|
@ -918,18 +996,11 @@ TEST(queryTest, flowCtrlCase) {
|
|||
ASSERT_EQ(pRsp->numOfRows, 10);
|
||||
taosMemoryFreeClear(data);
|
||||
|
||||
data = NULL;
|
||||
code = schedulerFetchRows(job, &req);
|
||||
ASSERT_EQ(code, 0);
|
||||
ASSERT_TRUE(data == NULL);
|
||||
|
||||
schReleaseJob(job);
|
||||
|
||||
schedulerFreeJob(&job, 0);
|
||||
|
||||
schtFreeQueryDag(&dag);
|
||||
|
||||
schedulerDestroy();
|
||||
|
||||
schedulerFreeJob(&job, 0);
|
||||
}
|
||||
|
||||
TEST(insertTest, normalCase) {
|
||||
|
@ -938,20 +1009,21 @@ TEST(insertTest, normalCase) {
|
|||
char *dbname = "1.db1";
|
||||
char *tablename = "table1";
|
||||
SVgroupInfo vgInfo = {0};
|
||||
SQueryPlan dag;
|
||||
SQueryPlan* dag = (SQueryPlan*)nodesMakeNode(QUERY_NODE_PHYSICAL_PLAN);
|
||||
uint64_t numOfRows = 0;
|
||||
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SEp));
|
||||
SArray *qnodeList = taosArrayInit(1, sizeof(SQueryNodeLoad));
|
||||
|
||||
SEp qnodeAddr = {0};
|
||||
strcpy(qnodeAddr.fqdn, "qnode0.ep");
|
||||
qnodeAddr.port = 6031;
|
||||
taosArrayPush(qnodeList, &qnodeAddr);
|
||||
SQueryNodeLoad load = {0};
|
||||
load.addr.epSet.numOfEps = 1;
|
||||
strcpy(load.addr.epSet.eps[0].fqdn, "qnode0.ep");
|
||||
load.addr.epSet.eps[0].port = 6031;
|
||||
taosArrayPush(qnodeList, &load);
|
||||
|
||||
int32_t code = schedulerInit();
|
||||
ASSERT_EQ(code, 0);
|
||||
|
||||
schtBuildInsertDag(&dag);
|
||||
schtBuildInsertDag(dag);
|
||||
|
||||
schtSetPlanToString();
|
||||
schtSetAsyncSendMsgToServer();
|
||||
|
@ -959,24 +1031,32 @@ TEST(insertTest, normalCase) {
|
|||
TdThreadAttr thattr;
|
||||
taosThreadAttrInit(&thattr);
|
||||
|
||||
schtJobDone = false;
|
||||
|
||||
TdThread thread1;
|
||||
taosThreadCreate(&(thread1), &thattr, schtSendRsp, &insertJobRefId);
|
||||
|
||||
SExecResult res = {0};
|
||||
|
||||
int32_t queryDone = 0;
|
||||
SRequestConnInfo conn = {0};
|
||||
conn.pTrans = mockPointer;
|
||||
SSchedulerReq req = {0};
|
||||
req.pConn = &conn;
|
||||
req.pNodeList = qnodeList;
|
||||
req.pDag = &dag;
|
||||
req.pDag = dag;
|
||||
req.sql = "insert into tb values(now,1)";
|
||||
req.execFp = schtQueryCb;
|
||||
req.cbParam = NULL;
|
||||
req.cbParam = &queryDone;
|
||||
|
||||
code = schedulerExecJob(&req, &insertJobRefId);
|
||||
ASSERT_EQ(code, 0);
|
||||
ASSERT_EQ(res.numOfRows, 20);
|
||||
|
||||
while (true) {
|
||||
if (schtJobDone) {
|
||||
break;
|
||||
}
|
||||
|
||||
taosUsleep(10000);
|
||||
}
|
||||
|
||||
schedulerFreeJob(&insertJobRefId, 0);
|
||||
|
||||
|
@ -989,7 +1069,7 @@ TEST(multiThread, forceFree) {
|
|||
|
||||
TdThread thread1, thread2, thread3;
|
||||
taosThreadCreate(&(thread1), &thattr, schtRunJobThread, NULL);
|
||||
taosThreadCreate(&(thread2), &thattr, schtFreeJobThread, NULL);
|
||||
// taosThreadCreate(&(thread2), &thattr, schtFreeJobThread, NULL);
|
||||
taosThreadCreate(&(thread3), &thattr, schtFetchRspThread, NULL);
|
||||
|
||||
while (true) {
|
||||
|
@ -1002,7 +1082,17 @@ TEST(multiThread, forceFree) {
|
|||
}
|
||||
|
||||
schtTestStop = true;
|
||||
taosSsleep(3);
|
||||
//taosSsleep(3);
|
||||
}
|
||||
|
||||
TEST(otherTest, otherCase) {
|
||||
// excpet test
|
||||
schReleaseJob(0);
|
||||
schFreeRpcCtx(NULL);
|
||||
|
||||
ASSERT_EQ(schDumpEpSet(NULL), (char*)NULL);
|
||||
ASSERT_EQ(strcmp(schGetOpStr(SCH_OP_NULL), "NULL"), 0);
|
||||
ASSERT_EQ(strcmp(schGetOpStr((SCH_OP_TYPE)100), "UNKNOWN"), 0);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
|
|
|
@@ -179,7 +179,8 @@ int32_t streamStateSessionDel_rocksdb(SStreamState* pState, const SSessionKey* k
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev_rocksdb(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyCurrentNext_rocksdb(SStreamState* pState, SSessionKey* key);
SStreamStateCur* streamStateSessionSeekKeyNext_rocksdb(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekToLast_rocksdb(SStreamState* pState);
SStreamStateCur* streamStateSessionSeekKeyPrev_rocksdb(SStreamState* pState, const SSessionKey* key);
SStreamStateCur* streamStateSessionSeekToLast_rocksdb(SStreamState* pState, int64_t groupId);
int32_t streamStateSessionCurPrev_rocksdb(SStreamStateCur* pCur);

int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);

@@ -109,8 +109,6 @@ void destroyStreamDataBlock(SStreamDataBlock* pBlock);
int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData);
int32_t streamBroadcastToUpTasks(SStreamTask* pTask, const SSDataBlock* pBlock);

int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);

int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId);
int32_t streamSendCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet);

@@ -2883,13 +2883,13 @@ int32_t streamStateSessionDel_rocksdb(SStreamState* pState, const SSessionKey* k
  return code;
}

SStreamStateCur* streamStateSessionSeekToLast_rocksdb(SStreamState* pState) {
SStreamStateCur* streamStateSessionSeekToLast_rocksdb(SStreamState* pState, int64_t groupId) {
  stDebug("streamStateSessionSeekToLast_rocksdb");

  int32_t code = 0;

  SSessionKey maxSessionKey = {.groupId = UINT64_MAX, .win = {.skey = INT64_MAX, .ekey = INT64_MAX}};
  SStateSessionKey maxKey = {.key = maxSessionKey, .opNum = INT64_MAX};
  SSessionKey maxSessionKey = {.groupId = groupId, .win = {.skey = INT64_MAX, .ekey = INT64_MAX}};
  SStateSessionKey maxKey = {.key = maxSessionKey, .opNum = pState->number};

  STREAM_STATE_PUT_ROCKSDB(pState, "sess", &maxKey, "", 0);
  if (code != 0) {

@@ -3048,6 +3048,46 @@ SStreamStateCur* streamStateSessionSeekKeyNext_rocksdb(SStreamState* pState, con
  return pCur;
}

SStreamStateCur* streamStateSessionSeekKeyPrev_rocksdb(SStreamState* pState, const SSessionKey* key) {
  stDebug("streamStateSessionSeekKeyPrev_rocksdb");
  STaskDbWrapper* wrapper = pState->pTdbState->pOwner->pBackend;
  SStreamStateCur* pCur = createStreamStateCursor();
  if (pCur == NULL) {
    return NULL;
  }
  pCur->db = wrapper->db;
  pCur->iter = streamStateIterCreate(pState, "sess", (rocksdb_snapshot_t**)&pCur->snapshot,
                                     (rocksdb_readoptions_t**)&pCur->readOpt);
  pCur->number = pState->number;

  SStateSessionKey sKey = {.key = *key, .opNum = pState->number};

  char buf[128] = {0};
  int len = stateSessionKeyEncode(&sKey, buf);
  if (!streamStateIterSeekAndValid(pCur->iter, buf, len)) {
    streamStateFreeCur(pCur);
    return NULL;
  }
  while (rocksdb_iter_valid(pCur->iter) && iterValueIsStale(pCur->iter)) rocksdb_iter_prev(pCur->iter);
  if (!rocksdb_iter_valid(pCur->iter)) {
    streamStateFreeCur(pCur);
    return NULL;
  }

  size_t klen;
  const char* iKey = rocksdb_iter_key(pCur->iter, &klen);
  SStateSessionKey curKey = {0};
  stateSessionKeyDecode(&curKey, (char*)iKey);
  if (stateSessionKeyCmpr(&sKey, sizeof(sKey), &curKey, sizeof(curKey)) > 0) return pCur;

  rocksdb_iter_prev(pCur->iter);
  if (!rocksdb_iter_valid(pCur->iter)) {
    streamStateFreeCur(pCur);
    return NULL;
  }
  return pCur;
}

int32_t streamStateSessionGetKVByCur_rocksdb(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen) {
  stDebug("streamStateSessionGetKVByCur_rocksdb");
  if (!pCur) {

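streamStateSessionSeekKeyPrev_rocksdb above is a "seek, then step backwards" cursor routine: position the iterator at the first entry that is not before the key, skip stale values, and move one more step back when the current entry is not already smaller. An approximation of that control flow on a plain sorted array, as a minimal stand-alone sketch with simplified stand-in types:

```c
#include <stdbool.h>
#include <stdio.h>

typedef struct { int key; bool stale; } Entry;

// Return the index of the last non-stale entry strictly smaller than `key`,
// or -1 if there is none: seek forward first, then walk backwards.
static int seekKeyPrev(const Entry *arr, int n, int key) {
  int i = 0;
  while (i < n && arr[i].key < key) i++;   // i is now the first entry >= key (or n)
  i--;                                     // step back to the candidate "prev" entry
  while (i >= 0 && arr[i].stale) i--;      // skip stale values, mirroring iterValueIsStale()
  return i;
}

int main(void) {
  Entry arr[] = {{1, false}, {3, true}, {5, false}, {9, false}};
  printf("%d\n", seekKeyPrev(arr, 4, 6));  // prints 2 (the entry with key 5)
  printf("%d\n", seekKeyPrev(arr, 4, 1));  // prints -1 (nothing before key 1)
  return 0;
}
```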
@ -68,18 +68,6 @@ int32_t tEncodeStreamCheckpointSourceRsp(SEncoder* pEncoder, const SStreamCheckp
|
|||
return pEncoder->pos;
|
||||
}
|
||||
|
||||
int32_t tDecodeStreamCheckpointSourceRsp(SDecoder* pDecoder, SStreamCheckpointSourceRsp* pRsp) {
|
||||
if (tStartDecode(pDecoder) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pRsp->streamId) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pRsp->checkpointId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pRsp->taskId) < 0) return -1;
|
||||
if (tDecodeI32(pDecoder, &pRsp->nodeId) < 0) return -1;
|
||||
if (tDecodeI64(pDecoder, &pRsp->expireTime) < 0) return -1;
|
||||
if (tDecodeI8(pDecoder, &pRsp->success) < 0) return -1;
|
||||
tEndDecode(pDecoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tEncodeStreamCheckpointReadyMsg(SEncoder* pEncoder, const SStreamCheckpointReadyMsg* pReq) {
|
||||
if (tStartEncode(pEncoder) < 0) return -1;
|
||||
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
|
||||
|
@ -512,7 +500,7 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) {
|
|||
SStreamTaskId hTaskId = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId};
|
||||
|
||||
stDebug("s-task:%s fill-history finish checkpoint done, drop related fill-history task:0x%x", id, hTaskId.taskId);
|
||||
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pTask->pMeta->vgId, &hTaskId);
|
||||
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pTask->pMeta->vgId, &hTaskId, 1);
|
||||
} else {
|
||||
stWarn("s-task:%s related fill-history task:0x%x is erased", id, (int32_t)pTask->hTaskInfo.id.taskId);
|
||||
}
|
||||
|
|
|
@ -162,6 +162,8 @@ int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq) { taosMemoryFree(pReq->pRetrieve); }
|
||||
|
||||
void sendRetrieveRsp(SStreamRetrieveReq *pReq, SRpcMsg* pRsp){
|
||||
void* buf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamRetrieveRsp));
|
||||
((SMsgHead*)buf)->vgId = htonl(pReq->srcNodeId);
|
||||
|
@ -174,7 +176,7 @@ void sendRetrieveRsp(SStreamRetrieveReq *pReq, SRpcMsg* pRsp){
|
|||
tmsgSendRsp(pRsp);
|
||||
}
|
||||
|
||||
int32_t broadcastRetrieveMsg(SStreamTask* pTask, SStreamRetrieveReq *req){
|
||||
int32_t broadcastRetrieveMsg(SStreamTask* pTask, SStreamRetrieveReq* req) {
|
||||
int32_t code = 0;
|
||||
void* buf = NULL;
|
||||
int32_t sz = taosArrayGetSize(pTask->upstreamInfo.pList);
|
||||
|
@ -569,6 +571,7 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S
|
|||
char ctbName[TSDB_TABLE_FNAME_LEN] = {0};
|
||||
if (pDataBlock->info.parTbName[0]) {
|
||||
if(pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER &&
|
||||
pTask->subtableWithoutMd5 != 1 &&
|
||||
!isAutoTableName(pDataBlock->info.parTbName) &&
|
||||
!alreadyAddGroupId(pDataBlock->info.parTbName) &&
|
||||
groupId != 0){
|
||||
|
|
|
@ -328,7 +328,7 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) {
|
|||
id, (int32_t) pTask->streamTaskId.taskId);
|
||||
|
||||
// 1. free it and remove fill-history task from disk meta-store
|
||||
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id);
|
||||
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &pTask->id, 0);
|
||||
|
||||
// 2. save to disk
|
||||
streamMetaWLock(pMeta);
|
||||
|
|
|
@ -725,9 +725,6 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
|
|||
|
||||
// it is a fill-history task, remove the related stream task's id that points to it
|
||||
atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1);
|
||||
if (pTask->info.fillHistory == 1) {
|
||||
streamTaskClearHTaskAttr(pTask, false);
|
||||
}
|
||||
|
||||
taosHashRemove(pMeta->pTasksMap, &id, sizeof(id));
|
||||
doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id);
|
||||
|
|
|
@@ -28,6 +28,12 @@ int sessionStateKeyCompare(const SSessionKey* pWin1, const void* pDatas, int pos
  return sessionWinKeyCmpr(pWin1, pWin2);
}

int sessionStateRangeKeyCompare(const SSessionKey* pWin1, const void* pDatas, int pos) {
  SRowBuffPos* pPos2 = taosArrayGetP(pDatas, pos);
  SSessionKey* pWin2 = (SSessionKey*)pPos2->pKey;
  return sessionRangeKeyCmpr(pWin1, pWin2);
}

int32_t binarySearch(void* keyList, int num, const void* key, __session_compare_fn_t cmpFn) {
  int firstPos = 0, lastPos = num - 1, midPos = -1;
  int numOfRows = 0;

@@ -69,6 +75,12 @@ bool inSessionWindow(SSessionKey* pKey, TSKEY ts, int64_t gap) {
  return false;
}

SStreamStateCur* createSessionStateCursor(SStreamFileState* pFileState) {
  SStreamStateCur* pCur = createStreamStateCursor();
  pCur->pStreamFileState = pFileState;
  return pCur;
}

static SRowBuffPos* addNewSessionWindow(SStreamFileState* pFileState, SArray* pWinInfos, const SSessionKey* pKey) {
  SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState);
  ASSERT(pNewPos->pRowBuff);

@@ -91,7 +103,12 @@ SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKe
  memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
  pNewPos->needFree = true;
  pNewPos->beFlushed = true;
  memcpy(pNewPos->pRowBuff, p, *pVLen);
  if(p) {
    memcpy(pNewPos->pRowBuff, p, *pVLen);
  } else {
    int32_t len = getRowStateRowSize(pFileState);
    memset(pNewPos->pRowBuff, 0, len);
  }
  taosMemoryFree(p);
  return pNewPos;
}

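The comparators added above (sessionStateKeyCompare and sessionStateRangeKeyCompare) are plugged into the file-state's binarySearch to locate a window in the in-memory array. Below is a stand-alone floor-style binary search using the same (key, array, position) comparator shape; the engine's exact boundary convention is not visible in this hunk, so treat the returned index semantics as an assumption for illustration:

```c
#include <stdio.h>

typedef int (*compare_fn_t)(const void *pKey, const void *pArray, int pos);

// Floor search: largest pos with cmpFn(key, arr, pos) >= 0, i.e. arr[pos] <= key.
// Returns -1 when every element is greater than the key.
static int binarySearchFloor(const void *arr, int num, const void *key, compare_fn_t cmpFn) {
  int lo = 0, hi = num - 1, res = -1;
  while (lo <= hi) {
    int mid = (lo + hi) / 2;
    if (cmpFn(key, arr, mid) >= 0) {   // key >= arr[mid]
      res = mid;
      lo = mid + 1;
    } else {
      hi = mid - 1;
    }
  }
  return res;
}

static int intCompare(const void *pKey, const void *pArray, int pos) {
  int key = *(const int *)pKey;
  int elem = ((const int *)pArray)[pos];
  return (key > elem) - (key < elem);
}

int main(void) {
  int arr[] = {10, 20, 30, 40};
  int key = 25;
  printf("%d\n", binarySearchFloor(arr, 4, &key, intCompare));  // prints 1 (arr[1] == 20)
  return 0;
}
```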
@ -364,9 +381,8 @@ static SStreamStateCur* seekKeyCurrentPrev_buff(SStreamFileState* pFileState, co
|
|||
}
|
||||
|
||||
if (index >= 0) {
|
||||
pCur = createStreamStateCursor();
|
||||
pCur = createSessionStateCursor(pFileState);
|
||||
pCur->buffIndex = index;
|
||||
pCur->pStreamFileState = pFileState;
|
||||
if (pIndex) {
|
||||
*pIndex = index;
|
||||
}
|
||||
|
@ -405,7 +421,7 @@ static void checkAndTransformCursor(SStreamFileState* pFileState, const uint64_t
|
|||
if (taosArrayGetSize(pWinStates) > 0 &&
|
||||
(code == TSDB_CODE_FAILED || sessionStateKeyCompare(&key, pWinStates, 0) >= 0)) {
|
||||
if (!(*ppCur)) {
|
||||
(*ppCur) = createStreamStateCursor();
|
||||
(*ppCur) = createSessionStateCursor(pFileState);
|
||||
}
|
||||
transformCursor(pFileState, *ppCur);
|
||||
} else if (*ppCur) {
|
||||
|
@ -419,7 +435,7 @@ SStreamStateCur* sessionWinStateSeekKeyCurrentNext(SStreamFileState* pFileState,
|
|||
int32_t index = -1;
|
||||
SStreamStateCur* pCur = seekKeyCurrentPrev_buff(pFileState, pWinKey, &pWinStates, &index);
|
||||
if (pCur) {
|
||||
if (sessionStateKeyCompare(pWinKey, pWinStates, index) > 0) {
|
||||
if (sessionStateRangeKeyCompare(pWinKey, pWinStates, index) > 0) {
|
||||
sessionWinStateMoveToNext(pCur);
|
||||
}
|
||||
return pCur;
|
||||
|
@ -446,6 +462,66 @@ SStreamStateCur* sessionWinStateSeekKeyNext(SStreamFileState* pFileState, const
|
|||
return pCur;
|
||||
}
|
||||
|
||||
SStreamStateCur* countWinStateSeekKeyPrev(SStreamFileState* pFileState, const SSessionKey* pWinKey, COUNT_TYPE count) {
|
||||
SArray* pWinStates = NULL;
|
||||
int32_t index = -1;
|
||||
SStreamStateCur* pBuffCur = seekKeyCurrentPrev_buff(pFileState, pWinKey, &pWinStates, &index);
|
||||
int32_t resSize = getRowStateRowSize(pFileState);
|
||||
COUNT_TYPE winCount = 0;
|
||||
if (pBuffCur) {
|
||||
while (index >= 0) {
|
||||
SRowBuffPos* pPos = taosArrayGetP(pWinStates, index);
|
||||
winCount = *((COUNT_TYPE*) ((char*)pPos->pRowBuff + (resSize - sizeof(COUNT_TYPE))));
|
||||
if (sessionStateRangeKeyCompare(pWinKey, pWinStates, index) == 0 || winCount < count) {
|
||||
index--;
|
||||
} else if (index >= 0) {
|
||||
pBuffCur->buffIndex = index + 1;
|
||||
return pBuffCur;
|
||||
}
|
||||
}
|
||||
pBuffCur->buffIndex = 0;
|
||||
} else if (taosArrayGetSize(pWinStates) > 0) {
|
||||
pBuffCur = createSessionStateCursor(pFileState);
|
||||
pBuffCur->buffIndex = 0;
|
||||
}
|
||||
|
||||
void* pFileStore = getStateFileStore(pFileState);
|
||||
SStreamStateCur* pCur = streamStateSessionSeekKeyPrev_rocksdb(pFileStore, pWinKey);
|
||||
if (pCur) {
|
||||
SSessionKey key = {0};
|
||||
void* pVal = NULL;
|
||||
int len = 0;
|
||||
int32_t code = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &len);
|
||||
if (code == TSDB_CODE_FAILED) {
|
||||
streamStateFreeCur(pCur);
|
||||
return pBuffCur;
|
||||
}
|
||||
winCount = *((COUNT_TYPE*) ((char*)pVal + (resSize - sizeof(COUNT_TYPE))));
|
||||
if (sessionRangeKeyCmpr(pWinKey, &key) != 0 && winCount == count) {
|
||||
streamStateFreeCur(pCur);
|
||||
return pBuffCur;
|
||||
}
|
||||
streamStateCurPrev(pFileStore, pCur);
|
||||
while (1) {
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, &key, &pVal, &len);
|
||||
if (code == TSDB_CODE_FAILED) {
|
||||
streamStateCurNext(pFileStore, pCur);
|
||||
streamStateFreeCur(pBuffCur);
|
||||
return pCur;
|
||||
}
|
||||
winCount = *((COUNT_TYPE*) ((char*)pVal + (resSize - sizeof(COUNT_TYPE))));
|
||||
if (sessionRangeKeyCmpr(pWinKey, &key) == 0 || winCount < count) {
|
||||
streamStateCurPrev(pFileStore, pCur);
|
||||
} else {
|
||||
streamStateCurNext(pFileStore, pCur);
|
||||
streamStateFreeCur(pBuffCur);
|
||||
return pCur;
|
||||
}
|
||||
}
|
||||
}
|
||||
return pBuffCur;
|
||||
}
|
||||
|
||||
int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen) {
|
||||
if (!pCur) {
|
||||
return TSDB_CODE_FAILED;
|
||||
|
@ -503,7 +579,7 @@ int32_t sessionWinStateMoveToNext(SStreamStateCur* pCur) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey) {
|
||||
int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessionKey* key, SSessionKey* curKey, range_cmpr_fn cmpFn) {
|
||||
SStreamStateCur* pCur = sessionWinStateSeekKeyCurrentPrev(pFileState, key);
|
||||
SSessionKey tmpKey = *key;
|
||||
int32_t code = sessionWinStateGetKVByCur(pCur, &tmpKey, NULL, NULL);
|
||||
|
@ -520,7 +596,7 @@ int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessio
|
|||
goto _end;
|
||||
}
|
||||
|
||||
if (sessionRangeKeyCmpr(key, &tmpKey) == 0) {
|
||||
if (cmpFn(key, &tmpKey) == 0) {
|
||||
*curKey = tmpKey;
|
||||
goto _end;
|
||||
} else if (!hasCurrentPrev) {
|
||||
|
@ -530,7 +606,7 @@ int32_t sessionWinStateGetKeyByRange(SStreamFileState* pFileState, const SSessio
|
|||
|
||||
sessionWinStateMoveToNext(pCur);
|
||||
code = sessionWinStateGetKVByCur(pCur, &tmpKey, NULL, NULL);
|
||||
if (code == TSDB_CODE_SUCCESS && sessionRangeKeyCmpr(key, &tmpKey) == 0) {
|
||||
if (code == TSDB_CODE_SUCCESS && cmpFn(key, &tmpKey) == 0) {
|
||||
*curKey = tmpKey;
|
||||
} else {
|
||||
code = TSDB_CODE_FAILED;
|
||||
|
@ -636,3 +712,143 @@ int32_t getStateWinResultBuff(SStreamFileState* pFileState, SSessionKey* key, ch
|
|||
_end:
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t getCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, COUNT_TYPE winCount, void** pVal, int32_t* pVLen) {
|
||||
SSessionKey* pWinKey = pKey;
|
||||
const TSKEY gap = 0;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SSHashObj* pSessionBuff = getRowStateBuff(pFileState);
|
||||
SArray* pWinStates = NULL;
|
||||
void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t));
|
||||
if (ppBuff) {
|
||||
pWinStates = (SArray*)(*ppBuff);
|
||||
} else {
|
||||
pWinStates = taosArrayInit(16, POINTER_BYTES);
|
||||
tSimpleHashPut(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t), &pWinStates, POINTER_BYTES);
|
||||
}
|
||||
|
||||
TSKEY startTs = pWinKey->win.skey;
|
||||
TSKEY endTs = pWinKey->win.ekey;
|
||||
|
||||
int32_t size = taosArrayGetSize(pWinStates);
|
||||
if (size == 0) {
|
||||
void* pFileStore = getStateFileStore(pFileState);
|
||||
void* pRockVal = NULL;
|
||||
SStreamStateCur* pCur = streamStateSessionSeekToLast_rocksdb(pFileStore, pKey->groupId);
|
||||
code = streamStateSessionGetKVByCur_rocksdb(pCur, pWinKey, &pRockVal, pVLen);
|
||||
if (code == TSDB_CODE_SUCCESS || isFlushedState(pFileState, endTs, 0)) {
|
||||
qDebug("===stream===0 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, pWinKey->win.ekey, code);
|
||||
if (code == TSDB_CODE_SUCCESS) {
|
||||
int32_t valSize = *pVLen;
|
||||
COUNT_TYPE* pWinStateCout = (COUNT_TYPE*)( (char*)(pRockVal) + (valSize - sizeof(COUNT_TYPE)) );
|
||||
if (inSessionWindow(pWinKey, startTs, gap) || (*pWinStateCout) < winCount) {
|
||||
(*pVal) = createSessionWinBuff(pFileState, pWinKey, pRockVal, pVLen);
|
||||
streamStateFreeCur(pCur);
|
||||
goto _end;
|
||||
}
|
||||
}
|
||||
pWinKey->win.skey = startTs;
|
||||
pWinKey->win.ekey = endTs;
|
||||
(*pVal) = createSessionWinBuff(pFileState, pWinKey, NULL, NULL);
|
||||
taosMemoryFree(pRockVal);
|
||||
streamStateFreeCur(pCur);
|
||||
} else {
|
||||
(*pVal) = addNewSessionWindow(pFileState, pWinStates, pWinKey);
|
||||
code = TSDB_CODE_FAILED;
|
||||
}
|
||||
goto _end;
|
||||
}
|
||||
|
||||
// find the first position which is smaller than the pWinKey
|
||||
int32_t index = binarySearch(pWinStates, size, pWinKey, sessionStateKeyCompare);
|
||||
SRowBuffPos* pPos = NULL;
|
||||
int32_t valSize = *pVLen;
|
||||
|
||||
if (index >= 0) {
|
||||
pPos = taosArrayGetP(pWinStates, index);
|
||||
COUNT_TYPE* pWinStateCout = (COUNT_TYPE*)( (char*)(pPos->pRowBuff) + (valSize - sizeof(COUNT_TYPE)) );
|
||||
if (inSessionWindow(pPos->pKey, startTs, gap) || (index == size - 1 && (*pWinStateCout) < winCount) ) {
|
||||
(*pVal) = pPos;
|
||||
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
|
||||
pPos->beUsed = true;
|
||||
*pWinKey = *pDestWinKey;
|
||||
goto _end;
|
||||
}
|
||||
}
|
||||
|
||||
if (index == -1) {
|
||||
if (!isDeteled(pFileState, endTs)) {
|
||||
void* p = NULL;
|
||||
void* pFileStore = getStateFileStore(pFileState);
|
||||
SStreamStateCur* pCur = streamStateSessionSeekToLast_rocksdb(pFileStore, pKey->groupId);
|
||||
int32_t code_file = streamStateSessionGetKVByCur_rocksdb(pCur, pWinKey, &p, pVLen);
|
||||
if (code_file == TSDB_CODE_SUCCESS) {
|
||||
(*pVal) = createSessionWinBuff(pFileState, pWinKey, p, pVLen);
|
||||
code = code_file;
|
||||
qDebug("===stream===1 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, pWinKey->win.ekey, code_file);
|
||||
streamStateFreeCur(pCur);
|
||||
goto _end;
|
||||
}
|
||||
taosMemoryFree(p);
|
||||
streamStateFreeCur(pCur);
|
||||
}
|
||||
}
|
||||
|
||||
if (index + 1 < size) {
|
||||
pPos = taosArrayGetP(pWinStates, index + 1);
|
||||
(*pVal) = pPos;
|
||||
SSessionKey* pDestWinKey = (SSessionKey*)pPos->pKey;
|
||||
pPos->beUsed = true;
|
||||
*pWinKey = *pDestWinKey;
|
||||
goto _end;
|
||||
}
|
||||
|
||||
(*pVal) = addNewSessionWindow(pFileState, pWinStates, pWinKey);
|
||||
code = TSDB_CODE_FAILED;
|
||||
|
||||
_end:
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t createCountWinResultBuff(SStreamFileState* pFileState, SSessionKey* pKey, void** pVal, int32_t* pVLen) {
|
||||
SSessionKey* pWinKey = pKey;
|
||||
const TSKEY gap = 0;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SSHashObj* pSessionBuff = getRowStateBuff(pFileState);
|
||||
SArray* pWinStates = NULL;
|
||||
void** ppBuff = tSimpleHashGet(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t));
|
||||
if (ppBuff) {
|
||||
pWinStates = (SArray*)(*ppBuff);
|
||||
} else {
|
||||
pWinStates = taosArrayInit(16, POINTER_BYTES);
|
||||
tSimpleHashPut(pSessionBuff, &pWinKey->groupId, sizeof(uint64_t), &pWinStates, POINTER_BYTES);
|
||||
}
|
||||
|
||||
TSKEY startTs = pWinKey->win.skey;
|
||||
TSKEY endTs = pWinKey->win.ekey;
|
||||
|
||||
int32_t size = taosArrayGetSize(pWinStates);
|
||||
if (size == 0) {
|
||||
void* pFileStore = getStateFileStore(pFileState);
|
||||
void* p = NULL;
|
||||
|
||||
SStreamStateCur* pCur = streamStateSessionSeekToLast_rocksdb(pFileStore, pKey->groupId);
|
||||
int32_t code_file = streamStateSessionGetKVByCur_rocksdb(pCur, pWinKey, &p, pVLen);
|
||||
if (code_file == TSDB_CODE_SUCCESS || isFlushedState(pFileState, endTs, 0)) {
|
||||
(*pVal) = createSessionWinBuff(pFileState, pWinKey, p, pVLen);
|
||||
code = code_file;
|
||||
qDebug("===stream===0 get state win:%" PRId64 ",%" PRId64 " from disc, res %d", pWinKey->win.skey, pWinKey->win.ekey, code_file);
|
||||
} else {
|
||||
(*pVal) = addNewSessionWindow(pFileState, pWinStates, pWinKey);
|
||||
code = TSDB_CODE_FAILED;
|
||||
taosMemoryFree(p);
|
||||
}
|
||||
streamStateFreeCur(pCur);
|
||||
goto _end;
|
||||
} else {
|
||||
(*pVal) = addNewSessionWindow(pFileState, pWinStates, pWinKey);
|
||||
}
|
||||
|
||||
_end:
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -391,6 +391,16 @@ void doProcessDownstreamReadyRsp(SStreamTask* pTask) {
|
|||
int64_t startTs = pTask->execInfo.start;
|
||||
streamMetaAddTaskLaunchResult(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, initTs, startTs, true);
|
||||
|
||||
if (pTask->status.taskStatus == TASK_STATUS__HALT) {
|
||||
ASSERT(HAS_RELATED_FILLHISTORY_TASK(pTask) && (pTask->info.fillHistory == 0));
|
||||
|
||||
// halt it self for count window stream task until the related
|
||||
// fill history task completd.
|
||||
stDebug("s-task:%s level:%d initial status is %s from mnode, set it to be halt", pTask->id.idStr,
|
||||
pTask->info.taskLevel, streamTaskGetStatusStr(pTask->status.taskStatus));
|
||||
streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_HALT);
|
||||
}
|
||||
|
||||
// start the related fill-history task, when current task is ready
|
||||
// not invoke in success callback due to the deadlock.
|
||||
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
|
||||
|
@ -539,11 +549,6 @@ int32_t streamSetParamForScanHistory(SStreamTask* pTask) {
|
|||
return qSetStreamOperatorOptionForScanHistory(pTask->exec.pExecutor);
|
||||
}
|
||||
|
||||
int32_t streamRestoreParam(SStreamTask* pTask) {
|
||||
stDebug("s-task:%s restore operator param after scan-history", pTask->id.idStr);
|
||||
return qRestoreStreamOperatorOption(pTask->exec.pExecutor);
|
||||
}
|
||||
|
||||
// source
|
||||
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow) {
|
||||
return qStreamSourceScanParamForHistoryScanStep1(pTask->exec.pExecutor, pVerRange, pWindow);
|
||||
|
@ -804,7 +809,7 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) {
|
|||
|
||||
// check stream task status in the first place.
|
||||
SStreamTaskState* pStatus = streamTaskGetStatus(pTask);
|
||||
if (pStatus->state != TASK_STATUS__READY) {
|
||||
if (pStatus->state != TASK_STATUS__READY && pStatus->state != TASK_STATUS__HALT) {
|
||||
stDebug("s-task:%s not launch related fill-history task:0x%" PRIx64 "-0x%x, status:%s", idStr, hStreamId, hTaskId,
|
||||
pStatus->name);
|
||||
|
||||
|
|
|
@ -42,6 +42,14 @@ int sessionRangeKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int countRangeKeyEqual(const SSessionKey* pWin1, const SSessionKey* pWin2) {
|
||||
if (pWin1->groupId == pWin2->groupId && pWin1->win.skey <= pWin2->win.skey && pWin2->win.skey <= pWin1->win.ekey) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int sessionWinKeyCmpr(const SSessionKey* pWin1, const SSessionKey* pWin2) {
|
||||
if (pWin1->groupId > pWin2->groupId) {
|
||||
return 1;
|
||||
|
@ -752,6 +760,12 @@ int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key) {
|
|||
#endif
|
||||
}
|
||||
|
||||
int32_t streamStateSessionReset(SStreamState* pState, void* pVal) {
|
||||
int32_t len = getRowStateRowSize(pState->pFileState);
|
||||
memset(pVal, 0, len);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key) {
|
||||
#ifdef USE_ROCKSDB
|
||||
return sessionWinStateSeekKeyCurrentPrev(pState->pFileState, key);
|
||||
|
@ -846,6 +860,13 @@ SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSess
|
|||
#endif
|
||||
}
|
||||
|
||||
SStreamStateCur* streamStateCountSeekKeyPrev(SStreamState* pState, const SSessionKey* key, COUNT_TYPE count) {
|
||||
#ifdef USE_ROCKSDB
|
||||
return countWinStateSeekKeyPrev(pState->pFileState, key, count);
|
||||
#else
|
||||
#endif
|
||||
}
|
||||
|
||||
int32_t streamStateSessionGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen) {
|
||||
#ifdef USE_ROCKSDB
|
||||
return sessionWinStateGetKVByCur(pCur, pKey, pVal, pVLen);
|
||||
|
@ -896,7 +917,7 @@ int32_t streamStateSessionClear(SStreamState* pState) {
|
|||
|
||||
int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey* key, SSessionKey* curKey) {
|
||||
#ifdef USE_ROCKSDB
|
||||
return sessionWinStateGetKeyByRange(pState->pFileState, key, curKey);
|
||||
return sessionWinStateGetKeyByRange(pState->pFileState, key, curKey, sessionRangeKeyCmpr);
|
||||
#else
|
||||
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
|
||||
if (pCur == NULL) {
|
||||
|
@@ -946,6 +967,13 @@ int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey*
 #endif
 }
 
+int32_t streamStateCountGetKeyByRange(SStreamState* pState, const SSessionKey* key, SSessionKey* curKey) {
+#ifdef USE_ROCKSDB
+  return sessionWinStateGetKeyByRange(pState->pFileState, key, curKey, countRangeKeyEqual);
+#else
+#endif
+}
+
 int32_t streamStateSessionAddIfNotExist(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
                                         int32_t* pVLen) {
 #ifdef USE_ROCKSDB
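
Both `streamStateSessionGetKeyByRange` and the new `streamStateCountGetKeyByRange` now funnel into the same `sessionWinStateGetKeyByRange` helper and differ only in the comparator they pass (`sessionRangeKeyCmpr` vs `countRangeKeyEqual`). A hedged sketch of that comparator-parameterized lookup, with a hypothetical `findKeyByRange` standing in for the real helper:

```c
#include <stddef.h>
#include <stdint.h>

typedef struct { int64_t skey; int64_t ekey; } STimeWindow;   // simplified stand-ins
typedef struct { STimeWindow win; uint64_t groupId; } SSessionKey;

// Comparator signature shared by sessionRangeKeyCmpr and countRangeKeyEqual.
typedef int (*range_cmpr_fn)(const SSessionKey* stored, const SSessionKey* probe);

// Hypothetical generic lookup: return the first stored key the comparator
// considers equal (returns 0 for). The real helper walks the state buffer
// and RocksDB instead of a plain array.
static const SSessionKey* findKeyByRange(const SSessionKey* keys, size_t n,
                                         const SSessionKey* probe, range_cmpr_fn cmpr) {
  for (size_t i = 0; i < n; ++i) {
    if (cmpr(&keys[i], probe) == 0) {
      return &keys[i];
    }
  }
  return NULL;
}

// Trivial comparator for the demo: match on group id only.
static int groupOnlyCmpr(const SSessionKey* a, const SSessionKey* b) {
  return a->groupId == b->groupId ? 0 : 1;
}

int main(void) {
  SSessionKey keys[] = {{{10, 20}, 1}, {{30, 40}, 2}};
  SSessionKey probe = {{35, 35}, 2};
  const SSessionKey* hit = findKeyByRange(keys, 2, &probe, groupOnlyCmpr);
  return hit != NULL ? 0 : 1;   // exits 0 because group 2 is present
}
```
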
@@ -1135,90 +1163,11 @@ SStreamStateCur* createStreamStateCursor() {
   return pCur;
 }
 
-#if 0
-char* streamStateSessionDump(SStreamState* pState) {
-  SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
-  if (pCur == NULL) {
-    return NULL;
-  }
-  pCur->number = pState->number;
-  if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
-    streamStateFreeCur(pCur);
-    return NULL;
-  }
-  tdbTbcMoveToFirst(pCur->pCur);
-
-  SSessionKey key = {0};
-  void* buf = NULL;
-  int32_t bufSize = 0;
-  int32_t code = streamStateSessionGetKVByCur(pCur, &key, &buf, &bufSize);
-  if (code != 0) {
-    streamStateFreeCur(pCur);
-    return NULL;
-  }
-
-  int32_t size = 2048;
-  char* dumpBuf = taosMemoryCalloc(size, 1);
-  int64_t len = 0;
-  len += snprintf(dumpBuf + len, size - len, "||s:%15" PRId64 ",", key.win.skey);
-  len += snprintf(dumpBuf + len, size - len, "e:%15" PRId64 ",", key.win.ekey);
-  len += snprintf(dumpBuf + len, size - len, "g:%15" PRId64 "||", key.groupId);
-  while (1) {
-    tdbTbcMoveToNext(pCur->pCur);
-    key = (SSessionKey){0};
-    code = streamStateSessionGetKVByCur(pCur, &key, NULL, 0);
-    if (code != 0) {
-      streamStateFreeCur(pCur);
-      return dumpBuf;
-    }
-    len += snprintf(dumpBuf + len, size - len, "||s:%15" PRId64 ",", key.win.skey);
-    len += snprintf(dumpBuf + len, size - len, "e:%15" PRId64 ",", key.win.ekey);
-    len += snprintf(dumpBuf + len, size - len, "g:%15" PRId64 "||", key.groupId);
-  }
-  streamStateFreeCur(pCur);
-  return dumpBuf;
-}
-
-char* streamStateIntervalDump(SStreamState* pState) {
-  SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
-  if (pCur == NULL) {
-    return NULL;
-  }
-  pCur->number = pState->number;
-  if (tdbTbcOpen(pState->pTdbState->pStateDb, &pCur->pCur, NULL) < 0) {
-    streamStateFreeCur(pCur);
-    return NULL;
-  }
-  tdbTbcMoveToFirst(pCur->pCur);
-
-  SWinKey key = {0};
-  void* buf = NULL;
-  int32_t bufSize = 0;
-  int32_t code = streamStateGetKVByCur(pCur, &key, (const void **)&buf, &bufSize);
-  if (code != 0) {
-    streamStateFreeCur(pCur);
-    return NULL;
-  }
-
-  int32_t size = 2048;
-  char* dumpBuf = taosMemoryCalloc(size, 1);
-  int64_t len = 0;
-  len += snprintf(dumpBuf + len, size - len, "||s:%15" PRId64 ",", key.ts);
-  // len += snprintf(dumpBuf + len, size - len, "e:%15" PRId64 ",", key.win.ekey);
-  len += snprintf(dumpBuf + len, size - len, "g:%15" PRId64 "||", key.groupId);
-  while (1) {
-    tdbTbcMoveToNext(pCur->pCur);
-    key = (SWinKey){0};
-    code = streamStateGetKVByCur(pCur, &key, NULL, 0);
-    if (code != 0) {
-      streamStateFreeCur(pCur);
-      return dumpBuf;
-    }
-    len += snprintf(dumpBuf + len, size - len, "||s:%15" PRId64 ",", key.ts);
-    // len += snprintf(dumpBuf + len, size - len, "e:%15" PRId64 ",", key.win.ekey);
-    len += snprintf(dumpBuf + len, size - len, "g:%15" PRId64 "||", key.groupId);
-  }
-  streamStateFreeCur(pCur);
-  return dumpBuf;
-}
-#endif
+// count window
+int32_t streamStateCountWinAddIfNotExist(SStreamState* pState, SSessionKey* pKey, COUNT_TYPE winCount, void** ppVal, int32_t* pVLen) {
+  return getCountWinResultBuff(pState->pFileState, pKey, winCount, ppVal, pVLen);
+}
+
+int32_t streamStateCountWinAdd(SStreamState* pState, SSessionKey* pKey, void** pVal, int32_t* pVLen) {
+  return createCountWinResultBuff(pState->pFileState, pKey, pVal, pVLen);
+}
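
The two new count-window entry points delegate to the file-state layer (`getCountWinResultBuff` / `createCountWinResultBuff`), which hands back a per-window result buffer and creates one on first use. A rough standalone sketch of that get-or-create idea, using a toy in-memory table rather than the real buffer management:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct { uint64_t groupId; int64_t skey; } WinKey;                 // simplified key
typedef struct { WinKey key; void* buf; int32_t len; int used; } WinSlot;

#define MAX_WINS 64
static WinSlot g_slots[MAX_WINS];                                          // toy state store

// Return the result buffer for a window, creating a zeroed one if absent.
static void* countWinAddIfNotExist(WinKey key, int32_t rowLen) {
  for (int i = 0; i < MAX_WINS; ++i) {
    if (g_slots[i].used && g_slots[i].key.groupId == key.groupId && g_slots[i].key.skey == key.skey) {
      return g_slots[i].buf;                                               // existing window
    }
  }
  for (int i = 0; i < MAX_WINS; ++i) {
    if (!g_slots[i].used) {                                                // first free slot
      g_slots[i] = (WinSlot){.key = key, .buf = calloc(1, (size_t)rowLen), .len = rowLen, .used = 1};
      return g_slots[i].buf;
    }
  }
  return NULL;                                                             // table full
}

int main(void) {
  WinKey k = {.groupId = 1, .skey = 1000};
  void* first = countWinAddIfNotExist(k, 32);
  void* again = countWinAddIfNotExist(k, 32);
  return (first != NULL && first == again) ? 0 : 1;                        // same buffer both times
}
```
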
@@ -80,7 +80,7 @@ static SStreamChildEpInfo* createStreamTaskEpInfo(const SStreamTask* pTask) {
 }
 
 SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam,
-                            SArray* pTaskList, bool hasFillhistory) {
+                            SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5) {
   SStreamTask* pTask = (SStreamTask*)taosMemoryCalloc(1, sizeof(SStreamTask));
   if (pTask == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -96,6 +96,7 @@ SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset,
   pTask->info.taskLevel = taskLevel;
   pTask->info.fillHistory = fillHistory;
   pTask->info.triggerParam = triggerParam;
+  pTask->subtableWithoutMd5 = subtableWithoutMd5;
 
   pTask->status.pSM = streamCreateStateMachine(pTask);
   if (pTask->status.pSM == NULL) {
@@ -108,7 +109,7 @@ SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset,
 
   pTask->id.idStr = taosStrdup(buf);
   pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE;
-  pTask->status.taskStatus = (fillHistory || hasFillhistory) ? TASK_STATUS__SCAN_HISTORY : TASK_STATUS__READY;
+  pTask->status.taskStatus = fillHistory? TASK_STATUS__SCAN_HISTORY : TASK_STATUS__READY;
   pTask->inputq.status = TASK_INPUT_STATUS__NORMAL;
   pTask->outputq.status = TASK_OUTPUT_STATUS__NORMAL;
 
@@ -126,7 +127,6 @@ int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo)
   if (tEncodeI32(pEncoder, pInfo->taskId) < 0) return -1;
   if (tEncodeI32(pEncoder, pInfo->nodeId) < 0) return -1;
   if (tEncodeI32(pEncoder, pInfo->childId) < 0) return -1;
-  /*if (tEncodeI64(pEncoder, pInfo->processedVer) < 0) return -1;*/
   if (tEncodeSEpSet(pEncoder, &pInfo->epSet) < 0) return -1;
   if (tEncodeI64(pEncoder, pInfo->stage) < 0) return -1;
   return 0;
@@ -136,7 +136,6 @@ int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo) {
   if (tDecodeI32(pDecoder, &pInfo->taskId) < 0) return -1;
   if (tDecodeI32(pDecoder, &pInfo->nodeId) < 0) return -1;
   if (tDecodeI32(pDecoder, &pInfo->childId) < 0) return -1;
-  /*if (tDecodeI64(pDecoder, &pInfo->processedVer) < 0) return -1;*/
   if (tDecodeSEpSet(pDecoder, &pInfo->epSet) < 0) return -1;
   if (tDecodeI64(pDecoder, &pInfo->stage) < 0) return -1;
   return 0;
@@ -205,6 +204,7 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
     if (tEncodeCStr(pEncoder, pTask->outputInfo.shuffleDispatcher.stbFullName) < 0) return -1;
   }
   if (tEncodeI64(pEncoder, pTask->info.triggerParam) < 0) return -1;
+  if (tEncodeI8(pEncoder, pTask->subtableWithoutMd5) < 0) return -1;
   if (tEncodeCStrWithLen(pEncoder, pTask->reserve, sizeof(pTask->reserve) - 1) < 0) return -1;
 
   tEndEncode(pEncoder);
@@ -287,6 +287,7 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
     if (tDecodeCStrTo(pDecoder, pTask->outputInfo.shuffleDispatcher.stbFullName) < 0) return -1;
   }
   if (tDecodeI64(pDecoder, &pTask->info.triggerParam) < 0) return -1;
+  if (tDecodeI8(pDecoder, &pTask->subtableWithoutMd5) < 0) return -1;
   if (tDecodeCStrTo(pDecoder, pTask->reserve) < 0) return -1;
 
   tEndDecode(pDecoder);
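
The new `subtableWithoutMd5` flag is written in `tEncodeStreamTask` and read back at the same position in `tDecodeStreamTask`, keeping the serialized task layout symmetric on both sides. A minimal sketch of that append-a-field pattern with a toy byte-buffer codec (not the real SEncoder/SDecoder API):

```c
#include <stdint.h>
#include <string.h>

typedef struct { uint8_t buf[256]; int pos; } Codec;   // toy encoder/decoder, illustration only

static void encodeI64(Codec* c, int64_t v)  { memcpy(c->buf + c->pos, &v, 8); c->pos += 8; }
static void decodeI64(Codec* c, int64_t* v) { memcpy(v, c->buf + c->pos, 8); c->pos += 8; }
static void encodeI8(Codec* c, int8_t v)    { c->buf[c->pos++] = (uint8_t)v; }
static void decodeI8(Codec* c, int8_t* v)   { *v = (int8_t)c->buf[c->pos++]; }

typedef struct { int64_t triggerParam; int8_t subtableWithoutMd5; } TaskLike;  // simplified task

int main(void) {
  TaskLike in = {.triggerParam = 5000, .subtableWithoutMd5 = 1}, out = {0};
  Codec c = {0};
  // Encode: existing field first, then the newly appended flag.
  encodeI64(&c, in.triggerParam);
  encodeI8(&c, in.subtableWithoutMd5);
  // Decode must consume the fields in exactly the same order.
  c.pos = 0;
  decodeI64(&c, &out.triggerParam);
  decodeI8(&c, &out.subtableWithoutMd5);
  return (out.triggerParam == in.triggerParam && out.subtableWithoutMd5 == in.subtableWithoutMd5) ? 0 : 1;
}
```
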
@@ -294,7 +295,6 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
 }
 
 int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo) {
   int64_t ver;
   int64_t skip64;
   int8_t  skip8;
   int32_t skip32;
@@ -482,7 +482,7 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i
 
   pTask->execInfo.created = taosGetTimestampMs();
   SCheckpointInfo* pChkInfo = &pTask->chkInfo;
-  SDataRange* pRange = &pTask->dataRange;
+  SDataRange* pRange = &pTask->dataRange;
 
   // only set the version info for stream tasks without fill-history task
   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
@@ -648,7 +648,7 @@ int32_t streamTaskStop(SStreamTask* pTask) {
 
   streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_STOP);
   qKillTask(pTask->exec.pExecutor, TSDB_CODE_SUCCESS);
-  while (/*pTask->status.schedStatus != TASK_SCHED_STATUS__INACTIVE */ !streamTaskIsIdle(pTask)) {
+  while (!streamTaskIsIdle(pTask)) {
     stDebug("s-task:%s level:%d wait for task to be idle and then close, check again in 100ms", id,
             pTask->info.taskLevel);
     taosMsleep(100);
@@ -755,9 +755,9 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask) {
   return status;
 }
 
-int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, bool metaLock) {
-  SStreamMeta* pMeta = pTask->pMeta;
-  STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
+int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt, bool metaLock) {
+  SStreamMeta* pMeta = pTask->pMeta;
+  STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId};
   if (pTask->info.fillHistory == 0) {
     return 0;
   }
@@ -773,6 +773,12 @@ int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, bool metaLock) {
 
     taosThreadMutexLock(&(*ppStreamTask)->lock);
     CLEAR_RELATED_FILLHISTORY_TASK((*ppStreamTask));
+
+    if (resetRelHalt) {
+      (*ppStreamTask)->status.taskStatus = TASK_STATUS__READY;
+      stDebug("s-task:0x%" PRIx64 " set the status to be ready", sTaskId.taskId);
+    }
+
     streamMetaSaveTask(pMeta, *ppStreamTask);
     taosThreadMutexUnlock(&(*ppStreamTask)->lock);
   }
@@ -784,7 +790,7 @@ int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, bool metaLock) {
   return TSDB_CODE_SUCCESS;
 }
 
-int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId) {
+int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskId* pTaskId, int64_t resetRelHalt) {
   SVDropStreamTaskReq* pReq = rpcMallocCont(sizeof(SVDropStreamTaskReq));
   if (pReq == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -794,6 +800,7 @@ int32_t streamBuildAndSendDropTaskMsg(SMsgCb* pMsgCb, int32_t vgId, SStreamTaskI
   pReq->head.vgId = vgId;
   pReq->taskId = pTaskId->taskId;
   pReq->streamId = pTaskId->streamId;
+  pReq->resetRelHalt = resetRelHalt;
 
   SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_DROP, .pCont = pReq, .contLen = sizeof(SVDropStreamTaskReq)};
   int32_t code = tmsgPutToQueue(pMsgCb, WRITE_QUEUE, &msg);
@@ -864,7 +871,7 @@ void streamTaskPause(SStreamMeta* pMeta, SStreamTask* pTask) {
 
 void streamTaskResume(SStreamTask* pTask) {
   SStreamTaskState prevState = *streamTaskGetStatus(pTask);
-  SStreamMeta* pMeta = pTask->pMeta;
+  SStreamMeta* pMeta = pTask->pMeta;
 
   if (prevState.state == TASK_STATUS__PAUSE || prevState.state == TASK_STATUS__HALT) {
     streamTaskRestoreStatus(pTask);
@@ -881,9 +888,7 @@ void streamTaskResume(SStreamTask* pTask) {
   }
 }
 
-bool streamTaskIsSinkTask(const SStreamTask* pTask) {
-  return pTask->info.taskLevel == TASK_LEVEL__SINK;
-}
+bool streamTaskIsSinkTask(const SStreamTask* pTask) { return pTask->info.taskLevel == TASK_LEVEL__SINK; }
 
 int32_t streamTaskSendCheckpointReq(SStreamTask* pTask) {
   int32_t code;
@@ -480,6 +480,15 @@ int32_t deleteRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t ke
   return TSDB_CODE_FAILED;
 }
 
+int32_t resetRowBuff(SStreamFileState* pFileState, const void* pKey, int32_t keyLen) {
+  int32_t code_buff = pFileState->stateBuffRemoveFn(pFileState->rowStateBuff, pKey, keyLen);
+  int32_t code_file = pFileState->stateFileRemoveFn(pFileState, pKey);
+  if (code_buff == TSDB_CODE_SUCCESS || code_file == TSDB_CODE_SUCCESS) {
+    return TSDB_CODE_SUCCESS;
+  }
+  return TSDB_CODE_FAILED;
+}
+
 static void recoverSessionRowBuff(SStreamFileState* pFileState, SRowBuffPos* pPos) {
   int32_t len = 0;
   void*   pBuff = NULL;
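
The new `resetRowBuff` removes a row key from both the in-memory state buffer and the backing file store, and reports success as long as either removal succeeded. A hedged standalone sketch of that "succeed if either side succeeds" pattern, with stub removers in place of the real `stateBuffRemoveFn` / `stateFileRemoveFn`:

```c
#include <stdio.h>

#define CODE_SUCCESS 0
#define CODE_FAILED  (-1)

// Stubs standing in for the buffer-level and file-level removers.
static int removeFromBuffer(const void* key, int keyLen) { (void)key; (void)keyLen; return CODE_SUCCESS; }
static int removeFromFile(const void* key)               { (void)key; return CODE_FAILED; }

// Mirrors the shape of resetRowBuff: try both stores, succeed if either did.
static int resetRowBuffSketch(const void* key, int keyLen) {
  int codeBuff = removeFromBuffer(key, keyLen);
  int codeFile = removeFromFile(key);
  if (codeBuff == CODE_SUCCESS || codeFile == CODE_SUCCESS) {
    return CODE_SUCCESS;
  }
  return CODE_FAILED;
}

int main(void) {
  int key = 42;
  printf("reset: %s\n", resetRowBuffSketch(&key, (int)sizeof(key)) == CODE_SUCCESS ? "ok" : "failed");
  return 0;
}
```
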
@@ -656,7 +665,7 @@ int32_t recoverSesssion(SStreamFileState* pFileState, int64_t ckId) {
     deleteExpiredCheckPoint(pFileState, mark);
   }
 
-  SStreamStateCur* pCur = streamStateSessionSeekToLast_rocksdb(pFileState->pFileStore);
+  SStreamStateCur* pCur = streamStateSessionSeekToLast_rocksdb(pFileState->pFileStore, INT64_MAX);
   if (pCur == NULL) {
     return -1;
   }
@@ -34,7 +34,7 @@ add_test(
   COMMAND streamUpdateTest
 )
 
-# add_test(
-#   NAME checkpointTest
-#   COMMAND checkpointTest
-# )
+add_test(
+  NAME checkpointTest
+  COMMAND checkpointTest
+)
@@ -140,11 +140,7 @@ void* rpcMallocCont(int64_t contLen) {
   return start + sizeof(STransMsgHead);
 }
 
-void rpcFreeCont(void* cont) {
-  if (cont == NULL) return;
-  taosMemoryFree((char*)cont - TRANS_MSG_OVERHEAD);
-  tTrace("rpc free cont:%p", (char*)cont - TRANS_MSG_OVERHEAD);
-}
+void rpcFreeCont(void* cont) { transFreeMsg(cont); }
 
 void* rpcReallocCont(void* ptr, int64_t contLen) {
   if (ptr == NULL) return rpcMallocCont(contLen);
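
`rpcFreeCont` now simply forwards to `transFreeMsg`, so the header-offset arithmetic lives in one place: the payload pointer handed to callers sits just past an internal message header, and freeing steps back over that header before releasing the allocation. A standalone sketch of that pattern, with a made-up `MsgHead` instead of `STransMsgHead`:

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int32_t contLen; int32_t flags; } MsgHead;   // stand-in header, not STransMsgHead

// Allocate header + payload, return a pointer to the payload only.
static void* mallocCont(int64_t contLen) {
  char* start = calloc(1, sizeof(MsgHead) + (size_t)contLen);
  if (start == NULL) return NULL;
  ((MsgHead*)start)->contLen = (int32_t)contLen;
  return start + sizeof(MsgHead);
}

// Free by stepping back to the real allocation start, like transFreeMsg does.
static void freeCont(void* cont) {
  if (cont == NULL) return;
  free((char*)cont - sizeof(MsgHead));
}

int main(void) {
  void* cont = mallocCont(128);
  if (cont != NULL) {
    memset(cont, 0, 128);   // the caller only ever sees the payload region
    freeCont(cont);
  }
  return 0;
}
```
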
@@ -218,7 +218,6 @@ static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq,
 /// static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease,
 /// NULL,cliHandleUpdate};
 
-static FORCE_INLINE void destroyUserdata(STransMsg* userdata);
 static FORCE_INLINE void destroyCmsg(void* cmsg);
 static FORCE_INLINE void destroyCmsgAndAhandle(void* cmsg);
 static FORCE_INLINE int cliRBChoseIdx(STrans* pTransInst);
@@ -962,6 +961,10 @@ static void cliSendCb(uv_write_t* req, int status) {
       tTrace("%s conn %p send cost:%dus ", CONN_GET_INST_LABEL(pConn), pConn, (int)cost);
     }
   }
+  if (pMsg->msg.contLen == 0 && pMsg->msg.pCont != 0) {
+    rpcFreeCont(pMsg->msg.pCont);
+    pMsg->msg.pCont = 0;
+  }
 
   if (status == 0) {
     tDebug("%s conn %p data already was written out", CONN_GET_INST_LABEL(pConn), pConn);
@@ -1950,14 +1953,6 @@ _err:
   return NULL;
 }
 
-static FORCE_INLINE void destroyUserdata(STransMsg* userdata) {
-  if (userdata->pCont == NULL) {
-    return;
-  }
-  transFreeMsg(userdata->pCont);
-  userdata->pCont = NULL;
-}
-
 static FORCE_INLINE void destroyCmsg(void* arg) {
   SCliMsg* pMsg = arg;
   if (pMsg == NULL) {
@@ -1965,7 +1960,7 @@ static FORCE_INLINE void destroyCmsg(void* arg) {
   }
 
   transDestroyConnCtx(pMsg->ctx);
-  destroyUserdata(&pMsg->msg);
+  transFreeMsg(pMsg->msg.pCont);
   taosMemoryFree(pMsg);
 }
 
@@ -1984,7 +1979,7 @@ static FORCE_INLINE void destroyCmsgAndAhandle(void* param) {
     tDebug("destroy Ahandle C");
 
   transDestroyConnCtx(pMsg->ctx);
-  destroyUserdata(&pMsg->msg);
+  transFreeMsg(pMsg->msg.pCont);
   taosMemoryFree(pMsg);
 }
 
@@ -87,6 +87,7 @@ void transFreeMsg(void* msg) {
   if (msg == NULL) {
     return;
   }
+  tTrace("rpc free cont:%p", (char*)msg - TRANS_MSG_OVERHEAD);
   taosMemoryFree((char*)msg - sizeof(STransMsgHead));
 }
 int transSockInfo2Str(struct sockaddr* sockname, char* dst) {
@@ -632,6 +632,11 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in
   return writeLen;
 }
 
+bool lastErrorIsFileNotExist() {
+  DWORD dwError = GetLastError();
+  return dwError == ERROR_FILE_NOT_FOUND;
+}
+
 #else
 int taosOpenFileNotStream(const char *path, int32_t tdFileOptions) {
   int access = O_BINARY;
@@ -1028,6 +1033,8 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in
 #endif
 }
 
+bool lastErrorIsFileNotExist() { return errno == ENOENT; }
+
 #endif // WINDOWS
 
 TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
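
`lastErrorIsFileNotExist` gives both platforms one predicate for "the file does not exist": `GetLastError() == ERROR_FILE_NOT_FOUND` on Windows and `errno == ENOENT` elsewhere, so call sites such as `taosCheckFileIsOpen` no longer test `errno` directly. A compact standalone sketch of the same idea (the helper name and test path below are illustrative, not TDengine APIs):

```c
#include <stdbool.h>
#include <stdio.h>

#ifdef _WIN32
#include <windows.h>
static bool fileNotExistError(void) { return GetLastError() == ERROR_FILE_NOT_FOUND; }
#else
#include <errno.h>
static bool fileNotExistError(void) { return errno == ENOENT; }
#endif

int main(void) {
  // Trigger a "not found" error by opening a file that should not exist.
  FILE* f = fopen("definitely-missing-file-12345.txt", "r");
  if (f == NULL && fileNotExistError()) {
    printf("missing file detected via the portable predicate\n");
  }
  if (f != NULL) fclose(f);
  return 0;
}
```
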
@@ -350,7 +350,7 @@ void taosResetLog() {
 static bool taosCheckFileIsOpen(char *logFileName) {
   TdFilePtr pFile = taosOpenFile(logFileName, TD_FILE_WRITE);
   if (pFile == NULL) {
-    if (errno == ENOENT) {
+    if (lastErrorIsFileNotExist()) {
       return false;
     } else {
       printf("\nfailed to open log file:%s, reason:%s\n", logFileName, strerror(errno));
@@ -18,6 +18,9 @@ from frame.autogen import *
 
 
 class TDTestCase(TBase):
+    updatecfgDict = {
+        'slowLogScope':"query"
+    }
 
     def init(self, conn, logSql, replicaVar=3):
         super(TDTestCase, self).init(conn, logSql, replicaVar=3, db="snapshot", checkColName="c1")
@@ -33,7 +33,8 @@ class TDTestCase(TBase):
         "lossyColumns" : "float,double",
         "fPrecision" : "0.000000001",
         "dPrecision" : "0.00000000000000001",
-        "ifAdtFse" : "1"
+        "ifAdtFse" : "1",
+        'slowLogScope' : "insert"
     }
 
     def insertData(self):
@@ -56,7 +57,7 @@ class TDTestCase(TBase):
         sql = f"select * from {self.db}.{self.stb} where fc!=100"
         tdSql.query(sql)
         tdSql.checkRows(0)
-        sql = f"select count(*) from {self.db}.{self.stb} where dc!=200"
+        sql = f"select * from {self.db}.{self.stb} where dc!=200"
         tdSql.query(sql)
         tdSql.checkRows(0)
         sql = f"select avg(fc) from {self.db}.{self.stb}"
@@ -31,6 +31,9 @@ from frame.srvCtl import *
 
 
 class TDTestCase(TBase):
+    updatecfgDict = {
+        'slowLogScope' : "others"
+    }
 
     def init(self, conn, logSql, replicaVar=1):
         tdLog.debug(f"start to init {__file__}")
@@ -31,10 +31,10 @@ class TDTestCase(TBase):
         'queryMaxConcurrentTables': '2K',
-        'streamMax': '1M',
         'totalMemoryKB': '1G',
-        #'rpcQueueMemoryAllowed': '1T',
-        #'mndLogRetention': '1P',
-        'streamBufferSize':'2G'
-    }
+        'streamMax': '1P',
+        'streamBufferSize':'1T',
+        'slowLogScope':"query"
+    }
 
     def insertData(self):
         tdLog.info(f"insert data.")
@@ -47,8 +47,40 @@ class TDTestCase(TBase):
         # taosBenchmark run
         etool.benchMark(command = f"-d {self.db} -t {self.childtable_count} -n {self.insert_rows} -v 2 -y")
 
+    def checkQueryOK(self, rets):
+        if rets[-2][:9] != "Query OK,":
+            tdLog.exit(f"check taos -s return unecpect: {rets}")
+
+    def doTaos(self):
+        tdLog.info(f"check taos command options...")
+
+        # local command
+        options = [
+            "DebugFlag 143",
+            "enableCoreFile 1",
+            "fqdn 127.0.0.1",
+            "firstEp 127.0.0.1",
+            "locale ENG",
+            "metaCacheMaxSize 10000",
+            "minimalTmpDirGB 5",
+            "minimalLogDirGB 1",
+            "secondEp 127.0.0.2",
+            "smlChildTableName smltbname",
+            "smlAutoChildTableNameDelimiter autochild",
+            "smlTagName tagname",
+            "smlTsDefaultName tsdef",
+            "serverPort 6030",
+            "slowLogScope insert",
+            "timezone tz",
+            "tempDir /var/tmp"
+        ]
+        # exec
+        for option in options:
+            rets = etool.runBinFile("taos", f"-s \"alter local '{option}'\";")
+            self.checkQueryOK(rets)
+        # error
+        etool.runBinFile("taos", f"-s \"alter local 'nocmd check'\";")
+
+        # help
+        rets = etool.runBinFile("taos", "--help")
+        self.checkListNotEmpty(rets)
Some files were not shown because too many files have changed in this diff.