Merge branch '3.0' into fix/TD-18445
This commit is contained in:
commit e33be23e10
@@ -1,12 +0,0 @@
# rust-bindings
ExternalProject_Add(rust-bindings
    GIT_REPOSITORY https://github.com/songtianyi/tdengine-rust-bindings.git
    GIT_TAG 7ed7a97
    SOURCE_DIR "${TD_SOURCE_DIR}/examples/rust"
    BINARY_DIR "${TD_SOURCE_DIR}/examples/rust"
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    TEST_COMMAND ""
)
@@ -105,11 +105,6 @@ if(${BUILD_WITH_SQLITE})
  cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})

# rust-bindings
if(${RUST_BINDINGS})
  cat("${TD_SUPPORT_DIR}/rust-bindings_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${RUST_BINDINGS})

# lucene
if(${BUILD_WITH_LUCENE})
  cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -0,0 +1,275 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <time.h>
|
||||
#include "taos.h"
|
||||
|
||||
static int running = 1;
|
||||
static char dbName[64] = "tmqdb";
|
||||
static char stbName[64] = "stb";
|
||||
static char topicName[64] = "topicname";
|
||||
|
||||
static int32_t msg_process(TAOS_RES* msg) {
|
||||
char buf[1024];
|
||||
int32_t rows = 0;
|
||||
|
||||
const char* topicName = tmq_get_topic_name(msg);
|
||||
const char* dbName = tmq_get_db_name(msg);
|
||||
int32_t vgroupId = tmq_get_vgroup_id(msg);
|
||||
|
||||
printf("topic: %s\n", topicName);
|
||||
printf("db: %s\n", dbName);
|
||||
printf("vgroup id: %d\n", vgroupId);
|
||||
|
||||
while (1) {
|
||||
TAOS_ROW row = taos_fetch_row(msg);
|
||||
if (row == NULL) break;
|
||||
|
||||
TAOS_FIELD* fields = taos_fetch_fields(msg);
|
||||
int32_t numOfFields = taos_field_count(msg);
|
||||
int32_t* length = taos_fetch_lengths(msg);
|
||||
int32_t precision = taos_result_precision(msg);
|
||||
rows++;
|
||||
taos_print_row(buf, row, fields, numOfFields);
|
||||
printf("row content: %s\n", buf);
|
||||
}
|
||||
|
||||
return rows;
|
||||
}
|
||||
|
||||
static int32_t init_env() {
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
if (pConn == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
TAOS_RES* pRes;
|
||||
// drop database if exists
|
||||
printf("create database\n");
|
||||
pRes = taos_query(pConn, "drop database if exists tmqdb");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
// create database
|
||||
pRes = taos_query(pConn, "create database tmqdb");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
// create super table
|
||||
printf("create super table\n");
|
||||
pRes = taos_query(
|
||||
pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
// create sub tables
|
||||
printf("create sub tables\n");
|
||||
pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table ctb1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
// insert data
|
||||
printf("insert data into sub tables\n");
|
||||
pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
taos_close(pConn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t create_topic() {
|
||||
printf("create topic\n");
|
||||
TAOS_RES* pRes;
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
if (pConn == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
pRes = taos_query(pConn, "use tmqdb");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
taos_close(pConn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
|
||||
printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param);
|
||||
}
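// build_consumer: create a tmq_conf_t, set the consumer options (auto commit,
// commit interval, group id, client id, connection user/password, offset reset,
// snapshot switch), register tmq_commit_cb_print as the commit callback, and
// return the consumer built from that configuration.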
|
||||
|
||||
tmq_t* build_consumer() {
|
||||
tmq_conf_res_t code;
|
||||
tmq_conf_t* conf = tmq_conf_new();
|
||||
code = tmq_conf_set(conf, "enable.auto.commit", "true");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "group.id", "cgrpName");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "client.id", "user defined name");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "td.connect.user", "root");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
|
||||
if (TMQ_CONF_OK != code) return NULL;
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
|
||||
tmq_conf_destroy(conf);
|
||||
return tmq;
|
||||
}
|
||||
|
||||
tmq_list_t* build_topic_list() {
|
||||
tmq_list_t* topicList = tmq_list_new();
|
||||
int32_t code = tmq_list_append(topicList, "topicname");
|
||||
if (code) {
|
||||
return NULL;
|
||||
}
|
||||
return topicList;
|
||||
}
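// basic_consume_loop: poll until no message arrives within the timeout below,
// processing and freeing each message while counting messages and rows.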
|
||||
|
||||
void basic_consume_loop(tmq_t* tmq) {
|
||||
int32_t totalRows = 0;
|
||||
int32_t msgCnt = 0;
|
||||
int32_t timeout = 5000;
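// poll timeout in milliseconds; tmq_consumer_poll() returning NULL after this
// window is what ends the loop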
|
||||
while (running) {
|
||||
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
|
||||
if (tmqmsg) {
|
||||
msgCnt++;
|
||||
totalRows += msg_process(tmqmsg);
|
||||
taos_free_result(tmqmsg);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
int32_t code;
|
||||
|
||||
if (init_env() < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (create_topic() < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
tmq_t* tmq = build_consumer();
|
||||
if (NULL == tmq) {
|
||||
fprintf(stderr, "%% build_consumer() fail!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
tmq_list_t* topic_list = build_topic_list();
|
||||
if (NULL == topic_list) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if ((code = tmq_subscribe(tmq, topic_list))) {
|
||||
fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
|
||||
}
|
||||
tmq_list_destroy(topic_list);
|
||||
|
||||
basic_consume_loop(tmq);
|
||||
|
||||
code = tmq_consumer_close(tmq);
|
||||
if (code) {
|
||||
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
|
||||
} else {
|
||||
fprintf(stderr, "%% Consumer closed\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@@ -724,7 +724,6 @@ consumer.close();

</TabItem>

<TabItem value="Go" label="Go">

```go
@@ -769,6 +768,7 @@ consumer.Unsubscribe();
// Close the consumer
consumer.Close();
```

</TabItem>

</Tabs>
@@ -809,7 +809,7 @@ SHOW SUBSCRIPTIONS;
<Tabs defaultValue="java" groupId="lang">

<TabItem label="C" value="c">
<CDemo>
<CDemo />
</TabItem>

<TabItem label="Java" value="java">
@@ -1,3 +1,3 @@
```c
{{#include docs/examples/c/tmq-example.c}}
{{#include docs/examples/c/tmq_example.c}}
```
@@ -25,7 +25,7 @@ How to install taosKeeper:
<!-- taosKeeper must be run from an operating-system terminal. It supports two configuration methods: [command-line arguments](#命令行参数启动) and a [configuration file](#配置文件启动). Command-line arguments take precedence over configuration-file parameters. -->
taosKeeper must be run from an operating-system terminal. It supports [starting from a configuration file](#配置文件启动).

**Before running taosKeeper, make sure the TDengine cluster and taosAdapter are already running correctly.**
**Before running taosKeeper, make sure the TDengine cluster and taosAdapter are already running correctly.** TDengine must also have its monitoring service enabled; see [TDengine monitoring configuration](../config/#监控相关) for details.

<!--
### Starting with command-line arguments
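The monitoring switch mentioned above is set in the TDengine server's `taos.cfg`. The following is only a rough sketch, assuming the standard `monitor*` parameters; the linked monitoring configuration page is the authoritative reference for the exact names, defaults, and the port taosKeeper listens on.

```
# taos.cfg (sketch, verify against the monitoring configuration page)
monitor          1           # enable the monitoring service
monitorFqdn      localhost   # host where taosKeeper runs
monitorPort      6043        # taosKeeper listen port (assumed default)
monitorInterval  30          # reporting interval in seconds
```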
@@ -93,7 +93,7 @@ taosKeeper, as the export tool for TDengine monitoring metrics, can

```shell
$ taos
#
# As in the example above, the log database is used to store the monitoring data
> use log;
> select * from cluster_info limit 1;
```
@@ -1,4 +0,0 @@
---
sidebar_label: taosX
title: Replicating Data Between Clusters with taosX
---
@@ -193,7 +193,7 @@ docker run -d \

As shown in the figure above, select the `TDengine` data source under Query, then enter the SQL to run in the query box below. Details:

- INPUT SQL: the statement to query (its result set should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where from, to, and interval are built-in variables of the TDengine plugin that carry the query range and time interval taken from the Grafana panel. Besides the built-in variables, custom template variables can also be used.
- INPUT SQL: the statement to query (its result set should be two columns and multiple rows), for example: `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`, where from, to, and interval are built-in variables of the TDengine plugin that carry the query range and time interval taken from the Grafana panel. Besides the built-in variables, custom template variables can also be used.
- ALIAS BY: set an alias for the current query.
- GENERATE SQL: click this button to substitute the variables automatically and generate the final statement that will be executed.
@@ -205,7 +205,11 @@ docker run -d \

### Importing a Dashboard

On the data source configuration page, you can import the TDinsight dashboard for this data source and use it as a monitoring visualization tool for your TDengine cluster. This dashboard has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). For other installation methods and usage instructions, see the [TDinsight user manual](/reference/tdinsight/).
On the data source configuration page, you can import the TDinsight dashboard for this data source and use it as a monitoring visualization tool for your TDengine cluster. If the TDengine server is version 3.0, choose `TDinsight for 3.x` when importing.



The dashboard adapted to TDengine 2.* has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). For other installation methods and usage instructions, see the [TDinsight user manual](/reference/tdinsight/).

Other dashboards that use TDengine as their data source can be [searched for here](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). The following is a non-exhaustive list:
Binary file not shown (added image, 6.3 KiB).
@@ -98,10 +98,9 @@ int32_t create_stream() {
  /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
  /*const char* sql = "select sum(k) from tu1 interval(10m)";*/
  /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
  pRes =
      taos_query(pConn,
                 "create stream stream1 trigger max_delay 10s into outstb as select _wstart, sum(k) from st1 partition "
                 "by tbname session(ts, 10s) ");
  pRes = taos_query(pConn,
                    "create stream stream1 trigger max_delay 10s watermark 10s into outstb as select _wstart start, "
                    "count(k) from st1 partition by tbname interval(20s) ");
  if (taos_errno(pRes) != 0) {
    printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
    return -1;
@@ -0,0 +1,2 @@
/target
Cargo.lock
@@ -0,0 +1,18 @@
[package]
name = "rust"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
taos = "*"

[dev-dependencies]
chrono = "0.4"
itertools = "0.10.3"
pretty_env_logger = "0.4.0"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1", features = ["full"] }
anyhow = "1"
@@ -0,0 +1,80 @@
|
|||
use anyhow::Result;
|
||||
use serde::Deserialize;
|
||||
use taos::*;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
|
||||
taos.exec_many([
|
||||
"drop database if exists test",
|
||||
"create database test keep 36500",
|
||||
"use test",
|
||||
"create table tb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint,
|
||||
c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned,
|
||||
c10 float, c11 double, c12 varchar(100), c13 nchar(100)) tags(t1 varchar(100))",
|
||||
])
|
||||
.await?;
|
||||
let mut stmt = Stmt::init(&taos)?;
|
||||
stmt.prepare(
|
||||
"insert into ? using tb1 tags(?) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
)?;
|
||||
stmt.set_tbname("d0")?;
|
||||
stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
|
||||
|
||||
let params = vec![
|
||||
ColumnView::from_millis_timestamp(vec![164000000000]),
|
||||
ColumnView::from_bools(vec![true]),
|
||||
ColumnView::from_tiny_ints(vec![i8::MAX]),
|
||||
ColumnView::from_small_ints(vec![i16::MAX]),
|
||||
ColumnView::from_ints(vec![i32::MAX]),
|
||||
ColumnView::from_big_ints(vec![i64::MAX]),
|
||||
ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
|
||||
ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
|
||||
ColumnView::from_unsigned_ints(vec![u32::MAX]),
|
||||
ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
|
||||
ColumnView::from_floats(vec![f32::MAX]),
|
||||
ColumnView::from_doubles(vec![f64::MAX]),
|
||||
ColumnView::from_varchar(vec!["ABC"]),
|
||||
ColumnView::from_nchar(vec!["涛思数据"]),
|
||||
];
|
||||
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
|
||||
assert_eq!(rows, 1);
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
#[allow(dead_code)]
|
||||
struct Row {
|
||||
ts: String,
|
||||
c1: bool,
|
||||
c2: i8,
|
||||
c3: i16,
|
||||
c4: i32,
|
||||
c5: i64,
|
||||
c6: u8,
|
||||
c7: u16,
|
||||
c8: u32,
|
||||
c9: u64,
|
||||
c10: Option<f32>,
|
||||
c11: f64,
|
||||
c12: String,
|
||||
c13: String,
|
||||
t1: serde_json::Value,
|
||||
}
|
||||
|
||||
let rows: Vec<Row> = taos
|
||||
.query("select * from tb1")
|
||||
.await?
|
||||
.deserialize()
|
||||
.try_collect()
|
||||
.await?;
|
||||
let row = &rows[0];
|
||||
dbg!(&row);
|
||||
assert_eq!(row.c5, i64::MAX);
|
||||
assert_eq!(row.c8, u32::MAX);
|
||||
assert_eq!(row.c9, u64::MAX);
|
||||
assert_eq!(row.c10.unwrap(), f32::MAX);
|
||||
// assert_eq!(row.c11, f64::MAX);
|
||||
assert_eq!(row.c12, "ABC");
|
||||
assert_eq!(row.c13, "涛思数据");
|
||||
|
||||
Ok(())
|
||||
}
|
|
@@ -0,0 +1,74 @@
|
|||
use anyhow::Result;
|
||||
use serde::Deserialize;
|
||||
use taos::*;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
|
||||
taos.exec_many([
|
||||
"drop database if exists test_bindable",
|
||||
"create database test_bindable keep 36500",
|
||||
"use test_bindable",
|
||||
"create table tb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint,
|
||||
c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned,
|
||||
c10 float, c11 double, c12 varchar(100), c13 nchar(100))",
|
||||
])
|
||||
.await?;
|
||||
let mut stmt = Stmt::init(&taos)?;
|
||||
stmt.prepare("insert into tb1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")?;
|
||||
let params = vec![
|
||||
ColumnView::from_millis_timestamp(vec![0]),
|
||||
ColumnView::from_bools(vec![true]),
|
||||
ColumnView::from_tiny_ints(vec![i8::MAX]),
|
||||
ColumnView::from_small_ints(vec![i16::MAX]),
|
||||
ColumnView::from_ints(vec![i32::MAX]),
|
||||
ColumnView::from_big_ints(vec![i64::MAX]),
|
||||
ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
|
||||
ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
|
||||
ColumnView::from_unsigned_ints(vec![u32::MAX]),
|
||||
ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
|
||||
ColumnView::from_floats(vec![f32::MAX]),
|
||||
ColumnView::from_doubles(vec![f64::MAX]),
|
||||
ColumnView::from_varchar(vec!["ABC"]),
|
||||
ColumnView::from_nchar(vec!["涛思数据"]),
|
||||
];
|
||||
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
|
||||
assert_eq!(rows, 1);
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
#[allow(dead_code)]
|
||||
struct Row {
|
||||
ts: String,
|
||||
c1: bool,
|
||||
c2: i8,
|
||||
c3: i16,
|
||||
c4: i32,
|
||||
c5: i64,
|
||||
c6: u8,
|
||||
c7: u16,
|
||||
c8: u32,
|
||||
c9: u64,
|
||||
c10: Option<f32>,
|
||||
c11: f64,
|
||||
c12: String,
|
||||
c13: String,
|
||||
}
|
||||
|
||||
let rows: Vec<Row> = taos
|
||||
.query("select * from tb1")
|
||||
.await?
|
||||
.deserialize()
|
||||
.try_collect()
|
||||
.await?;
|
||||
let row = &rows[0];
|
||||
dbg!(&row);
|
||||
assert_eq!(row.c5, i64::MAX);
|
||||
assert_eq!(row.c8, u32::MAX);
|
||||
assert_eq!(row.c9, u64::MAX);
|
||||
assert_eq!(row.c10.unwrap(), f32::MAX);
|
||||
// assert_eq!(row.c11, f64::MAX);
|
||||
assert_eq!(row.c12, "ABC");
|
||||
assert_eq!(row.c13, "涛思数据");
|
||||
|
||||
Ok(())
|
||||
}
|
|
@@ -0,0 +1,106 @@
|
|||
use std::time::Duration;
|
||||
|
||||
use chrono::{DateTime, Local};
|
||||
use taos::*;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let dsn = "taos://";
|
||||
|
||||
let opts = PoolBuilder::new()
|
||||
.max_size(5000) // max connections
|
||||
.max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
|
||||
.min_idle(Some(1000)) // minimal idle connections
|
||||
.connection_timeout(Duration::from_secs(2));
|
||||
|
||||
let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
|
||||
|
||||
let taos = pool.get()?;
|
||||
|
||||
let db = "query";
|
||||
|
||||
// prepare database
|
||||
taos.exec_many([
|
||||
format!("DROP DATABASE IF EXISTS `{db}`"),
|
||||
format!("CREATE DATABASE `{db}`"),
|
||||
format!("USE `{db}`"),
|
||||
])
|
||||
.await?;
|
||||
|
||||
let inserted = taos.exec_many([
|
||||
// create super table
|
||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))",
|
||||
// create child table
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
||||
// insert into child table
|
||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||
// insert with NULL values
|
||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||
// insert and automatically create table with tags if not exists
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
// insert many records in a single sql
|
||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||
]).await?;
|
||||
|
||||
assert_eq!(inserted, 6);
|
||||
loop {
|
||||
let count: usize = taos
|
||||
.query_one("select count(*) from `meters`")
|
||||
.await?
|
||||
.unwrap_or_default();
|
||||
|
||||
if count >= 6 {
|
||||
break;
|
||||
} else {
|
||||
println!("waiting for data");
|
||||
}
|
||||
}
|
||||
|
||||
let mut result = taos.query("select tbname, * from `meters`").await?;
|
||||
|
||||
for field in result.fields() {
|
||||
println!("got field: {}", field.name());
|
||||
}
|
||||
|
||||
// Query option 1, use rows stream.
|
||||
let mut rows = result.rows();
|
||||
let mut nrows = 0;
|
||||
while let Some(row) = rows.try_next().await? {
|
||||
for (col, (name, value)) in row.enumerate() {
|
||||
println!(
|
||||
"[{}] got value in col {} (named `{:>8}`): {}",
|
||||
nrows, col, name, value
|
||||
);
|
||||
}
|
||||
nrows += 1;
|
||||
}
|
||||
|
||||
// Query options 2, use deserialization with serde.
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
#[allow(dead_code)]
|
||||
struct Record {
|
||||
tbname: String,
|
||||
// deserialize timestamp to chrono::DateTime<Local>
|
||||
ts: DateTime<Local>,
|
||||
// float to f32
|
||||
current: Option<f32>,
|
||||
// int to i32
|
||||
voltage: Option<i32>,
|
||||
phase: Option<f32>,
|
||||
groupid: i32,
|
||||
// binary/varchar to String
|
||||
location: String,
|
||||
}
|
||||
|
||||
let records: Vec<Record> = taos
|
||||
.query("select tbname, * from `meters`")
|
||||
.await?
|
||||
.deserialize()
|
||||
.try_collect()
|
||||
.await?;
|
||||
|
||||
dbg!(result.summary());
|
||||
assert_eq!(records.len(), 6);
|
||||
dbg!(records);
|
||||
Ok(())
|
||||
}
|
|
@@ -0,0 +1,103 @@
|
|||
use std::time::Duration;
|
||||
|
||||
use chrono::{DateTime, Local};
|
||||
use taos::*;
|
||||
|
||||
// Query options 2, use deserialization with serde.
|
||||
#[derive(Debug, serde::Deserialize)]
|
||||
#[allow(dead_code)]
|
||||
struct Record {
|
||||
// deserialize timestamp to chrono::DateTime<Local>
|
||||
ts: DateTime<Local>,
|
||||
// float to f32
|
||||
current: Option<f32>,
|
||||
// int to i32
|
||||
voltage: Option<i32>,
|
||||
phase: Option<f32>,
|
||||
}
|
||||
|
||||
async fn prepare(taos: Taos) -> anyhow::Result<()> {
|
||||
let inserted = taos.exec_many([
|
||||
// create child table
|
||||
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
|
||||
// insert into child table
|
||||
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
|
||||
// insert with NULL values
|
||||
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
|
||||
// insert and automatically create table with tags if not exists
|
||||
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
|
||||
// insert many records in a single sql
|
||||
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
|
||||
]).await?;
|
||||
assert_eq!(inserted, 6);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
// std::env::set_var("RUST_LOG", "debug");
|
||||
pretty_env_logger::init();
|
||||
let dsn = "taos://localhost:6030";
|
||||
let builder = TaosBuilder::from_dsn(dsn)?;
|
||||
|
||||
let taos = builder.build()?;
|
||||
let db = "tmq";
|
||||
|
||||
// prepare database
|
||||
taos.exec_many([
|
||||
"DROP TOPIC IF EXISTS tmq_meters".to_string(),
|
||||
format!("DROP DATABASE IF EXISTS `{db}`"),
|
||||
format!("CREATE DATABASE `{db}`"),
|
||||
format!("USE `{db}`"),
|
||||
// create super table
|
||||
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))".to_string(),
|
||||
// create topic for subscription
|
||||
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
|
||||
])
|
||||
.await?;
|
||||
|
||||
let task = tokio::spawn(prepare(taos));
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// subscribe
|
||||
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
|
||||
|
||||
let mut consumer = tmq.build()?;
|
||||
consumer.subscribe(["tmq_meters"]).await?;
|
||||
|
||||
{
|
||||
let mut stream = consumer.stream();
|
||||
|
||||
while let Some((offset, message)) = stream.try_next().await? {
|
||||
// get information from offset
|
||||
|
||||
// the topic
|
||||
let topic = offset.topic();
|
||||
// the vgroup id, like partition id in kafka.
|
||||
let vgroup_id = offset.vgroup_id();
|
||||
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
|
||||
|
||||
if let Some(data) = message.into_data() {
|
||||
while let Some(block) = data.fetch_raw_block().await? {
|
||||
// one block for one table, get table name if needed
|
||||
let name = block.table_name();
|
||||
let records: Vec<Record> = block.deserialize().try_collect()?;
|
||||
println!(
|
||||
"** table: {}, got {} records: {:#?}\n",
|
||||
name.unwrap(),
|
||||
records.len(),
|
||||
records
|
||||
);
|
||||
}
|
||||
}
|
||||
consumer.commit(offset).await?;
|
||||
}
|
||||
}
|
||||
|
||||
consumer.unsubscribe().await;
|
||||
|
||||
task.await??;
|
||||
|
||||
Ok(())
|
||||
}
|
|
@@ -0,0 +1,3 @@
fn main() {
    println!("Hello, world!");
}
@@ -60,6 +60,7 @@ enum {
|
|||
STREAM_INPUT__DATA_RETRIEVE,
|
||||
STREAM_INPUT__GET_RES,
|
||||
STREAM_INPUT__CHECKPOINT,
|
||||
STREAM_INPUT__DESTROY,
|
||||
};
|
||||
|
||||
typedef enum EStreamType {
|
||||
|
|
|
@@ -53,6 +53,7 @@ enum {
|
|||
TASK_SCHED_STATUS__WAITING,
|
||||
TASK_SCHED_STATUS__ACTIVE,
|
||||
TASK_SCHED_STATUS__FAILED,
|
||||
TASK_SCHED_STATUS__DROPPING,
|
||||
};
|
||||
|
||||
enum {
|
||||
|
@@ -127,6 +128,10 @@ typedef struct {
|
|||
int8_t type;
|
||||
} SStreamCheckpoint;
|
||||
|
||||
typedef struct {
|
||||
int8_t type;
|
||||
} SStreamTaskDestroy;
|
||||
|
||||
typedef struct {
|
||||
int8_t type;
|
||||
SSDataBlock* pBlock;
|
||||
|
@@ -211,7 +216,6 @@ typedef struct {
|
|||
void* vnode;
|
||||
FTbSink* tbSinkFunc;
|
||||
STSchema* pTSchema;
|
||||
SHashObj* pHash; // groupId to tbuid
|
||||
} STaskSinkTb;
|
||||
|
||||
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
|
||||
|
|
|
@@ -30,7 +30,10 @@ extern bool gRaftDetailLog;
|
|||
#define SYNC_SPEED_UP_HB_TIMER 400
|
||||
#define SYNC_SPEED_UP_AFTER_MS (1000 * 20)
|
||||
#define SYNC_SLOW_DOWN_RANGE 100
|
||||
#define SYNC_MAX_READ_RANGE 10
|
||||
#define SYNC_MAX_READ_RANGE 2
|
||||
#define SYNC_MAX_PROGRESS_WAIT_MS 4000
|
||||
#define SYNC_MAX_START_TIME_RANGE_MS (1000 * 20)
|
||||
#define SYNC_MAX_RECV_TIME_RANGE_MS 1000
|
||||
|
||||
#define SYNC_MAX_BATCH_SIZE 1
|
||||
#define SYNC_INDEX_BEGIN 0
|
||||
|
|
|
@@ -423,6 +423,7 @@ typedef struct SyncAppendEntriesReply {
|
|||
SyncTerm privateTerm;
|
||||
bool success;
|
||||
SyncIndex matchIndex;
|
||||
int64_t startTime;
|
||||
} SyncAppendEntriesReply;
|
||||
|
||||
SyncAppendEntriesReply* syncAppendEntriesReplyBuild(int32_t vgId);
|
||||
|
|
|
@@ -291,6 +291,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_MND_STREAM_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F1)
|
||||
#define TSDB_CODE_MND_INVALID_STREAM_OPTION TAOS_DEF_ERROR_CODE(0, 0x03F2)
|
||||
#define TSDB_CODE_MND_STREAM_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03F3)
|
||||
#define TSDB_CODE_MND_STREAM_TASK_DROPPED TAOS_DEF_ERROR_CODE(0, 0x03F4)
|
||||
|
||||
// mnode-sma
|
||||
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
|
||||
|
|
|
@@ -1343,12 +1343,14 @@ SSDataBlock* createDataBlock() {
|
|||
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
|
||||
if (pBlock == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pBlock->pDataBlock = taosArrayInit(4, sizeof(SColumnInfoData));
|
||||
if (pBlock->pDataBlock == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
taosMemoryFree(pBlock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return pBlock;
|
||||
|
@@ -1423,6 +1425,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
|
|||
}
|
||||
|
||||
void colDataDestroy(SColumnInfoData* pColData) {
|
||||
if(!pColData) return;
|
||||
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
|
||||
taosMemoryFreeClear(pColData->varmeta.offset);
|
||||
} else {
|
||||
|
|
|
@@ -424,6 +424,8 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
|
|||
}
|
||||
mndAddTaskToTaskSet(taskSourceLevel, pTask);
|
||||
|
||||
pTask->triggerParam = 0;
|
||||
|
||||
// source
|
||||
pTask->taskLevel = TASK_LEVEL__SOURCE;
|
||||
|
||||
|
|
|
@@ -1145,7 +1145,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t suid, col_id_t colId) {
|
||||
static int32_t mndCheckAlterColForTopic(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
void *pIter = NULL;
|
||||
while (1) {
|
||||
|
@@ -1154,7 +1154,7 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
|
|||
if (pIter == NULL) break;
|
||||
|
||||
mDebug("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s",
|
||||
pTopic->name, stbname, suid, colId, pTopic->subType, pTopic->sql);
|
||||
pTopic->name, stbFullName, suid, colId, pTopic->subType, pTopic->sql);
|
||||
if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
|
||||
sdbRelease(pSdb, pTopic);
|
||||
continue;
|
||||
|
@@ -1192,20 +1192,66 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
|
|||
sdbRelease(pSdb, pTopic);
|
||||
nodesDestroyNode(pAst);
|
||||
}
|
||||
return 0;
|
||||
}
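// mndCheckAlterColForStream: reject the change when the query of an existing
// stream on this super table references the column or tag being altered.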
|
||||
|
||||
static int32_t mndCheckAlterColForStream(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
void *pIter = NULL;
|
||||
while (1) {
|
||||
SStreamObj *pStream = NULL;
|
||||
pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
SNode *pAst = NULL;
|
||||
if (nodesStringToNode(pStream->ast, &pAst) != 0) {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
|
||||
SNodeList *pNodeList = NULL;
|
||||
nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
|
||||
SNode *pNode = NULL;
|
||||
FOREACH(pNode, pNodeList) {
|
||||
SColumnNode *pCol = (SColumnNode *)pNode;
|
||||
|
||||
if (pCol->tableId != suid) {
|
||||
mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
|
||||
goto NEXT;
|
||||
}
|
||||
if (pCol->colId > 0 && pCol->colId == colId) {
|
||||
sdbRelease(pSdb, pStream);
|
||||
nodesDestroyNode(pAst);
|
||||
terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
|
||||
mError("stream:%s, check colId:%d conflicted", pStream->name, pCol->colId);
|
||||
return -1;
|
||||
}
|
||||
mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
|
||||
}
|
||||
|
||||
NEXT:
|
||||
sdbRelease(pSdb, pStream);
|
||||
nodesDestroyNode(pAst);
|
||||
}
|
||||
return 0;
|
||||
}
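// mndCheckAlterColForTSma: likewise reject the change when a time-range SMA
// built on this super table references the column or tag being altered.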
|
||||
|
||||
static int32_t mndCheckAlterColForTSma(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
void *pIter = NULL;
|
||||
while (1) {
|
||||
SSmaObj *pSma = NULL;
|
||||
pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name, stbname,
|
||||
suid, colId, pSma->sql);
|
||||
mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name,
|
||||
stbFullName, suid, colId, pSma->sql);
|
||||
|
||||
SNode *pAst = NULL;
|
||||
if (nodesStringToNode(pSma->ast, &pAst) != 0) {
|
||||
terrno = TSDB_CODE_SDB_INVALID_DATA_CONTENT;
|
||||
mError("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d failed since parse AST err",
|
||||
pSma->name, stbname, suid, colId);
|
||||
pSma->name, stbFullName, suid, colId);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@@ -1218,7 +1264,7 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
|
|||
|
||||
if ((pCol->tableId != suid) && (pSma->stbUid != suid)) {
|
||||
mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
|
||||
goto NEXT2;
|
||||
goto NEXT;
|
||||
}
|
||||
if ((pCol->colId) > 0 && (pCol->colId == colId)) {
|
||||
sdbRelease(pSdb, pSma);
|
||||
|
@@ -1230,11 +1276,24 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
|
|||
mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
|
||||
}
|
||||
|
||||
NEXT2:
|
||||
NEXT:
|
||||
sdbRelease(pSdb, pSma);
|
||||
nodesDestroyNode(pAst);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
|
||||
if (mndCheckAlterColForTopic(pMnode, stbFullName, suid, colId) < 0) {
|
||||
return -1;
|
||||
}
|
||||
if (mndCheckAlterColForStream(pMnode, stbFullName, suid, colId) < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (mndCheckAlterColForTSma(pMnode, stbFullName, suid, colId) < 0) {
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -1930,6 +1989,98 @@ _OVER:
|
|||
return code;
|
||||
}
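// mndCheckDropStbForTopic: a super table cannot be dropped while a topic still
// subscribes to it, either directly (TOPIC_SUB_TYPE__TABLE) or through a column
// subscription whose query references it.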
|
||||
|
||||
static int32_t mndCheckDropStbForTopic(SMnode *pMnode, const char *stbFullName, int64_t suid) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
void *pIter = NULL;
|
||||
while (1) {
|
||||
SMqTopicObj *pTopic = NULL;
|
||||
pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) {
|
||||
if (pTopic->stbUid == suid) {
|
||||
sdbRelease(pSdb, pTopic);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
|
||||
sdbRelease(pSdb, pTopic);
|
||||
continue;
|
||||
}
|
||||
|
||||
SNode *pAst = NULL;
|
||||
if (nodesStringToNode(pTopic->ast, &pAst) != 0) {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
|
||||
SNodeList *pNodeList = NULL;
|
||||
nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
|
||||
SNode *pNode = NULL;
|
||||
FOREACH(pNode, pNodeList) {
|
||||
SColumnNode *pCol = (SColumnNode *)pNode;
|
||||
|
||||
if (pCol->tableId == suid) {
|
||||
sdbRelease(pSdb, pTopic);
|
||||
nodesDestroyNode(pAst);
|
||||
return -1;
|
||||
} else {
|
||||
goto NEXT;
|
||||
}
|
||||
}
|
||||
NEXT:
|
||||
sdbRelease(pSdb, pTopic);
|
||||
nodesDestroyNode(pAst);
|
||||
}
|
||||
return 0;
|
||||
}
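// mndCheckDropStbForStream: likewise block dropping a super table that an
// existing stream reads from or writes into (streams backing an SMA are skipped).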
|
||||
|
||||
static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName, int64_t suid) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
void *pIter = NULL;
|
||||
while (1) {
|
||||
SStreamObj *pStream = NULL;
|
||||
pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
if (pStream->smaId != 0) {
|
||||
sdbRelease(pSdb, pStream);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pStream->targetStbUid == suid) {
|
||||
sdbRelease(pSdb, pStream);
|
||||
return -1;
|
||||
}
|
||||
|
||||
SNode *pAst = NULL;
|
||||
if (nodesStringToNode(pStream->ast, &pAst) != 0) {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
|
||||
SNodeList *pNodeList = NULL;
|
||||
nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
|
||||
SNode *pNode = NULL;
|
||||
FOREACH(pNode, pNodeList) {
|
||||
SColumnNode *pCol = (SColumnNode *)pNode;
|
||||
|
||||
if (pCol->tableId == suid) {
|
||||
sdbRelease(pSdb, pStream);
|
||||
nodesDestroyNode(pAst);
|
||||
return -1;
|
||||
} else {
|
||||
goto NEXT;
|
||||
}
|
||||
}
|
||||
NEXT:
|
||||
sdbRelease(pSdb, pStream);
|
||||
nodesDestroyNode(pAst);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
|
||||
SMnode *pMnode = pReq->info.node;
|
||||
int32_t code = -1;
|
||||
|
@@ -1971,6 +2122,16 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
if (mndCheckDropStbForTopic(pMnode, dropReq.name, pStb->uid) < 0) {
|
||||
terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
if (mndCheckDropStbForStream(pMnode, dropReq.name, pStb->uid) < 0) {
|
||||
terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
code = mndDropStb(pMnode, pReq, pDb, pStb);
|
||||
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||
|
||||
|
|
|
@@ -91,8 +91,9 @@ typedef struct SMetaEntry SMetaEntry;
|
|||
void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
|
||||
void metaReaderClear(SMetaReader *pReader);
|
||||
int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
|
||||
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags);
|
||||
int32_t metaReadNext(SMetaReader *pReader);
|
||||
const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *tagVal);
|
||||
const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
|
||||
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
|
||||
|
||||
typedef struct SMetaFltParam {
|
||||
|
|
|
@@ -87,7 +87,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
|
|||
}
|
||||
|
||||
// open pCtbIdx
|
||||
ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), 0, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx);
|
||||
ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), -1, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx);
|
||||
if (ret < 0) {
|
||||
metaError("vgId:%d, failed to open meta child table index since %s", TD_VID(pVnode), tstrerror(terrno));
|
||||
goto _err;
|
||||
|
|
|
@@ -53,6 +53,80 @@ _err:
|
|||
return -1;
|
||||
}
|
||||
|
||||
// int metaGetTableEntryByUidTest(void* meta, SArray *uidList) {
|
||||
//
|
||||
// SArray* readerList = taosArrayInit(taosArrayGetSize(uidList), sizeof(SMetaReader));
|
||||
// SArray* uidVersion = taosArrayInit(taosArrayGetSize(uidList), sizeof(STbDbKey));
|
||||
// SMeta *pMeta = meta;
|
||||
// int64_t version;
|
||||
// SHashObj *uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||
//
|
||||
// int64_t stt1 = taosGetTimestampUs();
|
||||
// for(int i = 0; i < taosArrayGetSize(uidList); i++) {
|
||||
// void* ppVal = NULL;
|
||||
// int vlen = 0;
|
||||
// uint64_t * uid = taosArrayGet(uidList, i);
|
||||
// // query uid.idx
|
||||
// if (tdbTbGet(pMeta->pUidIdx, uid, sizeof(*uid), &ppVal, &vlen) < 0) {
|
||||
// continue;
|
||||
// }
|
||||
// version = *(int64_t *)ppVal;
|
||||
//
|
||||
// STbDbKey tbDbKey = {.version = version, .uid = *uid};
|
||||
// taosArrayPush(uidVersion, &tbDbKey);
|
||||
// taosHashPut(uHash, uid, sizeof(int64_t), ppVal, sizeof(int64_t));
|
||||
// }
|
||||
// int64_t stt2 = taosGetTimestampUs();
|
||||
// qDebug("metaGetTableEntryByUidTest1 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt2-stt1);
|
||||
//
|
||||
// TBC *pCur = NULL;
|
||||
// tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
|
||||
// tdbTbcMoveToFirst(pCur);
|
||||
// void *pKey = NULL;
|
||||
// int kLen = 0;
|
||||
//
|
||||
// while(1){
|
||||
// SMetaReader pReader = {0};
|
||||
// int32_t ret = tdbTbcNext(pCur, &pKey, &kLen, &pReader.pBuf, &pReader.szBuf);
|
||||
// if (ret < 0) break;
|
||||
// STbDbKey *tmp = (STbDbKey*)pKey;
|
||||
// int64_t *ver = (int64_t*)taosHashGet(uHash, &tmp->uid, sizeof(int64_t));
|
||||
// if(ver == NULL || *ver != tmp->version) continue;
|
||||
// taosArrayPush(readerList, &pReader);
|
||||
// }
|
||||
// tdbTbcClose(pCur);
|
||||
//
|
||||
// taosArrayClear(readerList);
|
||||
// int64_t stt3 = taosGetTimestampUs();
|
||||
// qDebug("metaGetTableEntryByUidTest2 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt3-stt2);
|
||||
// for(int i = 0; i < taosArrayGetSize(uidVersion); i++) {
|
||||
// SMetaReader pReader = {0};
|
||||
//
|
||||
// STbDbKey *tbDbKey = taosArrayGet(uidVersion, i);
|
||||
// // query table.db
|
||||
// if (tdbTbGet(pMeta->pTbDb, tbDbKey, sizeof(STbDbKey), &pReader.pBuf, &pReader.szBuf) < 0) {
|
||||
// continue;
|
||||
// }
|
||||
// taosArrayPush(readerList, &pReader);
|
||||
// }
|
||||
// int64_t stt4 = taosGetTimestampUs();
|
||||
// qDebug("metaGetTableEntryByUidTest3 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt4-stt3);
|
||||
//
|
||||
// for(int i = 0; i < taosArrayGetSize(readerList); i++){
|
||||
// SMetaReader* pReader = taosArrayGet(readerList, i);
|
||||
// metaReaderInit(pReader, meta, 0);
|
||||
// // decode the entry
|
||||
// tDecoderInit(&pReader->coder, pReader->pBuf, pReader->szBuf);
|
||||
//
|
||||
// if (metaDecodeEntry(&pReader->coder, &pReader->me) < 0) {
|
||||
// }
|
||||
// metaReaderClear(pReader);
|
||||
// }
|
||||
// int64_t stt5 = taosGetTimestampUs();
|
||||
// qDebug("metaGetTableEntryByUidTest4 rows:%d, cost:%ld us", taosArrayGetSize(readerList), stt5-stt4);
|
||||
// return 0;
|
||||
// }
|
||||
|
||||
int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
|
||||
SMeta *pMeta = pReader->pMeta;
|
||||
int64_t version;
|
||||
|
@@ -749,9 +823,8 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) {
|
|||
|
||||
#endif
|
||||
|
||||
const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) {
|
||||
ASSERT(pEntry->type == TSDB_CHILD_TABLE);
|
||||
STag *tag = (STag *)pEntry->ctbEntry.pTags;
|
||||
const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
|
||||
STag *tag = (STag *)pTag;
|
||||
if (type == TSDB_DATA_TYPE_JSON) {
|
||||
return tag;
|
||||
}
|
||||
|
@@ -853,6 +926,9 @@ int32_t metaFilterTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (p->suid != pKey->suid) {
|
||||
break;
|
||||
}
|
||||
first = false;
|
||||
if (p != NULL) {
|
||||
int32_t cmp = (*param->filterFunc)(p->data, pKey->data, pKey->type);
|
||||
|
@@ -889,6 +965,38 @@ END:
|
|||
return ret;
|
||||
}
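// metaGetTableTags: collect the raw tag values of the child tables under suid
// into the tags hash (uid -> tag blob); a non-empty uidList acts as a filter,
// while an empty one is filled with every child table uid that is found.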
|
||||
|
||||
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) {
|
||||
SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid);
|
||||
|
||||
SHashObj *uHash = NULL;
|
||||
size_t len = taosArrayGetSize(uidList); // len > 0 means there already have uids
|
||||
if (len > 0) {
|
||||
uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
|
||||
for (int i = 0; i < len; i++) {
|
||||
int64_t *uid = taosArrayGet(uidList, i);
|
||||
taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i));
|
||||
}
|
||||
}
|
||||
while (1) {
|
||||
tb_uid_t id = metaCtbCursorNext(pCur);
|
||||
if (id == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (len > 0 && taosHashGet(uHash, &id, sizeof(int64_t)) == NULL) {
|
||||
continue;
|
||||
} else if (len == 0) {
|
||||
taosArrayPush(uidList, &id);
|
||||
}
|
||||
|
||||
taosHashPut(tags, &id, sizeof(int64_t), pCur->pVal, pCur->vLen);
|
||||
}
|
||||
|
||||
taosHashCleanup(uHash);
|
||||
metaCloseCtbCursor(pCur);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo) {
|
||||
int32_t code = 0;
|
||||
void *pData = NULL;
|
||||
|
|
|
@@ -181,10 +181,28 @@ int metaDelJsonVarFromIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const SSche
|
|||
|
||||
int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
|
||||
SMetaEntry me = {0};
|
||||
int kLen = 0;
|
||||
int vLen = 0;
|
||||
const void *pKey = NULL;
|
||||
const void *pVal = NULL;
|
||||
void *pBuf = NULL;
|
||||
int32_t szBuf = 0;
|
||||
void *p = NULL;
|
||||
|
||||
// validate req
|
||||
if (tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name), NULL, NULL) == 0) {
|
||||
void *pData = NULL;
|
||||
int nData = 0;
|
||||
if (tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData) == 0) {
|
||||
tb_uid_t uid = *(tb_uid_t *)pData;
|
||||
tdbFree(pData);
|
||||
SMetaInfo info;
|
||||
metaGetInfo(pMeta, uid, &info);
|
||||
if (info.uid == info.suid) {
|
||||
return 0;
|
||||
} else {
|
||||
terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// set structs
|
||||
|
@@ -865,6 +883,9 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
|
|||
metaUpdateTagIdx(pMeta, &ctbEntry);
|
||||
}
|
||||
|
||||
SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid};
|
||||
tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags, ((STag*)(ctbEntry.ctbEntry.pTags))->len, &pMeta->txn);
|
||||
|
||||
tDecoderClear(&dc1);
|
||||
tDecoderClear(&dc2);
|
||||
if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
|
||||
|
@@ -1069,7 +1090,8 @@ static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME) {
|
|||
|
||||
static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) {
|
||||
SCtbIdxKey ctbIdxKey = {.suid = pME->ctbEntry.suid, .uid = pME->uid};
|
||||
return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), NULL, 0, &pMeta->txn);
|
||||
|
||||
return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), pME->ctbEntry.pTags, ((STag*)(pME->ctbEntry.pTags))->len, &pMeta->txn);
|
||||
}
|
||||
|
||||
int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, int32_t nTagData, int8_t type, tb_uid_t uid,
|
||||
|
|
|
@@ -628,8 +628,6 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
|
|||
}
|
||||
|
||||
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
|
||||
int32_t code = 0;
|
||||
|
||||
if (pTask->taskLevel == TASK_LEVEL__AGG) {
|
||||
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
|
||||
}
|
||||
|
@@ -640,8 +638,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
|
|||
pTask->outputQueue = streamQueueOpen();
|
||||
|
||||
if (pTask->inputQueue == NULL || pTask->outputQueue == NULL) {
|
||||
code = -1;
|
||||
goto FAIL;
|
||||
return -1;
|
||||
}
|
||||
|
||||
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
|
||||
|
@@ -686,14 +683,9 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
|
|||
|
||||
streamSetupTrigger(pTask);
|
||||
|
||||
tqInfo("deploy stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId,
|
||||
tqInfo("expand stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId,
|
||||
pTask->selfChildId);
|
||||
|
||||
FAIL:
|
||||
if (pTask->inputQueue) streamQueueClose(pTask->inputQueue);
|
||||
if (pTask->outputQueue) streamQueueClose(pTask->outputQueue);
|
||||
// TODO free executor
|
||||
return code;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
|
||||
|
|
|
@@ -231,11 +231,12 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
|
|||
|
||||
ASSERT(pTask->tbSink.pTSchema);
|
||||
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
|
||||
SSubmitReq* pReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
|
||||
SSubmitReq* submitReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
|
||||
pTask->tbSink.stbFullName, &deleteReq);
|
||||
|
||||
tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);
|
||||
|
||||
if (taosArrayGetSize(deleteReq.deleteReqs) != 0) {
|
||||
int32_t code;
|
||||
int32_t len;
|
||||
tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
|
||||
|
@@ -244,21 +245,21 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
|
|||
ASSERT(0);
|
||||
}
|
||||
SEncoder encoder;
|
||||
void* buf = rpcMallocCont(len + sizeof(SMsgHead));
|
||||
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
|
||||
void* serializedDeleteReq = rpcMallocCont(len + sizeof(SMsgHead));
|
||||
void* abuf = POINTER_SHIFT(serializedDeleteReq, sizeof(SMsgHead));
|
||||
tEncoderInit(&encoder, abuf, len);
|
||||
tEncodeSBatchDeleteReq(&encoder, &deleteReq);
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
((SMsgHead*)buf)->vgId = pVnode->config.vgId;
|
||||
((SMsgHead*)serializedDeleteReq)->vgId = pVnode->config.vgId;
|
||||
|
||||
if (taosArrayGetSize(deleteReq.deleteReqs) != 0) {
|
||||
SRpcMsg msg = {
|
||||
.msgType = TDMT_VND_BATCH_DEL,
|
||||
.pCont = buf,
|
||||
.pCont = serializedDeleteReq,
|
||||
.contLen = len + sizeof(SMsgHead),
|
||||
};
|
||||
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
|
||||
rpcFreeCont(serializedDeleteReq);
|
||||
tqDebug("failed to put into write-queue since %s", terrstr());
|
||||
}
|
||||
}
|
||||
|
@@ -268,11 +269,12 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
|
|||
// build write msg
|
||||
SRpcMsg msg = {
|
||||
.msgType = TDMT_VND_SUBMIT,
|
||||
.pCont = pReq,
|
||||
.contLen = ntohl(pReq->length),
|
||||
.pCont = submitReq,
|
||||
.contLen = ntohl(submitReq->length),
|
||||
};
|
||||
|
||||
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
|
||||
rpcFreeCont(submitReq);
|
||||
tqDebug("failed to put into write-queue since %s", terrstr());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -221,7 +221,7 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
|
|||
|
||||
STagVal tagVal = {0};
|
||||
tagVal.cid = pSColumnNode->colId;
|
||||
const char* p = metaGetTableTagVal(&mr->me, pSColumnNode->node.resType.type, &tagVal);
|
||||
const char* p = metaGetTableTagVal(mr->me.ctbEntry.pTags, pSColumnNode->node.resType.type, &tagVal);
|
||||
if (p == NULL) {
|
||||
res->node.resType.type = TSDB_DATA_TYPE_NULL;
|
||||
} else if (pSColumnNode->node.resType.type == TSDB_DATA_TYPE_JSON) {
|
||||
|
@@ -298,6 +298,209 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle,
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
typedef struct tagFilterAssist{
|
||||
SHashObj *colHash;
|
||||
int32_t index;
|
||||
SArray *cInfoList;
|
||||
}tagFilterAssist;
|
||||
|
||||
static EDealRes getColumn(SNode** pNode, void* pContext) {
|
||||
SColumnNode* pSColumnNode = NULL;
|
||||
if (QUERY_NODE_COLUMN == nodeType((*pNode))) {
|
||||
pSColumnNode = *(SColumnNode**)pNode;
|
||||
}else if(QUERY_NODE_FUNCTION == nodeType((*pNode))){
|
||||
SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode);
|
||||
if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) {
|
||||
pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
|
||||
if (NULL == pSColumnNode) {
|
||||
return DEAL_RES_ERROR;
|
||||
}
|
||||
pSColumnNode->colId = -1;
|
||||
pSColumnNode->colType = COLUMN_TYPE_TBNAME;
|
||||
pSColumnNode->node.resType.type = TSDB_DATA_TYPE_VARCHAR;
|
||||
pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE;
|
||||
nodesDestroyNode(*pNode);
|
||||
*pNode = (SNode*)pSColumnNode;
|
||||
}else{
|
||||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
}else{
|
||||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
||||
tagFilterAssist *pData = (tagFilterAssist *)pContext;
|
||||
void *data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId));
|
||||
if(!data){
|
||||
taosHashPut(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode)));
|
||||
pSColumnNode->slotId = pData->index++;
|
||||
SColumnInfo cInfo = {.colId = pSColumnNode->colId, .type = pSColumnNode->node.resType.type, .bytes = pSColumnNode->node.resType.bytes};
|
||||
#if TAG_FILTER_DEBUG
|
||||
qDebug("tagfilter build column info, slotId:%d, colId:%d, type:%d", pSColumnNode->slotId, cInfo.colId, cInfo.type);
|
||||
#endif
|
||||
taosArrayPush(pData->cInfoList, &cInfo);
|
||||
}else{
|
||||
SColumnNode* col = *(SColumnNode**)data;
|
||||
pSColumnNode->slotId = col->slotId;
|
||||
}
|
||||
|
||||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
||||
static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarParam* pParam) {
|
||||
SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData));
|
||||
if (pColumnData == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
pColumnData->info.type = pType->type;
|
||||
pColumnData->info.bytes = pType->bytes;
|
||||
pColumnData->info.scale = pType->scale;
|
||||
pColumnData->info.precision = pType->precision;
|
||||
|
||||
int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
taosMemoryFree(pColumnData);
|
||||
return terrno;
|
||||
}
|
||||
|
||||
pParam->columnData = pColumnData;
|
||||
pParam->colAlloced = true;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
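// getColInfoResult: build an in-memory block holding the tag values (and the
// tbname pseudo column) referenced by pTagCond for each uid, then evaluate the
// filter with scalarCalculate and return the resulting boolean column.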
static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond){
  int32_t code = TSDB_CODE_SUCCESS;
  SArray* pBlockList = NULL;
  SSDataBlock* pResBlock = NULL;
  SHashObj* tags = NULL;
  SScalarParam output = {0};

  tagFilterAssist ctx = {0};
  ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
  if(ctx.colHash == NULL){
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    goto end;
  }
  ctx.index = 0;
  ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
  if(ctx.cInfoList == NULL){
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    goto end;
  }

  nodesRewriteExprPostOrder(&pTagCond, getColumn, (void *)&ctx);

  pResBlock = createDataBlock();
  if (pResBlock == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    goto end;
  }

  for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) {
    SColumnInfoData colInfo = {{0}, 0};
    colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i);
    blockDataAppendColInfo(pResBlock, &colInfo);
  }

  // int64_t stt = taosGetTimestampUs();
  tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
  code = metaGetTableTags(metaHandle, suid, uidList, tags);
  if (code != TSDB_CODE_SUCCESS) {
    terrno = code;
    goto end;
  }

  int32_t rows = taosArrayGetSize(uidList);
  if(rows == 0){
    goto end;
  }
  // int64_t stt1 = taosGetTimestampUs();
  // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);

  code = blockDataEnsureCapacity(pResBlock, rows);
  if (code != TSDB_CODE_SUCCESS) {
    terrno = code;
    goto end;
  }

  // int64_t st = taosGetTimestampUs();
  for (int32_t i = 0; i < rows; i++) {
    int64_t* uid = taosArrayGet(uidList, i);
    void* tag = taosHashGet(tags, uid, sizeof(int64_t));
    ASSERT(tag);
    for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){
      SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);

      if(pColInfo->info.colId == -1){  // tbname
        char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
        metaGetTableNameByUid(metaHandle, *uid, str);
        colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
        qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2);
#endif
      }else{
        STagVal tagVal = {0};
        tagVal.cid = pColInfo->info.colId;
        const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);

        if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){
          colDataAppend(pColInfo, i, p, true);
        } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
          colDataAppend(pColInfo, i, p, false);
        } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
          char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
          varDataSetLen(tmp, tagVal.nData);
          memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
          colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
          qDebug("tagfilter varch:%s", tmp+2);
#endif
          taosMemoryFree(tmp);
        } else {
          colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
          if(pColInfo->info.type == TSDB_DATA_TYPE_INT){
            qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
          }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){
            qDebug("tagfilter double:%f", *(double *)(&tagVal.i64));
          }
#endif
        }
      }
    }
  }
  pResBlock->info.rows = rows;

  // int64_t st1 = taosGetTimestampUs();
  // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);

  pBlockList = taosArrayInit(2, POINTER_BYTES);
  taosArrayPush(pBlockList, &pResBlock);

  SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
  code = createResultData(&type, rows, &output);
  if (code != TSDB_CODE_SUCCESS) {
    goto end;
  }

  code = scalarCalculate(pTagCond, pBlockList, &output);
  if(code != TSDB_CODE_SUCCESS){
    terrno = code;
  }
  // int64_t st2 = taosGetTimestampUs();
  // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);

end:
  taosHashCleanup(tags);
  taosHashCleanup(ctx.colHash);
  taosArrayDestroy(ctx.cInfoList);
  blockDataDestroy(pResBlock);
  taosArrayDestroy(pBlockList);
  return output.columnData;
}
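
// Resolves the set of tables for a scan: the child tables of a super table (optionally
// narrowed by the tag index and/or the boolean filter column produced by getColInfoResult
// above), or the single table uid for a plain table scan.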
int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
                     STableListInfo* pListInfo) {
  int32_t code = TSDB_CODE_SUCCESS;

@@ -308,62 +511,67 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
  }

  uint64_t tableUid = pScanNode->uid;

  pListInfo->suid = pScanNode->suid;
  SArray* res = taosArrayInit(8, sizeof(uint64_t));

  if (pScanNode->tableType == TSDB_SUPER_TABLE) {
    if (pTagIndexCond) {
      SIndexMetaArg metaArg = {
          .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};

      SArray* res = taosArrayInit(8, sizeof(uint64_t));
      // int64_t stt = taosGetTimestampUs();
      SIdxFltStatus status = SFLT_NOT_INDEX;
      code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
      if (code != 0 || status == SFLT_NOT_INDEX) {
        qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
        // code = TSDB_CODE_INDEX_REBUILDING;
        code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList);
      } else {
        qDebug("success to get tableIds, size:%d, suid:%" PRIu64, (int)taosArrayGetSize(res), tableUid);
        code = TDB_CODE_SUCCESS;
      }

      // int64_t stt1 = taosGetTimestampUs();
      // qDebug("generate table list, cost:%ld us", stt1-stt);
    }else if(!pTagCond){
      vnodeGetCtbIdList(pVnode, pScanNode->suid, res);
    }
  } else {  // Create one table group.
    taosArrayPush(res, &tableUid);
  }

  if (pTagCond) {
    SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond);
    if(terrno != TDB_CODE_SUCCESS){
      colDataDestroy(pColInfoData);
      taosMemoryFreeClear(pColInfoData);
      taosArrayDestroy(res);
      return terrno;
    }

    int32_t i = 0;
    int32_t j = 0;
    int32_t len = taosArrayGetSize(res);
    while (i < taosArrayGetSize(res) && j < len && pColInfoData) {
      void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);

      int64_t* uid = taosArrayGet(res, i);
      qDebug("tagfilter get uid:%ld, res:%d", *uid, *(bool*)var);
      if (*(bool*)var == false) {
        taosArrayRemove(res, i);
        j++;
        continue;
      }
      i++;
      j++;
    }
    colDataDestroy(pColInfoData);
    taosMemoryFreeClear(pColInfoData);
  }

  for (int i = 0; i < taosArrayGetSize(res); i++) {
    STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
    taosArrayPush(pListInfo->pTableList, &info);
    qDebug("tagfilter get uid:%ld", info.uid);
  }

  taosArrayDestroy(res);
    } else {
      code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList);
    }

    if (code != TSDB_CODE_SUCCESS) {
      qError("failed to get tableIds, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
      terrno = code;
      return code;
    }
  } else {  // Create one table group.
    STableKeyInfo info = {.uid = tableUid, .groupId = 0};
    taosArrayPush(pListInfo->pTableList, &info);
  }

  if (pTagCond) {
    int32_t i = 0;
    while (i < taosArrayGetSize(pListInfo->pTableList)) {
      STableKeyInfo* info = taosArrayGet(pListInfo->pTableList, i);

      bool qualified = true;
      code = isQualifiedTable(info, pTagCond, metaHandle, &qualified);
      if (code != TSDB_CODE_SUCCESS) {
        return code;
      }

      if (!qualified) {
        taosArrayRemove(pListInfo->pTableList, i);
        continue;
      }
      i++;
    }
  }

  pListInfo->pGroupList = taosArrayInit(4, POINTER_BYTES);
  if (pListInfo->pGroupList == NULL) {

@@ -440,7 +440,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int
    } else {  // these are tags
      STagVal tagVal = {0};
      tagVal.cid = pExpr->base.pParam[0].pCol->colId;
      const char* p = metaGetTableTagVal(&mr.me, pColInfoData->info.type, &tagVal);
      const char* p = metaGetTableTagVal(mr.me.ctbEntry.pTags, pColInfoData->info.type, &tagVal);

      char* data = NULL;
      if (pColInfoData->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
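// Note: the diff view lost its +/- markers, so old and new lines appear side by side. The two
// metaGetTableTagVal() lines above appear to be the same call site before and after the change:
// it now takes the child table's tag blob (mr.me.ctbEntry.pTags) instead of the meta entry
// itself. The doTagScan hunk below shows the same change.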

@@ -2506,7 +2506,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
      } else {  // it is a tag value
        STagVal val = {0};
        val.cid = pExprInfo[j].base.pParam[0].pCol->colId;
        const char* p = metaGetTableTagVal(&mr.me, pDst->info.type, &val);
        const char* p = metaGetTableTagVal(mr.me.ctbEntry.pTags, pDst->info.type, &val);

        char* data = NULL;
        if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {

@@ -292,6 +292,9 @@ int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t
      }

      SColumnInfoData *columnData = (SColumnInfoData *)taosArrayGet(block->pDataBlock, ref->slotId);
#if TAG_FILTER_DEBUG
      qDebug("tagfilter column info, slotId:%d, colId:%d, type:%d", ref->slotId, columnData->info.colId, columnData->info.type);
#endif
      param->numOfRows = block->info.rows;
      param->columnData = columnData;
      break;

@@ -32,7 +32,6 @@ typedef struct {

static SStreamGlobalEnv streamEnv;

int32_t streamExec(SStreamTask* pTask);
int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch);

int32_t streamDispatch(SStreamTask* pTask);

@@ -185,7 +185,9 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
  tFreeStreamDispatchReq(pReq);

  if (exec) {
    streamTryExec(pTask);
    if (streamTryExec(pTask) < 0) {
      return -1;
    }

    if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
      streamDispatch(pTask);

@@ -221,7 +223,9 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp) {
}

int32_t streamProcessRunReq(SStreamTask* pTask) {
  streamTryExec(pTask);
  if (streamTryExec(pTask) < 0) {
    return -1;
  }

  if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    streamDispatch(pTask);
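// Note: both call sites above now check the return value of streamTryExec() and propagate the
// failure to the caller instead of ignoring it; the bare streamTryExec(pTask) lines are the
// previous form kept by the diff view.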

@@ -15,6 +15,7 @@

#include "executor.h"
#include "tstream.h"
#include "ttimer.h"

SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) {
  SStreamMeta* pMeta = taosMemoryCalloc(1, sizeof(SStreamMeta));
@@ -99,16 +100,19 @@ int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char*
    goto FAIL;
  }

  taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*));
  if (taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)) < 0) {
    goto FAIL;
  }

  if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), msg, msgLen, &pMeta->txn) < 0) {
    taosHashRemove(pMeta->pTasks, &pTask->taskId, sizeof(int32_t));
    ASSERT(0);
    return -1;
    goto FAIL;
  }
  return 0;

FAIL:
  if (pTask) taosMemoryFree(pTask);
  if (pTask) tFreeSStreamTask(pTask);
  return -1;
}
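// Note: taosHashPut() and tdbTbUpsert() failures now route through the common FAIL label,
// which frees the partially constructed task with tFreeSStreamTask() instead of a bare
// taosMemoryFree()/ASSERT(0).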

@@ -158,11 +162,28 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) {
    SStreamTask* pTask = *ppTask;
    taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t));
    atomic_store_8(&pTask->taskStatus, TASK_STATUS__DROPPING);
  }

    if (tdbTbDelete(pMeta->pTaskDb, &taskId, sizeof(int32_t), &pMeta->txn) < 0) {
      /*return -1;*/
    }

    if (pTask->triggerParam != 0) {
      taosTmrStop(pTask->timer);
    }

    while (1) {
      int8_t schedStatus =
          atomic_val_compare_exchange_8(&pTask->schedStatus, TASK_SCHED_STATUS__INACTIVE, TASK_SCHED_STATUS__DROPPING);
      if (schedStatus == TASK_SCHED_STATUS__INACTIVE) {
        tFreeSStreamTask(pTask);
        break;
      } else if (schedStatus == TASK_SCHED_STATUS__DROPPING) {
        break;
      }
      taosMsleep(10);
    }
  }

  return 0;
}
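// Task removal now marks the task as TASK_STATUS__DROPPING, deletes it from the task db, stops
// its trigger timer, and spins (with taosMsleep) on a compare-and-swap of schedStatus so the
// task is only freed once no scheduler is still running it.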

@@ -13,7 +13,7 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "tstream.h"
#include "streamInc.h"

SStreamQueue* streamQueueOpen() {
  SStreamQueue* pQueue = taosMemoryCalloc(1, sizeof(SStreamQueue));

@@ -36,9 +36,12 @@ void streamQueueClose(SStreamQueue* queue) {
  while (1) {
    void* qItem = streamQueueNextItem(queue);
    if (qItem) {
      taosFreeQitem(qItem);
      streamFreeQitem(qItem);
    } else {
      return;
      break;
    }
  }
  taosFreeQall(queue->qall);
  taosCloseQueue(queue->queue);
  taosMemoryFree(queue);
}
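// Queue teardown now drains queued items with streamFreeQitem() and breaks out of the loop
// instead of returning early, so the qall/queue handles and the queue object itself are
// released below.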

@@ -152,9 +152,17 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
}

void tFreeSStreamTask(SStreamTask* pTask) {
  streamQueueClose(pTask->inputQueue);
  streamQueueClose(pTask->outputQueue);
  if (pTask->inputQueue) streamQueueClose(pTask->inputQueue);
  if (pTask->outputQueue) streamQueueClose(pTask->outputQueue);
  if (pTask->exec.qmsg) taosMemoryFree(pTask->exec.qmsg);
  if (pTask->exec.executor) qDestroyTask(pTask->exec.executor);
  taosArrayDestroy(pTask->childEpInfo);
  if (pTask->outputType == TASK_OUTPUT__TABLE) {
    tDeleteSSchemaWrapper(pTask->tbSink.pSchemaWrapper);
    taosMemoryFree(pTask->tbSink.pTSchema);
  }
  if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    taosArrayDestroy(pTask->shuffleDispatcher.dbInfo.pVgroupInfos);
  }
  taosMemoryFree(pTask);
}
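// tFreeSStreamTask() now guards each resource with a NULL check and additionally releases the
// executor task, the child ep info array, the table-sink schema, and the shuffle-dispatch
// vgroup list before freeing the task itself.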

@@ -31,6 +31,10 @@ typedef struct SSyncIndexMgr {
  SRaftId (*replicas)[TSDB_MAX_REPLICA];
  SyncIndex index[TSDB_MAX_REPLICA];
  SyncTerm privateTerm[TSDB_MAX_REPLICA];  // for advanced function

  int64_t startTimeArr[TSDB_MAX_REPLICA];
  int64_t recvTimeArr[TSDB_MAX_REPLICA];

  int32_t replicaNum;
  SSyncNode *pSyncNode;
} SSyncIndexMgr;
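// startTimeArr/recvTimeArr record, per replica, the peer's reported start time and the last
// time an append-entries reply was received; syncNodeDynamicQuorum() further down uses them
// to decide which peers still count toward the quorum.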

@@ -41,8 +45,13 @@ void syncIndexMgrDestroy(SSyncIndexMgr *pSyncIndexMgr);
void syncIndexMgrClear(SSyncIndexMgr *pSyncIndexMgr);
void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, SyncIndex index);
SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId);
cJSON * syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr);
char * syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr);
cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr);
char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr);

void syncIndexMgrSetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t startTime);
int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId);
void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime);
int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId);

// void syncIndexMgrSetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, SyncTerm term);
// SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId);

@@ -269,6 +269,8 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode);
int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader);
int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry);

int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode);

// trace log
void syncLogSendRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s);
void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s);

@@ -55,6 +55,8 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode);

int32_t syncNodeAppendEntriesOnePeer(SSyncNode* pSyncNode, SRaftId* pDestId, SyncIndex nextIndex);

int32_t syncNodeReplicate(SSyncNode* pSyncNode, bool isTimer);
int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg);
int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntriesBatch* pMsg);

@@ -148,6 +148,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
    pReply->term = ths->pRaftStore->currentTerm;
    pReply->success = false;
    pReply->matchIndex = SYNC_INDEX_INVALID;
    pReply->startTime = ths->startTime;

    // msg event log
    syncLogSendAppendEntriesReply(ths, pReply, "");

@@ -290,6 +291,8 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
      pReply->matchIndex = pMsg->prevLogIndex;
    }

    pReply->startTime = ths->startTime;

    // msg event log
    syncLogSendAppendEntriesReply(ths, pReply, "");

@@ -603,6 +606,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
    pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
    pReply->success = true;
    pReply->matchIndex = matchIndex;
    pReply->startTime = ths->startTime;

    // msg event log
    syncLogSendAppendEntriesReply(ths, pReply, "");

@@ -651,6 +655,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
    pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
    pReply->success = false;
    pReply->matchIndex = ths->commitIndex;
    pReply->startTime = ths->startTime;

    // msg event log
    syncLogSendAppendEntriesReply(ths, pReply, "");

@@ -729,6 +734,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
    pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
    pReply->success = true;
    pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + pMsg->dataCount : pMsg->prevLogIndex;
    pReply->startTime = ths->startTime;

    // msg event log
    syncLogSendAppendEntriesReply(ths, pReply, "");

@@ -874,6 +880,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
    pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
    pReply->success = true;
    pReply->matchIndex = matchIndex;
    pReply->startTime = ths->startTime;

    // msg event log
    syncLogSendAppendEntriesReply(ths, pReply, "");

@@ -919,6 +926,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
    pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
    pReply->success = false;
    pReply->matchIndex = SYNC_INDEX_INVALID;
    pReply->startTime = ths->startTime;

    // msg event log
    syncLogSendAppendEntriesReply(ths, pReply, "");

@@ -984,6 +992,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
    pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
    pReply->success = true;
    pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + 1 : pMsg->prevLogIndex;
    pReply->startTime = ths->startTime;

    // msg event log
    syncLogSendAppendEntriesReply(ths, pReply, "");

@@ -64,6 +64,10 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p

  ASSERT(pMsg->term == ths->pRaftStore->currentTerm);

  // update time
  syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime);
  syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs());

  SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
  SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));

@@ -170,6 +174,10 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie

  ASSERT(pMsg->term == ths->pRaftStore->currentTerm);

  // update time
  syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime);
  syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs());

  SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
  SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));

@@ -330,6 +338,10 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries

  ASSERT(pMsg->term == ths->pRaftStore->currentTerm);

  // update time
  syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime);
  syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs());

  SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
  SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));

@@ -133,6 +133,63 @@ bool syncAgreeIndex(SSyncNode* pSyncNode, SRaftId* pRaftId, SyncIndex index) {
  return false;
}

static inline int64_t syncNodeAbs64(int64_t a, int64_t b) {
  ASSERT(a >= 0);
  ASSERT(b >= 0);

  int64_t c = a > b ? a - b : b - a;
  return c;
}
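
// Dynamic quorum: every peer whose last append-entries reply is recent
// (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) and whose recorded start time is within
// SYNC_MAX_START_TIME_RANGE_MS of this node's own start time contributes one vote on top of
// self; the result is clamped to at least the configured quorum.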
int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) {
  int32_t quorum = 1;  // self

  int64_t timeNow = taosGetTimestampMs();
  for (int i = 0; i < pSyncNode->peersNum; ++i) {
    int64_t peerStartTime = syncIndexMgrGetStartTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]);
    int64_t peerRecvTime = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]);

    int64_t recvTimeDiff = syncNodeAbs64(peerRecvTime, timeNow);
    int64_t startTimeDiff = syncNodeAbs64(peerStartTime, pSyncNode->startTime);

    int32_t addQuorum = 0;

    if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) {
      addQuorum = 1;
    } else {
      addQuorum = 0;
    }

    if (startTimeDiff > SYNC_MAX_START_TIME_RANGE_MS) {
      addQuorum = 0;
    }

    quorum += addQuorum;
  }

  ASSERT(quorum <= pSyncNode->replicaNum);

  if (quorum < pSyncNode->quorum) {
    quorum = pSyncNode->quorum;
  }

  return quorum;
}

bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) {
  int agreeCount = 0;
  for (int i = 0; i < pSyncNode->replicaNum; ++i) {
    if (syncAgreeIndex(pSyncNode, &(pSyncNode->replicasId[i]), index)) {
      ++agreeCount;
    }
    if (agreeCount >= syncNodeDynamicQuorum(pSyncNode)) {
      return true;
    }
  }
  return false;
}

/*
bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) {
  int agreeCount = 0;
  for (int i = 0; i < pSyncNode->replicaNum; ++i) {

@@ -145,3 +202,4 @@ bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) {
  }
  return false;
}
*/

@@ -47,6 +47,13 @@ void syncIndexMgrDestroy(SSyncIndexMgr *pSyncIndexMgr) {
void syncIndexMgrClear(SSyncIndexMgr *pSyncIndexMgr) {
  memset(pSyncIndexMgr->index, 0, sizeof(pSyncIndexMgr->index));
  memset(pSyncIndexMgr->privateTerm, 0, sizeof(pSyncIndexMgr->privateTerm));

  // int64_t timeNow = taosGetMonotonicMs();
  for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
    pSyncIndexMgr->startTimeArr[i] = 0;
    pSyncIndexMgr->recvTimeArr[i] = 0;
  }

  /*
  for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
    pSyncIndexMgr->index[i] = 0;

@@ -68,7 +75,8 @@ void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId,
  char host[128];
  uint16_t port;
  syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port);
  sError("vgId:%d, index mgr set for %s:%d, index:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port, index);
  sError("vgId:%d, index mgr set for %s:%d, index:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port,
         index);
}

SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) {

@@ -125,11 +133,65 @@ cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {

char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr) {
  cJSON *pJson = syncIndexMgr2Json(pSyncIndexMgr);
  char * serialized = cJSON_Print(pJson);
  char *serialized = cJSON_Print(pJson);
  cJSON_Delete(pJson);
  return serialized;
}
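
// Accessors for the new per-replica timestamps. Like syncIndexMgrSetIndex() above, the setters
// only log an error when the raft id is not found (the peer may have been removed by a config
// change, as the in-code comment notes) instead of asserting; the getters still assert.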
void syncIndexMgrSetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t startTime) {
  for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
    if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) {
      (pSyncIndexMgr->startTimeArr)[i] = startTime;
      return;
    }
  }

  // maybe config change
  // ASSERT(0);
  char host[128];
  uint16_t port;
  syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port);
  sError("vgId:%d, index mgr set for %s:%d, start-time:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port,
         startTime);
}

int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) {
  for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
    if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) {
      int64_t startTime = (pSyncIndexMgr->startTimeArr)[i];
      return startTime;
    }
  }
  ASSERT(0);
}

void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime) {
  for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
    if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) {
      (pSyncIndexMgr->recvTimeArr)[i] = recvTime;
      return;
    }
  }

  // maybe config change
  // ASSERT(0);
  char host[128];
  uint16_t port;
  syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port);
  sError("vgId:%d, index mgr set for %s:%d, recv-time:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port,
         recvTime);
}

int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) {
  for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
    if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) {
      int64_t recvTime = (pSyncIndexMgr->recvTimeArr)[i];
      return recvTime;
    }
  }
  ASSERT(0);
}

// for debug -------------------
void syncIndexMgrPrint(SSyncIndexMgr *pObj) {
  char *serialized = syncIndexMgr2Str(pObj);

@@ -1682,13 +1682,13 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
               ", sby:%d, "
               "stgy:%d, bch:%d, "
               "r-num:%d, "
               "lcfg:%" PRId64 ", chging:%d, rsto:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s",
               "lcfg:%" PRId64 ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s",
               pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm,
               pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
               pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize,
               pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing,
               pSyncNode->restoreFinish, pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser,
               printStr);
               pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), pSyncNode->electTimerLogicClockUser,
               pSyncNode->heartbeatTimerLogicClockUser, printStr);
    } else {
      snprintf(logBuf, sizeof(logBuf), "%s", str);
    }

@@ -1706,12 +1706,13 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
               ", sby:%d, "
               "stgy:%d, bch:%d, "
               "r-num:%d, "
               "lcfg:%" PRId64 ", chging:%d, rsto:%d, %s",
               "lcfg:%" PRId64 ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s",
               pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm,
               pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
               pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize,
               pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing,
               pSyncNode->restoreFinish, printStr);
               pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), pSyncNode->electTimerLogicClockUser,
               pSyncNode->heartbeatTimerLogicClockUser, printStr);
    } else {
      snprintf(s, len, "%s", str);
    }

@@ -1947,6 +1947,8 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
    cJSON_AddNumberToObject(pRoot, "success", pMsg->success);
    snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->matchIndex);
    cJSON_AddStringToObject(pRoot, "matchIndex", u64buf);
    snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->startTime);
    cJSON_AddStringToObject(pRoot, "startTime", u64buf);
  }

  cJSON* pJson = cJSON_CreateObject();

@@ -116,6 +116,120 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {
  return ret;
}

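// Replicates to a single peer: resolves the previous log index/term, pulls up to
// pRaftCfg->batchSize entries starting at nextIndex from the log store, packs them into one
// SyncAppendEntriesBatch message, and sends it. It returns 1 when entries were sent and the
// peer is more than SYNC_SLOW_DOWN_RANGE behind the commit index, presumably so the caller can
// keep pushing batches (the in-code comment calls this "speed up").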
int32_t syncNodeAppendEntriesOnePeer(SSyncNode* pSyncNode, SRaftId* pDestId, SyncIndex nextIndex) {
  int32_t ret = 0;

  // pre index, pre term
  SyncIndex preLogIndex = syncNodeGetPreIndex(pSyncNode, nextIndex);
  SyncTerm preLogTerm = syncNodeGetPreTerm(pSyncNode, nextIndex);
  if (preLogTerm == SYNC_TERM_INVALID) {
    SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1;
    // SyncIndex newNextIndex = nextIndex + 1;

    syncIndexMgrSetIndex(pSyncNode->pNextIndex, pDestId, newNextIndex);
    syncIndexMgrSetIndex(pSyncNode->pMatchIndex, pDestId, SYNC_INDEX_INVALID);
    sError("vgId:%d, sync get pre term error, nextIndex:%" PRId64 ", update next-index:%" PRId64
           ", match-index:%d, raftid:%" PRId64,
           pSyncNode->vgId, nextIndex, newNextIndex, SYNC_INDEX_INVALID, pDestId->addr);
    return -1;
  }

  // entry pointer array
  SSyncRaftEntry* entryPArr[SYNC_MAX_BATCH_SIZE];
  memset(entryPArr, 0, sizeof(entryPArr));

  // get entry batch
  int32_t getCount = 0;
  SyncIndex getEntryIndex = nextIndex;
  for (int32_t i = 0; i < pSyncNode->pRaftCfg->batchSize; ++i) {
    SSyncRaftEntry* pEntry = NULL;
    int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, getEntryIndex, &pEntry);
    if (code == 0) {
      ASSERT(pEntry != NULL);
      entryPArr[i] = pEntry;
      getCount++;
      getEntryIndex++;

    } else {
      break;
    }
  }

  // event log
  do {
    char logBuf[128];
    char host[64];
    uint16_t port;
    syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port);
    snprintf(logBuf, sizeof(logBuf), "build batch:%d for %s:%d", getCount, host, port);
    syncNodeEventLog(pSyncNode, logBuf);
  } while (0);

  // build msg
  SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchBuild(entryPArr, getCount, pSyncNode->vgId);
  ASSERT(pMsg != NULL);

  // free entries
  for (int32_t i = 0; i < pSyncNode->pRaftCfg->batchSize; ++i) {
    SSyncRaftEntry* pEntry = entryPArr[i];
    if (pEntry != NULL) {
      syncEntryDestory(pEntry);
      entryPArr[i] = NULL;
    }
  }

  // prepare msg
  pMsg->srcId = pSyncNode->myRaftId;
  pMsg->destId = *pDestId;
  pMsg->term = pSyncNode->pRaftStore->currentTerm;
  pMsg->prevLogIndex = preLogIndex;
  pMsg->prevLogTerm = preLogTerm;
  pMsg->commitIndex = pSyncNode->commitIndex;
  pMsg->privateTerm = 0;
  pMsg->dataCount = getCount;

  // send msg
  syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg);

  // speed up
  if (pMsg->dataCount > 0 && pSyncNode->commitIndex - pMsg->prevLogIndex > SYNC_SLOW_DOWN_RANGE) {
    ret = 1;

#if 0
    do {
      char logBuf[128];
      char host[64];
      uint16_t port;
      syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port);
      snprintf(logBuf, sizeof(logBuf), "maybe speed up for %s:%d, pre-index:%ld", host, port, pMsg->prevLogIndex);
      syncNodeEventLog(pSyncNode, logBuf);
    } while (0);
#endif
  }

  syncAppendEntriesBatchDestroy(pMsg);

  return ret;
}

int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
  if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
    return -1;
  }

  int32_t ret = 0;
  for (int i = 0; i < pSyncNode->peersNum; ++i) {
    SRaftId* pDestId = &(pSyncNode->peersId[i]);

    // next index
    SyncIndex nextIndex = syncIndexMgrGetIndex(pSyncNode->pNextIndex, pDestId);
    ret = syncNodeAppendEntriesOnePeer(pSyncNode, pDestId, nextIndex);
  }

  return ret;
}

#if 0
int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
  if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
    return -1;

@@ -221,6 +335,7 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {

  return ret;
}
#endif

int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
  ASSERT(pSyncNode->state == TAOS_SYNC_STATE_LEADER);

@@ -24,6 +24,7 @@ SyncAppendEntriesReply *createMsg() {
  pMsg->matchIndex = 77;
  pMsg->term = 33;
  pMsg->privateTerm = 44;
  pMsg->startTime = taosGetTimestampMs();
  return pMsg;
}

@@ -89,6 +90,8 @@ void test5() {
}

int main() {
  gRaftDetailLog = true;

  tsAsyncLog = 0;
  sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
  logTest();

@@ -386,6 +386,7 @@ void* taosArrayDestroy(SArray* pArray) {
}

void taosArrayDestroyP(SArray* pArray, FDelete fp) {
  if(!pArray) return;
  for (int32_t i = 0; i < pArray->size; i++) {
    fp(*(void**)TARRAY_GET_ELEM(pArray, i));
  }

@@ -293,6 +293,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED, "Consumer group being
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_NOT_EXIST, "Stream not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_OPTION, "Invalid stream option")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_MUST_BE_DELETED, "Stream must be dropped first")

// mnode-sma
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists")

@@ -3,6 +3,21 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect

print =============== conflict stb
sql create database db vgroups 1;
sql use db;
sql create table stb (ts timestamp, i int) tags (j int);
sql_error create table stb using stb tags (1);
sql_error create table stb (ts timestamp, i int);

sql create table ctb (ts timestamp, i int);
sql_error create table ctb (ts timestamp, i int) tags (j int);

sql create table ntb (ts timestamp, i int);
sql_error create table ntb (ts timestamp, i int) tags (j int);

sql drop database db

print =============== create database d1
sql create database d1
sql use d1

@@ -197,7 +197,7 @@ class TDTestCase:

        # test where with json tag
        tdSql.query(f"select * from {dbname}.jsons1_1 where jtag is not null")
        tdSql.query(f"select * from {dbname}.jsons1 where jtag='{{\"tag1\":11,\"tag2\":\"\"}}'")
        tdSql.error(f"select * from {dbname}.jsons1 where jtag='{{\"tag1\":11,\"tag2\":\"\"}}'")
        tdSql.error(f"select * from {dbname}.jsons1 where jtag->'tag1'={{}}")

        # test json error