Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/TS-5554-3.0
commit 7ec7c6abe6

@@ -0,0 +1,58 @@
name: TaosKeeper CI

on:
  push:
    paths:
      - tools/keeper/**

jobs:
  build:
    runs-on: ubuntu-latest
    name: Run unit tests

    steps:
      - name: Checkout the repository
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.18

      - name: Install system dependencies
        run: |
          sudo apt update -y
          sudo apt install -y build-essential cmake libgeos-dev

      - name: Install TDengine
        run: |
          mkdir debug
          cd debug
          cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true
          make -j 4
          sudo make install
          which taosd
          which taosadapter
          which taoskeeper

      - name: Start taosd
        run: |
          cp /etc/taos/taos.cfg ./
          sudo echo "supportVnodes 256" >> taos.cfg
          nohup sudo taosd -c taos.cfg &

      - name: Start taosadapter
        run: nohup sudo taosadapter &

      - name: Run tests with coverage
        working-directory: tools/keeper
        run: |
          go mod tidy
          go test -v -coverpkg=./... -coverprofile=coverage.out ./...
          go tool cover -func=coverage.out

      - name: Clean up
        if: always()
        run: |
          if pgrep taosd; then sudo pkill taosd; fi
          if pgrep taosadapter; then sudo pkill taosadapter; fi
@@ -1,6 +1,7 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE FALSE)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
set(TD_BUILD_KEEPER_INTERNAL FALSE)

# set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
@@ -57,6 +58,19 @@ IF(TD_BUILD_HTTP)
    ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF()

IF("${BUILD_KEEPER}" STREQUAL "")
    SET(TD_BUILD_KEEPER FALSE)
ELSEIF(${BUILD_KEEPER} MATCHES "false")
    SET(TD_BUILD_KEEPER FALSE)
ELSEIF(${BUILD_KEEPER} MATCHES "true")
    SET(TD_BUILD_KEEPER TRUE)
ELSEIF(${BUILD_KEEPER} MATCHES "internal")
    SET(TD_BUILD_KEEPER FALSE)
    SET(TD_BUILD_KEEPER_INTERNAL TRUE)
ELSE()
    SET(TD_BUILD_KEEPER FALSE)
ENDIF()

IF("${BUILD_TOOLS}" STREQUAL "")
    IF(TD_LINUX)
        IF(TD_ARM_32)
@@ -27,11 +27,15 @@ The preceding SQL command shows all dnodes in the cluster with the ID, endpoint,

## Delete a DNODE

```sql
DROP DNODE dnode_id
DROP DNODE dnode_id [force] [unsafe]
```

Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted.

Only an online dnode can be deleted normally. To delete an offline dnode, run DROP with the force option.

If a dnode holding single-replica data is offline, it can only be dropped with the unsafe option, and the data on it cannot be restored.

## Modify Dnode Configuration

```sql
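To make the new options concrete, a hedged sketch follows (the dnode ID 3 is hypothetical):

```sql
-- Normal removal of an online dnode
DROP DNODE 3;
-- Removal of an offline dnode
DROP DNODE 3 force;
-- Removal of an offline dnode holding single-replica data; that data cannot be recovered
DROP DNODE 3 unsafe;
```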
@@ -1,8 +1,9 @@
package com.taos.example;

import com.alibaba.fastjson.JSON;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.tmq.*;
import com.taosdata.jdbc.utils.JsonUtil;

import java.sql.*;
import java.time.Duration;
@@ -60,7 +61,7 @@ public class ConsumerLoopFull {
        // ANCHOR_END: create_consumer
    }

    public static void pollExample(TaosConsumer<ResultBean> consumer) throws SQLException {
    public static void pollExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
        // ANCHOR: poll_data_code_piece
        List<String> topics = Collections.singletonList("topic_meters");
        try {
@@ -73,7 +74,7 @@ public class ConsumerLoopFull {
                for (ConsumerRecord<ResultBean> record : records) {
                    ResultBean bean = record.value();
                    // Add your data processing logic here
                    System.out.println("data: " + JSON.toJSONString(bean));
                    System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
                }
            }
        } catch (Exception ex) {
@@ -91,7 +92,7 @@ public class ConsumerLoopFull {
        // ANCHOR_END: poll_data_code_piece
    }

    public static void seekExample(TaosConsumer<ResultBean> consumer) throws SQLException {
    public static void seekExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
        // ANCHOR: consumer_seek
        List<String> topics = Collections.singletonList("topic_meters");
        try {
@@ -99,7 +100,7 @@ public class ConsumerLoopFull {
            consumer.subscribe(topics);
            System.out.println("Subscribe topics successfully.");
            Set<TopicPartition> assignment = consumer.assignment();
            System.out.println("Now assignment: " + JSON.toJSONString(assignment));
            System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment));

            ConsumerRecords<ResultBean> records = ConsumerRecords.emptyRecord();
            // make sure we have got some data
@@ -125,7 +126,7 @@ public class ConsumerLoopFull {
    }


    public static void commitExample(TaosConsumer<ResultBean> consumer) throws SQLException {
    public static void commitExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
        // ANCHOR: commit_code_piece
        List<String> topics = Collections.singletonList("topic_meters");
        try {
@@ -135,7 +136,7 @@ public class ConsumerLoopFull {
            for (ConsumerRecord<ResultBean> record : records) {
                ResultBean bean = record.value();
                // Add your data processing logic here
                System.out.println("data: " + JSON.toJSONString(bean));
                System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
            }
            if (!records.isEmpty()) {
                // after processing the data, commit the offset manually
@@ -1,7 +1,7 @@
package com.taos.example;

import com.alibaba.fastjson.JSON;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.utils.JsonUtil;

import java.sql.Connection;
import java.sql.DriverManager;
@@ -31,7 +31,11 @@ public class ConsumerLoopImp {
        final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() {
            @Override
            public void process(ResultBean result) {
                System.out.println("data: " + JSON.toJSONString(result));
                try {
                    System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result));
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        };

@@ -1,8 +1,9 @@
package com.taos.example;

import com.alibaba.fastjson.JSON;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.tmq.*;
import com.taosdata.jdbc.utils.JsonUtil;

import java.sql.*;
import java.time.Duration;
@@ -60,7 +61,7 @@ public class WsConsumerLoopFull {
        // ANCHOR_END: create_consumer
    }

    public static void pollExample(TaosConsumer<ResultBean> consumer) throws SQLException {
    public static void pollExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
        // ANCHOR: poll_data_code_piece
        List<String> topics = Collections.singletonList("topic_meters");
        try {
@@ -73,7 +74,7 @@ public class WsConsumerLoopFull {
                for (ConsumerRecord<ResultBean> record : records) {
                    ResultBean bean = record.value();
                    // Add your data processing logic here
                    System.out.println("data: " + JSON.toJSONString(bean));
                    System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
                }
            }
        } catch (Exception ex) {
@@ -91,7 +92,7 @@ public class WsConsumerLoopFull {
        // ANCHOR_END: poll_data_code_piece
    }

    public static void seekExample(TaosConsumer<ResultBean> consumer) throws SQLException {
    public static void seekExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
        // ANCHOR: consumer_seek
        List<String> topics = Collections.singletonList("topic_meters");
        try {
@@ -99,7 +100,7 @@ public class WsConsumerLoopFull {
            consumer.subscribe(topics);
            System.out.println("Subscribe topics successfully.");
            Set<TopicPartition> assignment = consumer.assignment();
            System.out.println("Now assignment: " + JSON.toJSONString(assignment));
            System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment));

            ConsumerRecords<ResultBean> records = ConsumerRecords.emptyRecord();
            // make sure we have got some data
@@ -125,7 +126,7 @@ public class WsConsumerLoopFull {
    }


    public static void commitExample(TaosConsumer<ResultBean> consumer) throws SQLException {
    public static void commitExample(TaosConsumer<ResultBean> consumer) throws SQLException, JsonProcessingException {
        // ANCHOR: commit_code_piece
        List<String> topics = Collections.singletonList("topic_meters");
        try {
@@ -135,7 +136,7 @@ public class WsConsumerLoopFull {
            for (ConsumerRecord<ResultBean> record : records) {
                ResultBean bean = record.value();
                // Add your data processing logic here
                System.out.println("data: " + JSON.toJSONString(bean));
                System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
            }
            if (!records.isEmpty()) {
                // after processing the data, commit the offset manually
@@ -1,7 +1,7 @@
package com.taos.example;

import com.alibaba.fastjson.JSON;
import com.taosdata.jdbc.TSDBDriver;
import com.taosdata.jdbc.utils.JsonUtil;

import java.sql.Connection;
import java.sql.DriverManager;
@@ -28,7 +28,11 @@ public abstract class WsConsumerLoopImp {
        final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() {
            @Override
            public void process(ResultBean result) {
                System.out.println("data: " + JSON.toJSONString(result));
                try {
                    System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result));
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        };

@@ -13,6 +13,9 @@ public class DataBaseMonitor {
    public DataBaseMonitor init() throws SQLException {
        if (conn == null) {
            String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
            if (jdbcURL == null || jdbcURL == "") {
                jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
            }
            conn = DriverManager.getConnection(jdbcURL);
            stmt = conn.createStatement();
        }
@@ -69,6 +69,9 @@ public class SQLWriter {
     */
    private static Connection getConnection() throws SQLException {
        String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
        if (jdbcURL == null || jdbcURL == "") {
            jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
        }
        return DriverManager.getConnection(jdbcURL);
    }

@@ -17,6 +17,37 @@ public class TestAll {
                stmt.execute("drop database if exists " + dbName);
            }
        }
        waitTransaction();
    }

    public void dropTopic(String topicName) throws SQLException {
        String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            try (Statement stmt = conn.createStatement()) {
                stmt.execute("drop topic if exists " + topicName);
            }
        }
        waitTransaction();
    }

    public void waitTransaction() throws SQLException {

        String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            try (Statement stmt = conn.createStatement()) {
                for (int i = 0; i < 10; i++) {
                    stmt.execute("show transactions");
                    try (ResultSet resultSet = stmt.getResultSet()) {
                        if (resultSet.next()) {
                            int count = resultSet.getInt(1);
                            if (count == 0) {
                                break;
                            }
                        }
                    }
                }
            }
        }
    }

    public void insertData() throws SQLException {
@@ -104,14 +135,20 @@ public class TestAll {
        SubscribeDemo.main(args);
    }

    // @Test
    // public void testSubscribeJni() throws SQLException, InterruptedException {
    //     dropDB("power");
    //     ConsumerLoopFull.main(args);
    // }
    // @Test
    // public void testSubscribeWs() throws SQLException, InterruptedException {
    //     dropDB("power");
    //     WsConsumerLoopFull.main(args);
    // }
    @Test
    public void testSubscribeJni() throws SQLException, InterruptedException {
        dropTopic("topic_meters");
        dropDB("power");
        ConsumerLoopFull.main(args);
        dropTopic("topic_meters");
        dropDB("power");
    }
    @Test
    public void testSubscribeWs() throws SQLException, InterruptedException {
        dropTopic("topic_meters");
        dropDB("power");
        WsConsumerLoopFull.main(args);
        dropTopic("topic_meters");
        dropDB("power");
    }
}
@@ -17,7 +17,7 @@ docker pull tdengine/tdengine:latest
Or pull a container image of a specific version:

```shell
docker pull tdengine/tdengine:3.0.1.4
docker pull tdengine/tdengine:3.3.3.0
```

Then simply run the following command:
@@ -121,4 +121,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
```

In the query above, the system-provided pseudo-column _wstart gives the start time of each window.
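For comparison, a hedged sketch that also selects the window end time (assuming the companion pseudo-column _wend is available in this version):

```sql
SELECT _wstart, _wend, AVG(current) FROM test.d1001 INTERVAL(10s);
```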
@@ -317,4 +317,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
```

In the query above, the system-provided pseudo-column _wstart gives the start time of each window.
@@ -54,4 +54,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
```

In the query above, the system-provided pseudo-column _wstart gives the start time of each window.
@@ -53,7 +53,7 @@ M = (T × S × 3 + (N / 4096) + 100)

Compared with the WebSocket connection, the RESTful connection has a larger memory footprint: besides the buffer memory, the memory overhead of each connection's response must be considered. This overhead is closely tied to the size of the JSON response body and can become very large when a query returns a lot of data.

Because the RESTful connection does not support fetching query results in batches, retrieving a very large result set may consume a great deal of memory and cause an out-of-memory condition. In large projects, it is therefore recommended to enable the batchfetch=true option so as to use the WebSocket connection, which streams the result set and avoids the risk of memory overflow.
Because the RESTful connection does not support fetching query results in batches, retrieving a very large result set may consume a great deal of memory and cause an out-of-memory condition. In large projects, it is therefore recommended to use the WebSocket connection, which streams the result set and avoids the risk of memory overflow.

**Note**
- It is recommended to access the TDengine cluster through the RESTful/WebSocket connection rather than the native taosc connection.
@@ -146,11 +146,11 @@ TDengine's multi-level storage feature also offers the following advantages in use.

The table below lists the common ports used by some TDengine interfaces and components; all of them can be changed via parameters in the configuration file.

| Interface or Component   | Port       |
| :----------------------: | :--------: |
| Native interface (taosc) | 6030       |
| RESTful interface        | 6041       |
| WebSocket interface      | 6041       |
| taosKeeper               | 6043       |
| taosX                    | 6050, 6055 |
| taosExplorer             | 6060       |
@@ -163,7 +163,7 @@ The valid value of charset is UTF-8.

| Parameter          | Description |
| :----------------: | :----------------------------------------------------------------------: |
| numOfCommitThreads | Maximum number of write threads. Range: 0-1024. Default: 4.              |
| numOfCommitThreads | Maximum number of commit (disk-flush) threads. Range: 0-1024. Default: 4. |

### Logging

@@ -223,16 +223,16 @@ lossyColumns float|double

| Parameter        | Description |
| :--------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
| enableCoreFile   | Whether to generate a core file on crash. 0: no, 1: yes. Default: 1. The directory of the generated core file depends on how taosd was started: 1. started with systemctl start taosd: the core file is placed in the root directory; <br/> 2. started manually: it is placed in the directory where taosd was executed. |
| udf              | Whether to start the UDF service. 0: no, 1: yes. Default: 0. |
| ttlChangeOnWrite | Whether the ttl expiration time changes along with table modification operations. 0: no, 1: yes. Default: 0. |
| tmqMaxTopicNum   | Maximum number of topics that can be created for subscription. Range: 1-10000. Default: 20. |
| maxTsmaNum       | Number of TSMAs that can be created in the cluster. Range: 0-3. Default: 3. |


## taosd Monitoring Metrics

taosd reports its monitoring metrics to taosKeeper, which writes them into the monitoring database (the `log` database by default; this can be changed in the taoskeeper configuration file). These metrics are described in detail below.

### taosd\_cluster\_basic Table

@@ -458,4 +458,3 @@ TDengine's log files mainly include two types: ordinary logs and slow logs.
3. Logs from multiple clients are stored in the same taosSlowLog.yyyy.mm.dd file under the corresponding log path.
4. Slow log files are not automatically deleted and are not compressed.
5. Slow logs use the same three parameters as ordinary log files: logDir, minimalLogDirGB, and asyncLog. The other two parameters, numOfLogLines and logKeepDays, do not apply to slow logs.
@@ -8,7 +8,7 @@ taosExplorer is a visual management and interaction tool for TDengine instances.

## Installation

taosExplorer does not need to be installed separately. Starting with TDengine 3.3.0.0, it ships with the TDengine installation package; after installation the `taos-explorer` service is available. Installation packages built from TDengine source code following the steps on GitHub do not include taosExplorer.

## Configuration

@@ -87,7 +87,7 @@ CREATE TABLE [IF NOT EXISTS] USING [db_name.]stb_name (field1_name [, field2_nam

**Parameter Description**

1. The FILE clause indicates that the data comes from a CSV file (comma-separated, with each value enclosed in single quotes); the CSV file needs no header. The CSV file should contain only the table name and tag values. To insert data as well, see the 'Data Ingestion' chapter.
2. This creates subtables for the specified stb_name; the supertable must already exist.
3. The order of the field_name list matches the order of the columns in the CSV file. The list must not contain duplicates, must include `tbname`, and may include zero or more tag columns already defined in the supertable. Tag values not included in the list are set to NULL.
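As an illustration only, a hedged sketch using the meters supertable from the examples in these docs; the CSV path and its rows are hypothetical:

```sql
-- Hypothetical contents of /tmp/subtables.csv (tbname, location, groupId per line):
-- 'd2001','California.SanFrancisco','2'
-- 'd2002','California.LosAngeles','3'
CREATE TABLE USING meters (tbname, location, groupId) FILE '/tmp/subtables.csv';
```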
@@ -1,7 +1,7 @@
---
sidebar_label: Data Ingestion
title: Data Ingestion
description: Detailed syntax for writing data
---

## Insert Syntax
@@ -25,9 +25,9 @@ INSERT INTO tb_name [(field1_name, ...)] subquery
### Supertable Syntax
```sql
INSERT INTO
    stb1_name [(field1_name, ...)]
        VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
    [stb2_name [(field1_name, ...)]
        VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
    ...];
```
@@ -47,7 +47,7 @@ INSERT INTO

2. The VALUES clause specifies one or more rows of data to insert.

3. The FILE clause indicates that the data comes from a CSV file (comma-separated, with each value enclosed in single quotes); the CSV file needs no header. If you only need to create subtables, see the 'Table' chapter.

4. Both the `INSERT ... VALUES` statement and the `INSERT ... FILE` statement can insert data into multiple tables within a single INSERT statement.

@@ -154,12 +154,20 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c
INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv'
    d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv';
```
## Supertable Syntax
## Insert into a Supertable and Automatically Create Subtables

Tables are created automatically, with the table name specified via the tbname column:
```sql
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32)
('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33)
('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33)
```
## Insert into a Supertable from a CSV File and Automatically Create Subtables

Based on the contents of the CSV file, subtables of the supertable are created and the corresponding columns and tags are populated:

```sql
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
FILE '/tmp/csvfile_21002.csv'
```
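As a sketch of what such a CSV might contain (the rows are hypothetical; each value is enclosed in single quotes, per the rules above):

```sql
-- Hypothetical contents of /tmp/csvfile_21002.csv:
-- 'd31003','California.SanDiego','2','2021-07-13 14:06:34.255','10.15','217','0.33'
-- 'd31004','California.SanDiego','2','2021-07-13 14:06:35.100','10.30','218','0.31'
INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase)
FILE '/tmp/csvfile_21002.csv'
```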
@@ -27,11 +27,15 @@ SHOW DNODES;
## Delete a Data Node

```sql
DROP DNODE dnode_id
DROP DNODE dnode_id [force] [unsafe]
```

Note that deleting a dnode is not the same as stopping its process. In practice, it is recommended to delete a dnode first and then stop its corresponding process.

Only online nodes can be deleted normally. To force-delete an offline node, specify the force option.

If a node holds single-replica data and is offline, it can only be force-deleted with the unsafe option, and the data on it cannot be recovered.

## Modify Data Node Configuration

```sql
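A hedged sketch of the workflow (the dnode ID 2 is hypothetical):

```sql
DROP DNODE 2;
-- Verify that the node no longer appears in the list
SHOW DNODES;
```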
@@ -26,10 +26,10 @@ description: "List of platforms supported by the TDengine server, client, and connectors"
| ----------- | --------- | --------- | --------- | --------- | --------- |
| **OS**      | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** |
| **C/C++**   | ●         | ●         | ●         | ●         | ●         |
| **JDBC**    | ●         | ●         | ●         | ○         | ○         |
| **JDBC**    | ●         | ●         | ●         | ●         | ●         |
| **Python**  | ●         | ●         | ●         | ●         | ●         |
| **Go**      | ●         | ●         | ●         | ●         | ●         |
| **NodeJs**  | ●         | ●         | ●         | ○         | ○         |
| **NodeJs**  | ●         | ●         | ●         | ●         | ●         |
| **C#**      | ●         | ●         | ○         | ○         | ○         |
| **Rust**    | ●         | ●         | ○         | ●         | ●         |
| **RESTful** | ●         | ●         | ●         | ●         | ●         |
@@ -4104,18 +4104,16 @@ void tDeleteMqMetaRsp(SMqMetaRsp* pRsp);
#define MQ_DATA_RSP_VERSION 100

typedef struct {
  struct {
    SMqRspHead   head;
    STqOffsetVal rspOffset;
    STqOffsetVal reqOffset;
    int32_t      blockNum;
    int8_t       withTbName;
    int8_t       withSchema;
    SArray*      blockDataLen;
    SArray*      blockData;
    SArray*      blockTbName;
    SArray*      blockSchema;
  };
  SMqRspHead   head;
  STqOffsetVal rspOffset;
  STqOffsetVal reqOffset;
  int32_t      blockNum;
  int8_t       withTbName;
  int8_t       withSchema;
  SArray*      blockDataLen;
  SArray*      blockData;
  SArray*      blockTbName;
  SArray*      blockSchema;

  union{
    struct{
@@ -151,8 +151,9 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
 * @param tversion
 * @return
 */
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
                                    int32_t* tversion, int32_t idx, bool* tbGet);
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, int32_t dbNameBuffLen, char* tableName,
                                    int32_t tbaleNameBuffLen, int32_t* sversion, int32_t* tversion, int32_t idx,
                                    bool* tbGet);

/**
 * The main task execution function, including query on both table and multiple tables,
|
|||
|
||||
const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo);
|
||||
|
||||
const char* qExtractTbnameFromTask(qTaskInfo_t tinfo);
|
||||
const char* qExtractTbnameFromTask(qTaskInfo_t tinfo);
|
||||
|
||||
void* qExtractReaderFromStreamScanner(void* scanner);
|
||||
|
||||
|
|
|
@ -176,8 +176,8 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS
|
|||
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
|
||||
char* msgBuf, int32_t msgBufLen);
|
||||
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
|
||||
int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* fields,
|
||||
int numFields, bool needChangeLength, char* errstr, int32_t errstrLen);
|
||||
int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* fields,
|
||||
int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw);
|
||||
|
||||
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
|
||||
int32_t serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap, SArray** pOut);
|
||||
|
|
|
@@ -70,7 +70,7 @@ extern int32_t tdbDebugFlag;
extern int32_t sndDebugFlag;
extern int32_t simDebugFlag;

extern int32_t tqClientDebug;
extern int32_t tqClientDebugFlag;

int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc);
void taosCloseLog();
@@ -12,9 +12,18 @@ if exist C:\TDengine\data\dnode\dnodeCfg.json (

rem // stop and delete service
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close)
echo This might take a few moment to accomplish deleting service taosd/taosadapter ...

if exist %binary_dir%\build\bin\taosadapter.exe (
  echo This might take a few moment to accomplish deleting service taosd/taosadapter ...
)

if exist %binary_dir%\build\bin\taoskeeper.exe (
  echo This might take a few moment to accomplish deleting service taosd/taoskeeper ...
)

call :check_svc taosd
call :check_svc taosadapter
call :check_svc taoskeeper

set source_dir=%2
set source_dir=%source_dir:/=\%
@@ -46,6 +55,11 @@ if exist %binary_dir%\test\cfg\taosadapter.toml (
    copy %binary_dir%\test\cfg\taosadapter.toml %target_dir%\cfg\taosadapter.toml > nul
  )
)
if exist %binary_dir%\test\cfg\taoskeeper.toml (
  if not exist %target_dir%\cfg\taoskeeper.toml (
    copy %binary_dir%\test\cfg\taoskeeper.toml %target_dir%\cfg\taoskeeper.toml > nul
  )
)
copy %source_dir%\include\client\taos.h %target_dir%\include > nul
copy %source_dir%\include\util\taoserror.h %target_dir%\include > nul
copy %source_dir%\include\libs\function\taosudf.h %target_dir%\include > nul
@@ -98,12 +112,15 @@ if %Enterprise% == TRUE (
    copy %binary_dir%\build\bin\*explorer.exe %target_dir% > nul
  )
)


copy %binary_dir%\build\bin\taosd.exe %target_dir% > nul
copy %binary_dir%\build\bin\udfd.exe %target_dir% > nul
if exist %binary_dir%\build\bin\taosadapter.exe (
  copy %binary_dir%\build\bin\taosadapter.exe %target_dir% > nul
)
if exist %binary_dir%\build\bin\taoskeeper.exe (
  copy %binary_dir%\build\bin\taoskeeper.exe %target_dir% > nul
)

mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)

@@ -116,6 +133,10 @@ if exist %binary_dir%\build\bin\taosadapter.exe (
  echo To start/stop taosAdapter with administrator privileges: %ESC%[92msc start/stop taosadapter %ESC%[0m
)

if exist %binary_dir%\build\bin\taoskeeper.exe (
  echo To start/stop taosKeeper with administrator privileges: %ESC%[92msc start/stop taoskeeper %ESC%[0m
)

goto :eof

:hasAdmin
@@ -123,6 +144,7 @@ goto :eof
call :stop_delete
call :check_svc taosd
call :check_svc taosadapter
call :check_svc taoskeeper

if exist c:\windows\sysnative (
  echo x86
@@ -141,6 +163,7 @@ if exist c:\windows\sysnative (
rem // create services
sc create "taosd" binPath= "C:\TDengine\taosd.exe --win_service" start= DEMAND
sc create "taosadapter" binPath= "C:\TDengine\taosadapter.exe" start= DEMAND
sc create "taoskeeper" binPath= "C:\TDengine\taoskeeper.exe" start= DEMAND

set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment"
for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\<Path\>"') do (
@@ -181,6 +204,8 @@ sc stop taosd
sc delete taosd
sc stop taosadapter
sc delete taosadapter
sc stop taoskeeper
sc delete taoskeeper
exit /B 0

:check_svc
@@ -129,6 +129,13 @@ function kill_taosadapter() {
  fi
}

function kill_taoskeeper() {
  pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}')
  if [ -n "$pid" ]; then
    ${csudo}kill -9 $pid || :
  fi
}

function kill_taosd() {
  pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}')
  if [ -n "$pid" ]; then
@@ -155,6 +162,7 @@ function install_bin() {
  ${csudo}rm -f ${bin_link_dir}/${clientName} || :
  ${csudo}rm -f ${bin_link_dir}/${serverName} || :
  ${csudo}rm -f ${bin_link_dir}/taosadapter || :
  ${csudo}rm -f ${bin_link_dir}/taoskeeper || :
  ${csudo}rm -f ${bin_link_dir}/udfd || :
  ${csudo}rm -f ${bin_link_dir}/taosdemo || :
  ${csudo}rm -f ${bin_link_dir}/taosdump || :
@@ -169,6 +177,7 @@ function install_bin() {
  [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
  [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
  ${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
@@ -183,6 +192,7 @@ function install_bin() {
  [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo > /dev/null 2>&1 || :
@@ -197,6 +207,7 @@ function install_bin() {
  [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
  [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
  [ -f ${binary_dir}/build/bin/*explorer ] && ${csudo}cp -r ${binary_dir}/build/bin/*explorer ${install_main_dir}/bin || :
@@ -208,6 +219,7 @@ function install_bin() {
  [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || :
  [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || :
  [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || :
@@ -407,6 +419,29 @@ function install_taosadapter_config() {
  fi
}

function install_taoskeeper_config() {
  if [ ! -f "${cfg_install_dir}/taoskeeper.toml" ]; then
    ${csudo}mkdir -p ${cfg_install_dir} || :
    [ -f ${binary_dir}/test/cfg/taoskeeper.toml ] &&
      ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_install_dir} &&
      ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || :
    [ -f ${cfg_install_dir}/taoskeeper.toml ] &&
      ${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml || :
    [ -f ${binary_dir}/test/cfg/taoskeeper.toml ] &&
      ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \
        ${cfg_install_dir}/taoskeeper.toml.${verNumber} || :
    [ -f ${cfg_install_dir}/taoskeeper.toml ] &&
      ${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml \
        ${install_main_dir}/cfg/taoskeeper.toml > /dev/null 2>&1 || :
  else
    if [ -f "${binary_dir}/test/cfg/taoskeeper.toml" ]; then
      ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \
        ${cfg_install_dir}/taoskeeper.toml.${verNumber} || :
      ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || :
    fi
  fi
}

function install_log() {
  ${csudo}rm -rf ${log_dir} || :
  ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir}
@@ -526,6 +561,15 @@ function install_taosadapter_service() {
  fi
}

function install_taoskeeper_service() {
  if ((${service_mod} == 0)); then
    [ -f ${binary_dir}/test/cfg/taoskeeper.service ] &&
      ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.service \
        ${service_config_dir}/ || :
    ${csudo}systemctl daemon-reload
  fi
}

function install_service_on_launchctl() {
  ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || :
  ${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist
@@ -534,6 +578,10 @@ function install_service_on_launchctl() {
  ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :
  ${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist
  ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || :

  ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || :
  ${csudo}cp ${script_dir}/com.taosdata.taoskeeper.plist /Library/LaunchDaemons/com.taosdata.taoskeeper.plist
  ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || :
}

function install_service() {
@@ -549,6 +597,7 @@ function install_service() {
    install_service_on_launchctl
  fi
}

function install_app() {
  if [ "$osType" = "Darwin" ]; then
    ${csudo}rm -rf /Applications/TDengine.app &&
@@ -573,6 +622,7 @@ function update_TDengine() {
  elif ((${service_mod} == 1)); then
    ${csudo}service ${serverName} stop || :
  else
    kill_taoskeeper
    kill_taosadapter
    kill_taosd
  fi
@@ -591,9 +641,11 @@ function update_TDengine() {

  install_service
  install_taosadapter_service
  install_taoskeeper_service

  install_config
  install_taosadapter_config
  install_taoskeeper_config

  echo
  echo -e "\033[44;32;1m${productName} is updated successfully!${NC}"
@@ -602,22 +654,33 @@ function update_TDengine() {
  echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
  [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
    echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml"
  [ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \
    echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml"
  if ((${service_mod} == 0)); then
    echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
    [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
      echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
    [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
      echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}"
  elif ((${service_mod} == 1)); then
    echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
    [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
      echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}"
    [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
      echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}"
  else
    if [ "$osType" != "Darwin" ]; then
      echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
      [ -f ${installDir}/bin/taosadapter ] && \
        echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}"
      [ -f ${installDir}/bin/taoskeeper ] && \
        echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}"
    else
      echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}"
      echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
      [ -f ${installDir}/bin/taosadapter ] && \
        echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
      [ -f ${installDir}/bin/taoskeeper ] && \
        echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}"
    fi
  fi

@@ -643,9 +706,11 @@ function install_TDengine() {

  install_service
  install_taosadapter_service
  install_taoskeeper_service

  install_config
  install_taosadapter_config
  install_taoskeeper_config

  # Ask if to start the service
  echo
@@ -654,22 +719,33 @@ function install_TDengine() {
  echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
  [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
    echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml"
  [ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \
    echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml"
  if ((${service_mod} == 0)); then
    echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
    [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
      echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
    [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
      echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}"
  elif ((${service_mod} == 1)); then
    echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
    [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
      echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}"
    [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \
      echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}"
  else
    if [ "$osType" != "Darwin" ]; then
      echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
      [ -f ${installDir}/bin/taosadapter ] && \
        echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}"
      [ -f ${installDir}/bin/taoskeeper ] && \
        echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}"
    else
      echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}"
      echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
      [ -f ${installDir}/bin/taosadapter ] && \
        echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}"
      [ -f ${installDir}/bin/taoskeeper ] && \
        echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}"
    fi
  fi

@@ -52,10 +52,8 @@

#define TMQ_META_VERSION "1.0"

static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen);

static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen);
static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); }

static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t,
                                 SColCmprWrapper* pColCmprRow, cJSON** pJson) {
  int32_t code = TSDB_CODE_SUCCESS;
@@ -457,7 +455,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) {

  cJSON* tvalue = NULL;
  if (IS_VAR_DATA_TYPE(pTagVal->type)) {
    char*   buf = NULL;
    int64_t bufSize = 0;
    if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) {
      bufSize = pTagVal->nData * 2 + 2 + 3;
@@ -890,9 +888,6 @@ end:
}

static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
  if (taos == NULL || meta == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }
  SVCreateStbReq req = {0};
  SDecoder       coder;
  SMCreateStbReq pReq = {0};
@@ -1003,9 +998,6 @@ end:
}

static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
  if (taos == NULL || meta == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }
  SVDropStbReq req = {0};
  SDecoder     coder = {0};
  SMDropStbReq pReq = {0};
@@ -1115,9 +1107,6 @@ static void destroyCreateTbReqBatch(void* data) {
}

static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
  if (taos == NULL || meta == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }
  SVCreateTbBatchReq req = {0};
  SDecoder           coder = {0};
  int32_t            code = TSDB_CODE_SUCCESS;
@@ -1304,9 +1293,6 @@ static void destroyDropTbReqBatch(void* data) {
}

static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
  if (taos == NULL || meta == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }
  SVDropTbBatchReq req = {0};
  SDecoder         coder = {0};
  int32_t          code = TSDB_CODE_SUCCESS;
@@ -1419,9 +1405,6 @@ end:
}

static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
  if (taos == NULL || meta == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }
  SDeleteRes req = {0};
  SDecoder   coder = {0};
  char       sql[256] = {0};
@@ -1457,9 +1440,6 @@ end:
}

static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
  if (taos == NULL || meta == NULL) {
    return TSDB_CODE_INVALID_PARA;
  }
  SVAlterTbReq req = {0};
  SDecoder     dcoder = {0};
  int32_t      code = TSDB_CODE_SUCCESS;
@@ -1622,7 +1602,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat
  RAW_NULL_CHECK(pVgHash);
  RAW_RETURN_CHECK(
      taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData)));
  RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0));
  RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0, false));
  RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));

  launchQueryImpl(pRequest, pQuery, true, NULL);
@@ -1682,7 +1662,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha
  RAW_NULL_CHECK(pVgHash);
  RAW_RETURN_CHECK(
      taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData)));
  RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0));
  RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0, false));
  RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));

  launchQueryImpl(pRequest, pQuery, true, NULL);
@@ -1708,116 +1688,6 @@ static void* getRawDataFromRes(void* pRetrieve) {
  return rawData;
}

static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
  if (taos == NULL || data == NULL) {
    SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data);
    return TSDB_CODE_INVALID_PARA;
  }
  int32_t     code = TSDB_CODE_SUCCESS;
  SHashObj*   pVgHash = NULL;
  SQuery*     pQuery = NULL;
  SMqRspObj   rspObj = {0};
  SDecoder    decoder = {0};
  STableMeta* pTableMeta = NULL;

  SRequestObj* pRequest = NULL;
  RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest));

  uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
  pRequest->syncQuery = true;
  rspObj.resIter = -1;
  rspObj.resType = RES_TYPE__TMQ;

  int8_t dataVersion = *(int8_t*)data;
  if (dataVersion >= MQ_DATA_RSP_VERSION) {
    data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
    dataLen -= sizeof(int8_t) + sizeof(int32_t);
  }
  tDecoderInit(&decoder, data, dataLen);
  code = tDecodeMqDataRsp(&decoder, &rspObj.dataRsp);
  if (code != 0) {
    SET_ERROR_MSG("decode mq data rsp failed");
    code = TSDB_CODE_INVALID_MSG;
    goto end;
  }

  if (!pRequest->pDb) {
    code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
    goto end;
  }

  struct SCatalog* pCatalog = NULL;
  RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog));

  SRequestConnInfo conn = {0};
  conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
  conn.requestId = pRequest->requestId;
  conn.requestObjRefId = pRequest->self;
  conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);

  RAW_RETURN_CHECK(smlInitHandle(&pQuery));
  pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
  RAW_NULL_CHECK(pVgHash);
  while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
    void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
    RAW_NULL_CHECK(pRetrieve);
    if (!rspObj.dataRsp.withSchema) {
      goto end;
    }

    const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
    RAW_NULL_CHECK(tbName);

    SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
    tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
    tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);

    RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta));

    SVgroupInfo vg = {0};
    RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg));

    void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
    if (hData == NULL) {
      RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
    }

    SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
    RAW_NULL_CHECK(pSW);
    TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD));
    RAW_NULL_CHECK(fields);
    for (int i = 0; i < pSW->nCols; i++) {
      fields[i].type = pSW->pSchema[i].type;
      fields[i].bytes = pSW->pSchema[i].bytes;
      tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
    }
    void* rawData = getRawDataFromRes(pRetrieve);
    char  err[ERR_MSG_LEN] = {0};
    code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true, err, ERR_MSG_LEN);
    taosMemoryFree(fields);
    taosMemoryFreeClear(pTableMeta);
    if (code != TSDB_CODE_SUCCESS) {
      SET_ERROR_MSG("table:%s, err:%s", tbName, err);
      goto end;
    }
  }

  RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));

  launchQueryImpl(pRequest, pQuery, true, NULL);
  code = pRequest->code;

end:
  uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code));
  tDeleteMqDataRsp(&rspObj.dataRsp);
  tDecoderClear(&decoder);
  qDestroyQuery(pQuery);
  destroyRequest(pRequest);
  taosHashCleanup(pVgHash);
  taosMemoryFreeClear(pTableMeta);
  return code;
}

static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) {
  // find schema data info
  int32_t code = 0;
@ -1855,152 +1725,368 @@ end:
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
|
||||
if (taos == NULL || data == NULL) {
|
||||
SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data);
|
||||
return TSDB_CODE_INVALID_PARA;
|
||||
typedef enum {
|
||||
WRITE_RAW_INIT_START = 0,
|
||||
WRITE_RAW_INIT_OK,
|
||||
WRITE_RAW_INIT_FAIL,
|
||||
} WRITE_RAW_INIT_STATUS;
|
||||
|
||||
static SHashObj* writeRawCache = NULL;
|
||||
static int8_t initFlag = 0;
|
||||
static int8_t initedFlag = WRITE_RAW_INIT_START;
|
||||
|
||||
typedef struct {
|
||||
SHashObj* pVgHash;
|
||||
SHashObj* pNameHash;
|
||||
SHashObj* pMetaHash;
|
||||
} rawCacheInfo;
|
||||
|
||||
typedef struct {
|
||||
SVgroupInfo vgInfo;
|
||||
int64_t uid;
|
||||
int64_t suid;
|
||||
} tbInfo;
|
||||
|
||||
static void tmqFreeMeta(void* data) {
|
||||
STableMeta* pTableMeta = *(STableMeta**)data;
|
||||
taosMemoryFree(pTableMeta);
|
||||
}
|
||||
|
||||
static void freeRawCache(void* data) {
|
||||
rawCacheInfo* pRawCache = (rawCacheInfo*)data;
|
||||
taosHashCleanup(pRawCache->pMetaHash);
|
||||
taosHashCleanup(pRawCache->pNameHash);
|
||||
taosHashCleanup(pRawCache->pVgHash);
|
||||
}
|
||||
|
||||
static int32_t initRawCacheHash() {
|
||||
if (writeRawCache == NULL) {
|
||||
writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
|
||||
if (writeRawCache == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
taosHashSetFreeFp(writeRawCache, freeRawCache);
|
||||
}
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
SHashObj* pVgHash = NULL;
|
||||
SQuery* pQuery = NULL;
|
||||
SMqRspObj rspObj = {0};
|
||||
SDecoder decoder = {0};
|
||||
STableMeta* pTableMeta = NULL;
|
||||
SHashObj* pCreateTbHash = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
SRequestObj* pRequest = NULL;
|
||||
RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest));
|
||||
static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW) {
|
||||
char* p = (char*)rawData;
|
||||
// | version | total length | total rows | blankFill | total columns | flag seg| block group id | column schema | each
|
||||
// column length |
|
||||
p += sizeof(int32_t);
|
||||
p += sizeof(int32_t);
|
||||
p += sizeof(int32_t);
|
||||
p += sizeof(int32_t);
|
||||
p += sizeof(int32_t);
|
||||
p += sizeof(uint64_t);
|
||||
int8_t* fields = p;
|
||||
|
||||
uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
|
||||
pRequest->syncQuery = true;
|
||||
rspObj.resIter = -1;
|
||||
rspObj.resType = RES_TYPE__TMQ_METADATA;
|
||||
if (pSW->nCols != pTableMeta->tableInfo.numOfColumns) {
|
||||
return true;
|
||||
}
|
||||
for (int i = 0; i < pSW->nCols; i++) {
|
||||
int j = 0;
|
||||
for (; j < pTableMeta->tableInfo.numOfColumns; j++) {
|
||||
SSchema* pColSchema = &pTableMeta->schema[j];
|
||||
char* fieldName = pSW->pSchema[i].name;
|
||||
|
||||
int8_t dataVersion = *(int8_t*)data;
|
||||
if (dataVersion >= MQ_DATA_RSP_VERSION) {
|
||||
data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
|
||||
dataLen -= sizeof(int8_t) + sizeof(int32_t);
|
||||
if (strcmp(pColSchema->name, fieldName) == 0) {
|
||||
if (*fields != pColSchema->type || *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) {
|
||||
return true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
fields += sizeof(int8_t) + sizeof(int32_t);
|
||||
|
||||
if (j == pTableMeta->tableInfo.numOfColumns) return true;
|
||||
}
|
||||
return false;
|
||||
}

static int32_t getRawCache(SHashObj** pVgHash, SHashObj** pNameHash, SHashObj** pMetaHash, void* key) {
  int32_t code = 0;
  void*   cacheInfo = taosHashGet(writeRawCache, &key, POINTER_BYTES);
  if (cacheInfo == NULL) {
    *pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
    RAW_NULL_CHECK(*pVgHash);
    *pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
    RAW_NULL_CHECK(*pNameHash);
    *pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
    RAW_NULL_CHECK(*pMetaHash);
    taosHashSetFreeFp(*pMetaHash, tmqFreeMeta);
    rawCacheInfo info = {*pVgHash, *pNameHash, *pMetaHash};
    RAW_RETURN_CHECK(taosHashPut(writeRawCache, &key, POINTER_BYTES, &info, sizeof(rawCacheInfo)));
  } else {
    rawCacheInfo* info = (rawCacheInfo*)cacheInfo;
    *pVgHash = info->pVgHash;
    *pNameHash = info->pNameHash;
    *pMetaHash = info->pMetaHash;
  }

  tDecoderInit(&decoder, data, dataLen);
  code = tDecodeSTaosxRsp(&decoder, &rspObj.dataRsp);
  if (code != 0) {
    SET_ERROR_MSG("decode mq taosx data rsp failed");
    code = TSDB_CODE_INVALID_MSG;
    goto end;
  }
  return 0;

end:
  taosHashCleanup(*pMetaHash);
  taosHashCleanup(*pNameHash);
  taosHashCleanup(*pVgHash);
  return code;
}

  if (!pRequest->pDb) {
static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) {
  int32_t code = 0;
  RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, pRequest));
  (*pRequest)->syncQuery = true;
  if (!(*pRequest)->pDb) {
    code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
    goto end;
  }

  struct SCatalog* pCatalog = NULL;
  RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog));
  RAW_RETURN_CHECK(catalogGetHandle((*pRequest)->pTscObj->pAppInfo->clusterId, pCatalog));
  conn->pTrans = (*pRequest)->pTscObj->pAppInfo->pTransporter;
  conn->requestId = (*pRequest)->requestId;
  conn->requestObjRefId = (*pRequest)->self;
  conn->mgmtEps = getEpSet_s(&(*pRequest)->pTscObj->pAppInfo->mgmtEp);

end:
  return code;
}

typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp);
static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_ func,
                             SMqRspObj* rspObj) {
  int8_t dataVersion = *(int8_t*)data;
  if (dataVersion >= MQ_DATA_RSP_VERSION) {
    data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t));
    dataLen -= sizeof(int8_t) + sizeof(int32_t);
  }

  rspObj->resIter = -1;
  tDecoderInit(decoder, data, dataLen);
  int32_t code = func(decoder, &rspObj->dataRsp);
  if (code != 0) {
    SET_ERROR_MSG("decode mq taosx data rsp failed");
  }
  return code;
}
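decodeRawData takes the concrete decoder as a function pointer, so the same skip-the-version-header logic serves both tDecodeMqDataRsp (plain data) and tDecodeSTaosxRsp (data plus metadata). A compact stand-in for the dispatch pattern — the types and the version constant below are hypothetical, not the real TDengine structs:

/* Illustrative sketch of the dispatch pattern: one generic decode wrapper,
 * parameterized by the concrete decoder. */
#include <stdint.h>

typedef struct { const uint8_t* buf; int32_t len; } Decoder;
typedef int32_t decode_fn(Decoder* d, void* out);

static int32_t decodePayload(Decoder* d, const uint8_t* data, int32_t len,
                             decode_fn* fn, void* out) {
  enum { RSP_VERSION = 1 };  /* stand-in for MQ_DATA_RSP_VERSION */
  if (len > 0 && (int8_t)data[0] >= RSP_VERSION) {
    // version-prefixed payload: skip [int8 version][int32 offset length]
    data += sizeof(int8_t) + sizeof(int32_t);
    len  -= sizeof(int8_t) + sizeof(int32_t);
  }
  d->buf = data;
  d->len = len;
  return fn(d, out);  // tDecodeMqDataRsp or tDecodeSTaosxRsp in the real code
}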

static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* pMetaHash,
                                SVCreateTbReq* pCreateReqDst, SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName,
                                STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry) {
  int32_t     code = 0;
  STableMeta* pTableMeta = NULL;
  tbInfo*     tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname));
  if (tmpInfo == NULL || retry > 0) {
    tbInfo info = {0};

    RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, conn, pName, &info.vgInfo));
    if (pCreateReqDst && tmpInfo == NULL) {  // change stable name to get meta
      tstrncpy(pName->tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN);
    }
    RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta));
    info.uid = pTableMeta->uid;
    if (pTableMeta->tableType == TSDB_CHILD_TABLE) {
      info.suid = pTableMeta->suid;
    } else {
      info.suid = pTableMeta->uid;
    }
    code = taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES);
    if (code != 0) {
      taosMemoryFree(pTableMeta);
      goto end;
    }
    if (pCreateReqDst) {
      pTableMeta->vgId = info.vgInfo.vgId;
      pTableMeta->uid = pCreateReqDst->uid;
      pCreateReqDst->ctb.suid = pTableMeta->suid;
    }

    RAW_RETURN_CHECK(taosHashPut(pNameHash, pName->tname, strlen(pName->tname), &info, sizeof(tbInfo)));
    tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname));
    RAW_RETURN_CHECK(
        taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo)));
  }

  if (pTableMeta == NULL || retry > 0) {
    STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES);
    if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) {
      RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta));
      code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, &pTableMeta, POINTER_BYTES);
      if (code != 0) {
        taosMemoryFree(pTableMeta);
        goto end;
      }

    } else {
      pTableMeta = *pTableMetaTmp;
      pTableMeta->uid = tmpInfo->uid;
      pTableMeta->vgId = tmpInfo->vgInfo.vgId;
    }
  }
  *pMeta = pTableMeta;

end:
  return code;
}
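processCacheMeta consults two maps in order: table name to (uid, suid, vgroup), then suid to the cached STableMeta; only on a miss, a forced retry, or a schema mismatch does it go back to the catalog. The lookup order, reduced to a sketch with hypothetical stand-in types (not the real tbInfo/rawCacheInfo):

/* Illustrative two-level lookup: name -> ids, then suid -> cached meta. */
#include <stdbool.h>
#include <stdint.h>

typedef struct { int64_t uid, suid; int32_t vgId; } NameEntry;
typedef struct { int32_t version; } Meta;

typedef const NameEntry* (*name_lookup_fn)(const char* tname);
typedef Meta*            (*meta_lookup_fn)(int64_t suid);
typedef Meta*            (*catalog_fetch_fn)(const char* tname);

static Meta* resolveMeta(const char* tname, bool retry,
                         name_lookup_fn byName, meta_lookup_fn bySuid,
                         catalog_fetch_fn fromCatalog) {
  const NameEntry* e = byName(tname);
  if (e == NULL || retry) {
    return fromCatalog(tname);  // miss (or forced retry): ask the catalog
  }
  Meta* m = bySuid(e->suid);    // hit: reuse the meta cached under the suid
  return m != NULL ? m : fromCatalog(tname);
}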

static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
  int32_t   code = TSDB_CODE_SUCCESS;
  SQuery*   pQuery = NULL;
  SMqRspObj rspObj = {0};
  SDecoder  decoder = {0};

  SRequestObj*     pRequest = NULL;
  SCatalog*        pCatalog = NULL;
  SRequestConnInfo conn = {0};
  conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
  conn.requestId = pRequest->requestId;
  conn.requestObjRefId = pRequest->self;
  conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
  RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn));
  uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
  RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeMqDataRsp, &rspObj));

  SHashObj* pVgHash = NULL;
  SHashObj* pNameHash = NULL;
  SHashObj* pMetaHash = NULL;
  RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos));
  int retry = 0;
  while (1) {
    RAW_RETURN_CHECK(smlInitHandle(&pQuery));
    uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum);
    while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
      if (!rspObj.dataRsp.withSchema) {
        goto end;
      }

      const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
      RAW_NULL_CHECK(tbName);
      SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
      RAW_NULL_CHECK(pSW);
      void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
      RAW_NULL_CHECK(pRetrieve);
      void* rawData = getRawDataFromRes(pRetrieve);
      RAW_NULL_CHECK(rawData);

      uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName);
      SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
      tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
      tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);

      STableMeta* pTableMeta = NULL;
      RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, &pName, &pTableMeta, pSW,
                                        rawData, retry));
      char err[ERR_MSG_LEN] = {0};
      code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true);
      if (code != TSDB_CODE_SUCCESS) {
        SET_ERROR_MSG("table:%s, err:%s", pName.tname, err);
        goto end;
      }
    }
    RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
    launchQueryImpl(pRequest, pQuery, true, NULL);
    code = pRequest->code;

    if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) {
      uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code));
      qDestroyQuery(pQuery);
      pQuery = NULL;
      rspObj.resIter = -1;
      continue;
    }
    break;
  }

end:
  uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code));
  tDeleteMqDataRsp(&rspObj.dataRsp);
  tDecoderClear(&decoder);
  qDestroyQuery(pQuery);
  destroyRequest(pRequest);
  return code;
}
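The write path above wraps block binding and query launch in a bounded retry: on a retryable client error it rebuilds the SQuery, rewinds the block iterator, and threads the retry counter into processCacheMeta to force a metadata refresh. The control flow in isolation, with hypothetical helpers standing in for smlInitHandle/rawBlockBindData, launchQueryImpl, and NEED_CLIENT_HANDLE_ERROR:

/* Sketch of the bounded-retry loop; all three callbacks are stand-ins. */
#include <stdbool.h>
#include <stdint.h>

int32_t writeWithRetry(int32_t (*prepare)(int32_t retry),
                       int32_t (*execute)(void),
                       bool (*isRetryable)(int32_t code)) {
  int32_t code = 0;
  int     retry = 0;
  while (1) {
    code = prepare(retry);  // rebind blocks; retry > 0 forces a meta refresh
    if (code != 0) break;
    code = execute();
    if (isRetryable(code) && retry++ < 3) {
      continue;             // rewind and try again with fresh metadata
    }
    break;                  // success, or a non-retryable error
  }
  return code;
}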

static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
  int32_t   code = TSDB_CODE_SUCCESS;
  SQuery*   pQuery = NULL;
  SMqRspObj rspObj = {0};
  SDecoder  decoder = {0};
  SHashObj* pCreateTbHash = NULL;

  SRequestObj*     pRequest = NULL;
  SCatalog*        pCatalog = NULL;
  SRequestConnInfo conn = {0};

  RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn));
  uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen);
  RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeSTaosxRsp, &rspObj));

  RAW_RETURN_CHECK(smlInitHandle(&pQuery));
  pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
  RAW_NULL_CHECK(pVgHash);
  pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
  RAW_NULL_CHECK(pCreateTbHash);
  RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash));

  uDebug(LOG_ID_TAG " write raw metadata block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum);
  while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
    void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
    RAW_NULL_CHECK(pRetrieve);
    if (!rspObj.dataRsp.withSchema) {
      goto end;
    }
  SHashObj* pVgHash = NULL;
  SHashObj* pNameHash = NULL;
  SHashObj* pMetaHash = NULL;
  RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos));
  int retry = 0;
  while (1) {
    RAW_RETURN_CHECK(smlInitHandle(&pQuery));
    uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum);
    while (++rspObj.resIter < rspObj.dataRsp.blockNum) {
      if (!rspObj.dataRsp.withSchema) {
        goto end;
      }

      const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
      if (!tbName) {
        SET_ERROR_MSG("block tbname is null");
        code = terrno;
        goto end;
      }
      const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter);
      RAW_NULL_CHECK(tbName);
      SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
      RAW_NULL_CHECK(pSW);
      void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter);
      RAW_NULL_CHECK(pRetrieve);
      void* rawData = getRawDataFromRes(pRetrieve);
      RAW_NULL_CHECK(rawData);

      uDebug(LOG_ID_TAG " write raw metadata block tbname:%s", LOG_ID_VALUE, tbName);
      SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
      tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
      tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);
      uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName);
      SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
      tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN);
      tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN);

      // find schema data info
      SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName));
      SVgroupInfo vg = {0};
      RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg));
      if (pCreateReqDst) {  // change stable name to get meta
        tstrncpy(pName.tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN);
      // find schema data info
      SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, pName.tname, strlen(pName.tname));
      STableMeta* pTableMeta = NULL;
      RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, &pName,
                                        &pTableMeta, pSW, rawData, retry));
      char err[ERR_MSG_LEN] = {0};
      code =
          rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true);
      if (code != TSDB_CODE_SUCCESS) {
        SET_ERROR_MSG("table:%s, err:%s", pName.tname, err);
        goto end;
      }
    }
      RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta));
    RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));
    launchQueryImpl(pRequest, pQuery, true, NULL);
    code = pRequest->code;

      if (pCreateReqDst) {
        pTableMeta->vgId = vg.vgId;
        pTableMeta->uid = pCreateReqDst->uid;
        pCreateReqDst->ctb.suid = pTableMeta->suid;
      }
      void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId));
      if (hData == NULL) {
        RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
      }

      SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter);
      RAW_NULL_CHECK(pSW);
      TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD));
      if (fields == NULL) {
        SET_ERROR_MSG("calloc fields failed");
        code = terrno;
        goto end;
      }
      for (int i = 0; i < pSW->nCols; i++) {
        fields[i].type = pSW->pSchema[i].type;
        fields[i].bytes = pSW->pSchema[i].bytes;
        tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name));
      }
      void* rawData = getRawDataFromRes(pRetrieve);
      char err[ERR_MSG_LEN] = {0};
      SVCreateTbReq* pCreateReqTmp = NULL;
      if (pCreateReqDst) {
        RAW_RETURN_CHECK(cloneSVreateTbReq(pCreateReqDst, &pCreateReqTmp));
      }
      code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqTmp, fields, pSW->nCols, true, err, ERR_MSG_LEN);
      if (pCreateReqTmp != NULL) {
        tdDestroySVCreateTbReq(pCreateReqTmp);
        taosMemoryFree(pCreateReqTmp);
      }
      taosMemoryFree(fields);
      taosMemoryFreeClear(pTableMeta);
      if (code != TSDB_CODE_SUCCESS) {
        SET_ERROR_MSG("table:%s, err:%s", tbName, err);
        goto end;
    if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) {
      uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code));
      qDestroyQuery(pQuery);
      pQuery = NULL;
      rspObj.resIter = -1;
      continue;
    }
    break;
  }

  RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash));

  launchQueryImpl(pRequest, pQuery, true, NULL);
  code = pRequest->code;

end:
  uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code));
  tDeleteSTaosxRsp(&rspObj.dataRsp);
  void* pIter = taosHashIterate(pCreateTbHash, NULL);
  while (pIter) {
    tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE);
    pIter = taosHashIterate(pCreateTbHash, pIter);
  }
  taosHashCleanup(pCreateTbHash);
  tDeleteSTaosxRsp(&rspObj.dataRsp);
  tDecoderClear(&decoder);
  qDestroyQuery(pQuery);
  destroyRequest(pRequest);
  taosHashCleanup(pVgHash);
  taosMemoryFreeClear(pTableMeta);
  return code;
}

@@ -2076,18 +2162,18 @@ char* tmq_get_json_meta(TAOS_RES* res) {
    return NULL;
  }

  char* string = NULL;
  SMqRspObj* rspObj = (SMqRspObj*)res;
  if (TD_RES_TMQ_METADATA(res)) {
    processAutoCreateTable(&rspObj->dataRsp, &string);
  } else if (TD_RES_TMQ_BATCH_META(res)) {
    processBatchMetaToJson(&rspObj->batchMetaRsp, &string);
  } else if (TD_RES_TMQ_META(res)) {
    cJSON* pJson = NULL;
    processSimpleMeta(&rspObj->metaRsp, &pJson);
    string = cJSON_PrintUnformatted(pJson);
    cJSON_Delete(pJson);
  } else{
  } else {
    uError("tmq_get_json_meta res:%d, invalid type", *(int8_t*)res);
  }

@@ -2098,7 +2184,7 @@ char* tmq_get_json_meta(TAOS_RES* res) {
void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }

static int32_t getOffSetLen(const SMqDataRsp* pRsp) {
  SEncoder coder = {0};
  tEncoderInit(&coder, NULL, 0);
  if (tEncodeSTqOffsetVal(&coder, &pRsp->reqOffset) < 0) return -1;
  if (tEncodeSTqOffsetVal(&coder, &pRsp->rspOffset) < 0) return -1;

@@ -2108,45 +2194,45 @@ static int32_t getOffSetLen(const SMqDataRsp* pRsp) {
}

typedef int32_t __encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp);
static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) {
  int32_t  len = 0;
  int32_t  code = 0;
  SEncoder encoder = {0};
  void*    buf = NULL;
  tEncodeSize(encodeFunc, rspObj, len, code);
  if (code < 0) {
    code = TSDB_CODE_INVALID_MSG;
    goto FAILED;
  }
  len += sizeof(int8_t) + sizeof(int32_t);
  buf = taosMemoryCalloc(1, len);
  if (buf == NULL) {
    code = terrno;
    goto FAILED;
  }
  tEncoderInit(&encoder, buf, len);
  if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) {
    code = TSDB_CODE_INVALID_MSG;
    goto FAILED;
  }
  int32_t offsetLen = getOffSetLen(rspObj);
  if (offsetLen <= 0) {
    code = TSDB_CODE_INVALID_MSG;
    goto FAILED;
  }
  if (tEncodeI32(&encoder, offsetLen) < 0) {
    code = TSDB_CODE_INVALID_MSG;
    goto FAILED;
  }
  if (encodeFunc(&encoder, rspObj) < 0) {
    code = TSDB_CODE_INVALID_MSG;
    goto FAILED;
  }
  tEncoderClear(&encoder);

  raw->raw = buf;
  raw->raw_len = len;
  return code;
FAILED:
  tEncoderClear(&encoder);
  taosMemoryFree(buf);
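encodeMqDataRsp frames the raw buffer as [int8 version][int32 offset-section length][encoded response], which is exactly what decodeRawData strips on the read side. The writer-side shape in isolation, as a hedged sketch — the helper, the version argument, and the flat payload copy are stand-ins, not the real encoder:

/* Illustrative framing only: prefix a payload with a version tag and an
 * offset-section length, mirroring the layout described above. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint8_t* frameRaw(const uint8_t* payload, int32_t payloadLen,
                         int32_t offsetLen, int8_t version, int32_t* outLen) {
  int32_t  total = (int32_t)(sizeof(int8_t) + sizeof(int32_t)) + payloadLen;
  uint8_t* buf = calloc(1, total);
  if (buf == NULL) return NULL;
  buf[0] = (uint8_t)version;                       // int8 version tag
  memcpy(buf + 1, &offsetLen, sizeof(offsetLen));  // int32 offset-section length
  memcpy(buf + 1 + sizeof(offsetLen), payload, payloadLen);
  *outLen = total;
  return buf;
}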

@@ -2164,7 +2250,7 @@ int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
    raw->raw_type = rspObj->metaRsp.resMsgType;
    uDebug("tmq get raw type meta:%p", raw);
  } else if (TD_RES_TMQ(res)) {
    int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw);
    if (code != 0) {
      uError("tmq get raw type error:%d", terrno);
      return code;

@@ -2199,7 +2285,31 @@ void tmq_free_raw(tmq_raw_data raw) {
  (void)memset(terrMsg, 0, ERR_MSG_LEN);
}

static int32_t writeRawInit() {
  while (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_START) {
    int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1);
    if (old == 0) {
      int32_t code = initRawCacheHash();
      if (code != 0) {
        uError("tmq writeRawImpl init error:%d", code);
        atomic_store_8(&initedFlag, WRITE_RAW_INIT_FAIL);
        return code;
      }
      atomic_store_8(&initedFlag, WRITE_RAW_INIT_OK);
    }
  }

  if (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_FAIL) {
    return TSDB_CODE_INTERNAL_ERROR;
  }
  return 0;
}
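writeRawInit is a classic one-shot initializer: a compare-and-swap elects a single thread to run the real initialization while the others spin until the state leaves START. The same shape in portable C11 atomics rather than TDengine's atomic_* wrappers; doInit and the state names are stand-ins:

/* Once-init sketch; the three states mirror WRITE_RAW_INIT_START/OK/FAIL. */
#include <stdatomic.h>
#include <stdint.h>

enum { INIT_START = 0, INIT_OK = 1, INIT_FAIL = 2 };
static _Atomic int8_t gState = INIT_START;
static _Atomic int8_t gClaimed = 0;

static int32_t onceInit(int32_t (*doInit)(void)) {
  while (atomic_load(&gState) == INIT_START) {
    int8_t expected = 0;
    if (atomic_compare_exchange_strong(&gClaimed, &expected, 1)) {
      // this thread won the race and runs the real initialization
      atomic_store(&gState, doInit() == 0 ? INIT_OK : INIT_FAIL);
    }
  }
  return atomic_load(&gState) == INIT_OK ? 0 : -1;
}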

static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) {
  if (writeRawInit() != 0) {
    return TSDB_CODE_INTERNAL_ERROR;
  }

  if (type == TDMT_VND_CREATE_STB) {
    return taosCreateStb(taos, buf, len);
  } else if (type == TDMT_VND_ALTER_STB) {

@@ -2214,10 +2324,10 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type)
    return taosDropTable(taos, buf, len);
  } else if (type == TDMT_VND_DELETE) {
    return taosDeleteData(taos, buf, len);
  } else if (type == RES_TYPE__TMQ) {
    return tmqWriteRawDataImpl(taos, buf, len);
  } else if (type == RES_TYPE__TMQ_METADATA) {
    return tmqWriteRawMetaDataImpl(taos, buf, len);
  } else if (type == RES_TYPE__TMQ) {
    return tmqWriteRawDataImpl(taos, buf, len);
  } else if (type == RES_TYPE__TMQ_BATCH_META) {
    return tmqWriteBatchMetaDataImpl(taos, buf, len);
  }

@@ -2225,7 +2335,8 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type)
}

int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
  if (!taos) {
  if (taos == NULL || raw.raw == NULL || raw.raw_len <= 0) {
    SET_ERROR_MSG("taos:%p or data:%p is NULL or raw_len <= 0", taos, raw.raw);
    return TSDB_CODE_INVALID_PARA;
  }

@@ -24,12 +24,9 @@
#include "tref.h"
#include "ttimer.h"

#define tqFatalC(...) do { if (cDebugFlag & DEBUG_FATAL || tqClientDebug) { taosPrintLog("TQ FATAL ", DEBUG_FATAL, tqDebugFlag, __VA_ARGS__); }} while(0)
#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebug) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqDebugFlag, __VA_ARGS__); }} while(0)
#define tqWarnC(...) do { if (cDebugFlag & DEBUG_WARN || tqClientDebug) { taosPrintLog("TQ WARN ", DEBUG_WARN, tqDebugFlag, __VA_ARGS__); }} while(0)
#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebug) { taosPrintLog("TQ ", DEBUG_INFO, tqDebugFlag, __VA_ARGS__); }} while(0)
#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebug) { taosPrintLog("TQ ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0)
#define tqTraceC(...) do { if (cDebugFlag & DEBUG_TRACE || tqClientDebug) { taosPrintLog("TQ ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0)
#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)
#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0)

#define EMPTY_BLOCK_POLL_IDLE_DURATION 10
#define DEFAULT_AUTO_COMMIT_INTERVAL   5000

@@ -831,8 +828,8 @@ static int32_t innerCommitAll(tmq_t* tmq, SMqCommitCbParamSet* pParamSet){
    }

    code = innerCommit(tmq, pTopic->topicName, &pVg->offsetInfo.endOffset, pVg, pParamSet);
    if (code != 0){
      tqDebugC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d",
    if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){
      tqErrorC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d",
               tmq->consumerId, pTopic->topicName, pVg->vgId, tstrerror(code), pVg->offsetInfo.endOffset.version, j + 1, numOfVgroups);
    }
  }

@@ -857,7 +854,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us
    return;
  }
  code = innerCommitAll(tmq, pParamSet);
  if (code != 0){
  if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){
    tqErrorC("consumer:0x%" PRIx64 " innerCommitAll failed, code:%s", tmq->consumerId, tstrerror(code));
  }

@@ -957,7 +954,8 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
    }
  }

  tqClientDebug = rsp.debugFlag;
  tqClientDebugFlag = rsp.debugFlag;

  tDestroySMqHbRsp(&rsp);

END:

@@ -978,6 +976,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
  req.consumerId = tmq->consumerId;
  req.epoch = tmq->epoch;
  req.pollFlag = atomic_load_8(&tmq->pollFlag);
  tqDebugC("consumer:0x%" PRIx64 " send heartbeat, pollFlag:%d", tmq->consumerId, req.pollFlag);
  req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows));
  if (req.topics == NULL) {
    goto END;

@@ -1063,7 +1062,7 @@ END:
  tDestroySMqHbReq(&req);
  if (tmrId != NULL) {
    bool ret = taosTmrReset(tmqSendHbReq, tmq->heartBeatIntervalMs, param, tmqMgmt.timer, &tmq->hbLiveTimer);
    tqDebugC("reset timer fo tmq hb:%d", ret);
    tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq heartbeat:%d, pollFlag:%d", tmq->consumerId, ret, tmq->pollFlag);
  }
  int32_t ret = taosReleaseRef(tmqMgmt.rsetId, refId);
  if (ret != 0){

@@ -1269,7 +1268,9 @@ static int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) {
  }

  if (code != TSDB_CODE_SUCCESS) {
    tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code));
    if (code != TSDB_CODE_MND_CONSUMER_NOT_READY){
      tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code));
    }
    goto END;
  }

@@ -1422,7 +1423,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) {
    tqDebugC("consumer:0x%" PRIx64 " retrieve ep from mnode in 1s", pTmq->consumerId);
    bool ret = taosTmrReset(tmqAssignAskEpTask, DEFAULT_ASKEP_INTERVAL, (void*)(pTmq->refId), tmqMgmt.timer,
                            &pTmq->epTimer);
    tqDebugC("reset timer fo tmq ask ep:%d", ret);
    tqDebugC("reset timer for tmq ask ep:%d", ret);
  } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
    tmq_commit_cb* pCallbackFn = (pTmq->commitCb != NULL) ? pTmq->commitCb : defaultCommitCbFn;
    asyncCommitAllOffsets(pTmq, pCallbackFn, pTmq->commitCbUserParam);

@@ -1430,7 +1431,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) {
                pTmq->autoCommitInterval / 1000.0);
    bool ret = taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, (void*)(pTmq->refId), tmqMgmt.timer,
                            &pTmq->commitTimer);
    tqDebugC("reset timer fo commit:%d", ret);
    tqDebugC("reset timer for commit:%d", ret);
  } else {
    tqErrorC("consumer:0x%" PRIx64 " invalid task type:%d", pTmq->consumerId, *pTaskType);
  }

@@ -548,7 +548,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) {
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebug", tqClientDebug, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "udfDebugFlag", udfDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
  TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER));

@@ -2000,7 +2000,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
      {"tdbDebugFlag", &tdbDebugFlag},   {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag},
      {"smaDebugFlag", &smaDebugFlag},   {"rpcDebugFlag", &rpcDebugFlag}, {"qDebugFlag", &qDebugFlag},
      {"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag},   {"sndDebugFlag", &sndDebugFlag},
      {"tqClientDebug", &tqClientDebug},
      {"tqClientDebugFlag", &tqClientDebugFlag},
  };

  static OptionNameAndVar options[] = {{"audit", &tsEnableAudit},

@@ -10983,6 +10983,7 @@ _exit:
int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
  TAOS_CHECK_RETURN(tEncodeMqDataRspCommon(pEncoder, pRsp));
  TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pRsp->sleepTime));

  return 0;
}

@@ -11094,6 +11095,7 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
      TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, createTableReq, createTableLen));
    }
  }

_exit:
  return code;
}

@@ -36,12 +36,13 @@ typedef struct SVnodeMgmt {
  SSingleWorker  mgmtWorker;
  SSingleWorker  mgmtMultiWorker;
  SHashObj      *hash;
  SHashObj      *closedHash;
  TdThreadRwlock lock;
  SVnodesStat    state;
  STfs          *pTfs;
  TdThread       thread;
  bool           stop;
  TdThreadMutex  createLock;
  TdThreadMutex  fileLock;
} SVnodeMgmt;

typedef struct {

@@ -94,7 +95,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId);
SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict);
void       vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode);
int32_t    vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl);
void       vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal);
void       vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed);

// vmHandle.c
SArray *vmGetMsgHandles();

@@ -111,6 +112,7 @@ int32_t vmProcessArbHeartBeatReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes);
int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt);
int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);
int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes);

// vmWorker.c
int32_t vmStartWorker(SVnodeMgmt *pMgmt);

@@ -19,6 +19,54 @@

#define MAX_CONTENT_LEN 2 * 1024 * 1024

int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
  (void)taosThreadRwlockRdlock(&pMgmt->lock);

  int32_t num = 0;
  int32_t size = taosHashGetSize(pMgmt->hash);
  int32_t closedSize = taosHashGetSize(pMgmt->closedHash);
  size += closedSize;
  SVnodeObj **pVnodes = taosMemoryCalloc(size, sizeof(SVnodeObj *));
  if (pVnodes == NULL) {
    (void)taosThreadRwlockUnlock(&pMgmt->lock);
    return terrno;
  }

  void *pIter = taosHashIterate(pMgmt->hash, NULL);
  while (pIter) {
    SVnodeObj **ppVnode = pIter;
    SVnodeObj  *pVnode = *ppVnode;
    if (pVnode && num < size) {
      int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
      // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount);
      pVnodes[num++] = (*ppVnode);
      pIter = taosHashIterate(pMgmt->hash, pIter);
    } else {
      taosHashCancelIterate(pMgmt->hash, pIter);
    }
  }

  pIter = taosHashIterate(pMgmt->closedHash, NULL);
  while (pIter) {
    SVnodeObj **ppVnode = pIter;
    SVnodeObj  *pVnode = *ppVnode;
    if (pVnode && num < size) {
      int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
      // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount);
      pVnodes[num++] = (*ppVnode);
      pIter = taosHashIterate(pMgmt->closedHash, pIter);
    } else {
      taosHashCancelIterate(pMgmt->closedHash, pIter);
    }
  }

  (void)taosThreadRwlockUnlock(&pMgmt->lock);
  *numOfVnodes = num;
  *ppVnodes = pVnodes;

  return 0;
}
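vmGetAllVnodeListFromHash hands back a ref-counted snapshot taken under the read lock, spanning both the open and the kept-closed vnodes, so a caller must pair it with vmReleaseVnode for every element and then free the array. A hypothetical caller fragment, using only functions declared in this diff:

/* Hypothetical usage sketch: consume the snapshot, then drop every
 * reference taken by vmGetAllVnodeListFromHash and free the array. */
int32_t     numOfVnodes = 0;
SVnodeObj **ppVnodes = NULL;
int32_t     code = vmGetAllVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes);
if (code == 0) {
  for (int32_t i = 0; i < numOfVnodes; ++i) {
    // ... inspect ppVnodes[i] (open and kept-closed vnodes alike) ...
    vmReleaseVnode(pMgmt, ppVnodes[i]);  // undo the refCount taken above
  }
  taosMemoryFree(ppVnodes);
}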

int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
  (void)taosThreadRwlockRdlock(&pMgmt->lock);

@@ -203,6 +251,8 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
  SVnodeObj **ppVnodes = NULL;
  char        file[PATH_MAX] = {0};
  char        realfile[PATH_MAX] = {0};
  int32_t     lino = 0;
  int32_t     ret = -1;

  int32_t nBytes = snprintf(file, sizeof(file), "%s%svnodes_tmp.json", pMgmt->path, TD_DIRSEP);
  if (nBytes <= 0 || nBytes >= sizeof(file)) {

@@ -215,8 +265,7 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
  }

  int32_t numOfVnodes = 0;
  code = vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes);
  if (code) goto _OVER;
  TAOS_CHECK_GOTO(vmGetAllVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes), &lino, _OVER);

  // terrno = TSDB_CODE_OUT_OF_MEMORY;
  pJson = tjsonCreateObject();

@@ -224,39 +273,56 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
    code = terrno;
    goto _OVER;
  }
  if ((code = vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes)) != 0) goto _OVER;
  TAOS_CHECK_GOTO(vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes), &lino, _OVER);

  buffer = tjsonToString(pJson);
  if (buffer == NULL) {
    code = TSDB_CODE_INVALID_JSON_FORMAT;
    lino = __LINE__;
    goto _OVER;
  }

  code = taosThreadMutexLock(&pMgmt->fileLock);
  if (code != 0) {
    lino = __LINE__;
    goto _OVER;
  }

  pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH);
  if (pFile == NULL) {
    code = terrno;
    goto _OVER;
    lino = __LINE__;
    goto _OVER1;
  }

  int32_t len = strlen(buffer);
  if (taosWriteFile(pFile, buffer, len) <= 0) {
    code = terrno;
    goto _OVER;
    lino = __LINE__;
    goto _OVER1;
  }
  if (taosFsyncFile(pFile) < 0) {
    code = TAOS_SYSTEM_ERROR(errno);
    goto _OVER;
    lino = __LINE__;
    goto _OVER1;
  }

  code = taosCloseFile(&pFile);
  if (code != 0) {
    code = TAOS_SYSTEM_ERROR(errno);
    goto _OVER;
    lino = __LINE__;
    goto _OVER1;
  }
  TAOS_CHECK_GOTO(taosRenameFile(file, realfile), NULL, _OVER);
  TAOS_CHECK_GOTO(taosRenameFile(file, realfile), &lino, _OVER1);

  dInfo("succeed to write vnodes file:%s, vnodes:%d", realfile, numOfVnodes);

_OVER1:
  ret = taosThreadMutexUnlock(&pMgmt->fileLock);
  if (ret != 0) {
    dError("failed to unlock since %s", tstrerror(ret));
  }

_OVER:
  if (pJson != NULL) tjsonDelete(pJson);
  if (buffer != NULL) taosMemoryFree(buffer);

@@ -272,7 +338,8 @@ _OVER:
  }

  if (code != 0) {
    dError("failed to write vnodes file:%s since %s, vnodes:%d", realfile, tstrerror(code), numOfVnodes);
    dError("failed to write vnodes file:%s at line:%d since %s, vnodes:%d", realfile, lino, tstrerror(code),
           numOfVnodes);
  }
  return code;
}
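vmWriteVnodeListToFile follows the standard crash-safe pattern: write vnodes_tmp.json, fsync, close, then rename over the real file, all under fileLock so concurrent writers cannot interleave. The same shape in plain POSIX C as a generic sketch (not the taosFile API):

/* Generic write-temp/fsync/rename sketch; names are illustrative. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int writeAtomically(const char* tmp, const char* real,
                           const char* buf, size_t len) {
  int fd = open(tmp, O_CREAT | O_WRONLY | O_TRUNC, 0644);
  if (fd < 0) return -1;
  if (write(fd, buf, len) != (ssize_t)len) { close(fd); return -1; }
  if (fsync(fd) != 0) { close(fd); return -1; }  // flush before the rename
  if (close(fd) != 0) return -1;
  return rename(tmp, real);                      // atomic replace on POSIX
}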

@@ -415,27 +415,30 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
    goto _OVER;
  }

  code = taosThreadMutexLock(&pMgmt->createLock);
  if (code != 0) {
    dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(code));
    goto _OVER;
  }
  code = vmWriteVnodeListToFile(pMgmt);
  if (code != 0) {
    code = terrno != 0 ? terrno : code;
    int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock);
    if (ret != 0) {
      dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret));
    }
    goto _OVER;
  }
  int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock);
  if (ret != 0) {
    dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret));
  }

_OVER:
  if (code != 0) {
    int32_t r = 0;
    r = taosThreadRwlockWrlock(&pMgmt->lock);
    if (r != 0) {
      dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(r));
    }
    if (r == 0) {
      dInfo("vgId:%d, remove from hash", req.vgId);
      r = taosHashRemove(pMgmt->hash, &req.vgId, sizeof(int32_t));
      if (r != 0) {
        dError("vgId:%d, failed to remove vnode since %s", req.vgId, tstrerror(r));
      }
    }
    r = taosThreadRwlockUnlock(&pMgmt->lock);
    if (r != 0) {
      dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(r));
    }
    vnodeClose(pImpl);
    vnodeDestroy(0, path, pMgmt->pTfs, 0);
  } else {

@@ -535,7 +538,7 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));

  bool commitAndRemoveWal = vnodeShouldRemoveWal(pVnode->pImpl);
  vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal);
  vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal, true);

  int32_t diskPrimary = wrapperCfg.diskPrimary;
  char    path[TSDB_FILENAME_LEN] = {0};

@@ -683,7 +686,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  }

  dInfo("vgId:%d, close vnode", srcVgId);
  vmCloseVnode(pMgmt, pVnode, true);
  vmCloseVnode(pMgmt, pVnode, true, false);

  int32_t diskPrimary = wrapperCfg.diskPrimary;
  char    srcPath[TSDB_FILENAME_LEN] = {0};

@@ -792,7 +795,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
  tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));

  bool commitAndRemoveWal = vnodeShouldRemoveWal(pVnode->pImpl);
  vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal);
  vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal, true);

  int32_t diskPrimary = wrapperCfg.diskPrimary;
  char    path[TSDB_FILENAME_LEN] = {0};

@@ -860,7 +863,7 @@ int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
    return code;
  }

  vmCloseVnode(pMgmt, pVnode, false);
  vmCloseVnode(pMgmt, pVnode, false, false);
  if (vmWriteVnodeListToFile(pMgmt) != 0) {
    dError("vgId:%d, failed to write vnode list since %s", vgId, terrstr());
  }

@@ -166,16 +166,34 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
  (void)taosThreadRwlockWrlock(&pMgmt->lock);
  SVnodeObj *pOld = NULL;
  int32_t    r = taosHashGetDup(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld);
  if (r != 0) {
    dError("vgId:%d, failed to get vnode from hash", pVnode->vgId);
  }
  if (pOld) {
    vmFreeVnodeObj(&pOld);
  }
  int32_t code = taosHashPut(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *));

  pOld = NULL;
  r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld);
  if (r != 0) {
    dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId);
  }
  if (pOld) {
    vmFreeVnodeObj(&pOld);
  }

  dInfo("vgId:%d, remove from closedHash", pVnode->vgId);
  r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t));
  if (r != 0) {
    dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId);
  }
  (void)taosThreadRwlockUnlock(&pMgmt->lock);

  return code;
}

void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) {
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed) {
  char path[TSDB_FILENAME_LEN] = {0};
  bool atExit = true;

@@ -185,7 +203,40 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal)

  (void)taosThreadRwlockWrlock(&pMgmt->lock);
  int32_t r = taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
  if (r != 0) {
    dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId);
  }
  if (keepClosed) {
    SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
    if (pClosedVnode == NULL) {
      dError("vgId:%d, failed to alloc vnode since %s", pVnode->vgId, terrstr());
      (void)taosThreadRwlockUnlock(&pMgmt->lock);
      return;
    }

    pClosedVnode->vgId = pVnode->vgId;
    pClosedVnode->dropped = pVnode->dropped;
    pClosedVnode->vgVersion = pVnode->vgVersion;
    pClosedVnode->diskPrimary = pVnode->diskPrimary;
    pClosedVnode->toVgId = pVnode->toVgId;

    SVnodeObj *pOld = NULL;
    r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld);
    if (r != 0) {
      dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId);
    }
    if (pOld) {
      vmFreeVnodeObj(&pOld);
    }
    dInfo("vgId:%d, put vnode to closedHash", pVnode->vgId);
    r = taosHashPut(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), &pClosedVnode, sizeof(SVnodeObj *));
    if (r != 0) {
      dError("vgId:%d, failed to put vnode to closedHash", pVnode->vgId);
    }
  }
  (void)taosThreadRwlockUnlock(&pMgmt->lock);

  vmReleaseVnode(pMgmt, pVnode);

  if (pVnode->failed) {
@@ -362,9 +413,15 @@ static void *vmOpenVnodeInThread(void *param) {
static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
  pMgmt->hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
  if (pMgmt->hash == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    dError("failed to init vnode hash since %s", terrstr());
    return -1;
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  pMgmt->closedHash =
      taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
  if (pMgmt->closedHash == NULL) {
    dError("failed to init vnode closed hash since %s", terrstr());
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  SWrapperCfg *pCfgs = NULL;

@@ -459,7 +516,7 @@ static void *vmCloseVnodeInThread(void *param) {
              pMgmt->state.openVnodes, pMgmt->state.totalVnodes);
    tmsgReportStartup("vnode-close", stepDesc);

    vmCloseVnode(pMgmt, pVnode, false);
    vmCloseVnode(pMgmt, pVnode, false, false);
  }

  dInfo("thread:%d, numOfVnodes:%d is closed", pThread->threadIndex, pThread->vnodeNum);

@@ -537,6 +594,18 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) {
    pMgmt->hash = NULL;
  }

  void *pIter = taosHashIterate(pMgmt->closedHash, NULL);
  while (pIter) {
    SVnodeObj **ppVnode = pIter;
    vmFreeVnodeObj(ppVnode);
    pIter = taosHashIterate(pMgmt->closedHash, pIter);
  }

  if (pMgmt->closedHash != NULL) {
    taosHashCleanup(pMgmt->closedHash);
    pMgmt->closedHash = NULL;
  }

  dInfo("total vnodes:%d are all closed", numOfVnodes);
}

@@ -545,7 +614,7 @@ static void vmCleanup(SVnodeMgmt *pMgmt) {
  vmStopWorker(pMgmt);
  vnodeCleanup();
  (void)taosThreadRwlockDestroy(&pMgmt->lock);
  (void)taosThreadMutexDestroy(&pMgmt->createLock);
  (void)taosThreadMutexDestroy(&pMgmt->fileLock);
  taosMemoryFree(pMgmt);
}

@@ -637,7 +706,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
    goto _OVER;
  }

  code = taosThreadMutexInit(&pMgmt->createLock, NULL);
  code = taosThreadMutexInit(&pMgmt->fileLock, NULL);
  if (code != 0) {
    code = TAOS_SYSTEM_ERROR(errno);
    goto _OVER;

@@ -15,13 +15,10 @@

#define _DEFAULT_SOURCE
#include "mndArbGroup.h"
#include "audit.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndTrans.h"
#include "mndUser.h"
#include "mndVgroup.h"

#define ARBGROUP_VER_NUMBER 1

@@ -245,11 +242,11 @@ static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p
  }

  for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) {
    (void)memcpy(pOld->members[i].state.token, pNew->members[i].state.token, TSDB_ARB_TOKEN_SIZE);
    tstrncpy(pOld->members[i].state.token, pNew->members[i].state.token, TSDB_ARB_TOKEN_SIZE);
  }
  pOld->isSync = pNew->isSync;
  pOld->assignedLeader.dnodeId = pNew->assignedLeader.dnodeId;
  (void)memcpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE);
  tstrncpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE);
  pOld->assignedLeader.acked = pNew->assignedLeader.acked;
  pOld->version++;

@@ -834,12 +831,12 @@ static int32_t mndProcessArbUpdateGroupBatchReq(SRpcMsg *pReq) {
      newGroup.dbUid = pUpdateGroup->dbUid;
      for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) {
        newGroup.members[i].info.dnodeId = pUpdateGroup->members[i].dnodeId;
        (void)memcpy(newGroup.members[i].state.token, pUpdateGroup->members[i].token, TSDB_ARB_TOKEN_SIZE);
        tstrncpy(newGroup.members[i].state.token, pUpdateGroup->members[i].token, TSDB_ARB_TOKEN_SIZE);
      }

      newGroup.isSync = pUpdateGroup->isSync;
      newGroup.assignedLeader.dnodeId = pUpdateGroup->assignedLeader.dnodeId;
      (void)memcpy(newGroup.assignedLeader.token, pUpdateGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE);
      tstrncpy(newGroup.assignedLeader.token, pUpdateGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE);
      newGroup.assignedLeader.acked = pUpdateGroup->assignedLeader.acked;
      newGroup.version = pUpdateGroup->version;

@@ -897,7 +894,7 @@ static void mndArbGroupSetAssignedLeader(SArbGroup *pGroup, int32_t index) {
  SArbGroupMember *pMember = &pGroup->members[index];

  pGroup->assignedLeader.dnodeId = pMember->info.dnodeId;
  (void)strncpy(pGroup->assignedLeader.token, pMember->state.token, TSDB_ARB_TOKEN_SIZE);
  tstrncpy(pGroup->assignedLeader.token, pMember->state.token, TSDB_ARB_TOKEN_SIZE);
  pGroup->assignedLeader.acked = false;
}

@@ -979,7 +976,7 @@ bool mndUpdateArbGroupByHeartBeat(SArbGroup *pGroup, SVArbHbRspMember *pRspMembe

  // update token
  mndArbGroupDupObj(pGroup, pNewGroup);
  (void)memcpy(pNewGroup->members[index].state.token, pRspMember->memberToken, TSDB_ARB_TOKEN_SIZE);
  tstrncpy(pNewGroup->members[index].state.token, pRspMember->memberToken, TSDB_ARB_TOKEN_SIZE);
  pNewGroup->isSync = false;

  bool resetAssigned = false;

@@ -239,12 +239,13 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
  MND_TMQ_RETURN_CHECK(mndAcquireConsumer(pMnode, consumerId, &pConsumer));
  MND_TMQ_RETURN_CHECK(checkPrivilege(pMnode, pConsumer, &rsp, pMsg->info.conn.user));
  atomic_store_32(&pConsumer->hbStatus, 0);
  mDebug("consumer:0x%" PRIx64 " receive hb pollFlag:%d %d", consumerId, req.pollFlag, pConsumer->pollStatus);
  if (req.pollFlag == 1){
    atomic_store_32(&pConsumer->pollStatus, 0);
  }

  storeOffsetRows(pMnode, &req, pConsumer);
  rsp.debugFlag = tqClientDebug;
  rsp.debugFlag = tqClientDebugFlag;
  code = buildMqHbRsp(pMsg, &rsp);

END:
@@ -400,8 +400,8 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
    pSdb->commitTerm = pSdb->applyTerm;
    pSdb->commitConfig = pSdb->applyConfig;
    memcpy(pSdb->tableVer, tableVer, sizeof(tableVer));
    mInfo("read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, pSdb->commitIndex,
          pSdb->commitTerm, pSdb->commitConfig);
    mInfo("vgId:1, trans:0, read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file,
          pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig);

_OVER:
  if ((ret = taosCloseFile(&pFile)) != 0) {

@@ -573,7 +573,8 @@ static int32_t sdbWriteFileImp(SSdb *pSdb, int32_t skip_type) {
    pSdb->commitIndex = pSdb->applyIndex;
    pSdb->commitTerm = pSdb->applyTerm;
    pSdb->commitConfig = pSdb->applyConfig;
    mInfo("write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s",
    mInfo("vgId:1, trans:0, write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64
          " file:%s",
          pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig, curfile);
  }

@@ -610,8 +611,8 @@ int32_t sdbWriteFile(SSdb *pSdb, int32_t delta) {
  if (code != 0) {
    mError("failed to write sdb file since %s", tstrerror(code));
  } else {
    mInfo("write sdb file success, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64, pSdb->applyIndex,
          pSdb->applyTerm, pSdb->applyConfig);
    mInfo("vgId:1, trans:0, write sdb file success, apply index:%" PRId64 ", term:%" PRId64 ", config:%" PRId64,
          pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig);
  }
  (void)taosThreadMutexUnlock(&pSdb->filelock);
  return code;

@@ -243,7 +243,7 @@ int32_t extractMsgFromWal(SWalReader *pReader, void **pItem, int64_t maxVer, con
int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
bool    tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char *idstr);
int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet, int64_t *createTime);
int32_t tqGetStreamExecInfo(SVnode *pVnode, int64_t streamId, int64_t *pDelay, bool *fhFinished);

// sma

@@ -163,7 +163,7 @@ int32_t metaDropTables(SMeta* pMeta, SArray* tbUids);
int             metaTtlFindExpired(SMeta* pMeta, int64_t timePointMs, SArray* tbUids, int32_t ttlDropMaxCount);
int             metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp);
int             metaUpdateChangeTimeWithLock(SMeta* pMeta, tb_uid_t uid, int64_t changeTimeMs);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, int64_t *createTime);
int32_t         metaGetTbTSchemaNotNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema);
int32_t         metaGetTbTSchemaMaybeNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema);
STSchema*       metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock);

@@ -371,7 +371,7 @@ int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType) {
  return 0;
}

SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) {
SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock, int64_t *createTime) {
  void   *pData = NULL;
  int     nData = 0;
  int64_t version;

@@ -407,6 +407,9 @@ _query:
    }
  } else if (me.type == TSDB_CHILD_TABLE) {
    uid = me.ctbEntry.suid;
    if (createTime != NULL){
      *createTime = me.ctbEntry.btime;
    }
    tDecoderClear(&dc);
    goto _query;
  } else {

@@ -617,7 +620,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) {
  STSchema       *pTSchema = NULL;
  SSchemaWrapper *pSW = NULL;

  pSW = metaGetTableSchema(pMeta, uid, sver, lock);
  pSW = metaGetTableSchema(pMeta, uid, sver, lock, NULL);
  if (!pSW) return NULL;

  pTSchema = tBuildTSchema(pSW->pSchema, pSW->nCols, pSW->version);

@@ -552,7 +552,7 @@ int32_t setForSnapShot(SSnapContext* ctx, int64_t uid) {

void taosXSetTablePrimaryKey(SSnapContext* ctx, int64_t uid) {
  bool            ret = false;
  SSchemaWrapper* schema = metaGetTableSchema(ctx->pMeta, uid, -1, 1);
  SSchemaWrapper* schema = metaGetTableSchema(ctx->pMeta, uid, -1, 1, NULL);
  if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) {
    ret = true;
  }

@@ -263,7 +263,7 @@ bool tqGetTablePrimaryKey(STqReader* pReader) { return pReader->hasPrimaryKey; }

void tqSetTablePrimaryKey(STqReader* pReader, int64_t uid) {
  bool            ret = false;
  SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1);
  SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1, NULL);
  if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) {
    ret = true;
  }

@@ -669,7 +669,7 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char*
      (pReader->cachedSchemaVer != sversion)) {
    tDeleteSchemaWrapper(pReader->pSchemaWrapper);

    pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1, NULL);
    if (pReader->pSchemaWrapper == NULL) {
      tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", uid:%" PRId64
             "version %d, possibly dropped table",

@@ -961,10 +961,8 @@ END:
  return code;
}

int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet) {
  tqDebug("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk);
  SSDataBlock* block = NULL;

int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet, int64_t *createTime) {
  tqTrace("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk);
  SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
  if (pSubmitTbData == NULL) {
    return terrno;

@@ -980,7 +978,7 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
    pReader->lastBlkUid = uid;

    tDeleteSchemaWrapper(pReader->pSchemaWrapper);
    pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1, createTime);
    if (pReader->pSchemaWrapper == NULL) {
      tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", version %d, possibly dropped table",
             pReader->pWalReader->pWal->cfg.vgId, uid, pReader->cachedSchemaVer);

@@ -210,36 +210,21 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat

     if (pDataBlock != NULL && pDataBlock->info.rows > 0) {
       if (pRsp->withTbName) {
-        if (pOffset->type == TMQ_OFFSET__LOG) {
-          int64_t uid = pExec->pTqReader->lastBlkUid;
-          if (tqAddTbNameToRsp(pTq, uid, pRsp, 1) < 0) {
-            tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId);
-            continue;
-          }
-        } else {
-          char* tbName = taosStrdup(qExtractTbnameFromTask(task));
-          if (tbName == NULL) {
-            tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId);
-            return terrno;
-          }
-          if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){
-            tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId);
-            continue;
-          }
-        }
+        char* tbName = taosStrdup(qExtractTbnameFromTask(task));
+        if (tbName == NULL) {
+          tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId);
+          return terrno;
+        }
+        if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){
+          tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId);
+          continue;
+        }
       }
       if (pRsp->withSchema) {
-        if (pOffset->type == TMQ_OFFSET__LOG) {
-          if (tqAddBlockSchemaToRsp(pExec, pRsp) != 0){
-            tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId);
-            continue;
-          }
-        } else {
-          SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
-          if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){
-            tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId);
-            continue;
-          }
-        }
+        SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
+        if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){
+          tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId);
+          continue;
+        }
       }

@@ -249,12 +234,9 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat
         continue;
       }
       pRsp->blockNum++;
-      if (pOffset->type == TMQ_OFFSET__LOG) {
-        continue;
-      } else {
-        rowCnt += pDataBlock->info.rows;
-        if (rowCnt <= tmqRowSize) continue;
-      }
+      rowCnt += pDataBlock->info.rows;
+      if (rowCnt <= tmqRowSize) continue;
     }

     // get meta
@@ -296,6 +278,54 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat
   return code;
 }

+static int32_t buildCreateTbInfo(SMqDataRsp* pRsp, SVCreateTbReq* pCreateTbReq){
+  int32_t code = 0;
+  void*   createReq = NULL;
+  if (pRsp->createTableNum == 0) {
+    pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
+    if (pRsp->createTableLen == NULL) {
+      code = terrno;
+      goto END;
+    }
+    pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
+    if (pRsp->createTableReq == NULL) {
+      code = terrno;
+      goto END;
+    }
+  }
+
+  uint32_t len = 0;
+  tEncodeSize(tEncodeSVCreateTbReq, pCreateTbReq, len, code);
+  if (TSDB_CODE_SUCCESS != code) {
+    goto END;
+  }
+  createReq = taosMemoryCalloc(1, len);
+  if (createReq == NULL){
+    code = terrno;
+    goto END;
+  }
+  SEncoder encoder = {0};
+  tEncoderInit(&encoder, createReq, len);
+  code = tEncodeSVCreateTbReq(&encoder, pCreateTbReq);
+  tEncoderClear(&encoder);
+  if (code < 0) {
+    goto END;
+  }
+  if (taosArrayPush(pRsp->createTableLen, &len) == NULL){
+    code = terrno;
+    goto END;
+  }
+  if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){
+    code = terrno;
+    goto END;
+  }
+  pRsp->createTableNum++;
+
+  return 0;
+END:
+  taosMemoryFree(createReq);
+  return code;
+}
+
 static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded){
   int32_t code = 0;
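The helper added above follows the codebase's two-pass serialization idiom: tEncodeSize computes the required byte count, a buffer of exactly that size is allocated, and a second pass writes into it, with a single cleanup label freeing the buffer on any failure. A minimal sketch of the same shape, where MyMsg and encodeMyMsg are hypothetical stand-ins for SVCreateTbReq and tEncodeSVCreateTbReq:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int32_t a, b; } MyMsg;

/* First pass (buf == NULL): report the size only. Second pass: write bytes. */
static int32_t encodeMyMsg(uint8_t *buf, const MyMsg *msg, uint32_t *len) {
  uint32_t need = 2 * sizeof(int32_t);
  if (buf != NULL) {
    memcpy(buf, &msg->a, sizeof(int32_t));
    memcpy(buf + sizeof(int32_t), &msg->b, sizeof(int32_t));
  }
  *len = need;
  return 0;
}

static int32_t packMyMsg(const MyMsg *msg, void **out, uint32_t *outLen) {
  uint32_t len = 0;
  int32_t  code = encodeMyMsg(NULL, msg, &len);  /* sizing pass */
  if (code != 0) return code;
  uint8_t *buf = calloc(1, len);
  if (buf == NULL) return -1;
  code = encodeMyMsg(buf, msg, &len);            /* writing pass */
  if (code != 0) { free(buf); return code; }     /* free on failure, as above */
  *out = buf;
  *outLen = len;
  return 0;
}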
@@ -315,7 +345,8 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int
   }

   SSubmitTbData* pSubmitTbDataRet = NULL;
-  code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet);
+  int64_t        createTime = INT64_MAX;
+  code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet, &createTime);
   if (code != 0) {
     tqError("vgId:%d, failed to retrieve block", pTq->pVnode->config.vgId);
     goto END;
@@ -333,46 +364,13 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int
     }
   }
   if (pHandle->fetchMeta != WITH_DATA && pSubmitTbDataRet->pCreateTbReq != NULL) {
-    if (pRsp->createTableNum == 0) {
-      pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
-      if (pRsp->createTableLen == NULL) {
-        code = terrno;
-        goto END;
-      }
-      pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
-      if (pRsp->createTableReq == NULL) {
-        code = terrno;
-        goto END;
-      }
-    }
-
-    uint32_t len = 0;
-    tEncodeSize(tEncodeSVCreateTbReq, pSubmitTbDataRet->pCreateTbReq, len, code);
-    if (TSDB_CODE_SUCCESS != code) {
-      goto END;
-    }
-    void* createReq = taosMemoryCalloc(1, len);
-    if (createReq == NULL){
-      code = terrno;
-      goto END;
-    }
-    SEncoder encoder = {0};
-    tEncoderInit(&encoder, createReq, len);
-    code = tEncodeSVCreateTbReq(&encoder, pSubmitTbDataRet->pCreateTbReq);
-    tEncoderClear(&encoder);
-    if (code < 0) {
-      taosMemoryFree(createReq);
-      goto END;
-    }
-    if (taosArrayPush(pRsp->createTableLen, &len) == NULL){
-      taosMemoryFree(createReq);
-      goto END;
-    }
-    if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){
-      taosMemoryFree(createReq);
-      goto END;
-    }
-    pRsp->createTableNum++;
+    if (pSubmitTbDataRet->ctimeMs - createTime <= 1000) {  // judge if table is already created to avoid sending crateTbReq
+      code = buildCreateTbInfo(pRsp, pSubmitTbDataRet->pCreateTbReq);
+      if (code != 0){
+        tqError("vgId:%d, failed to build create table info", pTq->pVnode->config.vgId);
+        goto END;
+      }
+    }
   }
   if (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL) {
     goto END;

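The ctimeMs check above uses the table create time now returned through metaGetTableSchema to decide whether a submit message carries a genuinely new table: the embedded create-table request is forwarded only when the submit's client timestamp falls within one second of the table's creation. A hedged sketch of that decision rule; the 1000 ms window comes from the diff, the rest is this reading of it:

#include <stdbool.h>
#include <stdint.h>

/* Sketch of the gating rule applied in tqProcessSubData above. createTime is
 * initialized to INT64_MAX; if the meta layer cannot report a create time the
 * difference is hugely negative and still satisfies the check, so tables with
 * an unknown create time keep forwarding their create request. Only tables
 * created well before the submit are filtered out, avoiding duplicate
 * create-table messages to subscribers. */
static bool shouldForwardCreateTbReq(int64_t submitCtimeMs, int64_t tableCreateTimeMs) {
  return submitCtimeMs - tableCreateTimeMs <= 1000;
}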
@@ -51,7 +51,8 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) {
   pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
   pRsp->blockSchema = taosArrayInit(0, sizeof(void*));

-  if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
+  if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL ||
+      pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
     if (pRsp->blockData != NULL) {
       taosArrayDestroy(pRsp->blockData);
       pRsp->blockData = NULL;
@@ -71,6 +72,7 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) {
     taosArrayDestroy(pRsp->blockSchema);
     pRsp->blockSchema = NULL;
   }
+
   return terrno;
 }

@@ -723,34 +723,32 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid,
   rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
   {
     SLastCol *pLastCol = NULL;
-    code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol);
-    if (code == TSDB_CODE_INVALID_PARA) {
-      tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-    } else if (code != TSDB_CODE_SUCCESS) {
-      tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-      goto _exit;
+    if (values_list[0] != NULL) {
+      code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol);
+      if (code != TSDB_CODE_SUCCESS) {
+        tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
+                  tstrerror(code));
+        goto _exit;
+      }
+      if (NULL != pLastCol) {
+        rocksdb_writebatch_delete(wb, keys_list[0], klen);
+      }
+      taosMemoryFreeClear(pLastCol);
     }
-    if (NULL != pLastCol) {
-      rocksdb_writebatch_delete(wb, keys_list[0], klen);
-    }
-    taosMemoryFreeClear(pLastCol);

     pLastCol = NULL;
-    code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol);
-    if (code == TSDB_CODE_INVALID_PARA) {
-      tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-    } else if (code != TSDB_CODE_SUCCESS) {
-      tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-      goto _exit;
+    if (values_list[1] != NULL) {
+      code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol);
+      if (code != TSDB_CODE_SUCCESS) {
+        tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
+                  tstrerror(code));
+        goto _exit;
+      }
+      if (NULL != pLastCol) {
+        rocksdb_writebatch_delete(wb, keys_list[1], klen);
+      }
+      taosMemoryFreeClear(pLastCol);
     }
-    if (NULL != pLastCol) {
-      rocksdb_writebatch_delete(wb, keys_list[1], klen);
-    }
-    taosMemoryFreeClear(pLastCol);

     rocksdb_free(values_list[0]);
     rocksdb_free(values_list[1]);
@@ -1218,14 +1216,13 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray
     SColVal *pColVal = &updCtx->colVal;

     SLastCol *pLastCol = NULL;
-    code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol);
-    if (code == TSDB_CODE_INVALID_PARA) {
-      tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-    } else if (code != TSDB_CODE_SUCCESS) {
-      tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-      goto _exit;
+    if (values_list[i] != NULL) {
+      code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol);
+      if (code != TSDB_CODE_SUCCESS) {
+        tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
+                  tstrerror(code));
+        goto _exit;
+      }
     }
     /*
     if (code) {
@@ -1692,14 +1689,13 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA
       continue;
     }

-    code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol);
-    if (code == TSDB_CODE_INVALID_PARA) {
-      tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-    } else if (code != TSDB_CODE_SUCCESS) {
-      tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-      goto _exit;
+    if (values_list[i] != NULL) {
+      code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol);
+      if (code != TSDB_CODE_SUCCESS) {
+        tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
+                  tstrerror(code));
+        goto _exit;
+      }
     }
     SLastCol *pToFree = pLastCol;
     SIdxKey  *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[j];
@@ -1959,14 +1955,13 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
   rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
   for (int i = 0; i < numKeys; ++i) {
     SLastCol *pLastCol = NULL;
-    code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol);
-    if (code == TSDB_CODE_INVALID_PARA) {
-      tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-    } else if (code != TSDB_CODE_SUCCESS) {
-      tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
-                tstrerror(code));
-      goto _exit;
+    if (values_list[i] != NULL) {
+      code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol);
+      if (code != TSDB_CODE_SUCCESS) {
+        tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__,
+                  tstrerror(code));
+        goto _exit;
+      }
     }
     SIdxKey  *idxKey = taosArrayGet(remainCols, i);
     SLastKey *pLastKey = &idxKey->key;

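All four tsdb cache hunks above apply the same fix: rocksdb's multi-get returns a NULL value pointer for keys that are simply absent, and the old code fed that NULL into tsdbCacheDeserialize and then had to tolerate the resulting TSDB_CODE_INVALID_PARA. Guarding on the pointer first keeps the error path meaningful. A minimal sketch of the pattern against the real rocksdb C API:

#include <rocksdb/c.h>
#include <stddef.h>

/* Sketch: after rocksdb_multi_get(), a NULL values_list[i] means "key not
 * found" and must not be treated as a deserialization error. */
static void handleMultiGetSlot(char *value, size_t value_size) {
  if (value == NULL) {
    return;  /* missing key: nothing cached, nothing to decode */
  }
  /* ... decode value/value_size here; a failure now is a real error ... */
  rocksdb_free(value);  /* multi-get values are owned by the caller */
}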
@@ -702,7 +702,7 @@ int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
 }

 int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
-  SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0);
+  SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0, NULL);
   if (pSW) {
     *num = pSW->nCols;
     tDeleteSchemaWrapper(pSW);

@@ -86,7 +86,7 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p

   pOperator->exprSupp.hasWindowOrGroup = true;
   pInfo->tsSlotId = ((SColumnNode*)pAnomalyNode->window.pTspk)->slotId;
-  strncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, sizeof(pInfo->anomalyOpt));
+  tstrncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, sizeof(pInfo->anomalyOpt));

   if (pAnomalyNode->window.pExprs != NULL) {
     int32_t numOfScalarExpr = 0;

@@ -320,7 +320,7 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const
   if (!pInfo->pTaskId) {
     return terrno;
   }
-  strncpy(pInfo->pTaskId, id, len);
+  tstrncpy(pInfo->pTaskId, id, len);
   for (int32_t i = 0; i < numOfSources; ++i) {
     SSourceDataInfo dataInfo = {0};
     dataInfo.status = EX_SOURCE_DATA_NOT_READY;

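The strncpy-to-tstrncpy swaps running through these executor hunks all target the same weakness: strncpy does not NUL-terminate when the source fills the buffer, and strcpy does not bound the copy at all. tstrncpy is the codebase's bounded copy that always terminates. A sketch of the guarantee, assuming tstrncpy behaves like the usual truncating copy (its exact implementation lives in the util layer):

#include <string.h>

/* Sketch of a tstrncpy-like helper: copies at most size-1 bytes and always
 * NUL-terminates, unlike strncpy which can leave dst unterminated. */
static char *bounded_copy(char *dst, const char *src, size_t size) {
  if (size == 0) return dst;
  size_t n = strlen(src);
  if (n >= size) n = size - 1;  /* truncate, keeping room for the terminator */
  memcpy(dst, src, n);
  dst[n] = '\0';
  return dst;
}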
@@ -545,8 +545,9 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI
   return code;
 }

-int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
-                                    int32_t* tversion, int32_t idx, bool* tbGet) {
+int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, int32_t dbNameBuffLen, char* tableName,
+                                    int32_t tbaleNameBuffLen, int32_t* sversion, int32_t* tversion, int32_t idx,
+                                    bool* tbGet) {
   *tbGet = false;

   if (tinfo == NULL || dbName == NULL || tableName == NULL) {
@@ -567,12 +568,12 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
   *sversion = pSchemaInfo->sw->version;
   *tversion = pSchemaInfo->tversion;
   if (pSchemaInfo->dbname) {
-    strcpy(dbName, pSchemaInfo->dbname);
+    tstrncpy(dbName, pSchemaInfo->dbname, dbNameBuffLen);
   } else {
     dbName[0] = 0;
   }
   if (pSchemaInfo->tablename) {
-    strcpy(tableName, pSchemaInfo->tablename);
+    tstrncpy(tableName, pSchemaInfo->tablename, tbaleNameBuffLen);
   } else {
     tableName[0] = 0;
   }
@@ -1494,6 +1495,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT

     cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
+    tstrncpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName, TSDB_TABLE_NAME_LEN);
     // pTaskInfo->streamInfo.suid = mtInfo.suid == 0 ? mtInfo.uid : mtInfo.suid;
     tDeleteSchemaWrapper(pTaskInfo->streamInfo.schema);
     pTaskInfo->streamInfo.schema = mtInfo.schema;

@@ -6345,7 +6345,7 @@ int32_t fillTableCountScanDataBlock(STableCountScanSupp* pSupp, char* dbName, ch
   QUERY_CHECK_NULL(colInfoData, code, lino, _end, terrno);
   if (strlen(stbName) != 0) {
     char varStbName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
-    strncpy(varDataVal(varStbName), stbName, TSDB_TABLE_NAME_LEN);
+    tstrncpy(varDataVal(varStbName), stbName, TSDB_TABLE_NAME_LEN);
     varDataSetLen(varStbName, strlen(stbName));
     code = colDataSetVal(colInfoData, 0, varStbName, false);
     QUERY_CHECK_CODE(code, lino, _end);

@@ -425,7 +425,7 @@ static bool sysTableIsOperatorCondOnOneTable(SNode* pCond, char* condTable) {
     SValueNode* pValue = (SValueNode*)node->pRight;
     if (pValue->node.resType.type == TSDB_DATA_TYPE_NCHAR || pValue->node.resType.type == TSDB_DATA_TYPE_VARCHAR) {
       char* value = nodesGetValueFromNode(pValue);
-      strncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN);
+      tstrncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN);
       return true;
     }
   }
@@ -914,41 +914,41 @@ _end:
   }
 }

-int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len) {
+int32_t convertTagDataToStr(char* str, int32_t strBuffLen, int type, void* buf, int32_t bufSize, int32_t* len) {
   int32_t n = 0;

   switch (type) {
     case TSDB_DATA_TYPE_NULL:
-      n = sprintf(str, "null");
+      n = tsnprintf(str, strBuffLen, "null");
       break;

     case TSDB_DATA_TYPE_BOOL:
-      n = sprintf(str, (*(int8_t*)buf) ? "true" : "false");
+      n = tsnprintf(str, strBuffLen, (*(int8_t*)buf) ? "true" : "false");
       break;

     case TSDB_DATA_TYPE_TINYINT:
-      n = sprintf(str, "%d", *(int8_t*)buf);
+      n = tsnprintf(str, strBuffLen, "%d", *(int8_t*)buf);
       break;

     case TSDB_DATA_TYPE_SMALLINT:
-      n = sprintf(str, "%d", *(int16_t*)buf);
+      n = tsnprintf(str, strBuffLen, "%d", *(int16_t*)buf);
       break;

     case TSDB_DATA_TYPE_INT:
-      n = sprintf(str, "%d", *(int32_t*)buf);
+      n = tsnprintf(str, strBuffLen, "%d", *(int32_t*)buf);
       break;

     case TSDB_DATA_TYPE_BIGINT:
     case TSDB_DATA_TYPE_TIMESTAMP:
-      n = sprintf(str, "%" PRId64, *(int64_t*)buf);
+      n = tsnprintf(str, strBuffLen, "%" PRId64, *(int64_t*)buf);
       break;

     case TSDB_DATA_TYPE_FLOAT:
-      n = sprintf(str, "%.5f", GET_FLOAT_VAL(buf));
+      n = tsnprintf(str, strBuffLen, "%.5f", GET_FLOAT_VAL(buf));
       break;

     case TSDB_DATA_TYPE_DOUBLE:
-      n = sprintf(str, "%.9f", GET_DOUBLE_VAL(buf));
+      n = tsnprintf(str, strBuffLen, "%.9f", GET_DOUBLE_VAL(buf));
       break;

     case TSDB_DATA_TYPE_BINARY:
@@ -973,19 +973,19 @@ int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int
       n = length;
       break;
     case TSDB_DATA_TYPE_UTINYINT:
-      n = sprintf(str, "%u", *(uint8_t*)buf);
+      n = tsnprintf(str, strBuffLen, "%u", *(uint8_t*)buf);
       break;

     case TSDB_DATA_TYPE_USMALLINT:
-      n = sprintf(str, "%u", *(uint16_t*)buf);
+      n = tsnprintf(str, strBuffLen, "%u", *(uint16_t*)buf);
       break;

     case TSDB_DATA_TYPE_UINT:
-      n = sprintf(str, "%u", *(uint32_t*)buf);
+      n = tsnprintf(str, strBuffLen, "%u", *(uint32_t*)buf);
       break;

     case TSDB_DATA_TYPE_UBIGINT:
-      n = sprintf(str, "%" PRIu64, *(uint64_t*)buf);
+      n = tsnprintf(str, strBuffLen, "%" PRIu64, *(uint64_t*)buf);
       break;

     default:
@@ -1065,14 +1065,21 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo,
       int8_t tagType = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].type;
       pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4);
       QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno);
+      int32_t tagStrBufflen = 32;
       char    tagTypeStr[VARSTR_HEADER_SIZE + 32];
-      int     tagTypeLen = sprintf(varDataVal(tagTypeStr), "%s", tDataTypes[tagType].name);
+      int     tagTypeLen = tsnprintf(varDataVal(tagTypeStr), tagStrBufflen, "%s", tDataTypes[tagType].name);
+      tagStrBufflen -= tagTypeLen;
+      if (tagStrBufflen <= 0) {
+        code = TSDB_CODE_INVALID_PARA;
+        QUERY_CHECK_CODE(code, lino, _end);
+      }
+
       if (tagType == TSDB_DATA_TYPE_NCHAR) {
-        tagTypeLen += sprintf(
-            varDataVal(tagTypeStr) + tagTypeLen, "(%d)",
+        tagTypeLen += tsnprintf(
+            varDataVal(tagTypeStr) + tagTypeLen, tagStrBufflen, "(%d)",
             (int32_t)(((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
       } else if (IS_VAR_DATA_TYPE(tagType)) {
-        tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)",
+        tagTypeLen += tsnprintf(varDataVal(tagTypeStr) + tagTypeLen, tagStrBufflen, "(%d)",
                               (int32_t)((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE));
       }
       varDataSetLen(tagTypeStr, tagTypeLen);
@@ -1127,7 +1134,7 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo,
         QUERY_CHECK_NULL(tagVarChar, code, lino, _end, terrno);
         int32_t len = -1;
         if (tagLen > 0)
-          convertTagDataToStr(varDataVal(tagVarChar), tagType, tagData, tagLen, &len);
+          convertTagDataToStr(varDataVal(tagVarChar), bufSize + 1 - VARSTR_HEADER_SIZE, tagType, tagData, tagLen, &len);
         else
           len = 0;
         varDataSetLen(tagVarChar, len);
@@ -1197,13 +1204,19 @@ static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo,
     int8_t colType = schemaRow->pSchema[i].type;
     pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4);
     QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno);
+    int32_t colStrBufflen = 32;
     char    colTypeStr[VARSTR_HEADER_SIZE + 32];
-    int     colTypeLen = sprintf(varDataVal(colTypeStr), "%s", tDataTypes[colType].name);
+    int     colTypeLen = tsnprintf(varDataVal(colTypeStr), colStrBufflen, "%s", tDataTypes[colType].name);
+    colStrBufflen -= colTypeLen;
+    if (colStrBufflen <= 0) {
+      code = TSDB_CODE_INVALID_PARA;
+      QUERY_CHECK_CODE(code, lino, _end);
+    }
     if (colType == TSDB_DATA_TYPE_VARCHAR) {
-      colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)",
+      colTypeLen += tsnprintf(varDataVal(colTypeStr) + colTypeLen, colStrBufflen, "(%d)",
                             (int32_t)(schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE));
     } else if (colType == TSDB_DATA_TYPE_NCHAR) {
-      colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)",
+      colTypeLen += tsnprintf(varDataVal(colTypeStr) + colTypeLen, colStrBufflen, "(%d)",
                             (int32_t)((schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
     }
     varDataSetLen(colTypeStr, colTypeLen);
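One detail these sprintf-to-tsnprintf conversions share: when several fragments are appended into one buffer (the type name, then a "(%d)" width suffix), the remaining capacity must shrink after each write, which is why tagStrBufflen and colStrBufflen are decremented and checked between calls. A sketch of the append pattern using standard snprintf, which has the same return-value convention assumed here:

#include <stdio.h>

/* Sketch of appending formatted fragments with a shrinking budget, the same
 * shape as the tagTypeStr/colTypeStr code above (which uses tsnprintf). */
static int formatTypeName(char *buf, int cap, const char *name, int bytes) {
  int len = snprintf(buf, (size_t)cap, "%s", name);
  if (len < 0 || len >= cap) return -1;  /* would truncate: report, don't overflow */
  int remain = cap - len;
  int n = snprintf(buf + len, (size_t)remain, "(%d)", bytes);
  if (n < 0 || n >= remain) return -1;
  return len + n;
}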
@@ -2019,8 +2032,7 @@ static EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) {

       SValueNode* node = (SValueNode*)pNode;
       char*       dbName = nodesGetValueFromNode(node);
-      strncpy(pContext, varDataVal(dbName), varDataLen(dbName));
-      *((char*)pContext + varDataLen(dbName)) = 0;
+      tstrncpy((char*)pContext, varDataVal(dbName), TSDB_DB_NAME_LEN);
       return DEAL_RES_END;  // stop walk
     }
     default:
@@ -2056,11 +2068,11 @@ static int32_t doSysTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes)
       getDBNameFromCondition(pInfo->pCondition, dbName);
       if (strncasecmp(name, TSDB_INS_TABLE_COMPACTS, TSDB_TABLE_FNAME_LEN) != 0 &&
           strncasecmp(name, TSDB_INS_TABLE_COMPACT_DETAILS, TSDB_TABLE_FNAME_LEN) != 0) {
-        sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
+        TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName));
       }
     } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0) {
       getDBNameFromCondition(pInfo->pCondition, dbName);
-      if (dbName[0]) sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName);
+      if (dbName[0]) TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName));
       (void)sysTableIsCondOnOneTable(pInfo->pCondition, pInfo->req.filterTb);
     }

@@ -115,7 +115,7 @@ SSDataBlock* getDummyBlock(SOperatorInfo* pOperator) {
     int32_t code = colDataSetVal(pColInfo, i, reinterpret_cast<const char*>(&v), false);
     ASSERT(code == 0);

-    //    sprintf(buf, "this is %d row", i);
+    //    tsnprintf(buf, "this is %d row", i);
     //    STR_TO_VARSTR(b1, buf);
     //
     //    SColumnInfoData* pColInfo2 = static_cast<SColumnInfoData*>(TARRAY_GET_ELEM(pBlock->pDataBlock, 1));
@@ -179,7 +179,7 @@ SSDataBlock* get2ColsDummyBlock(SOperatorInfo* pOperator) {
     code = colDataSetVal(pColInfo1, i, reinterpret_cast<const char*>(&v), false);
     ASSERT(code == 0);

-    //    sprintf(buf, "this is %d row", i);
+    //    tsnprintf(buf, "this is %d row", i);
     //    STR_TO_VARSTR(b1, buf);
     //
     //    SColumnInfoData* pColInfo2 = static_cast<SColumnInfoData*>(TARRAY_GET_ELEM(pBlock->pDataBlock, 1));
@@ -26,7 +26,7 @@

 TEST(testCase, linear_hash_Tests) {
   taosSeedRand(taosGetTimestampSec());
-  strcpy(tsTempDir, "/tmp/");
+  tstrncpy((char*)tsTempDir, "/tmp/", sizeof(tsTempDir));

   _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT);

@@ -183,7 +183,7 @@ void monGenClusterInfoTable(SMonInfo *pMonitor){
   }

   if (taosHashRemove(tsMonitor.metrics, metric_names[i], strlen(metric_names[i])) != 0) {
-    uError("failed to remove metric %s", metric_names[i]);
+    uTrace("failed to remove metric %s", metric_names[i]);
   }
 }

@@ -652,7 +652,7 @@ void monGenMnodeRoleTable(SMonInfo *pMonitor){
   }

   if (taosHashRemove(tsMonitor.metrics, mnodes_role_gauges[i], strlen(mnodes_role_gauges[i])) != 0) {
-    uError("failed to remove metric %s", mnodes_role_gauges[i]);
+    uTrace("failed to remove metric %s", mnodes_role_gauges[i]);
   }
 }

@@ -725,7 +725,7 @@ void monGenVnodeRoleTable(SMonInfo *pMonitor){
   }

   if (taosHashRemove(tsMonitor.metrics, vnodes_role_gauges[i], strlen(vnodes_role_gauges[i])) != 0) {
-    uError("failed to remove metric %s", vnodes_role_gauges[i]);
+    uTrace("failed to remove metric %s", vnodes_role_gauges[i]);
   }
 }

@@ -26,7 +26,10 @@ class MonitorTest : public ::testing::Test {
     monInit(&cfg);
   }

-  static void TearDownTestSuite() { monCleanup(); }
+  static void TearDownTestSuite() {
+    monCleanup();
+    taosMsleep(100);
+  }

  public:
   void SetUp() override {}

@@ -1974,14 +1974,13 @@ static SNode* setDatabaseOptionImpl(SAstCreateContext* pCxt, SNode* pOptions, ED
     case DB_OPTION_S3_COMPACT:
       pDbOptions->s3Compact = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
       break;
-    case DB_OPTION_KEEP_TIME_OFFSET: {
+    case DB_OPTION_KEEP_TIME_OFFSET:
       pDbOptions->keepTimeOffset = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
       break;
-      case DB_OPTION_ENCRYPT_ALGORITHM:
-        COPY_STRING_FORM_STR_TOKEN(pDbOptions->encryptAlgorithmStr, (SToken*)pVal);
-        pDbOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO;
-        break;
-    }
+    case DB_OPTION_ENCRYPT_ALGORITHM:
+      COPY_STRING_FORM_STR_TOKEN(pDbOptions->encryptAlgorithmStr, (SToken*)pVal);
+      pDbOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO;
+      break;
     default:
       break;
   }

@@ -886,17 +886,32 @@ static bool findFileds(SSchema* pSchema, TAOS_FIELD* fields, int numFields) {
   return false;
 }

-int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* tFields,
-                     int numFields, bool needChangeLength, char* errstr, int32_t errstrLen) {
+int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* tFields,
+                     int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw) {
+  int ret = 0;
   if(data == NULL) {
     uError("rawBlockBindData, data is NULL");
     return TSDB_CODE_APP_ERROR;
   }
+  void* tmp =
+      taosHashGet(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, sizeof(pTableMeta->uid));
+  SVCreateTbReq *pCreateReqTmp = NULL;
+  if (tmp == NULL && pCreateTb != NULL){
+    ret = cloneSVreateTbReq(pCreateTb, &pCreateReqTmp);
+    if (ret != TSDB_CODE_SUCCESS){
+      uError("cloneSVreateTbReq error");
+      goto end;
+    }
+  }
+
   STableDataCxt* pTableCxt = NULL;
-  int ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid,
-                               sizeof(pTableMeta->uid), pTableMeta, pCreateTb, &pTableCxt, true, false);
+  ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid,
+                           sizeof(pTableMeta->uid), pTableMeta, &pCreateReqTmp, &pTableCxt, true, false);
+  if (pCreateReqTmp != NULL) {
+    tdDestroySVCreateTbReq(pCreateReqTmp);
+    taosMemoryFree(pCreateReqTmp);
+  }
+
   if (ret != TSDB_CODE_SUCCESS) {
     uError("insGetTableDataCxt error");
     goto end;
@@ -948,12 +963,17 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
     ret = TSDB_CODE_INVALID_PARA;
     goto end;
   }
-  if (tFields != NULL && numFields > boundInfo->numOfBound) {
-    if (errstr != NULL)
-      snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound);
-    ret = TSDB_CODE_INVALID_PARA;
-    goto end;
-  }
+  //  if (tFields != NULL && numFields > boundInfo->numOfBound) {
+  //    if (errstr != NULL) snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound);
+  //    ret = TSDB_CODE_INVALID_PARA;
+  //    goto end;
+  //  }
   if (tFields == NULL && numOfCols != boundInfo->numOfBound) {
     if (errstr != NULL) snprintf(errstr, errstrLen, "numFields:%d not equal to num of bound cols:%d", numOfCols, boundInfo->numOfBound);
     ret = TSDB_CODE_INVALID_PARA;
     goto end;
   }

   if (tFields == NULL) {
     for (int j = 0; j < boundInfo->numOfBound; j++) {
       SSchema* pColSchema = &pSchema[j];
@@ -991,7 +1011,13 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
     for (int i = 0; i < numFields; i++) {
       for (int j = 0; j < boundInfo->numOfBound; j++) {
         SSchema* pColSchema = &pSchema[j];
-        if (strcmp(pColSchema->name, tFields[i].name) == 0) {
+        char*    fieldName = NULL;
+        if (raw) {
+          fieldName = ((SSchemaWrapper*)tFields)->pSchema[i].name;
+        } else {
+          fieldName = ((TAOS_FIELD*)tFields)[i].name;
+        }
+        if (strcmp(pColSchema->name, fieldName) == 0) {
           if (*fields != pColSchema->type && *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) {
             if (errstr != NULL)
               snprintf(errstr, errstrLen,
@@ -1011,6 +1037,11 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate
             pStart += numOfRows * sizeof(int32_t);
           } else {
             pStart += BitmapLen(numOfRows);
+            //            for(int k = 0; k < numOfRows; k++) {
+            //              if(!colDataIsNull_f(offset, k) && pColSchema->type == TSDB_DATA_TYPE_INT){
+            //                printf("colName:%s,val:%d", fieldName, *(int32_t*)(pStart + k * sizeof(int32_t)));
+            //              }
+            //            }
           }
           char* pData = pStart;

@@ -533,7 +533,7 @@ int32_t qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) {

   while (true) {
     tbGet = false;
-    code = qGetQueryTableSchemaVersion(pTaskInfo, dbFName, tbName, &tbInfo.sversion, &tbInfo.tversion, i, &tbGet);
+    code = qGetQueryTableSchemaVersion(pTaskInfo, dbFName, TSDB_DB_FNAME_LEN, tbName, TSDB_TABLE_NAME_LEN, &tbInfo.sversion, &tbInfo.tversion, i, &tbGet);
     if (TSDB_CODE_SUCCESS != code || !tbGet) {
       break;
     }

@@ -3364,7 +3364,7 @@ int32_t streamStateClear_rocksdb(SStreamState* pState) {
   return 0;
 }
 void streamStateCurNext_rocksdb(SStreamStateCur* pCur) {
-  if (pCur) {
+  if (pCur && pCur->iter && rocksdb_iter_valid(pCur->iter)) {
     rocksdb_iter_next(pCur->iter);
   }
 }

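The extra conditions added above matter because advancing an invalid rocksdb iterator is not safe; the C API expects rocksdb_iter_valid to be checked before rocksdb_iter_next. A minimal sketch of safe forward iteration with the real rocksdb C API:

#include <rocksdb/c.h>

/* Sketch: only advance an iterator that exists and is positioned on a valid
 * entry, mirroring the streamStateCurNext_rocksdb fix above. */
static void safe_next(rocksdb_iterator_t *iter) {
  if (iter != NULL && rocksdb_iter_valid(iter)) {
    rocksdb_iter_next(iter);
  }
}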
@@ -120,7 +120,7 @@ SStreamState* streamStateOpen(const char* path, void* pTask, int64_t streamId, i
   SStreamTask* pStreamTask = pTask;
   pState->streamId = streamId;
   pState->taskId = taskId;
-  sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId);
+  TAOS_UNUSED(tsnprintf(pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr), "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId));

   code = streamTaskSetDb(pStreamTask->pMeta, pTask, pState->pTdbState->idstr);
   QUERY_CHECK_CODE(code, lino, _end);
@@ -777,7 +777,7 @@ _end:

 int32_t forceRemoveCheckpoint(SStreamFileState* pFileState, int64_t checkpointId) {
   char keyBuf[128] = {0};
-  sprintf(keyBuf, "%s:%" PRId64 "", TASK_KEY, checkpointId);
+  TAOS_UNUSED(tsnprintf(keyBuf, sizeof(keyBuf), "%s:%" PRId64 "", TASK_KEY, checkpointId));
   return streamDefaultDel_rocksdb(pFileState->pFileStore, keyBuf);
 }

@@ -799,14 +799,14 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
     }
     memcpy(buf, val, len);
     buf[len] = 0;
-    maxCheckPointId = atol((char*)buf);
+    maxCheckPointId = taosStr2Int64((char*)buf, NULL, 10);
     taosMemoryFree(val);
   }
   for (int64_t i = maxCheckPointId; i > 0; i--) {
     char    buf[128] = {0};
     void*   val = 0;
     int32_t len = 0;
-    sprintf(buf, "%s:%" PRId64 "", TASK_KEY, i);
+    TAOS_UNUSED(tsnprintf(buf, sizeof(buf), "%s:%" PRId64 "", TASK_KEY, i));
     code = streamDefaultGet_rocksdb(pFileState->pFileStore, buf, &val, &len);
     if (code != 0) {
       return TSDB_CODE_FAILED;
@@ -816,7 +816,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
     taosMemoryFree(val);

     TSKEY ts;
-    ts = atol((char*)buf);
+    ts = taosStr2Int64((char*)buf, NULL, 10);
     if (ts < mark) {
       // statekey winkey.ts < mark
       int32_t tmpRes = forceRemoveCheckpoint(pFileState, i);

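Swapping atol for taosStr2Int64 fixes two problems at once: atol truncates to long, which is 32 bits on some platforms, and it reports neither overflow nor parse failure. A strtoll-based parser handles both and takes an explicit base; taosStr2Int64's exact error handling lives in the util layer, so this sketch shows the equivalent standard-library call:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Sketch: parse a checkpoint id the way taosStr2Int64(buf, NULL, 10) would,
 * but with explicit overflow and no-digits detection. */
static int parseCheckpointId(const char *buf, int64_t *out) {
  char *end = NULL;
  errno = 0;
  long long v = strtoll(buf, &end, 10);
  if (errno == ERANGE || end == buf) return -1;  /* overflow or no digits */
  *out = (int64_t)v;
  return 0;
}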
@@ -95,6 +95,8 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
   bool            accepted = false;
   SSyncRaftEntry* pEntry = NULL;
   bool            resetElect = false;
+  const STraceId* trace = &pRpcMsg->info.traceId;
+  char            tbuf[40] = {0};

   // if already drop replica, do not process
   if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) {
@@ -150,10 +152,10 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
     goto _IGNORE;
   }

-  sTrace("vgId:%d, recv append entries msg. index:%" PRId64 ", term:%" PRId64 ", preLogIndex:%" PRId64
-         ", prevLogTerm:%" PRId64 " commitIndex:%" PRId64 " entryterm:%" PRId64,
-         pMsg->vgId, pMsg->prevLogIndex + 1, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex,
-         pEntry->term);
+  sGTrace("vgId:%d, recv append entries msg. index:%" PRId64 ", term:%" PRId64 ", preLogIndex:%" PRId64
+          ", prevLogTerm:%" PRId64 " commitIndex:%" PRId64 " entryterm:%" PRId64,
+          pMsg->vgId, pMsg->prevLogIndex + 1, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex,
+          pEntry->term);

   if (ths->fsmState == SYNC_FSM_STATE_INCOMPLETE) {
     pReply->fsmState = ths->fsmState;
@@ -179,6 +181,11 @@ _SEND_RESPONSE:
     sTrace("vgId:%d, update commit return index %" PRId64 "", ths->vgId, returnIndex);
   }

+  TRACE_SET_MSGID(&(rpcRsp.info.traceId), tGenIdPI64());
+  trace = &(rpcRsp.info.traceId);
+  sGTrace("vgId:%d, send append reply matchIndex:%" PRId64 " term:%" PRId64 " lastSendIndex:%" PRId64
+          " to dest: 0x%016" PRIx64,
+          ths->vgId, pReply->matchIndex, pReply->term, pReply->lastSendIndex, pReply->destId.addr);
   // ack, i.e. send response
   TAOS_CHECK_RETURN(syncNodeSendMsgById(&pReply->destId, ths, &rpcRsp));

@@ -43,6 +43,8 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
   int32_t                 code = 0;
   SyncAppendEntriesReply* pMsg = (SyncAppendEntriesReply*)pRpcMsg->pCont;
   int32_t                 ret = 0;
+  const STraceId*         trace = &pRpcMsg->info.traceId;
+  char                    tbuf[40] = {0};

   // if already drop replica, do not process
   if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) {
@@ -63,8 +65,8 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
     return TSDB_CODE_SYN_WRONG_TERM;
   }

-  sTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "",
-         pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex);
+  sGTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "",
+          pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex);

   if (pMsg->success) {
     SyncIndex oldMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));

@@ -1026,6 +1026,14 @@ int32_t syncLogReplRecover(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEn
   int32_t   code = 0;
   if (pMgr->restored != false) return TSDB_CODE_SYN_INTERNAL_ERROR;

+  sTrace("vgId:%d, begin to recover sync log repl. peer: dnode:%d (%" PRIx64 "), repl-mgr:[%" PRId64 ", %" PRId64
+         ", %" PRId64 ") restore:%d, buffer: [%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64
+         "), msg: {lastSendIndex:%" PRId64 ", matchIndex:%" PRId64 ", fsmState:%d, success:%d, lastMatchTerm:%" PRId64
+         "}",
+         pNode->vgId, DID(&destId), destId.addr, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pMgr->restored,
+         pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, pMsg->lastSendIndex, pMsg->matchIndex,
+         pMsg->fsmState, pMsg->success, pMsg->lastMatchTerm);
+
   if (pMgr->endIndex == 0) {
     if (pMgr->startIndex != 0) return TSDB_CODE_SYN_INTERNAL_ERROR;
     if (pMgr->matchIndex != 0) return TSDB_CODE_SYN_INTERNAL_ERROR;
@@ -1171,6 +1179,11 @@ int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex inde
   int64_t nowMs = taosGetMonoTimestampMs();
   int32_t code = 0;

+  sTrace("vgId:%d, begin to probe peer:%" PRIx64 " with msg of index:%" PRId64 ". repl-mgr:[%" PRId64 ", %" PRId64
+         ", %" PRId64 "), restored:%d",
+         pNode->vgId, pNode->replicasId[pMgr->peerId].addr, index, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex,
+         pMgr->restored);
+
   if (pMgr->endIndex > pMgr->startIndex &&
       nowMs < pMgr->states[pMgr->startIndex % pMgr->size].timeMs + retryMaxWaitMs) {
     return 0;
@@ -1206,6 +1219,10 @@ int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex inde
 int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
   if (!pMgr->restored) return TSDB_CODE_SYN_INTERNAL_ERROR;

+  sTrace("vgId:%d, begin to attempt replicate log entries from end to match. repl-mgr:[%" PRId64 ", %" PRId64
+         ", %" PRId64 "), restore:%d",
+         pNode->vgId, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pMgr->restored);
+
   SRaftId* pDestId = &pNode->replicasId[pMgr->peerId];
   int32_t  batchSize = TMAX(1, pMgr->size >> (4 + pMgr->retryBackoff));
   int32_t  code = 0;
@@ -1527,11 +1544,12 @@ int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex ind
     goto _err;
   }

+  TRACE_SET_MSGID(&(msgOut.info.traceId), tGenIdPI64());
+  STraceId* trace = &(msgOut.info.traceId);
+  sGTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64,
+          pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr);
   TAOS_CHECK_GOTO(syncNodeSendAppendEntries(pNode, pDestId, &msgOut), &lino, _err);

-  sTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64,
-         pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr);
-
   if (!inBuf) {
     syncEntryDestroy(pEntry);
     pEntry = NULL;

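The TRACE_SET_MSGID and sGTrace pairs introduced across these sync hunks assign one message id per outgoing RPC and then log through the G-variant macros, so every log line for a given append-entries message carries the same trace id and a replication round trip can be followed end to end. A hedged sketch of the idea; traceIdGen is a hypothetical stand-in for tGenIdPI64 and the sync module's trace macros:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of per-message trace ids: stamp once, then include the id in every
 * related log line so sender and receiver logs can be correlated. */
typedef struct { int64_t rootId, msgId; } TraceId;

static int64_t traceIdGen(void) { static int64_t seq = 0; return ++seq; }

static void sendWithTrace(TraceId *t, int vgId, int64_t index) {
  if (t->msgId == 0) t->msgId = traceIdGen();  /* stamp only if unset */
  printf("vgId:%d, msgId:0x%" PRIx64 ", replicate index:%" PRId64 "\n",
         vgId, t->msgId, index);
  /* ... send the message; the receiver logs with the same msgId ... */
}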
@@ -152,8 +152,8 @@ static void syncLogReplStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLe
   for (int32_t i = 0; i < pSyncNode->replicaNum; i++) {
     SSyncLogReplMgr* pMgr = pSyncNode->logReplMgrs[i];
     if (pMgr == NULL) break;
-    len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 " %" PRId64 ", %" PRId64 "]", i, pMgr->restored,
-                     pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex);
+    len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 ", %" PRId64 ", %" PRId64 "]", i, pMgr->restored,
+                     pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex);
     if (i + 1 < pSyncNode->replicaNum) {
       len += tsnprintf(buf + len, bufLen - len, "%s", ", ");
     }
@@ -278,19 +278,19 @@ bool transAsyncPoolIsEmpty(SAsyncPool* pool);
   }                                                                                      \
  } while (0)

-#define ASYNC_CHECK_HANDLE(idMgt, id, exh1)                                              \
-  do {                                                                                   \
-    if (id > 0) {                                                                        \
-      SExHandle* exh2 = transAcquireExHandle(idMgt, id);                                 \
-      if (exh2 == NULL || exh1 != exh2 || (exh2 != NULL && exh2->refId != id)) {         \
-        tError("handle not match, exh1:%p, exh2:%p, refId:%"PRId64"", exh1, exh2, id);   \
-        code = TSDB_CODE_INVALID_MSG;                                                    \
-        goto _return1;                                                                   \
-      }                                                                                  \
-    } else {                                                                             \
-      tError("invalid handle to release");                                               \
-      goto _return2;                                                                     \
-    }                                                                                    \
+#define ASYNC_CHECK_HANDLE(idMgt, id, exh1)                                              \
+  do {                                                                                   \
+    if (id > 0) {                                                                        \
+      SExHandle* exh2 = transAcquireExHandle(idMgt, id);                                 \
+      if (exh2 == NULL || exh1 != exh2 || (exh2 != NULL && exh2->refId != id)) {         \
+        tDebug("handle not match, exh1:%p, exh2:%p, refId:%" PRId64 "", exh1, exh2, id); \
+        code = TSDB_CODE_INVALID_MSG;                                                    \
+        goto _return1;                                                                   \
+      }                                                                                  \
+    } else {                                                                             \
+      tDebug("invalid handle to release");                                               \
+      goto _return2;                                                                     \
+    }                                                                                    \
   } while (0)

 int32_t transInitBuffer(SConnBuffer* buf);

@@ -3090,7 +3090,7 @@ int32_t transReleaseCliHandle(void* handle) {

 static int32_t transInitMsg(void* pInstRef, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx, SCliReq** pCliMsg) {
   int32_t code = 0;
-  TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64());
+  if (pReq->info.traceId.msgId == 0) TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64());

   SCliReq* pCliReq = NULL;
   SReqCtx* pCtx = taosMemoryCalloc(1, sizeof(SReqCtx));

@@ -21,10 +21,12 @@
 #include "tjson.h"
 #include "tutil.h"

-#define LOG_MAX_LINE_SIZE             (10024)
-#define LOG_MAX_LINE_BUFFER_SIZE      (LOG_MAX_LINE_SIZE + 3)
-#define LOG_MAX_LINE_DUMP_SIZE        (1024 * 1024)
-#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128)
+#define LOG_MAX_LINE_SIZE              (10024)
+#define LOG_MAX_LINE_BUFFER_SIZE       (LOG_MAX_LINE_SIZE + 3)
+#define LOG_MAX_STACK_LINE_SIZE        (512)
+#define LOG_MAX_STACK_LINE_BUFFER_SIZE (LOG_MAX_STACK_LINE_SIZE + 3)
+#define LOG_MAX_LINE_DUMP_SIZE         (1024 * 1024)
+#define LOG_MAX_LINE_DUMP_BUFFER_SIZE  (LOG_MAX_LINE_DUMP_SIZE + 128)

 #define LOG_FILE_DAY_LEN 64

@@ -126,7 +128,7 @@ int32_t idxDebugFlag = 131;
 int32_t sndDebugFlag = 131;
 int32_t simDebugFlag = 131;

-int32_t tqClientDebug = 0;
+int32_t tqClientDebugFlag = 131;

 int64_t dbgEmptyW = 0;
 int64_t dbgWN = 0;
@@ -669,16 +671,40 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b
   }
 }

-void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) {
-  if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return;
-
-  char    buffer[LOG_MAX_LINE_BUFFER_SIZE];
+/*
+  use taosPrintLogImpl_useStackBuffer to avoid stack overflow
+*/
+static int8_t taosPrintLogImplUseStackBuffer(const char *flags, int32_t level, int32_t dflag, const char *format,
+                                             va_list args) {
+  char    buffer[LOG_MAX_STACK_LINE_BUFFER_SIZE];
   int32_t len = taosBuildLogHead(buffer, flags);

-  va_list argpointer;
-  va_start(argpointer, format);
-  int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_LINE_BUFFER_SIZE - len, format, argpointer);
-  va_end(argpointer);
+  int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_STACK_LINE_BUFFER_SIZE - len - 1, format, args);
+  if (writeLen > LOG_MAX_STACK_LINE_SIZE) {
+    return 1;
+  }
+
+  buffer[writeLen++] = '\n';
+  buffer[writeLen] = 0;
+
+  taosPrintLogImp(level, dflag, buffer, writeLen);
+
+  if (tsLogFp && level <= DEBUG_INFO) {
+    buffer[writeLen - 1] = 0;
+    (*tsLogFp)(taosGetTimestampMs(), level, buffer + len);
+  }
+  return 0;
+}
+static int8_t taosPrintLogImplUseHeapBuffer(const char *flags, int32_t level, int32_t dflag, const char *format,
+                                            va_list args) {
+  char *buffer = taosMemoryCalloc(1, LOG_MAX_LINE_BUFFER_SIZE + 1);
+  if (buffer == NULL) {
+    return 1;
+  }
+  int32_t len = taosBuildLogHead(buffer, flags);
+
+  int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_LINE_BUFFER_SIZE - len - 1, format, args);

   if (writeLen > LOG_MAX_LINE_SIZE) writeLen = LOG_MAX_LINE_SIZE;
   buffer[writeLen++] = '\n';
@@ -690,6 +716,22 @@ void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *f
     buffer[writeLen - 1] = 0;
     (*tsLogFp)(taosGetTimestampMs(), level, buffer + len);
   }
+  taosMemoryFree(buffer);
+  return 0;
+}
+
+void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) {
+  if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return;
+
+  va_list argpointer, argpointer_copy;
+  va_start(argpointer, format);
+  va_copy(argpointer_copy, argpointer);
+
+  if (taosPrintLogImplUseStackBuffer(flags, level, dflag, format, argpointer) == 0) {
+  } else {
+    TAOS_UNUSED(taosPrintLogImplUseHeapBuffer(flags, level, dflag, format, argpointer_copy));
+  }
+  va_end(argpointer_copy);
+  va_end(argpointer);
 }

 void taosPrintLongString(const char *flags, int32_t level, int32_t dflag, const char *format, ...) {
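The refactor above splits taosPrintLog into a fast path that formats into a small stack buffer and a fallback that allocates from the heap only when the line exceeds LOG_MAX_STACK_LINE_SIZE. The va_copy is essential: a va_list consumed by the first vsnprintf cannot legally be reused for the retry. A condensed, self-contained sketch of the same strategy with illustrative sizes:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the stack-then-heap formatting strategy used by taosPrintLog. */
static void logPrintf(const char *fmt, ...) {
  va_list ap, ap2;
  va_start(ap, fmt);
  va_copy(ap2, ap);            /* keep a fresh copy for the retry path */

  char stackBuf[512];
  int  n = vsnprintf(stackBuf, sizeof(stackBuf), fmt, ap);
  if (n >= 0 && (size_t)n < sizeof(stackBuf)) {
    fputs(stackBuf, stderr);   /* fast path: the line fits on the stack */
  } else if (n > 0) {
    char *heapBuf = malloc((size_t)n + 1);
    if (heapBuf != NULL) {
      vsnprintf(heapBuf, (size_t)n + 1, fmt, ap2);  /* reuse the copied list */
      fputs(heapBuf, stderr);
      free(heapBuf);
    }
  }
  va_end(ap2);
  va_end(ap);
}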
@@ -126,6 +126,13 @@ add_test(
     COMMAND regexTest
 )

+add_executable(logTest "log.cpp")
+target_link_libraries(logTest os util common gtest_main)
+add_test(
+    NAME logTest
+    COMMAND logTest
+)
+
 add_executable(decompressTest "decompressTest.cpp")
 target_link_libraries(decompressTest os util common gtest_main)
 add_test(
@@ -0,0 +1,46 @@
+#include <gtest/gtest.h>
+#include <stdlib.h>
+#include <time.h>
+#include <random>
+#include <tlog.h>
+#include <iostream>
+
+using namespace std;
+
+TEST(log, check_log_refactor) {
+  const char   *logDir = "/tmp";
+  const char   *defaultLogFileNamePrefix = "taoslog";
+  const int32_t maxLogFileNum = 10000;
+  tsAsyncLog = 0;
+  // idxDebugFlag = 143;
+  strcpy(tsLogDir, (char *)logDir);
+  taosInitLog(tsLogDir, 10, false);
+  tsAsyncLog = 0;
+  uDebugFlag = 143;
+
+  std::string str;
+  str.push_back('a');
+
+  for (int i = 0; i < 10000; i += 2) {
+    str.push_back('a');
+    uError("write to file %s", str.c_str());
+  }
+  str.clear();
+  for (int i = 0; i < 10000; i += 2) {
+    str.push_back('a');
+    uDebug("write to file %s", str.c_str());
+  }
+
+  for (int i = 0; i < 10000; i += 2) {
+    str.push_back('a');
+    uInfo("write to file %s", str.c_str());
+  }
+  str.clear();
+
+  for (int i = 0; i < 10000; i += 2) {
+    str.push_back('a');
+    uTrace("write to file %s", str.c_str());
+  }
+  taosCloseLog();
+}
@@ -803,11 +803,14 @@ class TDCom:
         else:
             tdLog.exit(f"getOneRow out of range: row_index={location} row_count={self.query_row}")

-    def killProcessor(self, processorName):
+    def kill_signal_process(self, signal=15, processor_name: str = "taosd"):
         if (platform.system().lower() == 'windows'):
-            os.system("TASKKILL /F /IM %s.exe"%processorName)
+            os.system(f"TASKKILL /F /IM {processor_name}.exe")
         else:
-            os.system("unset LD_PRELOAD; pkill %s " % processorName)
+            command = f"unset LD_PRELOAD; sudo pkill -f -{signal} '{processor_name}'"
+            tdLog.debug(f"command: {command}")
+            os.system(command)
+

     def gen_tag_col_str(self, gen_type, data_type, count):
         """
@ -0,0 +1,337 @@
|
|||
import time
|
||||
import os
|
||||
import threading
|
||||
import datetime
|
||||
from taos.tmq import Consumer
|
||||
from taos.error import TmqError
|
||||
|
||||
from frame.log import tdLog
|
||||
from frame.cases import tdCases
|
||||
from frame.sql import tdSql
|
||||
from frame.caseBase import *
|
||||
from frame import etool
|
||||
from frame.common import tdCom
|
||||
|
||||
|
||||
class TaosConsumer:
|
||||
# TODO: Move this class to tq.py and remove it from here
|
||||
def __init__(self):
|
||||
self.sub_once = True
|
||||
self.once_consumer_rows = 0
|
||||
self.sub_log = False
|
||||
self.safe_counter = ThreadSafeCounter()
|
||||
|
||||
def log_info(self, message):
|
||||
if self.sub_log:
|
||||
tdLog.info(message)
|
||||
|
||||
#TODO merge sub_consumer and sub_consumer_once
|
||||
def sub_consumer(self, consumer, group_id, topic_name):
|
||||
group_id = int(group_id)
|
||||
if group_id < 100:
|
||||
try:
|
||||
consumer.subscribe([topic_name])
|
||||
except TmqError:
|
||||
tdLog.exit(f"subscribe error")
|
||||
nrows = 0
|
||||
while True:
|
||||
start = datetime.datetime.now()
|
||||
tdLog.info(f"time:{start},consumer:{group_id}, start to consume")
|
||||
message = consumer.poll(timeout=10.0)
|
||||
|
||||
if message:
|
||||
message_offset = message.offset()
|
||||
# topic = message.topic()
|
||||
# database = message.database()
|
||||
|
||||
for block in message:
|
||||
addrows = block.nrows()
|
||||
nrows += block.nrows()
|
||||
ncols = block.ncols()
|
||||
# values = block.fetchall
|
||||
end = datetime.datetime.now()
|
||||
elapsed_time = end - start
|
||||
tdLog.info(
|
||||
f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},"
|
||||
f"consumer_nrows:{nrows},consumer_addrows:{addrows},"
|
||||
f"consumer_ncols:{ncols},offset:{id}"
|
||||
)
|
||||
consumer.commit()
|
||||
tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}")
|
||||
# consumer.unsubscribe()
|
||||
# consumer.close()
|
||||
|
||||
def set_conf(
|
||||
self,
|
||||
td_connect_ip="localhost",
|
||||
group_id=1,
|
||||
client_id="test_consumer_py",
|
||||
enable_auto_commit="false",
|
||||
auto_commit_interval_ms="1000",
|
||||
auto_offset_reset="earliest",
|
||||
msg_with_table_name="true",
|
||||
session_timeout_ms=10000,
|
||||
max_poll_interval_ms=180000,
|
||||
experimental_snapshot_enable="false",
|
||||
):
|
||||
conf = {
|
||||
# auth options
|
||||
# consume options
|
||||
"td.connect.ip": f"{td_connect_ip}",
|
||||
"group.id": f"{group_id}",
|
||||
"client.id": f"{client_id}",
|
||||
"enable.auto.commit": f"{enable_auto_commit}",
|
||||
"auto.commit.interval.ms": f"{auto_commit_interval_ms}",
|
||||
"auto.offset.reset": f"{auto_offset_reset}",
|
||||
"msg.with.table.name": f"{msg_with_table_name}",
|
||||
"session.timeout.ms": f"{session_timeout_ms}",
|
||||
"max.poll.interval.ms": f"{max_poll_interval_ms}",
|
||||
"experimental.snapshot.enable": f"{experimental_snapshot_enable}",
|
||||
}
|
||||
return conf
|
||||
|
||||
def sub_consumer_once(self, consumer, group_id, topic_name, stop_event):
|
||||
group_id = int(group_id)
|
||||
if group_id < 100:
|
||||
consumer.subscribe([topic_name])
|
||||
nrows = 0
|
||||
consumer_nrows = 0
|
||||
count = 0
|
||||
while not stop_event.is_set():
|
||||
start = datetime.datetime.now()
|
||||
# self.log_info(
|
||||
# f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}"
|
||||
# )
|
||||
message = None
|
||||
if consumer_nrows < self.once_consumer_rows:
|
||||
message = consumer.poll(timeout=1.0)
|
||||
elif consumer_nrows >= self.once_consumer_rows:
|
||||
if count == 0:
|
||||
# when break the loop, the consumer will be closed, so we need to continue to keep consumer alive util the stop_event is set
|
||||
tdLog.info("stop consumer when consumer all rows")
|
||||
count += 1
|
||||
# tdLog.info("stop consumer when consumer all rows")
|
||||
else:
|
||||
continue
|
||||
if message:
|
||||
message_offset = message.offset()
|
||||
# topic = message.topic()
|
||||
# database = message.database()
|
||||
for block in message:
|
||||
addrows = block.nrows()
|
||||
nrows += block.nrows()
|
||||
self.safe_counter.rows(block.nrows())
|
||||
ncols = block.ncols()
|
||||
# values = block.fetchall
|
||||
end = datetime.datetime.now()
|
||||
elapsed_time = end - start
|
||||
|
||||
# self.log_info(
|
||||
# f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{message_offset}"
|
||||
# )
|
||||
self.log_info(
|
||||
f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter},counter.get():{self.safe_counter.get()}"
|
||||
)
|
||||
|
||||
# consumer.commit()
|
||||
consumer_nrows = nrows
|
||||
|
||||
tdLog.info("Consumer subscription thread is stopping.")
|
||||
|
||||
def taosc_consumer(self, conf: list, topic_name: str, stop_event: threading.Event):
|
||||
try:
|
||||
tdLog.info(conf)
|
||||
tdLog.info("start to config consumer")
|
||||
consumer = Consumer(conf)
|
||||
tdLog.info("start to subscribe")
|
||||
group_id = int(conf["group.id"])
|
||||
tdLog.info(f"{consumer},{group_id}")
|
||||
if self.sub_once:
|
||||
self.sub_consumer_once(consumer, group_id, topic_name, stop_event)
|
||||
else:
|
||||
self.sub_consumer(consumer, group_id, topic_name)
|
||||
# only consumer once
|
||||
except Exception as e:
|
||||
tdLog.exit(f"{e}")
|
||||
|
||||
# consumer.close()
|
||||
|
||||
|
||||
class ThreadSafeCounter:
|
||||
def __init__(self):
|
||||
self.counter = 0
|
||||
self.lock = threading.Lock()
|
||||
|
||||
def rows(self, rows):
|
||||
with self.lock:
|
||||
self.counter += rows
|
||||
|
||||
def get(self):
|
||||
with self.lock:
|
||||
return self.counter
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
# updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def __init__(self):
|
||||
# db parameter
|
||||
self.table_number = 1000
|
||||
self.rows_per_table = 1000
|
||||
# consumer parameter
|
||||
self.consumer_groups_num = 2
|
||||
self.session_timeout_ms = 180000
|
||||
self.max_poll_interval_ms = 180000
|
||||
# case consumer parameter
|
||||
self.consumer_rows_per_thread = self.table_number * self.rows_per_table
|
||||
self.consumer_all_rows = (
|
||||
self.consumer_rows_per_thread * self.consumer_groups_num
|
||||
)
|
||||
self.topic_name = "select_d1"
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.consumer_instance = TaosConsumer()
|
||||
# tdSql.init(conn.cursor(), logSql) # output sql.txt file
|
||||
|
||||
def caseDescription(self):
|
||||
"""
|
||||
drop_lost_consmuers<hrchen>:
|
||||
1. verifying that the boundary and valid values of session_timeout_ms are in effect
|
||||
2. verifying that the boundary and valid values of max_poll_interval_ms are in effect
|
||||
3. verifying that consumer will be closed when the session_timeout_ms and max_poll_interval_ms is expired
|
||||
"""
|
||||
return
|
||||
|
||||
def check_consumer(self, count, rows, stop_event=None):
|
||||
time.sleep(count)
|
||||
try:
|
||||
tdLog.info(
|
||||
f"wait timeout count:{count} and check consumer status whether is closed"
|
||||
)
|
||||
for _ in range(2):
|
||||
tdSql.query("show consumers")
|
||||
anser_rows = tdSql.getRows()
|
||||
if anser_rows == rows:
|
||||
break
|
||||
time.sleep(1)
|
||||
tdLog.info(
|
||||
f"wait for {count} seconds to check that consumers number is {anser_rows}"
|
||||
)
|
||||
if anser_rows != rows:
|
||||
if stop_event:
|
||||
stop_event.set()
|
||||
tdLog.exit(f"consumer number is {anser_rows } but not expected {rows}")
|
||||
except Exception as e:
|
||||
tdLog.exit(f"{e},check consumer error")
|
||||
|
||||
def drop_session_timeout_consmuers(self):
|
||||
tdSql.execute(f"drop topic if exists {self.topic_name};")
|
||||
tdSql.execute("use db_sub")
|
||||
tdSql.execute(f"create topic {self.topic_name} as select * from db_sub.meters;")
|
||||
|
||||
# start consumer and config some parameters
|
||||
os.system(
|
||||
f"nohup python3 ./tmq/per_consumer.py -c {self.consumer_groups_num} -s {self.session_timeout_ms} -p {self.max_poll_interval_ms} -t {self.topic_name} > consumer.log &"
|
||||
)
|
||||
# wait 5s for consuming data
|
||||
time.sleep(5)
|
||||
# kill consumer to simulate session_timeout_ms
|
||||
tdLog.info("kill per_consumer.py")
|
||||
tdCom.kill_signal_process(
|
||||
signal=9, processor_name=r"python3\s*./tmq/per_consumer.py"
|
||||
)
|
||||
self.check_consumer(int(self.session_timeout_ms / 1000), 0)
|
||||
tdSql.execute(f"drop topic if exists {self.topic_name};")
|
||||
os.system("rm -rf consumer.log")
|
||||
|
||||
def drop_max_poll_timeout_consumers(self):
|
||||
tdSql.execute(f"drop topic if exists {self.topic_name};")
|
||||
tdSql.execute("use db_sub")
|
||||
tdSql.execute(f"create topic {self.topic_name} as select * from db_sub.meters;")
|
||||
|
||||
threads = []
|
||||
self.safe_counter = ThreadSafeCounter()
|
||||
self.consumer_instance.safe_counter = self.safe_counter
|
||||
stop_event = threading.Event()
|
||||
self.consumer_instance.once_consumer_rows = self.consumer_rows_per_thread
|
||||
tdLog.info(f"consumer_rows:{self.consumer_instance.once_consumer_rows}")
|
||||
self.consumer_instance.sub_once = True
|
||||
for group_id in range(self.consumer_groups_num):
|
||||
conf = self.consumer_instance.set_conf(
|
||||
group_id=group_id,
|
||||
session_timeout_ms=self.session_timeout_ms,
|
||||
max_poll_interval_ms=self.max_poll_interval_ms,
|
||||
)
|
||||
threads.append(
|
||||
threading.Thread(
|
||||
target=self.consumer_instance.taosc_consumer,
|
||||
args=(conf, self.topic_name, stop_event),
|
||||
)
|
||||
)
|
||||
for tr in threads:
|
||||
tr.start()
|
||||
|
||||
while True:
|
||||
if self.safe_counter.counter < self.consumer_all_rows:
|
||||
# control print log frequency
|
||||
time.sleep(1)
|
||||
tdLog.info(
|
||||
f"consumer_all_rows:{self.consumer_all_rows},counter.get():{self.safe_counter.counter}"
|
||||
)
|
||||
elif self.safe_counter.counter == self.consumer_all_rows:
|
||||
# adding 5s is for heartbeat check
|
||||
self.check_consumer(int(self.max_poll_interval_ms / 1000) + 5, 0, stop_event)
|
||||
stop_event.set()
|
||||
break
|
||||
|
||||
time.sleep(1)
|
||||
tdSql.execute(f"drop topic if exists {self.topic_name};")
|
||||
|
||||
def case_session_timeout(self):
|
||||
"""
|
||||
TEST CASE: verifying that the boundary and valid values of session_timeout_ms are in effect
|
||||
"""
|
||||
|
||||
tdLog.info("start to test session_timeout_ms=12s")
|
||||
# test session_timeout_ms=12s
|
||||
self.session_timeout_ms = 12000
|
||||
self.max_poll_interval_ms = 180000
|
||||
# self.set_session_timeout = int(self.session_timeout_ms / 1000)
|
||||
self.drop_session_timeout_consumers()
|
||||
tdLog.info("stop to test session_timeout_ms=12s and done ")
|
||||
|
||||
def case_max_poll_timeout(self):
|
||||
"""
|
||||
TEST CASE: verifying that the boundary and valid values of max_poll_interval_ms are in effect
|
||||
"""
|
||||
tdLog.info("start to test max_poll_interval_ms=20s")
|
||||
# test max_poll_interval_ms=20s
|
||||
self.session_timeout_ms = 300000
|
||||
self.max_poll_interval_ms = 20000
|
||||
self.drop_max_poll_timeout_consumers()
|
||||
tdLog.info("stop to test max_poll_interval_ms=20s and done ")
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Run the test cases for session timeout and max poll timeout.
|
||||
"""
|
||||
vgroups = 4
|
||||
etool.benchMark(
|
||||
command=f"-d db_sub -t {self.table_number} -n {self.rows_per_table} -v {vgroups} -a {self.replicaVar} -y"
|
||||
)
|
||||
# test case start here
|
||||
self.topic_name = "select_d1"
|
||||
# self.case_session_timeout()
|
||||
self.case_max_poll_timeout()
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
Closes the taos connection and logs the success message.
|
||||
"""
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,182 @@
|
|||
import os
|
||||
import taos
|
||||
import sys
|
||||
from datetime import datetime
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from frame.log import tdLog
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
from taos.tmq import Consumer
|
||||
import click
|
||||
|
||||
# TODO
|
||||
# 1. use the common tmq class to replace these functions; drop_lost_consumers.py contains the same functions
|
||||
|
||||
try:
|
||||
conn = taos.connect()
|
||||
except Exception as e:
|
||||
tdLog.info(str(e))
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.option(
|
||||
"-c",
|
||||
"--consumer-groups-num",
|
||||
"consumer_group_num",
|
||||
default=1,
|
||||
help="Number of consumer group.",
|
||||
)
|
||||
@click.option(
|
||||
"-s",
|
||||
"--session-timeout-ms",
|
||||
"session_timeout_ms",
|
||||
default=60000,
|
||||
help="session timeout:ms",
|
||||
)
|
||||
@click.option(
|
||||
"-p",
|
||||
"--max-poll-interval-ms",
|
||||
"max_poll_interval_ms",
|
||||
default=180000,
|
||||
help="max poll interval timeout:ms",
|
||||
)
|
||||
@click.option(
|
||||
"-t",
|
||||
"--topic-name",
|
||||
"topic_name",
|
||||
default="select_d1",
|
||||
help="topic name",
|
||||
)
|
||||
def test_timeout_sub(consumer_group_num, session_timeout_ms, max_poll_interval_ms, topic_name):
|
||||
threads = []
|
||||
tdLog.info(f"consumer_group_num:{consumer_group_num}, session_timeout_ms:{session_timeout_ms}, max_poll_interval_ms:{max_poll_interval_ms}")
|
||||
for id in range(consumer_group_num):
|
||||
conf = set_conf(
|
||||
group_id=id,
|
||||
session_timeout_ms=session_timeout_ms,
|
||||
max_poll_interval_ms=max_poll_interval_ms,
|
||||
)
|
||||
tdLog.info(f"conf:{conf}")
|
||||
threads.append(threading.Thread(target=taosc_consumer, args=(conf,topic_name)))
|
||||
for tr in threads:
|
||||
tr.start()
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
|
||||
|
||||
def sub_consumer(consumer, group_id, topic_name):
|
||||
group_id = int(group_id)
|
||||
if group_id < 100:
|
||||
try:
|
||||
consumer.subscribe([topic_name])
|
||||
except Exception as e:
|
||||
tdLog.info(f"subscribe error")
|
||||
exit(1)
|
||||
|
||||
nrows = 0
|
||||
while True:
|
||||
start = datetime.now()
|
||||
tdLog.info(f"time:{start},consumer:{group_id}, start to consume")
|
||||
message = consumer.poll(timeout=10.0)
|
||||
|
||||
if message:
|
||||
id = message.offset()
|
||||
topic = message.topic()
|
||||
database = message.database()
|
||||
|
||||
for block in message:
|
||||
addrows = block.nrows()
|
||||
nrows += block.nrows()
|
||||
ncols = block.ncols()
|
||||
values = block.fetchall()
|
||||
end = datetime.now()
|
||||
elapsed_time = end - start
|
||||
tdLog.info(
|
||||
f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}"
|
||||
)
|
||||
consumer.commit()
|
||||
tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}")
|
||||
# consumer.unsubscribe()
|
||||
# consumer.close()
|
||||
|
||||
|
||||
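# Like sub_consumer, but stops polling once nrows reaches the hard-coded
# 1,000,000-row ceiling, so max.poll.interval.ms can expire while the
# consumer process stays alive.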
def sub_consumer_once(consumer, group_id, topic_name):
|
||||
group_id = int(group_id)
|
||||
if group_id < 100:
|
||||
consumer.subscribe([topic_name])
|
||||
nrows = 0
|
||||
consumer_nrows = 0
|
||||
while True:
|
||||
start = datetime.now()
|
||||
tdLog.info(f"time:{start},consumer:{group_id}, start to consume")
|
||||
# start = datetime.now()
|
||||
# tdLog.info(f"time:{start},consumer:{group_id}, start to consume")
|
||||
tdLog.info(f"consumer_nrows:{consumer_nrows}")
|
||||
if consumer_nrows < 1000000:
|
||||
message = consumer.poll(timeout=10.0)
|
||||
else:
|
||||
tdLog.info(" stop consumer when consumer all rows")
|
||||
|
||||
if message:
|
||||
id = message.offset()
|
||||
topic = message.topic()
|
||||
database = message.database()
|
||||
|
||||
for block in message:
|
||||
addrows = block.nrows()
|
||||
nrows += block.nrows()
|
||||
ncols = block.ncols()
|
||||
values = block.fetchall()
|
||||
end = datetime.now()
|
||||
elapsed_time = end - start
|
||||
# tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}")
|
||||
consumer.commit()
|
||||
# tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}")
|
||||
consumer_nrows = nrows
|
||||
# consumer.unsubscribe()
|
||||
# consumer.close()
|
||||
# break
|
||||
|
||||
|
||||
def set_conf(
|
||||
td_connect_ip="localhost",
|
||||
group_id=1,
|
||||
client_id="test_consumer_py",
|
||||
enable_auto_commit="false",
|
||||
auto_commit_interval_ms="1000",
|
||||
auto_offset_reset="earliest",
|
||||
msg_with_table_name="true",
|
||||
session_timeout_ms=10000,
|
||||
max_poll_interval_ms=20000,
|
||||
experimental_snapshot_enable="false",
|
||||
):
|
||||
conf = {
|
||||
# auth options
|
||||
# consume options
|
||||
"td.connect.ip": f"{td_connect_ip}",
|
||||
"group.id": f"{group_id}",
|
||||
"client.id": f"{client_id}",
|
||||
"enable.auto.commit": f"{enable_auto_commit}",
|
||||
"auto.commit.interval.ms": f"{auto_commit_interval_ms}",
|
||||
"auto.offset.reset": f"{auto_offset_reset}",
|
||||
"msg.with.table.name": f"{msg_with_table_name}",
|
||||
"session.timeout.ms": f"{session_timeout_ms}",
|
||||
"max.poll.interval.ms": f"{max_poll_interval_ms}",
|
||||
"experimental.snapshot.enable": f"{experimental_snapshot_enable}",
|
||||
}
|
||||
return conf
|
||||
|
||||
|
||||
def taosc_consumer(conf,topic_name):
|
||||
consumer = Consumer(conf)
|
||||
group_id = int(conf["group.id"])
|
||||
tdLog.info(f"{consumer},{group_id}")
|
||||
try:
|
||||
sub_consumer_once(consumer, group_id, topic_name)
|
||||
except Exception as e:
|
||||
tdLog.info(str(e))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
test_timeout_sub()
|
|
@ -47,7 +47,7 @@
|
|||
,,y,army,./pytest.sh python3 ./test.py -f query/window/base.py
|
||||
,,y,army,./pytest.sh python3 ./test.py -f query/sys/tb_perf_queries_exist_test.py -N 3
|
||||
,,y,army,./pytest.sh python3 ./test.py -f query/test_having.py
|
||||
|
||||
,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py
|
||||
#
|
||||
# system test
|
||||
#
|
||||
|
|
|
@ -57,7 +57,7 @@ class TDTestCase:
|
|||
tdSql.checkData(0, 2, 0)
|
||||
|
||||
tdSql.query("show dnode 1 variables like '%debugFlag'")
|
||||
tdSql.checkRows(24)
|
||||
tdSql.checkRows(25)
|
||||
|
||||
tdSql.query("show dnode 1 variables like '____debugFlag'")
|
||||
tdSql.checkRows(2)
|
||||
|
|
|
@ -17,6 +17,8 @@ sys.path.append("./7-tmq")
|
|||
from tmqCommon import *
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
updatecfgDict = {'sDebugFlag':143}
|
||||
def __init__(self):
|
||||
self.vgroups = 1
|
||||
self.ctbNum = 10
|
||||
|
|
|
@ -131,14 +131,14 @@ class TDTestCase:
|
|||
tdSql.checkData(0, 2, 1)
|
||||
|
||||
tdSql.query("select * from ct3 order by c1 desc")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkRows(5)
|
||||
tdSql.checkData(0, 1, 51)
|
||||
tdSql.checkData(0, 4, 940)
|
||||
tdSql.checkData(1, 1, 23)
|
||||
tdSql.checkData(1, 4, None)
|
||||
|
||||
tdSql.query("select * from st1 order by ts")
|
||||
tdSql.checkRows(8)
|
||||
tdSql.checkRows(14)
|
||||
tdSql.checkData(0, 1, 1)
|
||||
tdSql.checkData(1, 1, 3)
|
||||
tdSql.checkData(4, 1, 4)
|
||||
|
@ -180,7 +180,7 @@ class TDTestCase:
|
|||
tdSql.checkData(6, 8, None)
|
||||
|
||||
tdSql.query("select * from ct1")
|
||||
tdSql.checkRows(4)
|
||||
tdSql.checkRows(7)
|
||||
|
||||
tdSql.query("select * from ct2")
|
||||
tdSql.checkRows(0)
|
||||
|
|
|
@ -218,3 +218,75 @@ ELSE()
|
|||
)
|
||||
ENDIF()
|
||||
ENDIF()
|
||||
|
||||
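# Build taoskeeper from tools/keeper as an external Go project; the binary is
# staged into ${CMAKE_BINARY_DIR}/build/bin and the default config into
# ${CMAKE_BINARY_DIR}/test/cfg.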
IF(TD_BUILD_KEEPER)
|
||||
MESSAGE("")
|
||||
MESSAGE("${Green} build taoskeeper, current platform is ${PLATFORM_ARCH_STR} ${ColourReset}")
|
||||
|
||||
EXECUTE_PROCESS(
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/keeper
|
||||
COMMAND git rev-parse HEAD
|
||||
OUTPUT_VARIABLE taoskeeper_commit_sha1
|
||||
)
|
||||
|
||||
IF("${taoskeeper_commit_sha1}" STREQUAL "")
|
||||
SET(taoskeeper_commit_sha1 "unknown")
|
||||
ELSE()
|
||||
STRING(STRIP "${taoskeeper_commit_sha1}" taoskeeper_commit_sha1)
|
||||
ENDIF()
|
||||
|
||||
SET(taos_version ${TD_VER_NUMBER})
|
||||
MESSAGE("${Green} taoskeeper will use ${taos_version} and commit ${taoskeeper_commit_sha1} as version ${ColourReset}")
|
||||
MESSAGE(" current source dir is ${CMAKE_CURRENT_SOURCE_DIR}")
|
||||
|
||||
IF(TD_WINDOWS)
|
||||
MESSAGE("Building taoskeeper on Windows")
|
||||
INCLUDE(ExternalProject)
|
||||
ExternalProject_Add(taoskeeper
|
||||
PREFIX "taoskeeper"
|
||||
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper
|
||||
BUILD_ALWAYS off
|
||||
BUILD_IN_SOURCE 1
|
||||
CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config"
|
||||
PATCH_COMMAND
|
||||
COMMAND git clean -f -d
|
||||
BUILD_COMMAND
|
||||
COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'"
|
||||
INSTALL_COMMAND
|
||||
COMMAND cmake -E echo "Comparessing taoskeeper.exe"
|
||||
COMMAND cmake -E time upx taoskeeper.exe
|
||||
COMMAND cmake -E echo "Copy taoskeeper.exe"
|
||||
COMMAND cmake -E copy taoskeeper.exe ${CMAKE_BINARY_DIR}/build/bin/taoskeeper.exe
|
||||
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
|
||||
COMMAND cmake -E echo "Copy taoskeeper.toml"
|
||||
COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/
|
||||
)
|
||||
ELSE()
|
||||
IF(TD_DARWIN)
|
||||
MESSAGE("Building taoskeeper on macOS")
|
||||
ELSE()
|
||||
MESSAGE("Building taoskeeper on Linux")
|
||||
ENDIF()
|
||||
|
||||
INCLUDE(ExternalProject)
|
||||
ExternalProject_Add(taoskeeper
|
||||
PREFIX "taoskeeper"
|
||||
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper
|
||||
BUILD_ALWAYS off
|
||||
BUILD_IN_SOURCE 1
|
||||
CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config"
|
||||
PATCH_COMMAND
|
||||
COMMAND git clean -f -d
|
||||
BUILD_COMMAND
|
||||
COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'"
|
||||
INSTALL_COMMAND
|
||||
COMMAND cmake -E echo "Copy taoskeeper"
|
||||
COMMAND cmake -E copy taoskeeper ${CMAKE_BINARY_DIR}/build/bin
|
||||
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
|
||||
COMMAND cmake -E echo "Copy taoskeeper.toml"
|
||||
COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/
|
||||
COMMAND cmake -E echo "Copy taoskeeper.service"
|
||||
COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/
|
||||
)
|
||||
ENDIF()
|
||||
ENDIF()
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
!taoskeeper
|
|
@ -0,0 +1,22 @@
|
|||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
*.html
|
||||
*.data
|
||||
|
||||
# Dependency directories (remove the comment below to include it)
|
||||
vendor
|
||||
/debug/
|
||||
/.idea/
|
||||
/taoskeeper
|
||||
/test_data
|
||||
/.vscode
|
|
@ -0,0 +1,10 @@
|
|||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Conventional Changelog](https://www.conventionalcommits.org/en/v1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## Footnote
|
||||
|
||||
This changelog is automatically generated.
|
|
@ -0,0 +1,16 @@
|
|||
FROM golang:1.18.6-alpine as builder
|
||||
LABEL maintainer = "Linhe Huo <linhe.huo@gmail.com>"
|
||||
|
||||
WORKDIR /usr/src/taoskeeper
|
||||
COPY ./ /usr/src/taoskeeper/
|
||||
ENV GO111MODULE=on \
|
||||
GOPROXY=https://goproxy.cn,direct
|
||||
RUN go mod tidy && go build
|
||||
|
||||
FROM alpine:3
|
||||
RUN mkdir -p /etc/taos
|
||||
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
|
||||
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
|
||||
RUN chmod u+rw /etc/taos/taoskeeper.toml
|
||||
EXPOSE 6043
|
||||
CMD ["taoskeeper"]
|
|
@ -0,0 +1,24 @@
|
|||
FROM golang:1.18.6-alpine as builder
|
||||
LABEL maintainer = "TDengine"
|
||||
|
||||
ARG latestv
|
||||
ARG gitinfo
|
||||
ARG buildinfo
|
||||
|
||||
RUN apk --no-cache add upx && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /usr/src/taoskeeper
|
||||
COPY ./ /usr/src/taoskeeper/
|
||||
ENV GO111MODULE=on \
|
||||
GOPROXY=https://goproxy.cn,direct
|
||||
|
||||
RUN echo "$latestv $gitinfo $buildinfo"
|
||||
RUN go mod tidy && go build -ldflags="-s -w -X 'github.com/taosdata/taoskeeper/version.Version=${latestv}' -X 'github.com/taosdata/taoskeeper/version.Gitinfo=${gitinfo}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${buildinfo}'" -o taoskeeper . && upx -9 taoskeeper
|
||||
FROM alpine:3
|
||||
RUN mkdir -p /etc/taos
|
||||
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
|
||||
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
|
||||
RUN chmod u+rw /etc/taos/taoskeeper.toml
|
||||
EXPOSE 6043
|
||||
CMD ["taoskeeper"]
|
|
@ -0,0 +1,267 @@
|
|||
# TaosKeeper
|
||||
|
||||
taosKeeper is an exporter for TDengine monitoring metrics: with just a few configuration items you can obtain the running status of TDengine. The enterprise edition of taosKeeper also supports multiple collectors, making it easy to visualize the monitoring data.
|
||||
|
||||
taosKeeper uses the TDengine RESTful API, so it can be used without installing the TDengine client.
|
||||
|
||||
## Build
|
||||
|
||||
### Get the source code
|
||||
|
||||
Clone the source code from GitHub:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/taosdata/TDengine
|
||||
cd TDengine/tools/keeper
|
||||
```
|
||||
|
||||
### Compile
|
||||
|
||||
taosKeeper is written in `GO`; set up a `GO` development environment before building.
|
||||
|
||||
```sh
|
||||
go mod tidy
|
||||
go build
|
||||
```
|
||||
|
||||
## Install
|
||||
|
||||
If you built the project yourself, simply copy the `taoskeeper` binary into your `PATH`.
|
||||
|
||||
```sh
|
||||
sudo install taoskeeper /usr/bin/
|
||||
```
|
||||
|
||||
## Start
|
||||
|
||||
Before starting, complete the following configuration:
|
||||
Configure the TDengine connection parameters, the metric prefix, and other options in `/etc/taos/taoskeeper.toml`.
|
||||
|
||||
```toml
|
||||
# whether to enable debug for the gin framework
|
||||
debug = false
|
||||
|
||||
# service listen port, default 6043
|
||||
port = 6043
|
||||
|
||||
# log level: panic, error, info, debug, trace, etc.
|
||||
loglevel = "info"
|
||||
|
||||
# size of the goroutine pool used by the program
|
||||
gopoolsize = 50000
|
||||
|
||||
# polling interval for querying TDengine monitoring data
|
||||
RotationInterval = "15s"
|
||||
|
||||
[tdengine]
|
||||
host = "127.0.0.1"
|
||||
port = 6041
|
||||
username = "root"
|
||||
password = "taosdata"
|
||||
|
||||
# taosAdapter instances to be monitored
|
||||
[taosAdapter]
|
||||
address = ["127.0.0.1:6041"]
|
||||
|
||||
[metrics]
|
||||
# prefix for monitoring metric names
|
||||
prefix = "taos"
|
||||
|
||||
# database for storing monitoring data
|
||||
database = "log"
|
||||
|
||||
# ordinary (non-super) tables to be monitored
|
||||
tables = []
|
||||
|
||||
[environment]
|
||||
# whether taosKeeper runs in a container; affects taosKeeper's own monitoring data
|
||||
incgroup = false
|
||||
```
|
||||
|
||||
Now start the service:
|
||||
|
||||
```sh
|
||||
taoskeeper
|
||||
```
|
||||
|
||||
If you use `systemd`, copy `taoskeeper.service` to `/lib/systemd/system/` and start the service.
|
||||
|
||||
```sh
|
||||
sudo cp taoskeeper.service /lib/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start taoskeeper
|
||||
```
|
||||
|
||||
To have taosKeeper start automatically at boot:
|
||||
|
||||
```sh
|
||||
sudo systemctl enable taoskeeper
|
||||
```
|
||||
|
||||
If you use `systemd`, you can complete the installation with the following commands.
|
||||
|
||||
```sh
|
||||
go mod tidy
|
||||
go build
|
||||
sudo install taoskeeper /usr/bin/
|
||||
sudo cp taoskeeper.service /lib/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start taoskeeper
|
||||
sudo systemctl enable taoskeeper
|
||||
```
|
||||
|
||||
## Docker
|
||||
|
||||
The following shows how to build taosKeeper in Docker:
|
||||
|
||||
Before building, set the appropriate parameters in `./config/taoskeeper.toml` and edit the Dockerfile. An example follows.
|
||||
|
||||
```dockerfile
|
||||
FROM golang:1.18.6-alpine as builder
|
||||
|
||||
WORKDIR /usr/src/taoskeeper
|
||||
COPY ./ /usr/src/taoskeeper/
|
||||
ENV GO111MODULE=on \
|
||||
GOPROXY=https://goproxy.cn,direct
|
||||
RUN go mod tidy && go build
|
||||
|
||||
FROM alpine:3
|
||||
RUN mkdir -p /etc/taos
|
||||
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
|
||||
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
|
||||
EXPOSE 6043
|
||||
CMD ["taoskeeper"]
|
||||
```
|
||||
|
||||
If you already have a taosKeeper executable, you can build the image as follows after configuring `taoskeeper.toml`:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu:18.04
|
||||
RUN mkdir -p /etc/taos
|
||||
COPY ./taoskeeper /usr/bin/
|
||||
COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
|
||||
EXPOSE 6043
|
||||
CMD ["taoskeeper"]
|
||||
```
|
||||
|
||||
## Usage (**Enterprise Edition**)
|
||||
|
||||
### Prometheus (by scrape)
|
||||
|
||||
taosKeeper can provide monitoring metrics to Prometheus just like `node-exporter`.\
|
||||
Add the following to `/etc/prometheus/prometheus.yml`:
|
||||
|
||||
```yml
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: "taoskeeper"
|
||||
static_configs:
|
||||
- targets: ["taoskeeper:6043"]
|
||||
```
|
||||
|
||||
Now PromQL queries will return results; for example, to view the disk usage percentage of specific hosts (filtered by an FQDN regex match expression):
|
||||
|
||||
```promql
|
||||
taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
|
||||
```
|
||||
|
||||
You can use `docker-compose` to test the whole pipeline.
|
||||
Example `docker-compose.yml`:
|
||||
|
||||
```yml
|
||||
version: "3.7"
|
||||
|
||||
services:
|
||||
tdengine:
|
||||
image: tdengine/tdengine
|
||||
environment:
|
||||
TAOS_FQDN: tdengine
|
||||
volumes:
|
||||
- taosdata:/var/lib/taos
|
||||
taoskeeper:
|
||||
build: ./
|
||||
depends_on:
|
||||
- tdengine
|
||||
environment:
|
||||
TDENGINE_HOST: tdengine
|
||||
TDENGINE_PORT: 6041
|
||||
volumes:
|
||||
- ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
|
||||
ports:
|
||||
- 6043:6043
|
||||
prometheus:
|
||||
image: prom/prometheus
|
||||
volumes:
|
||||
- ./prometheus/:/etc/prometheus/
|
||||
ports:
|
||||
- 9090:9090
|
||||
volumes:
|
||||
taosdata:
|
||||
```
|
||||
|
||||
Start:
|
||||
|
||||
```sh
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
Now visit <http://localhost:9090> to query the results. See the [simple dashboard](https://grafana.com/grafana/dashboards/15164) for a quick-start example of monitoring TDengine with TaosKeeper + Prometheus + Grafana.
|
||||
|
||||
### Telegraf
|
||||
|
||||
If you use telegraf to collect metrics, just add the following to its configuration:
|
||||
|
||||
```toml
|
||||
[[inputs.prometheus]]
|
||||
## An array of urls to scrape metrics from.
|
||||
urls = ["http://taoskeeper:6043/metrics"]
|
||||
```
|
||||
|
||||
You can test it with `docker-compose`:
|
||||
|
||||
```sh
|
||||
docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
|
||||
```
|
||||
|
||||
Since the log output can be set to stdout in `telegraf.conf`:
|
||||
|
||||
```toml
|
||||
[[outputs.file]]
|
||||
files = ["stdout"]
|
||||
```
|
||||
|
||||
you can follow the TDengine metrics on standard output with `docker-compose logs`:
|
||||
|
||||
```sh
|
||||
docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
|
||||
```
|
||||
|
||||
### Zabbix
|
||||
|
||||
1. Import the zabbix template file `zbx_taos_keeper_templates.xml`.
|
||||
2. Use the `TDengine` template to create a host and modify the macros `{$TAOSKEEPER_HOST}` and `{$COLLECTION_INTERVAL}`.
|
||||
3. Wait for the monitoring items to be created automatically.
|
||||
|
||||
### FAQ
|
||||
|
||||
* Startup reports an error: "connection refused"
|
||||
|
||||
**Answer**: taosKeeper relies on the RESTful API to query data. Check whether taosAdapter is running properly and whether the taosAdapter address in taoskeeper.toml is correct.
|
||||
|
||||
* Why does taosKeeper show a different number of metrics for different TDengine instances?
|
||||
|
||||
**Answer**: If a metric has not been created in TDengine, taoskeeper cannot obtain the corresponding result.
|
||||
|
||||
* Monitoring logs from TDengine are not received.
|
||||
|
||||
**Answer**: Modify `/etc/taos/taos.cfg` and add the following parameters:
|
||||
|
||||
```cfg
|
||||
monitor 1 // enable monitor
|
||||
monitorInterval 30 // send interval (s)
|
||||
monitorFqdn localhost // FQDN of the receiver, empty by default
|
||||
monitorPort 6043 // port of the receiver
|
||||
monitorMaxLogs 100 // maximum number of logs cached per monitoring interval
|
||||
```
|
|
@ -0,0 +1,273 @@
|
|||
# TaosKeeper
|
||||
|
||||
TDengine metrics exporter for all kinds of collectors: you can obtain the running status of TDengine with just a few simple configuration items.
|
||||
|
||||
This tool uses the TDengine RESTful API, so you can build and use it without installing the TDengine client.
|
||||
|
||||
## Build
|
||||
|
||||
### Get the source codes
|
||||
|
||||
```sh
|
||||
git clone https://github.com/taosdata/TDengine
|
||||
cd TDengine/tools/keeper
|
||||
```
|
||||
|
||||
### Compile
|
||||
|
||||
```sh
|
||||
go mod tidy
|
||||
go build
|
||||
```
|
||||
|
||||
## Install
|
||||
|
||||
If you build the tool yourself, just copy the `taoskeeper` binary into your `PATH`.
|
||||
|
||||
```sh
|
||||
sudo install taoskeeper /usr/bin/
|
||||
```
|
||||
|
||||
## Start
|
||||
|
||||
Before starting, you should configure some options, such as the database host and port, and the prefix for exported metrics,
|
||||
|
||||
in `/etc/taos/taoskeeper.toml`.
|
||||
|
||||
```toml
|
||||
# Start with debug middleware for gin
|
||||
debug = false
|
||||
|
||||
# Listen port, default is 6043
|
||||
port = 6043
|
||||
|
||||
# log level
|
||||
loglevel = "info"
|
||||
|
||||
# go pool size
|
||||
gopoolsize = 50000
|
||||
|
||||
# interval for TDengine metrics
|
||||
RotationInterval = "15s"
|
||||
|
||||
[tdengine]
|
||||
host = "127.0.0.1"
|
||||
port = 6041
|
||||
username = "root"
|
||||
password = "taosdata"
|
||||
|
||||
# list of taosAdapter that need to be monitored
|
||||
[taosAdapter]
|
||||
address = ["127.0.0.1:6041"]
|
||||
|
||||
[metrics]
|
||||
# metrics prefix in metrics names.
|
||||
prefix = "taos"
|
||||
|
||||
# database for storing metrics data
|
||||
database = "log"
|
||||
|
||||
# export some tables that are not super table
|
||||
tables = []
|
||||
|
||||
[environment]
|
||||
# Whether running in cgroup.
|
||||
incgroup = false
|
||||
```
|
||||
|
||||
Now you can run the tool:
|
||||
|
||||
```sh
|
||||
taoskeeper
|
||||
```
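Once it is running, you can probe the exporter from code as well. The following is a minimal Go sketch, assuming the default port 6043 and the `/metrics` path used in the Telegraf section below; the host and truncation length are illustrative choices, not part of taosKeeper itself:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Fetch the Prometheus exposition endpoint served by taoskeeper.
	// Host and port are assumptions based on the defaults above.
	resp, err := http.Get("http://127.0.0.1:6043/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println("status:", resp.Status)

	// Print only the head of the payload as a quick sanity check.
	if len(body) > 200 {
		body = body[:200]
	}
	fmt.Printf("%s\n", body)
}
```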
|
||||
|
||||
If you use `systemd`, copy the `taoskeeper.service` to `/lib/systemd/system/` and start the service.
|
||||
|
||||
```sh
|
||||
sudo cp taoskeeper.service /lib/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start taoskeeper
|
||||
```
|
||||
|
||||
To start taoskeeper automatically whenever the OS reboots, enable the systemd service:
|
||||
|
||||
```sh
|
||||
sudo systemctl enable taoskeeper
|
||||
```
|
||||
|
||||
So, if you use `systemd`, it is easiest to install everything with these commands in one go:
|
||||
|
||||
```sh
|
||||
go mod tidy
|
||||
go build
|
||||
sudo install taoskeeper /usr/bin/
|
||||
sudo cp taoskeeper.service /lib/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start taoskeeper
|
||||
sudo systemctl enable taoskeeper
|
||||
```
|
||||
|
||||
## Docker
|
||||
|
||||
Here is an example of how to build this tool in Docker:
|
||||
|
||||
Before building, you should configure `./config/taoskeeper.toml` with proper parameters and edit the Dockerfile. Take the following as an example.
|
||||
|
||||
```dockerfile
|
||||
FROM golang:1.18.2 as builder
|
||||
|
||||
WORKDIR /usr/src/taoskeeper
|
||||
COPY ./ /usr/src/taoskeeper/
|
||||
ENV GO111MODULE=on \
|
||||
GOPROXY=https://goproxy.cn,direct
|
||||
RUN go mod tidy && go build
|
||||
|
||||
FROM alpine:3
|
||||
RUN mkdir -p /etc/taos
|
||||
COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
|
||||
COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
|
||||
EXPOSE 6043
|
||||
CMD ["taoskeeper"]
|
||||
```
|
||||
|
||||
If you already have a taosKeeper binary, you can build the image like this:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu:18.04
|
||||
RUN mkdir -p /etc/taos
|
||||
COPY ./taoskeeper /usr/bin/
|
||||
COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
|
||||
EXPOSE 6043
|
||||
CMD ["taoskeeper"]
|
||||
```
|
||||
|
||||
## Usage (**Enterprise Edition**)
|
||||
|
||||
### Prometheus (by scrape)
|
||||
|
||||
It now acts as a Prometheus exporter, like `node-exporter`.
|
||||
|
||||
Here is how to add it to the scrape configs in `/etc/prometheus/prometheus.yml`:
|
||||
|
||||
```yml
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: "taoskeeper"
|
||||
static_configs:
|
||||
- targets: [ "taoskeeper:6043" ]
|
||||
```
|
||||
|
||||
Now PromQL queries will show the right results; for example, to show the disk usage percentage of a specific host with an FQDN regex
|
||||
match expression:
|
||||
|
||||
```promql
|
||||
taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
|
||||
```
|
||||
|
||||
You can use `docker-compose` with the current `docker-compose.yml` to test the whole stack.
|
||||
|
||||
Here is the `docker-compose.yml`:
|
||||
|
||||
```yml
|
||||
version: "3.7"
|
||||
|
||||
services:
|
||||
tdengine:
|
||||
image: tdengine/tdengine
|
||||
environment:
|
||||
TAOS_FQDN: tdengine
|
||||
volumes:
|
||||
- taosdata:/var/lib/taos
|
||||
taoskeeper:
|
||||
build: ./
|
||||
depends_on:
|
||||
- tdengine
|
||||
environment:
|
||||
TDENGINE_HOST: tdengine
|
||||
TDENGINE_PORT: 6041
|
||||
volumes:
|
||||
- ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
|
||||
ports:
|
||||
- 6043:6043
|
||||
prometheus:
|
||||
image: prom/prometheus
|
||||
volumes:
|
||||
- ./prometheus/:/etc/prometheus/
|
||||
ports:
|
||||
- 9090:9090
|
||||
volumes:
|
||||
taosdata:
|
||||
|
||||
```
|
||||
|
||||
Start the stack:
|
||||
|
||||
```sh
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
Now point to <http://localhost:9090> (if you have not started a Prometheus server yourself) and run queries.
|
||||
|
||||
For a quick demo with TaosKeeper + Prometheus + Grafana, we provide
|
||||
a [simple dashboard](https://grafana.com/grafana/dashboards/15164) to monitor TDengine.
|
||||
|
||||
### Telegraf
|
||||
|
||||
If you are using telegraf to collect metrics, just add inputs like this:
|
||||
|
||||
```toml
|
||||
[[inputs.prometheus]]
|
||||
## An array of urls to scrape metrics from.
|
||||
urls = ["http://taoskeeper:6043/metrics"]
|
||||
```
|
||||
|
||||
You can test it with `docker-compose`:
|
||||
|
||||
```sh
|
||||
docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
|
||||
```
|
||||
|
||||
Since we have set a stdout file output in `telegraf.conf`:
|
||||
|
||||
```toml
|
||||
[[outputs.file]]
|
||||
files = ["stdout"]
|
||||
```
|
||||
|
||||
you can track the TDengine metrics on standard output with `docker-compose logs`:
|
||||
|
||||
```sh
|
||||
docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
|
||||
```
|
||||
|
||||
### Zabbix
|
||||
|
||||
1. Import the zabbix template file `zbx_taos_keeper_templates.xml`.
|
||||
2. Use the template `TDengine` to create the host and modify the macros `{$TAOSKEEPER_HOST}`
|
||||
and `{$COLLECTION_INTERVAL}`.
|
||||
3. Wait for the monitoring items to be created automatically.
|
||||
|
||||
### FAQ
|
||||
|
||||
* Error "connection refused" occurred while taosKeeper was starting
|
||||
|
||||
**Answer**: taoskeeper relies on the RESTful interface to query data. Check whether taosAdapter is running and whether
|
||||
the taosAdapter address in taoskeeper.toml is correct.
|
||||
|
||||
* Why does taosKeeper show a different number of metrics for different TDengine instances?
|
||||
|
||||
**Answer**: If a metric is not created in TDengine, taoskeeper cannot get the corresponding monitoring results.
|
||||
|
||||
* Cannot receive monitoring logs from the TDengine server.
|
||||
|
||||
**Answer**: Modify the `/etc/taos/taos.cfg` file and add parameters like:
|
||||
|
||||
```cfg
|
||||
monitor 1 // start monitor
|
||||
monitorInterval 30 // send log interval (s)
|
||||
monitorFqdn localhost
|
||||
monitorPort 6043 // taosKeeper port
|
||||
monitorMaxLogs 100
|
||||
```
|
|
@ -0,0 +1,260 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/taosdata/taoskeeper/db"
|
||||
"github.com/taosdata/taoskeeper/infrastructure/config"
|
||||
"github.com/taosdata/taoskeeper/infrastructure/log"
|
||||
"github.com/taosdata/taoskeeper/util"
|
||||
)
|
||||
|
||||
var adapterLog = log.GetLogger("ADP")
|
||||
|
||||
type adapterReqType int
|
||||
|
||||
const (
|
||||
rest adapterReqType = iota // 0 - rest
|
||||
ws // 1 - ws
|
||||
)
|
||||
|
||||
type Adapter struct {
|
||||
username string
|
||||
password string
|
||||
host string
|
||||
port int
|
||||
usessl bool
|
||||
conn *db.Connector
|
||||
db string
|
||||
dbOptions map[string]interface{}
|
||||
}
|
||||
|
||||
func NewAdapter(c *config.Config) *Adapter {
|
||||
return &Adapter{
|
||||
username: c.TDengine.Username,
|
||||
password: c.TDengine.Password,
|
||||
host: c.TDengine.Host,
|
||||
port: c.TDengine.Port,
|
||||
usessl: c.TDengine.Usessl,
|
||||
db: c.Metrics.Database.Name,
|
||||
dbOptions: c.Metrics.Database.Options,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Adapter) Init(c gin.IRouter) error {
|
||||
if err := a.createDatabase(); err != nil {
|
||||
return fmt.Errorf("create database error:%s", err)
|
||||
}
|
||||
if err := a.initConnect(); err != nil {
|
||||
return fmt.Errorf("init db connect error:%s", err)
|
||||
}
|
||||
if err := a.createTable(); err != nil {
|
||||
return fmt.Errorf("create table error:%s", err)
|
||||
}
|
||||
c.POST("/adapter_report", a.handleFunc())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Adapter) handleFunc() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
qid := util.GetQid(c.GetHeader("X-QID"))
|
||||
|
||||
adapterLog := adapterLog.WithFields(
|
||||
logrus.Fields{config.ReqIDKey: qid},
|
||||
)
|
||||
|
||||
if a.conn == nil {
|
||||
adapterLog.Error("no connection")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
|
||||
return
|
||||
}
|
||||
|
||||
data, err := c.GetRawData()
|
||||
if err != nil {
|
||||
adapterLog.Errorf("get adapter report data error, msg:%s", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get adapter report data error. %s", err)})
|
||||
return
|
||||
}
|
||||
if adapterLog.Logger.IsLevelEnabled(logrus.TraceLevel) {
|
||||
adapterLog.Tracef("received adapter report data:%s", string(data))
|
||||
}
|
||||
|
||||
var report AdapterReport
|
||||
if err = json.Unmarshal(data, &report); err != nil {
|
||||
adapterLog.Errorf("parse adapter report data error, data:%s, error:%s", string(data), err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse adapter report data error: %s", err)})
|
||||
return
|
||||
}
|
||||
sql := a.parseSql(report)
|
||||
adapterLog.Debugf("adapter report sql:%s", sql)
|
||||
|
||||
if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil {
|
||||
adapterLog.Errorf("adapter report error, msg:%s", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, gin.H{})
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Adapter) initConnect() error {
|
||||
conn, err := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl)
|
||||
if err != nil {
|
||||
adapterLog.Dup().Errorf("init db connect error, msg:%s", err)
|
||||
return err
|
||||
}
|
||||
a.conn = conn
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Adapter) parseSql(report AdapterReport) string {
|
||||
// reqType: 0: rest, 1: websocket
|
||||
restTbName := a.tableName(report.Endpoint, rest)
|
||||
wsTbName := a.tableName(report.Endpoint, ws)
|
||||
ts := time.Unix(report.Timestamp, 0).Format(time.RFC3339)
|
||||
metric := report.Metric
|
||||
return fmt.Sprintf("insert into %s using adapter_requests tags ('%s', %d) "+
|
||||
"values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d) "+
|
||||
"%s using adapter_requests tags ('%s', %d) "+
|
||||
"values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)",
|
||||
restTbName, report.Endpoint, rest, ts, metric.RestTotal, metric.RestQuery, metric.RestWrite, metric.RestOther,
|
||||
metric.RestInProcess, metric.RestSuccess, metric.RestFail, metric.RestQuerySuccess, metric.RestQueryFail,
|
||||
metric.RestWriteSuccess, metric.RestWriteFail, metric.RestOtherSuccess, metric.RestOtherFail,
|
||||
metric.RestQueryInProcess, metric.RestWriteInProcess,
|
||||
wsTbName, report.Endpoint, ws, ts, metric.WSTotal,
|
||||
metric.WSQuery, metric.WSWrite, metric.WSOther, metric.WSInProcess, metric.WSSuccess, metric.WSFail,
|
||||
metric.WSQuerySuccess, metric.WSQueryFail, metric.WSWriteSuccess, metric.WSWriteFail, metric.WSOtherSuccess,
|
||||
metric.WSOtherFail, metric.WSQueryInProcess, metric.WSWriteInProcess)
|
||||
}
|
||||
|
||||
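// tableName derives a per-endpoint child table name for adapter_requests;
// if the readable name would exceed MAX_TABLE_NAME_LEN, it falls back to an
// md5 hash of endpoint+reqType so the generated name always fits.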
func (a *Adapter) tableName(endpoint string, reqType adapterReqType) string {
|
||||
var tbname string
|
||||
if reqType == rest {
|
||||
tbname = fmt.Sprintf("adapter_req_%s_%s", endpoint, "rest")
|
||||
} else {
|
||||
tbname = fmt.Sprintf("adapter_req_%s_%s", endpoint, "ws")
|
||||
}
|
||||
|
||||
if len(tbname) <= util.MAX_TABLE_NAME_LEN {
|
||||
return util.ToValidTableName(tbname)
|
||||
} else {
|
||||
sum := md5.Sum([]byte(fmt.Sprintf("%s%d", endpoint, reqType)))
|
||||
return fmt.Sprintf("adapter_req_%s", hex.EncodeToString(sum[:]))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Adapter) createDatabase() error {
|
||||
qid := util.GetQidOwn()
|
||||
|
||||
adapterLog := adapterLog.WithFields(
|
||||
logrus.Fields{config.ReqIDKey: qid},
|
||||
)
|
||||
|
||||
conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("connect to database error, msg:%s", err)
|
||||
}
|
||||
defer func() { _ = conn.Close() }()
|
||||
sql := a.createDBSql()
|
||||
adapterLog.Infof("create database, sql:%s", sql)
|
||||
_, err = conn.Exec(context.Background(), sql, util.GetQidOwn())
|
||||
if err != nil {
|
||||
adapterLog.Errorf("create database error, msg:%s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *Adapter) createDBSql() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(fmt.Sprintf("create database if not exists %s ", a.db))
|
||||
|
||||
for k, v := range a.dbOptions {
|
||||
buf.WriteString(k)
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
buf.WriteString(fmt.Sprintf(" '%s'", v))
|
||||
default:
|
||||
buf.WriteString(fmt.Sprintf(" %v", v))
|
||||
}
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
var adapterTableSql = "create stable if not exists `adapter_requests` (" +
|
||||
"`ts` timestamp, " +
|
||||
"`total` int unsigned, " +
|
||||
"`query` int unsigned, " +
|
||||
"`write` int unsigned, " +
|
||||
"`other` int unsigned, " +
|
||||
"`in_process` int unsigned, " +
|
||||
"`success` int unsigned, " +
|
||||
"`fail` int unsigned, " +
|
||||
"`query_success` int unsigned, " +
|
||||
"`query_fail` int unsigned, " +
|
||||
"`write_success` int unsigned, " +
|
||||
"`write_fail` int unsigned, " +
|
||||
"`other_success` int unsigned, " +
|
||||
"`other_fail` int unsigned, " +
|
||||
"`query_in_process` int unsigned, " +
|
||||
"`write_in_process` int unsigned ) " +
|
||||
"tags (`endpoint` varchar(32), `req_type` tinyint unsigned )"
|
||||
|
||||
func (a *Adapter) createTable() error {
|
||||
if a.conn == nil {
|
||||
return errNoConnection
|
||||
}
|
||||
_, err := a.conn.Exec(context.Background(), adapterTableSql, util.GetQidOwn())
|
||||
return err
|
||||
}
|
||||
|
||||
type AdapterReport struct {
|
||||
Timestamp int64 `json:"ts"`
|
||||
Metric AdapterMetrics `json:"metrics"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
}
|
||||
|
||||
type AdapterMetrics struct {
|
||||
RestTotal int `json:"rest_total"`
|
||||
RestQuery int `json:"rest_query"`
|
||||
RestWrite int `json:"rest_write"`
|
||||
RestOther int `json:"rest_other"`
|
||||
RestInProcess int `json:"rest_in_process"`
|
||||
RestSuccess int `json:"rest_success"`
|
||||
RestFail int `json:"rest_fail"`
|
||||
RestQuerySuccess int `json:"rest_query_success"`
|
||||
RestQueryFail int `json:"rest_query_fail"`
|
||||
RestWriteSuccess int `json:"rest_write_success"`
|
||||
RestWriteFail int `json:"rest_write_fail"`
|
||||
RestOtherSuccess int `json:"rest_other_success"`
|
||||
RestOtherFail int `json:"rest_other_fail"`
|
||||
RestQueryInProcess int `json:"rest_query_in_process"`
|
||||
RestWriteInProcess int `json:"rest_write_in_process"`
|
||||
WSTotal int `json:"ws_total"`
|
||||
WSQuery int `json:"ws_query"`
|
||||
WSWrite int `json:"ws_write"`
|
||||
WSOther int `json:"ws_other"`
|
||||
WSInProcess int `json:"ws_in_process"`
|
||||
WSSuccess int `json:"ws_success"`
|
||||
WSFail int `json:"ws_fail"`
|
||||
WSQuerySuccess int `json:"ws_query_success"`
|
||||
WSQueryFail int `json:"ws_query_fail"`
|
||||
WSWriteSuccess int `json:"ws_write_success"`
|
||||
WSWriteFail int `json:"ws_write_fail"`
|
||||
WSOtherSuccess int `json:"ws_other_success"`
|
||||
WSOtherFail int `json:"ws_other_fail"`
|
||||
WSQueryInProcess int `json:"ws_query_in_process"`
|
||||
WSWriteInProcess int `json:"ws_write_in_process"`
|
||||
}
|
|
@ -0,0 +1,98 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/taosdata/taoskeeper/db"
|
||||
"github.com/taosdata/taoskeeper/infrastructure/config"
|
||||
"github.com/taosdata/taoskeeper/util"
|
||||
)
|
||||
|
||||
func TestAdapter2(t *testing.T) {
|
||||
c := &config.Config{
|
||||
InstanceID: 64,
|
||||
Port: 6043,
|
||||
TDengine: config.TDengineRestful{
|
||||
Host: "127.0.0.1",
|
||||
Port: 6041,
|
||||
Username: "root",
|
||||
Password: "taosdata",
|
||||
Usessl: false,
|
||||
},
|
||||
Metrics: config.MetricsConfig{
|
||||
Database: config.Database{
|
||||
Name: "adapter_report_test",
|
||||
Options: map[string]interface{}{},
|
||||
},
|
||||
},
|
||||
}
|
||||
a := NewAdapter(c)
|
||||
err := a.Init(router)
|
||||
assert.NoError(t, err)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
body := strings.NewReader(" {\"ts\": 1696928323, \"metrics\": {\"rest_total\": 10, \"rest_query\": 2, " +
|
||||
"\"rest_write\": 5, \"rest_other\": 3, \"rest_in_process\": 1, \"rest_fail\": 5, \"rest_success\": 3, " +
|
||||
"\"rest_query_success\": 1, \"rest_query_fail\": 2, \"rest_write_success\": 2, \"rest_write_fail\": 3, " +
|
||||
"\"rest_other_success\": 1, \"rest_other_fail\": 2, \"rest_query_in_process\": 1, \"rest_write_in_process\": 2, " +
|
||||
"\"ws_total\": 10, \"ws_query\": 2, \"ws_write\": 3, \"ws_other\": 5, \"ws_in_process\": 1, \"ws_success\": 3, " +
|
||||
"\"ws_fail\": 3, \"ws_query_success\": 1, \"ws_query_fail\": 1, \"ws_write_success\": 2, \"ws_write_fail\": 2, " +
|
||||
"\"ws_other_success\": 1, \"ws_other_fail\": 2, \"ws_query_in_process\": 1, \"ws_write_in_process\": 2 }, " +
|
||||
"\"endpoint\": \"adapter-1:6041\"}")
|
||||
req, _ := http.NewRequest(http.MethodPost, "/adapter_report", body)
|
||||
req.Header.Set("X-QID", "0x1234567890ABCD00")
|
||||
router.ServeHTTP(w, req)
|
||||
assert.Equal(t, 200, w.Code)
|
||||
|
||||
conn, err := db.NewConnectorWithDb(c.TDengine.Username, c.TDengine.Password, c.TDengine.Host, c.TDengine.Port, c.Metrics.Database.Name, c.TDengine.Usessl)
|
||||
defer func() {
|
||||
_, _ = conn.Query(context.Background(), "drop database if exists adapter_report_test", util.GetQidOwn())
|
||||
}()
|
||||
|
||||
assert.NoError(t, err)
|
||||
data, err := conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=0", util.GetQidOwn())
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(data.Data))
|
||||
assert.Equal(t, uint32(10), data.Data[0][1])
|
||||
assert.Equal(t, uint32(2), data.Data[0][2])
|
||||
assert.Equal(t, uint32(5), data.Data[0][3])
|
||||
assert.Equal(t, uint32(3), data.Data[0][4])
|
||||
assert.Equal(t, uint32(1), data.Data[0][5])
|
||||
assert.Equal(t, uint32(3), data.Data[0][6])
|
||||
assert.Equal(t, uint32(5), data.Data[0][7])
|
||||
assert.Equal(t, uint32(1), data.Data[0][8])
|
||||
assert.Equal(t, uint32(2), data.Data[0][9])
|
||||
assert.Equal(t, uint32(2), data.Data[0][10])
|
||||
assert.Equal(t, uint32(3), data.Data[0][11])
|
||||
assert.Equal(t, uint32(1), data.Data[0][12])
|
||||
assert.Equal(t, uint32(2), data.Data[0][13])
|
||||
assert.Equal(t, uint32(1), data.Data[0][14])
|
||||
assert.Equal(t, uint32(2), data.Data[0][15])
|
||||
|
||||
data, err = conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=1", util.GetQidOwn())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(data.Data))
|
||||
assert.Equal(t, uint32(10), data.Data[0][1])
|
||||
assert.Equal(t, uint32(2), data.Data[0][2])
|
||||
assert.Equal(t, uint32(3), data.Data[0][3])
|
||||
assert.Equal(t, uint32(5), data.Data[0][4])
|
||||
assert.Equal(t, uint32(1), data.Data[0][5])
|
||||
assert.Equal(t, uint32(3), data.Data[0][6])
|
||||
assert.Equal(t, uint32(3), data.Data[0][7])
|
||||
assert.Equal(t, uint32(1), data.Data[0][8])
|
||||
assert.Equal(t, uint32(1), data.Data[0][9])
|
||||
assert.Equal(t, uint32(2), data.Data[0][10])
|
||||
assert.Equal(t, uint32(2), data.Data[0][11])
|
||||
assert.Equal(t, uint32(1), data.Data[0][12])
|
||||
assert.Equal(t, uint32(2), data.Data[0][13])
|
||||
assert.Equal(t, uint32(1), data.Data[0][14])
|
||||
assert.Equal(t, uint32(2), data.Data[0][15])
|
||||
|
||||
conn.Exec(context.Background(), "drop database "+c.Metrics.Database.Name, util.GetQidOwn())
|
||||
}
|
|
@ -0,0 +1,336 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/taosdata/taoskeeper/db"
|
||||
"github.com/taosdata/taoskeeper/infrastructure/config"
|
||||
"github.com/taosdata/taoskeeper/infrastructure/log"
|
||||
"github.com/taosdata/taoskeeper/util"
|
||||
)
|
||||
|
||||
var auditLogger = log.GetLogger("AUD")
|
||||
|
||||
const MAX_DETAIL_LEN = 50000
|
||||
|
||||
type Audit struct {
|
||||
username string
|
||||
password string
|
||||
host string
|
||||
port int
|
||||
usessl bool
|
||||
conn *db.Connector
|
||||
db string
|
||||
dbOptions map[string]interface{}
|
||||
}
|
||||
|
||||
type AuditInfo struct {
|
||||
Timestamp string `json:"timestamp"`
|
||||
ClusterID string `json:"cluster_id"`
|
||||
User string `json:"user"`
|
||||
Operation string `json:"operation"`
|
||||
Db string `json:"db"`
|
||||
Resource string `json:"resource"`
|
||||
ClientAdd string `json:"client_add"` // client address
|
||||
Details string `json:"details"`
|
||||
}
|
||||
|
||||
type AuditArrayInfo struct {
|
||||
Records []AuditInfo `json:"records"`
|
||||
}
|
||||
|
||||
type AuditInfoOld struct {
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
ClusterID string `json:"cluster_id"`
|
||||
User string `json:"user"`
|
||||
Operation string `json:"operation"`
|
||||
Db string `json:"db"`
|
||||
Resource string `json:"resource"`
|
||||
ClientAdd string `json:"client_add"` // client address
|
||||
Details string `json:"details"`
|
||||
}
|
||||
|
||||
func NewAudit(c *config.Config) (*Audit, error) {
|
||||
a := Audit{
|
||||
username: c.TDengine.Username,
|
||||
password: c.TDengine.Password,
|
||||
host: c.TDengine.Host,
|
||||
port: c.TDengine.Port,
|
||||
usessl: c.TDengine.Usessl,
|
||||
db: c.Audit.Database.Name,
|
||||
dbOptions: c.Audit.Database.Options,
|
||||
}
|
||||
if a.db == "" {
|
||||
a.db = "audit"
|
||||
}
|
||||
return &a, nil
|
||||
}
|
||||
|
||||
func (a *Audit) Init(c gin.IRouter) error {
|
||||
if err := a.createDatabase(); err != nil {
|
||||
return fmt.Errorf("create database error, msg:%s", err)
|
||||
}
|
||||
if err := a.initConnect(); err != nil {
|
||||
return fmt.Errorf("init db connect error, msg:%s", err)
|
||||
}
|
||||
if err := a.createSTables(); err != nil {
|
||||
return fmt.Errorf("create stable error, msg:%s", err)
|
||||
}
|
||||
c.POST("/audit", a.handleFunc())
|
||||
c.POST("/audit_v2", a.handleFunc())
|
||||
c.POST("/audit-batch", a.handleBatchFunc())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Audit) handleBatchFunc() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
qid := util.GetQid(c.GetHeader("X-QID"))
|
||||
|
||||
auditLogger := auditLogger.WithFields(
|
||||
logrus.Fields{config.ReqIDKey: qid},
|
||||
)
|
||||
|
||||
if a.conn == nil {
|
||||
auditLogger.Error("no connection")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
|
||||
return
|
||||
}
|
||||
|
||||
data, err := c.GetRawData()
|
||||
if err != nil {
|
||||
auditLogger.Errorf("get audit data error, msg:%s", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. %s", err)})
|
||||
return
|
||||
}
|
||||
|
||||
if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) {
|
||||
auditLogger.Tracef("receive audit request, data:%s", string(data))
|
||||
}
|
||||
var auditArray AuditArrayInfo
|
||||
|
||||
if err := json.Unmarshal(data, &auditArray); err != nil {
|
||||
auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)})
|
||||
return
|
||||
}
|
||||
|
||||
if len(auditArray.Records) == 0 {
|
||||
if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) {
|
||||
auditLogger.Trace("handle request successfully (no records)")
|
||||
}
|
||||
c.JSON(http.StatusOK, gin.H{})
|
||||
return
|
||||
}
|
||||
|
||||
err = handleBatchRecord(auditArray.Records, a.conn, qid)
|
||||
|
||||
if err != nil {
|
||||
auditLogger.Errorf("process records error, error:%s", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. %s", err)})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{})
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Audit) handleFunc() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
qid := util.GetQid(c.GetHeader("X-QID"))
|
||||
|
||||
auditLogger := auditLogger.WithFields(
|
||||
logrus.Fields{config.ReqIDKey: qid},
|
||||
)
|
||||
|
||||
if a.conn == nil {
|
||||
auditLogger.Error("no connection")
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
|
||||
return
|
||||
}
|
||||
|
||||
data, err := c.GetRawData()
|
||||
if err != nil {
|
||||
auditLogger.Errorf("get audit data error, msg:%s", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. %s", err)})
|
||||
return
|
||||
}
|
||||
if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) {
|
||||
auditLogger.Tracef("receive audit request, data:%s", string(data))
|
||||
}
|
||||
sql := ""
|
||||
|
||||
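// Two payload formats are accepted: the current format carries "timestamp"
// as a string (already in ns), while the legacy format uses a numeric ms
// value that parseSqlOld pads to ns. Sniff the raw JSON to pick the parser.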
isStrTime, _ := regexp.MatchString(`"timestamp"\s*:\s*"[^"]*"`, string(data))
|
||||
if isStrTime {
|
||||
var audit AuditInfo
|
||||
if err := json.Unmarshal(data, &audit); err != nil {
|
||||
auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)})
|
||||
return
|
||||
}
|
||||
|
||||
sql = parseSql(audit)
|
||||
} else {
|
||||
var audit AuditInfoOld
|
||||
if err := json.Unmarshal(data, &audit); err != nil {
|
||||
auditLogger.Errorf("parse old audit error, data:%s, error:%s", string(data), err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)})
|
||||
return
|
||||
}
|
||||
|
||||
sql = parseSqlOld(audit)
|
||||
}
|
||||
|
||||
if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil {
|
||||
auditLogger.Errorf("save audit data error, sql:%s, error:%s", sql, err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("save audit data error: %s", err)})
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, gin.H{})
|
||||
}
|
||||
}
|
||||
|
||||
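// handleDetails escapes quotes so the text can be embedded in a SQL string
// literal and truncates it to MAX_DETAIL_LEN, the width of the details column.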
func handleDetails(details string) string {
|
||||
if strings.Contains(details, "'") {
|
||||
details = strings.ReplaceAll(details, "'", "\\'")
|
||||
}
|
||||
if strings.Contains(details, "\"") {
|
||||
details = strings.ReplaceAll(details, "\"", "\\\"")
|
||||
}
|
||||
if len(details) > MAX_DETAIL_LEN {
|
||||
details = details[:MAX_DETAIL_LEN]
|
||||
}
|
||||
return details
|
||||
}
|
||||
|
||||
func parseSql(audit AuditInfo) string {
|
||||
details := handleDetails(audit.Details)
|
||||
|
||||
return fmt.Sprintf(
|
||||
"insert into %s using operations tags ('%s') values (%s, '%s', '%s', '%s', '%s', '%s', '%s') ",
|
||||
getTableName(audit), audit.ClusterID, audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details)
|
||||
}
|
||||
|
||||
func parseSqlOld(audit AuditInfoOld) string {
|
||||
details := handleDetails(audit.Details)
|
||||
|
||||
return fmt.Sprintf(
|
||||
"insert into %s using operations tags ('%s') values (%s, '%s', '%s', '%s', '%s', '%s', '%s') ",
|
||||
getTableNameOld(audit), audit.ClusterID, strconv.FormatInt(audit.Timestamp, 10)+"000000", audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details)
|
||||
}
|
||||
|
||||
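// handleBatchRecord folds the records into multi-value INSERT statements,
// flushing whenever the next row would push the SQL past MAX_SQL_LEN, and
// varies the low byte of qid per flush so each statement is traceable.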
func handleBatchRecord(auditArray []AuditInfo, conn *db.Connector, qid uint64) error {
|
||||
var builder strings.Builder
|
||||
var head = fmt.Sprintf(
|
||||
"insert into %s using operations tags ('%s') values",
|
||||
getTableName(auditArray[0]), auditArray[0].ClusterID)
|
||||
|
||||
builder.WriteString(head)
|
||||
var qid_counter uint8 = 0
|
||||
for _, audit := range auditArray {
|
||||
|
||||
details := handleDetails(audit.Details)
|
||||
valuesStr := fmt.Sprintf(
|
||||
"(%s, '%s', '%s', '%s', '%s', '%s', '%s') ",
|
||||
audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details)
|
||||
|
||||
if (builder.Len() + len(valuesStr)) > MAX_SQL_LEN {
|
||||
sql := builder.String()
|
||||
if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil {
|
||||
return err
|
||||
}
|
||||
builder.Reset()
|
||||
builder.WriteString(head)
|
||||
}
|
||||
builder.WriteString(valuesStr)
|
||||
qid_counter++
|
||||
}
|
||||
|
||||
if builder.Len() > len(head) {
|
||||
sql := builder.String()
|
||||
if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getTableName(audit AuditInfo) string {
|
||||
return fmt.Sprintf("t_operations_%s", audit.ClusterID)
|
||||
}
|
||||
|
||||
func getTableNameOld(audit AuditInfoOld) string {
|
||||
return fmt.Sprintf("t_operations_%s", audit.ClusterID)
|
||||
}
|
||||
|
||||
func (a *Audit) initConnect() error {
|
||||
conn, err := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl)
|
||||
if err != nil {
|
||||
auditLogger.Errorf("init db connect error, msg:%s", err)
|
||||
return err
|
||||
}
|
||||
a.conn = conn
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Audit) createDatabase() error {
|
||||
conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("connect to database error, msg:%s", err)
|
||||
}
|
||||
defer func() { _ = conn.Close() }()
|
||||
sql := a.createDBSql()
|
||||
auditLogger.Infof("create database, sql:%s", sql)
|
||||
_, err = conn.Exec(context.Background(), sql, util.GetQidOwn())
|
||||
if err != nil {
|
||||
auditLogger.Errorf("create database error, msg:%s", err)
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
var errNoConnection = errors.New("no connection")
|
||||
|
||||
func (a *Audit) createDBSql() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(fmt.Sprintf("create database if not exists %s precision 'ns' ", a.db))
|
||||
|
||||
for k, v := range a.dbOptions {
|
||||
buf.WriteString(k)
|
||||
switch v := v.(type) {
|
||||
case string:
|
||||
buf.WriteString(fmt.Sprintf(" '%s'", v))
|
||||
default:
|
||||
buf.WriteString(fmt.Sprintf(" %v", v))
|
||||
}
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (a *Audit) createSTables() error {
|
||||
var createTableSql = "create stable if not exists operations " +
|
||||
"(ts timestamp, user_name varchar(25), operation varchar(20), db varchar(65), resource varchar(193), client_address varchar(25), details varchar(50000)) " +
|
||||
"tags (cluster_id varchar(64))"
|
||||
|
||||
if a.conn == nil {
|
||||
return errNoConnection
|
||||
}
|
||||
_, err := a.conn.Exec(context.Background(), createTableSql, util.GetQidOwn())
|
||||
if err != nil {
|
||||
auditLogger.Errorf("## create stable error, msg:%s", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,153 @@
|
|||
package api

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/util"
)

func TestAudit(t *testing.T) {
	cfg := util.GetCfg()
	cfg.Audit = config.AuditConfig{
		Database: config.Database{
			Name: "keepter_test_audit",
		},
		Enable: true,
	}

	a, err := NewAudit(cfg)
	assert.NoError(t, err)
	err = a.Init(router)
	assert.NoError(t, err)

	longDetails := strings.Repeat("0123456789", 5000)

	cases := []struct {
		name   string
		ts     int64
		detail string
		data   string
		expect string
	}{
		{
			name:   "1",
			ts:     1699839716440000000,
			data:   `{"timestamp": "1699839716440000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "detail"}`,
			expect: "detail",
		},
		{
			name:   "2",
			ts:     1699839716441000000,
			data:   `{"timestamp": "1699839716441000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "` + longDetails + `"}`,
			expect: longDetails[:50000],
		},
		{
			name:   "3",
			ts:     1699839716442000000,
			data:   "{\"timestamp\": \"1699839716442000000\", \"cluster_id\": \"cluster_id\", \"user\": \"user\", \"operation\": \"operation\", \"db\":\"dbnameb\", \"resource\":\"resourcenameb\", \"client_add\": \"localhost:30000\", \"details\": \"create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'\"}",
			expect: "create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'",
		},
	}

	cases2 := []struct {
		name   string
		ts     int64
		detail string
		data   string
		expect string
	}{
		{
			name:   "1",
			ts:     1699839716445000000,
			data:   `{"timestamp":1699839716445, "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "details"}`,
			expect: "details",
		},
	}
	conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Audit.Database.Name, cfg.TDengine.Usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn())
	}()

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			w := httptest.NewRecorder()
			body := strings.NewReader(c.data)
			req, _ := http.NewRequest(http.MethodPost, "/audit_v2", body)
			router.ServeHTTP(w, req)
			assert.Equal(t, 200, w.Code)

			data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn())
			assert.NoError(t, err)
			assert.Equal(t, 1, len(data.Data))
			assert.Equal(t, c.expect, data.Data[0][1])
		})
	}

	for _, c := range cases2 {
		t.Run(c.name, func(t *testing.T) {
			w := httptest.NewRecorder()
			body := strings.NewReader(c.data)
			req, _ := http.NewRequest(http.MethodPost, "/audit", body)
			router.ServeHTTP(w, req)
			assert.Equal(t, 200, w.Code)

			data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn())
			assert.NoError(t, err)
			assert.Equal(t, 1, len(data.Data))
			assert.Equal(t, c.expect, data.Data[0][1])
		})
	}
	// force small batches so the audit-batch insert path flushes multiple times
	MAX_SQL_LEN = 300
	// test audit batch
	input := `{"records":[{"timestamp":"1702548856940013848","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d630302"},{"timestamp":"1702548856939746458","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45230","db":"test","resource":"","details":"d130277"},{"timestamp":"1702548856939586665","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5268"},{"timestamp":"1702548856939528940","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50222","db":"test","resource":"","details":"d255282"},{"timestamp":"1702548856939336371","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45126","db":"test","resource":"","details":"d755297"},{"timestamp":"1702548856939075131","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d380325"},{"timestamp":"1702548856938640661","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45152","db":"test","resource":"","details":"d255281"},{"timestamp":"1702548856938505795","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d130276"},{"timestamp":"1702548856938363319","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45178","db":"test","resource":"","details":"d755296"},{"timestamp":"1702548856938201478","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d380324"},{"timestamp":"1702548856937740618","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5266"}]}`

	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn())
	}()

	t.Run("testbatch", func(t *testing.T) {
		// test an empty records array
		w1 := httptest.NewRecorder()
		body1 := strings.NewReader(`{"records": []}`)

		req1, _ := http.NewRequest(http.MethodPost, "/audit-batch", body1)
		router.ServeHTTP(w1, req1)
		assert.Equal(t, 200, w1.Code)

		// test a multi-record array (11 records, so several flushes at MAX_SQL_LEN = 300)
		w := httptest.NewRecorder()
		body := strings.NewReader(input)
		req, _ := http.NewRequest(http.MethodPost, "/audit-batch", body)
		router.ServeHTTP(w, req)
		assert.Equal(t, 200, w.Code)

		data, err := conn.Query(context.Background(), "select ts, details from "+cfg.Audit.Database.Name+".operations where cluster_id='8468922059162439502'", util.GetQidOwn())
		assert.NoError(t, err)
		assert.Equal(t, 11, len(data.Data))
	})
}

@ -0,0 +1,21 @@
package api

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func NewCheckHealth(version string) *CheckHealth {
	return &CheckHealth{version: version}
}

type CheckHealth struct {
	version string
}

func (h *CheckHealth) Init(c gin.IRouter) {
	c.GET("check_health", func(context *gin.Context) {
		context.JSON(http.StatusOK, map[string]string{"version": h.version})
	})
}
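
// Usage sketch (illustrative request/response, not part of the original source):
//   GET /check_health  ->  200 {"version": "<keeper version string>"}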

@ -0,0 +1,89 @@
package api

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/infrastructure/log"
	"github.com/taosdata/taoskeeper/util"
)

var commonLogger = log.GetLogger("CMN")

func CreateDatabase(username string, password string, host string, port int, usessl bool, dbname string, databaseOptions map[string]interface{}) {
	qid := util.GetQidOwn()

	commonLogger := commonLogger.WithFields(
		logrus.Fields{config.ReqIDKey: qid},
	)

	ctx := context.Background()

	conn, err := db.NewConnector(username, password, host, port, usessl)
	if err != nil {
		commonLogger.Errorf("connect to adapter error, msg:%s", err)
		return
	}

	defer closeConn(conn)

	createDBSql := generateCreateDBSql(dbname, databaseOptions)
	commonLogger.Warningf("create database sql: %s", createDBSql)

	// keep the last error so the panic below does not fire with a nil error
	// (the per-attempt err is scoped to the if statement inside the loop)
	var lastErr error
	for i := 0; i < 3; i++ {
		if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil {
			lastErr = err
			commonLogger.Errorf("try %v times: create database %s error, msg:%v", i+1, dbname, err)
			time.Sleep(5 * time.Second)
			continue
		}
		return
	}
	panic(lastErr)
}

func generateCreateDBSql(dbname string, databaseOptions map[string]interface{}) string {
	var buf bytes.Buffer
	buf.WriteString("create database if not exists ")
	buf.WriteString(dbname)

	for k, v := range databaseOptions {
		buf.WriteString(" ")
		buf.WriteString(k)
		switch v := v.(type) {
		case string:
			buf.WriteString(fmt.Sprintf(" '%s'", v))
		default:
			buf.WriteString(fmt.Sprintf(" %v", v))
		}
		buf.WriteString(" ")
	}
	return buf.String()
}

func CreatTables(username string, password string, host string, port int, usessl bool, dbname string, createList []string) {
	ctx := context.Background()
	conn, err := db.NewConnectorWithDb(username, password, host, port, dbname, usessl)
	if err != nil {
		commonLogger.Errorf("connect to database error, msg:%s", err)
		return
	}
	defer closeConn(conn)

	for _, createSql := range createList {
		commonLogger.Infof("execute sql:%s", createSql)
		if _, err = conn.Exec(ctx, createSql, util.GetQidOwn()); err != nil {
			commonLogger.Errorf("execute sql: %s, error: %s", createSql, err)
		}
	}
}

func closeConn(conn *db.Connector) {
	if err := conn.Close(); err != nil {
		commonLogger.Errorf("close connection error, msg:%s", err)
	}
}

@ -0,0 +1,297 @@
package api

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/shopspring/decimal"
	"github.com/stretchr/testify/assert"
	"github.com/taosdata/taoskeeper/cmd"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/infrastructure/log"
	"github.com/taosdata/taoskeeper/process"
	"github.com/taosdata/taoskeeper/util"
)

var router *gin.Engine
var conf *config.Config
var dbName = "exporter_test"

func TestMain(m *testing.M) {
	conf = config.InitConfig()
	log.ConfigLog()

	conf.Metrics.Database.Name = dbName
	conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	ctx := context.Background()
	conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn())

	if _, err = conn.Exec(ctx, fmt.Sprintf("create database if not exists %s", dbName), util.GetQidOwn()); err != nil {
		logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("create database %s", dbName), err)
	}
	gin.SetMode(gin.ReleaseMode)
	router = gin.New()
	reporter := NewReporter(conf)
	reporter.Init(router)

	var createList = []string{
		CreateClusterInfoSql,
		CreateDnodeSql,
		CreateMnodeSql,
		CreateDnodeInfoSql,
		CreateDataDirSql,
		CreateLogDirSql,
		CreateTempDirSql,
		CreateVgroupsInfoSql,
		CreateVnodeRoleSql,
		CreateSummarySql,
		CreateGrantInfoSql,
		CreateKeeperSql,
	}
	CreatTables(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl, conf.Metrics.Database.Name, createList)

	processor := process.NewProcessor(conf)
	node := NewNodeExporter(processor)
	node.Init(router)
	m.Run()
	if _, err = conn.Exec(ctx, fmt.Sprintf("drop database if exists %s", dbName), util.GetQidOwn()); err != nil {
		logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("drop database %s", dbName), err)
	}
}

func TestGetMetrics(t *testing.T) {
	w := httptest.NewRecorder()
	req, _ := http.NewRequest(http.MethodGet, "/metrics", nil)
	router.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
}

var now = time.Now()
var nowStr = now.Format(time.RFC3339Nano)

var report = Report{
	Ts:        nowStr,
	DnodeID:   1,
	DnodeEp:   "localhost:7100",
	ClusterID: "6980428120398645172",
	Protocol:  1,
	ClusterInfo: &ClusterInfo{
		FirstEp:          "localhost:7100",
		FirstEpDnodeID:   1,
		Version:          "3.0.0.0",
		MasterUptime:     2.3090276954462752e-05,
		MonitorInterval:  1,
		VgroupsTotal:     2,
		VgroupsAlive:     2,
		VnodesTotal:      2,
		VnodesAlive:      2,
		ConnectionsTotal: 1,
		Dnodes: []Dnode{
			{
				DnodeID: 1,
				DnodeEp: "localhost:7100",
				Status:  "ready",
			},
		},
		Mnodes: []Mnode{
			{
				MnodeID: 1,
				MnodeEp: "localhost:7100",
				Role:    "master",
			},
		},
	},
	VgroupInfos: []VgroupInfo{
		{
			VgroupID:     1,
			DatabaseName: "test",
			TablesNum:    1,
			Status:       "ready",
			Vnodes: []Vnode{
				{
					DnodeID:   1,
					VnodeRole: "LEADER",
				},
				{
					DnodeID:   2,
					VnodeRole: "FOLLOWER",
				},
			},
		},
	},
	GrantInfo: &GrantInfo{
		ExpireTime:      2147483647,
		TimeseriesUsed:  800,
		TimeseriesTotal: 2147483647,
	},
	DnodeInfo: DnodeInfo{
		Uptime:                0.000291412026854232,
		CPUEngine:             0.0828500414250207,
		CPUSystem:             0.4971002485501243,
		CPUCores:              12,
		MemEngine:             9268,
		MemSystem:             54279816,
		MemTotal:              65654816,
		DiskEngine:            0,
		DiskUsed:              39889702912,
		DiskTotal:             210304475136,
		NetIn:                 4727.45292368682,
		NetOut:                2194.251734390486,
		IoRead:                3789.8909811694753,
		IoWrite:               12311.19920713578,
		IoReadDisk:            0,
		IoWriteDisk:           12178.394449950447,
		ReqSelect:             2,
		ReqSelectRate:         0,
		ReqInsert:             6,
		ReqInsertSuccess:      4,
		ReqInsertRate:         0,
		ReqInsertBatch:        10,
		ReqInsertBatchSuccess: 8,
		ReqInsertBatchRate:    0,
		Errors:                2,
		VnodesNum:             2,
		Masters:               2,
		HasMnode:              1,
		HasQnode:              1,
		HasSnode:              1,
		HasBnode:              1,
	},
	DiskInfos: DiskInfo{
		Datadir: []DataDir{
			{
				Name:  "/root/TDengine/sim/dnode1/data",
				Level: 0,
				Avail: decimal.NewFromInt(171049893888),
				Used:  decimal.NewFromInt(39254581248),
				Total: decimal.NewFromInt(210304475136),
			},
			{
				Name:  "/root/TDengine/sim/dnode2/data",
				Level: 1,
				Avail: decimal.NewFromInt(171049893888),
				Used:  decimal.NewFromInt(39254581248),
				Total: decimal.NewFromInt(210304475136),
			},
		},
		Logdir: LogDir{
			Name:  "/root/TDengine/sim/dnode1/log",
			Avail: decimal.NewFromInt(171049771008),
			Used:  decimal.NewFromInt(39254704128),
			Total: decimal.NewFromInt(210304475136),
		},
		Tempdir: TempDir{
			Name:  "/tmp",
			Avail: decimal.NewFromInt(171049771008),
			Used:  decimal.NewFromInt(39254704128),
			Total: decimal.NewFromInt(210304475136),
		},
	},
	LogInfos: LogInfo{
		Summary: []Summary{
			{
				Level: "error",
				Total: 0,
			}, {
				Level: "info",
				Total: 114,
			}, {
				Level: "debug",
				Total: 117,
			}, {
				Level: "trace",
				Total: 126,
			},
		},
	},
}

func TestPutMetrics(t *testing.T) {
	w := httptest.NewRecorder()
	b, _ := json.Marshal(report)
	body := strings.NewReader(string(b))
	req, _ := http.NewRequest(http.MethodPost, "/report", body)
	router.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host,
		conf.TDengine.Port, dbName, conf.TDengine.Usessl)
	if err != nil {
		logger.Errorf("connect to database error, msg:%s", err)
		return
	}

	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn())
	}()

	ctx := context.Background()
	data, err := conn.Query(ctx, "select info from log_summary", util.GetQidOwn())
	if err != nil {
		logger.Errorf("execute sql:%s, error:%s", "select info from log_summary", err)
		t.Fatal(err)
	}
	for _, info := range data.Data {
		assert.Equal(t, int32(114), info[0])
	}

	var tenMinutesBefore = now.Add(-10 * time.Minute)
	var tenMinutesBeforeStr = tenMinutesBefore.Format(time.RFC3339Nano)

	conf.FromTime = tenMinutesBeforeStr
	conf.Transfer = "old_taosd_metric"

	var cmd = cmd.NewCommand(conf)
	cmd.Process(conf)

	type TableInfo struct {
		TsName string
		RowNum int
	}

	tables := map[string]*TableInfo{
		"taosd_cluster_basic":    {"ts", 1},
		"taosd_cluster_info":     {"_ts", 1},
		"taosd_vgroups_info":     {"_ts", 1},
		"taosd_dnodes_info":      {"_ts", 1},
		"taosd_dnodes_status":    {"_ts", 1},
		"taosd_dnodes_data_dirs": {"_ts", 1},
		"taosd_dnodes_log_dirs":  {"_ts", 2},
		"taosd_mnodes_info":      {"_ts", 1},
		"taosd_vnodes_info":      {"_ts", 1},
	}

	for table, tableInfo := range tables {
		data, err = conn.Query(ctx, fmt.Sprintf("select %s from %s", tableInfo.TsName, table), util.GetQidOwn())
		if err != nil {
			logger.Errorf("execute sql:%s, error:%s", "select * from "+table, err)
			t.Fatal(err)
		}

		assert.Equal(t, tableInfo.RowNum, len(data.Data))
		assert.Equal(t, now.UnixMilli(), data.Data[0][0].(time.Time).UnixMilli())
	}

	conf.Transfer = ""
	conf.Drop = "old_taosd_metric_stables"
	cmd.Process(conf)

	data, err = conn.Query(ctx, "select * from information_schema.ins_stables where stable_name = 'm_info'", util.GetQidOwn())
	if err != nil {
		logger.Errorf("check that stable m_info was dropped failed, error:%s", err)
		t.Fatal(err)
	}
	assert.Equal(t, 0, len(data.Data))
	logger.Infof("ALL OK !!!")
}

@ -0,0 +1,770 @@
package api

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/infrastructure/log"
	"github.com/taosdata/taoskeeper/util"
)

var re = regexp.MustCompile("'+")
var gmLogger = log.GetLogger("GEN")

var MAX_SQL_LEN = 1000000

var STABLE_NAME_KEY = "priv_stn"

type ColumnSeq struct {
	tagNames    []string
	metricNames []string
}

var (
	mu            sync.RWMutex
	gColumnSeqMap = make(map[string]ColumnSeq)
)

type GeneralMetric struct {
	client   *http.Client
	conn     *db.Connector
	username string
	password string
	host     string
	port     int
	usessl   bool
	database string
	url      *url.URL
}

type Tag struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}

type Metric struct {
	Name  string  `json:"name"`
	Value float64 `json:"value"`
}

type MetricGroup struct {
	Tags    []Tag    `json:"tags"`
	Metrics []Metric `json:"metrics"`
}

type StableInfo struct {
	Name         string        `json:"name"`
	MetricGroups []MetricGroup `json:"metric_groups"`
}

type StableArrayInfo struct {
	Ts       string       `json:"ts"`
	Protocol int          `json:"protocol"`
	Tables   []StableInfo `json:"tables"`
}

type ClusterBasic struct {
	ClusterId      string `json:"cluster_id"`
	Ts             string `json:"ts"`
	FirstEp        string `json:"first_ep"`
	FirstEpDnodeId int32  `json:"first_ep_dnode_id"`
	ClusterVersion string `json:"cluster_version"`
}

type SlowSqlDetailInfo struct {
	StartTs     string `json:"start_ts"`
	RequestId   string `json:"request_id"`
	QueryTime   int32  `json:"query_time"`
	Code        int32  `json:"code"`
	ErrorInfo   string `json:"error_info"`
	Type        int8   `json:"type"`
	RowsNum     int64  `json:"rows_num"`
	Sql         string `json:"sql"`
	ProcessName string `json:"process_name"`
	ProcessId   string `json:"process_id"`
	Db          string `json:"db"`
	User        string `json:"user"`
	Ip          string `json:"ip"`
	ClusterId   string `json:"cluster_id"`
}

func (gm *GeneralMetric) Init(c gin.IRouter) error {
	c.POST("/general-metric", gm.handleFunc())
	c.POST("/taosd-cluster-basic", gm.handleTaosdClusterBasic())
	c.POST("/slow-sql-detail-batch", gm.handleSlowSqlDetailBatch())

	conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
	if err != nil {
		gmLogger.Errorf("init db connect error, msg:%s", err)
		return err
	}
	gm.conn = conn

	err = gm.createSTables()
	if err != nil {
		gmLogger.Errorf("create stable error, msg:%s", err)
		return err
	}

	err = gm.initColumnSeqMap()
	if err != nil {
		gmLogger.Errorf("init gColumnSeqMap error, msg:%s", err)
		return err
	}

	return nil
}
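
// Summary note (editorial): Init wires the three HTTP routes, opens the
// database connection, ensures the super tables exist, and then primes the
// in-memory column-order cache (gColumnSeqMap) from the existing schema.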

func NewGeneralMetric(conf *config.Config) *GeneralMetric {

	client := &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
			DisableCompression:    true,
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}

	var protocol string
	if conf.TDengine.Usessl {
		protocol = "https"
	} else {
		protocol = "http"
	}

	imp := &GeneralMetric{
		client:   client,
		username: conf.TDengine.Username,
		password: conf.TDengine.Password,
		host:     conf.TDengine.Host,
		port:     conf.TDengine.Port,
		usessl:   conf.TDengine.Usessl,
		database: conf.Metrics.Database.Name,
		url: &url.URL{
			Scheme:   protocol,
			Host:     fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port),
			Path:     "/influxdb/v1/write",
			RawQuery: fmt.Sprintf("db=%s&precision=ms&table_name_key=%s", conf.Metrics.Database.Name, STABLE_NAME_KEY),
		},
	}
	return imp
}

func (gm *GeneralMetric) handleFunc() gin.HandlerFunc {
	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))

		gmLogger := gmLogger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)

		if gm.client == nil {
			gmLogger.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}

		data, err := c.GetRawData()
		if err != nil {
			gmLogger.Errorf("get general metric data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. %s", err)})
			return
		}

		var request []StableArrayInfo

		if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			gmLogger.Tracef("data:%s", string(data))
		}

		if err := json.Unmarshal(data, &request); err != nil {
			gmLogger.Errorf("parse general metric data error, data:%s, error:%s", string(data), err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)})
			return
		}

		if len(request) == 0 {
			c.JSON(http.StatusOK, gin.H{})
			return
		}

		err = gm.handleBatchMetrics(request, qid)

		if err != nil {
			gmLogger.Errorf("process records error. msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. %s", err)})
			return
		}

		c.JSON(http.StatusOK, gin.H{})
	}
}

func (gm *GeneralMetric) handleBatchMetrics(request []StableArrayInfo, qid uint64) error {
	var buf bytes.Buffer

	for _, stableArrayInfo := range request {
		if stableArrayInfo.Ts == "" {
			gmLogger.Error("ts data is empty")
			continue
		}

		for _, table := range stableArrayInfo.Tables {
			if table.Name == "" {
				gmLogger.Error("stable name is empty")
				continue
			}

			table.Name = strings.ToLower(table.Name)
			if _, ok := Load(table.Name); !ok {
				Init(table.Name)
			}

			for _, metricGroup := range table.MetricGroups {
				buf.WriteString(table.Name)
				writeTags(metricGroup.Tags, table.Name, &buf)
				buf.WriteString(" ")
				writeMetrics(metricGroup.Metrics, table.Name, &buf)
				buf.WriteString(" ")
				buf.WriteString(stableArrayInfo.Ts)
				buf.WriteString("\n")
			}
		}
	}

	if buf.Len() > 0 {
		return gm.lineWriteBody(&buf, qid)
	}
	return nil
}
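
// Shape of one emitted line (illustrative values, not from the original source):
//   taosd_cluster_info,cluster_id=123,priv_stn=cluster_123 dbs_total=1f64,master_uptime=0f64 1703226836761
// i.e. InfluxDB line protocol: measurement plus tags, a space, the f64-suffixed
// fields, a space, and the millisecond timestamp.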

func (gm *GeneralMetric) lineWriteBody(buf *bytes.Buffer, qid uint64) error {
	gmLogger := gmLogger.WithFields(
		logrus.Fields{config.ReqIDKey: qid},
	)

	header := map[string][]string{
		"Connection": {"keep-alive"},
	}
	req_data := buf.String()

	// build a new URL with qid appended, so the request can be traced end to end
	urlWithQid := *gm.url
	query := urlWithQid.Query()
	query.Set("qid", fmt.Sprintf("%d", qid))
	urlWithQid.RawQuery = query.Encode()

	req := &http.Request{
		Method:     http.MethodPost,
		URL:        &urlWithQid,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     header,
		Host:       gm.url.Host,
	}
	req.SetBasicAuth(gm.username, gm.password)

	req.Body = io.NopCloser(buf)

	startTime := time.Now()
	resp, err := gm.client.Do(req)

	endTime := time.Now()
	latency := endTime.Sub(startTime)

	if err != nil {
		// resp is nil when Do returns an error, so resp.StatusCode must not be logged here
		gmLogger.Errorf("latency:%v, req_data:%v, url:%s, err:%s", latency, req_data, urlWithQid.String(), err)
		return err
	}
	if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
		gmLogger.Tracef("latency:%v, req_data:%v, url:%s, resp:%d", latency, req_data, urlWithQid.String(), resp.StatusCode)
	}

	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body))
	}
	return nil
}
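
// Note (editorial): the code treats 204 No Content as the only success status
// from the /influxdb/v1/write endpoint, which is why any other code is turned
// into an error together with the response body.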

func (gm *GeneralMetric) handleTaosdClusterBasic() gin.HandlerFunc {
	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))

		gmLogger := gmLogger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)

		if gm.conn == nil {
			gmLogger.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}

		data, err := c.GetRawData()
		if err != nil {
			gmLogger.Errorf("get taosd cluster basic data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get taosd cluster basic data error. %s", err)})
			return
		}
		if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			gmLogger.Tracef("receive taosd cluster basic data:%s", string(data))
		}

		var request ClusterBasic

		if err := json.Unmarshal(data, &request); err != nil {
			gmLogger.Errorf("parse taosd cluster basic data error, data:%s, msg:%s", string(data), err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse taosd cluster basic data error: %s", err)})
			return
		}

		sql := fmt.Sprintf(
			"insert into %s.taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values (%s, '%s', %d, '%s') ",
			gm.database, request.ClusterId, request.ClusterId, request.Ts, request.FirstEp, request.FirstEpDnodeId, request.ClusterVersion)

		if _, err = gm.conn.Exec(context.Background(), sql, qid); err != nil {
			gmLogger.Errorf("insert taosd_cluster_basic error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taosd_cluster_basic error. %s", err)})
			return
		}
		c.JSON(http.StatusOK, gin.H{})
	}
}

func processString(input string) string {
	// remove digits at the beginning
	re := regexp.MustCompile(`^\d+`)
	input = re.ReplaceAllString(input, "")

	// replace "." with "_"
	input = strings.ReplaceAll(input, ".", "_")

	// remove special characters
	re = regexp.MustCompile(`[^a-zA-Z0-9_]`)
	input = re.ReplaceAllString(input, "")

	return input
}
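
// Example (illustrative input, not from the original source):
//   processString("1data.dir-x")  ->  "data_dirx"
// leading digits dropped, dots mapped to underscores, and everything
// outside [a-zA-Z0-9_] removed.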

func (gm *GeneralMetric) handleSlowSqlDetailBatch() gin.HandlerFunc {
	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))

		gmLogger := gmLogger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)

		if gm.conn == nil {
			gmLogger.Error("no connection")
			c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"})
			return
		}

		data, err := c.GetRawData()
		if err != nil {
			gmLogger.Errorf("get taos slow sql detail data error, msg:%s", err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get taos slow sql detail data error. %s", err)})
			return
		}
		if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			gmLogger.Tracef("receive taos slow sql detail data:%s", string(data))
		}

		var request []SlowSqlDetailInfo

		if err := json.Unmarshal(data, &request); err != nil {
			gmLogger.Errorf("parse taos slow sql detail error, data:%s, msg:%s", string(data), err)
			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse taos slow sql detail error: %s", err)})
			return
		}

		var sql_head = "INSERT INTO `taos_slow_sql_detail` (tbname, `db`, `user`, `ip`, `cluster_id`, `start_ts`, `request_id`, `query_time`, `code`, `error_info`, `type`, `rows_num`, `sql`, `process_name`, `process_id`) values "
		var buf bytes.Buffer
		buf.WriteString(sql_head)
		var qid_counter uint8 = 0
		for _, slowSqlDetailInfo := range request {
			if slowSqlDetailInfo.StartTs == "" {
				gmLogger.Error("start_ts data is empty")
				continue
			}

			// cut strings to their max column length
			slowSqlDetailInfo.Sql = re.ReplaceAllString(slowSqlDetailInfo.Sql, "'") // collapse runs of single quotes into one
			slowSqlDetailInfo.Sql = strings.ReplaceAll(slowSqlDetailInfo.Sql, "'", "''")
			slowSqlDetailInfo.Sql = util.SafeSubstring(slowSqlDetailInfo.Sql, 16384)
			slowSqlDetailInfo.ClusterId = util.SafeSubstring(slowSqlDetailInfo.ClusterId, 32)
			slowSqlDetailInfo.Db = util.SafeSubstring(slowSqlDetailInfo.Db, 1024)
			if slowSqlDetailInfo.Db == "" {
				slowSqlDetailInfo.Db = "unknown"
			}
			slowSqlDetailInfo.User = util.SafeSubstring(slowSqlDetailInfo.User, 32)
			slowSqlDetailInfo.Ip = util.SafeSubstring(slowSqlDetailInfo.Ip, 32)
			slowSqlDetailInfo.ProcessName = util.SafeSubstring(slowSqlDetailInfo.ProcessName, 32)
			slowSqlDetailInfo.ProcessId = util.SafeSubstring(slowSqlDetailInfo.ProcessId, 32)
			slowSqlDetailInfo.ErrorInfo = util.SafeSubstring(slowSqlDetailInfo.ErrorInfo, 128)

			// sub table name, max len 192
			var sub_table_name = slowSqlDetailInfo.User + "_" + util.SafeSubstring(slowSqlDetailInfo.Db, 80) + "_" + slowSqlDetailInfo.Ip + "_clusterId_" + slowSqlDetailInfo.ClusterId
			sub_table_name = strings.ToLower(processString(sub_table_name))

			var sql = fmt.Sprintf(
				"('%s', '%s', '%s', '%s', '%s', %s, %s, %d, %d, '%s', %d, %d, '%s', '%s', '%s') ",
				sub_table_name,
				slowSqlDetailInfo.Db, slowSqlDetailInfo.User, slowSqlDetailInfo.Ip, slowSqlDetailInfo.ClusterId, slowSqlDetailInfo.StartTs, slowSqlDetailInfo.RequestId,
				slowSqlDetailInfo.QueryTime, slowSqlDetailInfo.Code, slowSqlDetailInfo.ErrorInfo, slowSqlDetailInfo.Type, slowSqlDetailInfo.RowsNum, slowSqlDetailInfo.Sql,
				slowSqlDetailInfo.ProcessName, slowSqlDetailInfo.ProcessId)
			if (buf.Len() + len(sql)) < MAX_SQL_LEN {
				buf.WriteString(sql)
			} else {
				if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil {
					gmLogger.Errorf("insert taos_slow_sql_detail error, sql:%s, error:%s", buf.String(), err)
					c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. %s", err)})
					return
				}
				buf.Reset()
				buf.WriteString(sql_head)
				buf.WriteString(sql)
				qid_counter++
			}
		}

		if buf.Len() > len(sql_head) {
			if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil {
				gmLogger.Errorf("insert taos_slow_sql_detail error, data:%s, msg:%s", buf.String(), err)
				c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. %s", err)})
				return
			}
		}
		c.JSON(http.StatusOK, gin.H{})
	}
}
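
// Quoting note (editorial): re (a regexp matching runs of single quotes) first
// collapses any run of quotes in the incoming SQL to one quote, which is then
// doubled to '' so the text stays safe inside the quoted SQL literal.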

func writeTags(tags []Tag, stbName string, buf *bytes.Buffer) {
	var nameArray []string
	if columnSeq, ok := Load(stbName); ok {
		if len(columnSeq.tagNames) < len(tags) {
			// add column; only a schema change will hit this branch
			for _, tag := range tags {
				if !contains(columnSeq.tagNames, tag.Name) {
					columnSeq.tagNames = append(columnSeq.tagNames, tag.Name)
				}
			}
			Store(stbName, columnSeq)
		}
		nameArray = columnSeq.tagNames
	}

	// convert the Tag slice to a map
	tagMap := make(map[string]string)
	for _, tag := range tags {
		tagMap[tag.Name] = tag.Value
	}

	for _, name := range nameArray {
		if value, ok := tagMap[name]; ok {
			if value != "" {
				buf.WriteString(fmt.Sprintf(",%s=%s", name, util.EscapeInfluxProtocol(value)))
			} else {
				buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown"))
				gmLogger.Errorf("tag value is empty, tag name:%s", name)
			}
		} else {
			buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown"))
		}
	}

	// the payload already carries a sub table name
	if _, ok := tagMap[STABLE_NAME_KEY]; ok {
		return
	}

	subTableName := get_sub_table_name_valid(stbName, tagMap)
	if subTableName != "" {
		buf.WriteString(fmt.Sprintf(",%s=%s", STABLE_NAME_KEY, subTableName))
	} else {
		gmLogger.Errorf("get sub stable name error, stable name:%s, tag map:%v", stbName, tagMap)
	}
}

func checkKeysExist(data map[string]string, keys ...string) bool {
	for _, key := range keys {
		_, ok := data[key]
		if !ok {
			return false
		}
	}
	return true
}

func get_sub_table_name_valid(stbName string, tagMap map[string]string) string {
	stbName = get_sub_table_name(stbName, tagMap)
	return util.ToValidTableName(stbName)
}

func get_sub_table_name(stbName string, tagMap map[string]string) string {
	if strings.HasPrefix(stbName, "taosx") {
		switch stbName {
		case "taosx_sys":
			if checkKeysExist(tagMap, "taosx_id") {
				return fmt.Sprintf("sys_%s", tagMap["taosx_id"])
			}
		case "taosx_agent":
			if checkKeysExist(tagMap, "taosx_id", "agent_id") {
				return fmt.Sprintf("agent_%s_%s", tagMap["taosx_id"], tagMap["agent_id"])
			}
		case "taosx_connector":
			if checkKeysExist(tagMap, "taosx_id", "ds_name", "task_id") {
				return fmt.Sprintf("connector_%s_%s_%s", tagMap["taosx_id"], tagMap["ds_name"], tagMap["task_id"])
			}
		default:
			if strings.HasPrefix(stbName, "taosx_task_") {
				ds_name := stbName[len("taosx_task_"):]
				if checkKeysExist(tagMap, "taosx_id", "task_id") {
					return fmt.Sprintf("task_%s_%s_%s", tagMap["taosx_id"], ds_name, tagMap["task_id"])
				}
			}
			return ""
		}
	}

	switch stbName {
	case "taosd_cluster_info":
		if checkKeysExist(tagMap, "cluster_id") {
			return fmt.Sprintf("cluster_%s", tagMap["cluster_id"])
		}
	case "taosd_vgroups_info":
		if checkKeysExist(tagMap, "cluster_id", "vgroup_id", "database_name") {
			return fmt.Sprintf("vginfo_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["vgroup_id"], tagMap["cluster_id"])
		}
	case "taosd_dnodes_info":
		if checkKeysExist(tagMap, "cluster_id", "dnode_id") {
			return fmt.Sprintf("dinfo_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"])
		}
	case "taosd_dnodes_status":
		if checkKeysExist(tagMap, "cluster_id", "dnode_id") {
			return fmt.Sprintf("dstatus_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"])
		}
	case "taosd_dnodes_log_dirs":
		if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name") {
			subTableName := fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], tagMap["cluster_id"])
			if len(subTableName) <= util.MAX_TABLE_NAME_LEN {
				return subTableName
			}
			return fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"],
				util.GetMd5HexStr(tagMap["data_dir_name"]),
				tagMap["cluster_id"])
		}
	case "taosd_dnodes_data_dirs":
		if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name", "data_dir_level") {
			subTableName := fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], tagMap["data_dir_level"], tagMap["cluster_id"])
			if len(subTableName) <= util.MAX_TABLE_NAME_LEN {
				return subTableName
			}
			return fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"],
				util.GetMd5HexStr(tagMap["data_dir_name"]),
				tagMap["data_dir_level"],
				tagMap["cluster_id"])
		}
	case "taosd_mnodes_info":
		if checkKeysExist(tagMap, "cluster_id", "mnode_id") {
			return fmt.Sprintf("minfo_%s_cluster_%s", tagMap["mnode_id"], tagMap["cluster_id"])
		}
	case "taosd_vnodes_info":
		if checkKeysExist(tagMap, "cluster_id", "database_name", "vgroup_id", "dnode_id") {
			return fmt.Sprintf("vninfo_%s_dnode_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"])
		}
	case "taosd_sql_req":
		if checkKeysExist(tagMap, "username", "sql_type", "result", "dnode_id", "vgroup_id", "cluster_id") {
			return fmt.Sprintf("taosdsql_%s_%s_%s_%s_vgroup_%s_cluster_%s", tagMap["username"],
				tagMap["sql_type"], tagMap["result"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"])
		}
	case "taos_sql_req":
		if checkKeysExist(tagMap, "username", "sql_type", "result", "cluster_id") {
			return fmt.Sprintf("taossql_%s_%s_%s_cluster_%s", tagMap["username"],
				tagMap["sql_type"], tagMap["result"], tagMap["cluster_id"])
		}
	case "taos_slow_sql":
		if checkKeysExist(tagMap, "username", "duration", "result", "cluster_id") {
			return fmt.Sprintf("slowsql_%s_%s_%s_cluster_%s", tagMap["username"],
				tagMap["duration"], tagMap["result"], tagMap["cluster_id"])
		}

	default:
		return ""
	}
	return ""
}
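
// Example mapping (values taken from the unit tests below):
//   get_sub_table_name("taosd_vgroups_info",
//       map[string]string{"cluster_id": "123", "vgroup_id": "456", "database_name": "db"})
//   ->  "vginfo_db_vgroup_456_cluster_123"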

func contains(array []string, item string) bool {
	for _, value := range array {
		if value == item {
			return true
		}
	}
	return false
}

func writeMetrics(metrics []Metric, stbName string, buf *bytes.Buffer) {
	var nameArray []string
	if columnSeq, ok := Load(stbName); ok {
		if len(columnSeq.metricNames) < len(metrics) {
			// add column; only a schema change will hit this branch
			for _, metric := range metrics {
				if !contains(columnSeq.metricNames, metric.Name) {
					columnSeq.metricNames = append(columnSeq.metricNames, metric.Name)
				}
			}
			Store(stbName, columnSeq)
		}
		nameArray = columnSeq.metricNames
	}

	// convert the Metric slice to a map
	metricMap := make(map[string]float64)
	for _, metric := range metrics {
		metricMap[metric.Name] = metric.Value
	}

	for i, name := range nameArray {
		if value, ok := metricMap[name]; ok {
			buf.WriteString(fmt.Sprintf("%s=%sf64", name, strconv.FormatFloat(value, 'f', -1, 64)))
			if i != len(nameArray)-1 {
				buf.WriteString(",")
			}
		}
	}
}

// Store saves the column sequence for a super table.
func Store(key string, value ColumnSeq) {
	mu.Lock()
	defer mu.Unlock()
	gColumnSeqMap[key] = value
}

// Load reads the column sequence for a super table.
func Load(key string) (ColumnSeq, bool) {
	mu.RLock()
	defer mu.RUnlock()
	value, ok := gColumnSeqMap[key]
	return value, ok
}

// Init registers an empty column sequence for a single super table if absent.
func Init(key string) {
	mu.Lock()
	defer mu.Unlock()
	if _, ok := gColumnSeqMap[key]; !ok {
		columnSeq := ColumnSeq{
			tagNames:    []string{},
			metricNames: []string{},
		}
		gColumnSeqMap[key] = columnSeq
	}
}
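
// Design note (editorial inference): gColumnSeqMap is guarded by a
// sync.RWMutex so that the frequent reads (Load on every metric group) take
// only the cheap read lock, while the rare writes (Store on schema change)
// take the exclusive lock.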

// initColumnSeqMap initializes the column sequences for all monitored super tables.
func (gm *GeneralMetric) initColumnSeqMap() error {
	query := fmt.Sprintf(`
		select stable_name
		from information_schema.ins_stables
		where db_name = '%s'
		and (
			stable_name like 'taos_%%'
			or stable_name like 'taosd_%%'
			or stable_name like 'taosx_%%'
		)
		order by stable_name asc;
	`, gm.database)

	data, err := gm.conn.Query(context.Background(), query, util.GetQidOwn())

	if err != nil {
		return err
	}

	// get all stables, then init gColumnSeqMap
	for _, row := range data.Data {
		stableName := row[0].(string)
		Init(stableName)
	}
	// fill gColumnSeqMap with the columns described for each stable
	for tableName, columnSeq := range gColumnSeqMap {
		data, err := gm.conn.Query(context.Background(), fmt.Sprintf(`desc %s.%s;`, gm.database, tableName), util.GetQidOwn())

		if err != nil {
			return err
		}

		if len(data.Data) < 1 || len(data.Data[0]) < 4 {
			return fmt.Errorf("desc %s.%s error", gm.database, tableName)
		}

		for i, row := range data.Data {
			if i == 0 {
				continue
			}

			if row[3].(string) == "TAG" {
				columnSeq.tagNames = append(columnSeq.tagNames, row[0].(string))
			} else {
				columnSeq.metricNames = append(columnSeq.metricNames, row[0].(string))
			}
		}
		Store(tableName, columnSeq)
	}

	gmLogger.Infof("gColumnSeqMap:%v", gColumnSeqMap)
	return nil
}

func (gm *GeneralMetric) createSTables() error {
	var createTableSql = "create stable if not exists taosd_cluster_basic " +
		"(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " +
		"tags (cluster_id varchar(50))"

	if gm.conn == nil {
		return errNoConnection
	}
	_, err := gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn())
	if err != nil {
		return err
	}

	createTableSql = "create stable if not exists taos_slow_sql_detail" +
		" (start_ts TIMESTAMP, request_id BIGINT UNSIGNED PRIMARY KEY, query_time INT, code INT, error_info varchar(128), " +
		"type TINYINT, rows_num BIGINT, sql varchar(16384), process_name varchar(32), process_id varchar(32)) " +
		"tags (db varchar(1024), `user` varchar(32), ip varchar(32), cluster_id varchar(32))"

	_, err = gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn())
	return err
}

@ -0,0 +1,358 @@
package api

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/util"
)

var router_inited bool = false

func TestClusterBasic(t *testing.T) {
	cfg := util.GetCfg()

	CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options)

	gm := NewGeneralMetric(cfg)
	if !router_inited {
		err := gm.Init(router)
		assert.NoError(t, err)
		router_inited = true
	}

	testcfg := struct {
		name   string
		ts     int64
		tbname string
		data   string
		expect string
	}{
		name:   "1",
		tbname: "taosd_cluster_basic",
		ts:     1705655770381,
		data:   `{"ts":"1705655770381","cluster_id":"7648966395564416484","protocol":2,"first_ep":"ssfood06:6130","first_ep_dnode_id":1,"cluster_version":"3.2.1.0.alp"}`,
		expect: "7648966395564416484",
	}

	conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn())
	}()

	t.Run(testcfg.name, func(t *testing.T) {
		w := httptest.NewRecorder()
		body := strings.NewReader(testcfg.data)
		req, _ := http.NewRequest(http.MethodPost, "/taosd-cluster-basic", body)
		router.ServeHTTP(w, req)
		assert.Equal(t, 200, w.Code)

		data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, cluster_id from %s.%s where ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn())
		assert.NoError(t, err)
		assert.Equal(t, 1, len(data.Data))
		assert.Equal(t, testcfg.expect, data.Data[0][1])
	})

	testcfg = struct {
		name   string
		ts     int64
		tbname string
		data   string
		expect string
	}{
		name:   "1",
		tbname: "taos_slow_sql_detail",
		ts:     1703226836762,
		data: `[{
			"start_ts": "1703226836762",
			"request_id": "1",
			"query_time": 100,
			"code": 0,
			"error_info": "",
			"type": 1,
			"rows_num": 5,
			"sql": "select * from abc;",
			"process_name": "abc",
			"process_id": "123",
			"db": "dbname",
			"user": "root",
			"ip": "127.0.0.1",
			"cluster_id": "1234567"
		},
		{
			"start_ts": "1703226836763",
			"request_id": "2",
			"query_time": 100,
			"code": 0,
			"error_info": "",
			"type": 1,
			"rows_num": 5,
			"sql": "insert into abc ('a', 'b') values ('aaa', 'bbb');",
			"process_name": "abc",
			"process_id": "123",
			"db": "dbname",
			"user": "root",
			"ip": "127.0.0.1",
			"cluster_id": "1234567"
		}]`,
		expect: "1234567",
	}

	conn, err = db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn())
	}()

	t.Run(testcfg.name, func(t *testing.T) {
		MAX_SQL_LEN = 1000000
		w := httptest.NewRecorder()
		body := strings.NewReader(testcfg.data)
		req, _ := http.NewRequest(http.MethodPost, "/slow-sql-detail-batch", body)
		router.ServeHTTP(w, req)
		assert.Equal(t, 200, w.Code)

		data, err := conn.Query(context.Background(), fmt.Sprintf("select start_ts, cluster_id from %s.%s where start_ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn())
		assert.NoError(t, err)
		assert.Equal(t, 1, len(data.Data))
		assert.Equal(t, testcfg.expect, data.Data[0][1])
	})
}

func TestGenMetric(t *testing.T) {
	cfg := util.GetCfg()

	CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options)

	gm := NewGeneralMetric(cfg)
	if !router_inited {
		err := gm.Init(router)
		assert.NoError(t, err)
		router_inited = true
	}

	testcfg := struct {
		name   string
		ts     []int64
		tbname []string
		data   string
		expect string
	}{
		name:   "1",
		tbname: []string{"taosd_cluster_info", "taosd_dnodes_info"},
		ts:     []int64{1703226836761, 1703226836762},
		data: `[{
			"ts": "1703226836761",
			"protocol": 2,
			"tables": [{
				"name": "taosd_cluster_info",
				"metric_groups": [{
					"tags": [{
						"name": "cluster_id",
						"value": "1397715317673023180"
					}],
					"metrics": [{
						"name": "dbs_total",
						"value": 1
					}, {
						"name": "master_uptime",
						"value": 0
					}]
				}]
			}, {
				"name": "taosd_dnodes_info",
				"metric_groups": [{
					"tags": [{
						"name": "cluster_id",
						"value": "1397715317673023180"
					}, {
						"name": "dnode_id",
						"value": "1"
					}, {
						"name": "dnode_ep",
						"value": "ssfood06:6130"
					}],
					"metrics": [{
						"name": "uptime",
						"value": 0
					}, {
						"name": "cpu_engine",
						"value": 0
					}]
				}]
			}]
		}, {
			"ts": "1703226836762",
			"protocol": 2,
			"tables": [{
				"name": "taosd_cluster_info",
				"metric_groups": [{
					"tags": [{
						"name": "cluster_id",
						"value": "1397715317673023180"
					}],
					"metrics": [{
						"name": "dbs_total",
						"value": 1
					}, {
						"name": "master_uptime",
						"value": 0
					}]
				}]
			}, {
				"name": "taosd_dnodes_info",
				"metric_groups": [{
					"tags": [{
						"name": "cluster_id",
						"value": "1397715317673023180"
					}, {
						"name": "dnode_id",
						"value": "1"
					}, {
						"name": "dnode_ep",
						"value": ", =\"ssfood06:6130"
					}],
					"metrics": [{
						"name": "uptime",
						"value": 0
					}, {
						"name": "cpu_engine",
						"value": 0
					}]
				}]
			}]
		}]`,
		expect: "1397715317673023180",
	}

	conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn())
	}()

	t.Run(testcfg.name, func(t *testing.T) {
		w := httptest.NewRecorder()
		body := strings.NewReader(testcfg.data)
		req, _ := http.NewRequest(http.MethodPost, "/general-metric", body)
		router.ServeHTTP(w, req)
		assert.Equal(t, 200, w.Code)

		for _, tbname := range testcfg.tbname {
			for _, ts := range testcfg.ts {
				data, err := conn.Query(context.Background(), fmt.Sprintf("select _ts, cluster_id from %s.%s where _ts=%d", gm.database, tbname, ts), util.GetQidOwn())
				assert.NoError(t, err)
				assert.Equal(t, 1, len(data.Data))
				assert.Equal(t, testcfg.expect, data.Data[0][1])
			}
		}
	})
}

func TestGetSubTableName(t *testing.T) {
	tests := []struct {
		stbName string
		tagMap  map[string]string
		want    string
	}{
		{
			stbName: "taosx_sys",
			tagMap:  map[string]string{"taosx_id": "123"},
			want:    "sys_123",
		},
		{
			stbName: "taosx_agent",
			tagMap:  map[string]string{"taosx_id": "123", "agent_id": "456"},
			want:    "agent_123_456",
		},
		{
			stbName: "taosx_connector",
			tagMap:  map[string]string{"taosx_id": "123", "ds_name": "ds", "task_id": "789"},
			want:    "connector_123_ds_789",
		},
		{
			stbName: "taosx_task_example",
			tagMap:  map[string]string{"taosx_id": "123", "task_id": "789"},
			want:    "task_123_example_789",
		},
		{
			stbName: "taosd_cluster_info",
			tagMap:  map[string]string{"cluster_id": "123"},
			want:    "cluster_123",
		},
		{
			stbName: "taosd_vgroups_info",
			tagMap:  map[string]string{"cluster_id": "123", "vgroup_id": "456", "database_name": "db"},
			want:    "vginfo_db_vgroup_456_cluster_123",
		},
		{
			stbName: "taosd_dnodes_info",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123"},
			want:    "dinfo_123_cluster_123",
		},
		{
			stbName: "taosd_dnodes_status",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123"},
			want:    "dstatus_123_cluster_123",
		},
		{
			stbName: "taosd_dnodes_log_dirs",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "log"},
			want:    "dlog_123_log_cluster_123",
		},
		{
			stbName: "taosd_dnodes_log_dirs",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "loglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglog"},
			want:    "dlog_123_9cdc719961a632a27603cd5ed9f1aee2_cluster_123",
		},
		{
			stbName: "taosd_dnodes_data_dirs",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "data", "data_dir_level": "5"},
			want:    "ddata_123_data_level_5_cluster_123",
		},
		{
			stbName: "taosd_dnodes_data_dirs",
			tagMap:  map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "datadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadata", "data_dir_level": "5"},
			want:    "ddata_123_03bf8dffdf6b97e08f347c6ae795998b_level_5_cluster_123",
		},
		{
			stbName: "taosd_mnodes_info",
			tagMap:  map[string]string{"cluster_id": "123", "mnode_id": "12"},
			want:    "minfo_12_cluster_123",
		},
		{
			stbName: "taosd_vnodes_info",
			tagMap:  map[string]string{"cluster_id": "123", "database_name": "db", "vgroup_id": "456", "dnode_id": "789"},
			want:    "vninfo_db_dnode_789_vgroup_456_cluster_123",
		},
		{
			stbName: "taosd_sql_req",
			tagMap:  map[string]string{"username": "user", "sql_type": "select", "result": "success", "dnode_id": "123", "vgroup_id": "456", "cluster_id": "123"},
			want:    "taosdsql_user_select_success_123_vgroup_456_cluster_123",
		},
		{
			stbName: "taos_sql_req",
			tagMap:  map[string]string{"username": "user", "sql_type": "select", "result": "success", "cluster_id": "123"},
			want:    "taossql_user_select_success_cluster_123",
		},
		{
			stbName: "taos_slow_sql",
			tagMap:  map[string]string{"username": "user", "duration": "100ms", "result": "success", "cluster_id": "123"},
			want:    "slowsql_user_100ms_success_cluster_123",
		},
	}

	for _, tt := range tests {
		t.Run(tt.stbName, func(t *testing.T) {
			if got := get_sub_table_name_valid(tt.stbName, tt.tagMap); got != tt.want {
				t.Errorf("get_sub_table_name_valid() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
|
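The two long-name cases above expect a 32-character hex digest in place of the raw data_dir_name, which suggests the sub-table name generator falls back to an MD5 digest when a tag value would push the table name past TDengine's length limit. A minimal, self-contained sketch of that idea (the helper name and the 190-byte cutoff are assumptions for illustration, not the actual taoskeeper code):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// shortenTagValue is a hypothetical helper: values longer than max are
// replaced by their 32-character MD5 hex digest, matching the shape of the
// "dlog_123_9cdc..." and "ddata_123_03bf..." expectations above.
func shortenTagValue(v string, max int) string {
	if len(v) <= max {
		return v
	}
	sum := md5.Sum([]byte(v))
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(shortenTagValue("log", 190))                          // short values pass through unchanged
	fmt.Println(len(shortenTagValue(string(make([]byte, 500)), 190))) // 32
}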
@ -0,0 +1,127 @@
package api

import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	crand "crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"log"
	"math/big"
	"net/http"
	"net/http/httputil"
	"net/url"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/util"
)

func TestHttps(t *testing.T) {
	server := startProxy()
	defer server.Shutdown(context.Background())

	cfg := util.GetCfg()
	cfg.TDengine.Usessl = true
	cfg.TDengine.Port = 34443

	CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options)

	conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Metrics.Database.Name, cfg.TDengine.Usessl)
	assert.NoError(t, err)
	defer func() {
		_, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Metrics.Database.Name), util.GetQidOwn())
	}()

	data, err := conn.Query(context.Background(), "select server_version()", util.GetQidOwn())
	assert.NoError(t, err)
	assert.Equal(t, 1, len(data.Data))
}

func generateSelfSignedCert() (tls.Certificate, error) {
	priv, err := ecdsa.GenerateKey(elliptic.P384(), crand.Reader)
	if err != nil {
		return tls.Certificate{}, err
	}

	notBefore := time.Now()
	notAfter := notBefore.Add(365 * 24 * time.Hour)

	serialNumber, err := crand.Int(crand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	if err != nil {
		return tls.Certificate{}, err
	}

	template := x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			Organization: []string{"Your Company"},
		},
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}

	certDER, err := x509.CreateCertificate(crand.Reader, &template, &template, &priv.PublicKey, priv)
	if err != nil {
		return tls.Certificate{}, err
	}

	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
	keyPEM, err := x509.MarshalECPrivateKey(priv)
	if err != nil {
		return tls.Certificate{}, err
	}

	keyPEMBlock := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyPEM})

	return tls.X509KeyPair(certPEM, keyPEMBlock)
}

func startProxy() *http.Server {
	// Generate self-signed certificate
	cert, err := generateSelfSignedCert()
	if err != nil {
		log.Fatalf("Failed to generate self-signed certificate: %v", err)
	}

	target := "http://127.0.0.1:6041"
	proxyURL, err := url.Parse(target)
	if err != nil {
		log.Fatalf("Failed to parse target URL: %v", err)
	}

	proxy := httputil.NewSingleHostReverseProxy(proxyURL)
	proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, e error) {
		http.Error(w, "Proxy error", http.StatusBadGateway)
	}
	mux := http.NewServeMux()
	mux.Handle("/", proxy)

	server := &http.Server{
		Addr:      ":34443",
		Handler:   mux,
		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		// Setup server timeouts for better handling of idle connections and slowloris attacks
		WriteTimeout: 10 * time.Second,
		ReadTimeout:  10 * time.Second,
		IdleTimeout:  30 * time.Second,
	}

	log.Println("Starting server on :34443")
	go func() {
		err = server.ListenAndServeTLS("", "")
		if err != nil && err != http.ErrServerClosed {
			log.Fatalf("Failed to start HTTPS server: %v", err)
		}
	}()
	return server
}
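Since startProxy serves a self-signed certificate, any HTTP client pointed at it must skip certificate-chain verification; presumably the connector does this internally when Usessl is set. A standalone sketch of such a test-only client (the /rest/sql path and the direct http.Client are illustrative assumptions):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// Test-only client: accept the proxy's self-signed certificate.
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get("https://127.0.0.1:34443/rest/sql")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}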
@ -0,0 +1,32 @@
package api

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/taosdata/taoskeeper/process"
)

type NodeExporter struct {
	processor *process.Processor
}

func NewNodeExporter(processor *process.Processor) *NodeExporter {
	return &NodeExporter{processor: processor}
}

func (z *NodeExporter) Init(c gin.IRouter) {
	reg := prometheus.NewPedanticRegistry()
	reg.MustRegister(z.processor)
	c.GET("metrics", z.myMiddleware(promhttp.HandlerFor(reg, promhttp.HandlerOpts{})))
}

func (z *NodeExporter) myMiddleware(next http.Handler) gin.HandlerFunc {
	return func(c *gin.Context) {
		// refresh the processor's metrics before each scrape
		z.processor.Process()
		// call Prometheus handler
		next.ServeHTTP(c.Writer, c.Request)
	}
}
@ -0,0 +1,478 @@
package api

import (
	"bytes"
	"context"
	"fmt"
	"strconv"
	"strings"
	"sync/atomic"

	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"
	"github.com/taosdata/go-utils/json"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/infrastructure/log"
	"github.com/taosdata/taoskeeper/util"
)

var logger = log.GetLogger("REP")

var createList = []string{
	// CreateClusterInfoSql,
	// CreateDnodeSql,
	// CreateMnodeSql,
	// CreateDnodeInfoSql,
	// CreateDataDirSql,
	// CreateLogDirSql,
	// CreateTempDirSql,
	// CreateVgroupsInfoSql,
	// CreateVnodeRoleSql,
	// CreateSummarySql,
	// CreateGrantInfoSql,
	CreateKeeperSql,
}

type Reporter struct {
	username        string
	password        string
	host            string
	port            int
	usessl          bool
	dbname          string
	databaseOptions map[string]interface{}
	totalRep        atomic.Value
}

func NewReporter(conf *config.Config) *Reporter {
	r := &Reporter{
		username:        conf.TDengine.Username,
		password:        conf.TDengine.Password,
		host:            conf.TDengine.Host,
		port:            conf.TDengine.Port,
		usessl:          conf.TDengine.Usessl,
		dbname:          conf.Metrics.Database.Name,
		databaseOptions: conf.Metrics.Database.Options,
	}
	r.totalRep.Store(0)
	return r
}

func (r *Reporter) Init(c gin.IRouter) {
	c.POST("report", r.handlerFunc())
	r.createDatabase()
	r.createTables()
	// todo: this can be deleted in the future.
	if r.shouldDetectFields() {
		r.detectGrantInfoFieldType()
		r.detectClusterInfoFieldType()
		r.detectVgroupsInfoType()
	}
}

func (r *Reporter) getConn() *db.Connector {
	conn, err := db.NewConnector(r.username, r.password, r.host, r.port, r.usessl)
	if err != nil {
		qid := util.GetQidOwn()

		logger := logger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)
		logger.Errorf("connect to database error, msg:%s", err)
		panic(err)
	}
	return conn
}

func (r *Reporter) detectGrantInfoFieldType() {
	// `expire_time`, `timeseries_used` and `timeseries_total` in table `grants_info` changed to bigint since TS-3003.
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)

	r.detectFieldType(ctx, conn, "grants_info", "expire_time", "bigint")
	r.detectFieldType(ctx, conn, "grants_info", "timeseries_used", "bigint")
	r.detectFieldType(ctx, conn, "grants_info", "timeseries_total", "bigint")
	if r.tagExist(ctx, conn, "grants_info", "dnode_id") {
		r.dropTag(ctx, conn, "grants_info", "dnode_id")
	}
	if r.tagExist(ctx, conn, "grants_info", "dnode_ep") {
		r.dropTag(ctx, conn, "grants_info", "dnode_ep")
	}
}

func (r *Reporter) detectClusterInfoFieldType() {
	// `tbs_total` in table `cluster_info` changed to bigint since TS-3003.
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)

	r.detectFieldType(ctx, conn, "cluster_info", "tbs_total", "bigint")

	// add column `topics_total` and `streams_total` from TD-22032
	// if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "topics_total"); !exists {
	// 	logger.Warningf("## %s.cluster_info.topics_total not exists, will add it", r.dbname)
	// 	r.addColumn(ctx, conn, "cluster_info", "topics_total", "int")
	// }
	// if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "streams_total"); !exists {
	// 	logger.Warningf("## %s.cluster_info.streams_total not exists, will add it", r.dbname)
	// 	r.addColumn(ctx, conn, "cluster_info", "streams_total", "int")
	// }
}

func (r *Reporter) detectVgroupsInfoType() {
	// `tables_num` in table `vgroups_info` changed to bigint since TS-3003.
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)

	r.detectFieldType(ctx, conn, "vgroups_info", "tables_num", "bigint")
}

func (r *Reporter) detectFieldType(ctx context.Context, conn *db.Connector, table, field, fieldType string) {
	_, colType := r.columnInfo(ctx, conn, table, field)
	if colType == "INT" {
		logger.Warningf("%s.%s.%s type is %s, will change to %s", r.dbname, table, field, colType, fieldType)
		// drop the old int column
		r.dropColumn(ctx, conn, table, field)

		// recreate it with the wider type
		r.addColumn(ctx, conn, table, field, fieldType)
	}
}

func (r *Reporter) shouldDetectFields() bool {
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)

	version, err := r.serverVersion(ctx, conn)
	if err != nil {
		logger.Errorf("get server version error:%s", err)
		return false
	}

	// if the server version is less than v3.0.3.0, fields should not be detected.
	versions := strings.Split(version, ".")
	if len(versions) < 4 {
		logger.Errorf("get server version error. version:%s", version)
		return false
	}

	v1, _ := strconv.Atoi(versions[0])
	v2, _ := strconv.Atoi(versions[1])
	v3, _ := strconv.Atoi(versions[2])

	if v1 > 3 || (v1 == 3 && (v2 > 0 || v3 >= 3)) {
		return true
	}

	return false
}

func (r *Reporter) serverVersion(ctx context.Context, conn *db.Connector) (version string, err error) {
	res, err := conn.Query(ctx, "select server_version()", util.GetQidOwn())
	if err != nil {
		logger.Errorf("get server version error, msg:%s", err)
		return
	}

	if len(res.Data) == 0 {
		logger.Errorf("get server version error. response:%+v", res)
		return
	}

	if len(res.Data) != 1 || len(res.Data[0]) != 1 {
		logger.Errorf("get server version error. response:%+v", res)
		return
	}

	version = res.Data[0][0].(string)

	return
}

func (r *Reporter) columnInfo(ctx context.Context, conn *db.Connector, table string, field string) (exists bool, colType string) {
	res, err := conn.Query(ctx, fmt.Sprintf("select col_type from information_schema.ins_columns where table_name='%s' and db_name='%s' and col_name='%s'", table, r.dbname, field), util.GetQidOwn())
	if err != nil {
		logger.Errorf("get %s field type error, msg:%s", r.dbname, err)
		panic(err)
	}

	if len(res.Data) == 0 {
		return
	}

	if len(res.Data) != 1 || len(res.Data[0]) != 1 {
		logger.Errorf("get field type for %s error. response:%+v", table, res)
		panic(fmt.Sprintf("get field type for %s error. response:%+v", table, res))
	}

	exists = true
	colType = res.Data[0][0].(string)
	colType = strings.ToUpper(colType)
	return
}

func (r *Reporter) tagExist(ctx context.Context, conn *db.Connector, stable string, tag string) (exists bool) {
	res, err := conn.Query(ctx, fmt.Sprintf("select tag_name from information_schema.ins_tags where stable_name='%s' and db_name='%s' and tag_name='%s'", stable, r.dbname, tag), util.GetQidOwn())
	if err != nil {
		logger.Errorf("get %s tag_name error, msg:%s", r.dbname, err)
		panic(err)
	}

	if len(res.Data) == 0 {
		exists = false
		return
	}

	if len(res.Data) != 1 || len(res.Data[0]) != 1 {
		logger.Errorf("get tag_name for %s error. response:%+v", stable, res)
		panic(fmt.Sprintf("get tag_name for %s error. response:%+v", stable, res))
	}

	exists = true
	return
}

func (r *Reporter) dropColumn(ctx context.Context, conn *db.Connector, table string, field string) {
	if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s drop column %s", r.dbname, table, field), util.GetQidOwn()); err != nil {
		logger.Errorf("drop column %s from table %s error, msg:%s", field, table, err)
		panic(err)
	}
}

func (r *Reporter) dropTag(ctx context.Context, conn *db.Connector, stable string, tag string) {
	if _, err := conn.Exec(ctx, fmt.Sprintf("alter stable %s.%s drop tag %s", r.dbname, stable, tag), util.GetQidOwn()); err != nil {
		logger.Errorf("drop tag %s from stable %s error, msg:%s", tag, stable, err)
		panic(err)
	}
}

func (r *Reporter) addColumn(ctx context.Context, conn *db.Connector, table string, field string, fieldType string) {
	if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s add column %s %s", r.dbname, table, field, fieldType), util.GetQidOwn()); err != nil {
		logger.Errorf("add column %s to table %s error, msg:%s", field, table, err)
		panic(err)
	}
}

func (r *Reporter) createDatabase() {
	ctx := context.Background()
	conn := r.getConn()
	defer r.closeConn(conn)

	createDBSql := r.generateCreateDBSql()
	logger.Warningf("create database sql: %s", createDBSql)

	if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil {
		logger.Errorf("create database %s error, msg:%v", r.dbname, err)
		panic(err)
	}
}

func (r *Reporter) generateCreateDBSql() string {
	var buf bytes.Buffer
	buf.WriteString("create database if not exists ")
	buf.WriteString(r.dbname)

	for k, v := range r.databaseOptions {
		buf.WriteString(" ")
		buf.WriteString(k)
		switch v := v.(type) {
		case string:
			buf.WriteString(fmt.Sprintf(" '%s'", v))
		default:
			buf.WriteString(fmt.Sprintf(" %v", v))
		}
		buf.WriteString(" ")
	}
	return buf.String()
}

func (r *Reporter) createTables() {
	ctx := context.Background()
	conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl)
	if err != nil {
		logger.Errorf("connect to database error, msg:%s", err)
		return
	}
	defer r.closeConn(conn)

	for _, createSql := range createList {
		logger.Infof("execute sql:%s", createSql)
		if _, err = conn.Exec(ctx, createSql, util.GetQidOwn()); err != nil {
			logger.Errorf("execute sql:%s, error:%s", createSql, err)
		}
	}
}

func (r *Reporter) closeConn(conn *db.Connector) {
	if err := conn.Close(); err != nil {
		logger.Errorf("close connection error, msg:%s", err)
	}
}

func (r *Reporter) handlerFunc() gin.HandlerFunc {
	return func(c *gin.Context) {
		qid := util.GetQid(c.GetHeader("X-QID"))

		logger := logger.WithFields(
			logrus.Fields{config.ReqIDKey: qid},
		)

		r.recordTotalRep()
		// data parse
		data, err := c.GetRawData()
		if err != nil {
			logger.Errorf("receiving taosd data error, msg:%s", err)
			return
		}
		var report Report

		logger.Tracef("report data:%s", string(data))
		if e := json.Unmarshal(data, &report); e != nil {
			logger.Errorf("error occurred while unmarshal request, data:%s, error:%s", data, e)
			return
		}
		var sqls []string
		if report.ClusterInfo != nil {
			sqls = append(sqls, insertClusterInfoSql(*report.ClusterInfo, report.ClusterID, report.Protocol, report.Ts)...)
		}
		sqls = append(sqls, insertDnodeSql(report.DnodeInfo, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts))
		if report.GrantInfo != nil {
			sqls = append(sqls, insertGrantSql(*report.GrantInfo, report.DnodeID, report.ClusterID, report.Ts))
		}
		sqls = append(sqls, insertDataDirSql(report.DiskInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...)
		for _, group := range report.VgroupInfos {
			sqls = append(sqls, insertVgroupSql(group, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...)
		}
		sqls = append(sqls, insertLogSummary(report.LogInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts))

		conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl)
		if err != nil {
			logger.Errorf("connect to database error, msg:%s", err)
			return
		}
		defer r.closeConn(conn)
		ctx := context.Background()

		for _, sql := range sqls {
			logger.Tracef("execute sql:%s", sql)
			if _, err := conn.Exec(ctx, sql, util.GetQidOwn()); err != nil {
				logger.Errorf("execute sql error, sql:%s, error:%s", sql, err)
			}
		}
	}
}

func (r *Reporter) recordTotalRep() {
	// retry the compare-and-swap a few times, reloading the current value each round
	for i := 0; i < 3; i++ {
		old := r.totalRep.Load().(int)
		if r.totalRep.CompareAndSwap(old, old+1) {
			break
		}
	}
}

func (r *Reporter) GetTotalRep() *atomic.Value {
	return &r.totalRep
}

func insertClusterInfoSql(info ClusterInfo, ClusterID string, protocol int, ts string) []string {
	var sqls []string
	var dtotal, dalive, mtotal, malive int
	for _, dnode := range info.Dnodes {
		sqls = append(sqls, fmt.Sprintf("insert into d_info_%s using d_info tags (%d, '%s', '%s') values ('%s', '%s')",
			ClusterID+strconv.Itoa(dnode.DnodeID), dnode.DnodeID, dnode.DnodeEp, ClusterID, ts, dnode.Status))
		dtotal++
		if dnode.Status == "ready" {
			dalive++
		}
	}

	for _, mnode := range info.Mnodes {
		sqls = append(sqls, fmt.Sprintf("insert into m_info_%s using m_info tags (%d, '%s', '%s') values ('%s', '%s')",
			ClusterID+strconv.Itoa(mnode.MnodeID), mnode.MnodeID, mnode.MnodeEp, ClusterID, ts, mnode.Role))
		mtotal++
		// possible roles: LEADER FOLLOWER CANDIDATE ERROR
		if mnode.Role != "ERROR" {
			malive++
		}
	}

	sqls = append(sqls, fmt.Sprintf(
		"insert into cluster_info_%s using cluster_info tags('%s') (ts, first_ep, first_ep_dnode_id, version, "+
			"master_uptime, monitor_interval, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, "+
			"mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, "+
			"topics_total, streams_total, protocol) values ('%s', '%s', %d, '%s', %f, %d, %d, %d, %d, %d, %d, %d, %d, "+
			"%d, %d, %d, %d, %d, %d, %d, %d)",
		ClusterID, ClusterID, ts, info.FirstEp, info.FirstEpDnodeID, info.Version, info.MasterUptime, info.MonitorInterval,
		info.DbsTotal, info.TbsTotal, info.StbsTotal, dtotal, dalive, mtotal, malive, info.VgroupsTotal, info.VgroupsAlive,
		info.VnodesTotal, info.VnodesAlive, info.ConnectionsTotal, info.TopicsTotal, info.StreamsTotal, protocol))
	return sqls
}

func insertDnodeSql(info DnodeInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string {
	return fmt.Sprintf("insert into dnode_info_%s using dnodes_info tags (%d, '%s', '%s') values ('%s', %f, %f, %f, %f, %d, %d, %d, %d, %d, %d, %f, %f, %f, %f, %f, %f, %d, %f, %d, %d, %f, %d, %d, %f, %d, %d, %d, %d, %d, %d, %d)",
		ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID,
		ts, info.Uptime, info.CPUEngine, info.CPUSystem, info.CPUCores, info.MemEngine, info.MemSystem, info.MemTotal,
		info.DiskEngine, info.DiskUsed, info.DiskTotal, info.NetIn, info.NetOut, info.IoRead, info.IoWrite,
		info.IoReadDisk, info.IoWriteDisk, info.ReqSelect, info.ReqSelectRate, info.ReqInsert, info.ReqInsertSuccess,
		info.ReqInsertRate, info.ReqInsertBatch, info.ReqInsertBatchSuccess, info.ReqInsertBatchRate, info.Errors,
		info.VnodesNum, info.Masters, info.HasMnode, info.HasQnode, info.HasSnode, info.HasBnode)
}

func insertDataDirSql(disk DiskInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string {
	var sqls []string
	for _, data := range disk.Datadir {
		sqls = append(sqls,
			fmt.Sprintf("insert into data_dir_%s using data_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d, %d)",
				ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID,
				ts, data.Name, data.Level, data.Avail.IntPart(), data.Used.IntPart(), data.Total.IntPart()),
		)
	}
	sqls = append(sqls,
		fmt.Sprintf("insert into log_dir_%s using log_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)",
			ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID,
			ts, disk.Logdir.Name, disk.Logdir.Avail.IntPart(), disk.Logdir.Used.IntPart(), disk.Logdir.Total.IntPart()),
		fmt.Sprintf("insert into temp_dir_%s using temp_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)",
			ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID,
			ts, disk.Tempdir.Name, disk.Tempdir.Avail.IntPart(), disk.Tempdir.Used.IntPart(), disk.Tempdir.Total.IntPart()),
	)
	return sqls
}

func insertVgroupSql(g VgroupInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string {
	var sqls []string
	sqls = append(sqls, fmt.Sprintf("insert into vgroups_info_%s using vgroups_info tags (%d, '%s', '%s') "+
		"(ts, vgroup_id, database_name, tables_num, status) values ('%s', %d, '%s', %d, '%s')",
		ClusterID+strconv.Itoa(DnodeID)+strconv.Itoa(g.VgroupID), DnodeID, DnodeEp, ClusterID,
		ts, g.VgroupID, g.DatabaseName, g.TablesNum, g.Status))
	for _, v := range g.Vnodes {
		sqls = append(sqls, fmt.Sprintf("insert into vnodes_role_%s using vnodes_role tags (%d, '%s', '%s') values ('%s', '%s')",
			ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, v.VnodeRole))
	}
	return sqls
}

func insertLogSummary(log LogInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string {
	var e, info, debug, trace int
	for _, s := range log.Summary {
		switch s.Level {
		case "error":
			e = s.Total
		case "info":
			info = s.Total
		case "debug":
			debug = s.Total
		case "trace":
			trace = s.Total
		}
	}
	return fmt.Sprintf("insert into log_summary_%s using log_summary tags (%d, '%s', '%s') values ('%s', %d, %d, %d, %d)",
		ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, e, info, debug, trace)
}

func insertGrantSql(g GrantInfo, DnodeID int, ClusterID string, ts string) string {
	return fmt.Sprintf("insert into grants_info_%s using grants_info tags ('%s') (ts, expire_time, "+
		"timeseries_used, timeseries_total) values ('%s', %d, %d, %d)", ClusterID+strconv.Itoa(DnodeID), ClusterID, ts, g.ExpireTime, g.TimeseriesUsed, g.TimeseriesTotal)
}
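For reference, generateCreateDBSql above just concatenates each configured option after the database name. A self-contained sketch of the same expansion with made-up option values (note that Go's map iteration order is randomized, so option order varies):

package main

import (
	"bytes"
	"fmt"
)

// createDBSql mirrors the concatenation in generateCreateDBSql, for illustration only.
func createDBSql(dbname string, opts map[string]interface{}) string {
	var buf bytes.Buffer
	buf.WriteString("create database if not exists ")
	buf.WriteString(dbname)
	for k, v := range opts {
		buf.WriteString(" ")
		buf.WriteString(k)
		switch v := v.(type) {
		case string:
			buf.WriteString(fmt.Sprintf(" '%s'", v)) // string options are quoted
		default:
			buf.WriteString(fmt.Sprintf(" %v", v))
		}
		buf.WriteString(" ")
	}
	return buf.String()
}

func main() {
	// e.g. "create database if not exists log duration '30d'  keep 90 "
	fmt.Println(createDBSql("log", map[string]interface{}{"duration": "30d", "keep": 90}))
}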
@ -0,0 +1,286 @@
package api

import (
	"strconv"

	"github.com/shopspring/decimal"
)

type Report struct {
	Ts          string       `json:"ts"`
	DnodeID     int          `json:"dnode_id"`
	DnodeEp     string       `json:"dnode_ep"`
	ClusterID   string       `json:"cluster_id"`
	Protocol    int          `json:"protocol"`
	ClusterInfo *ClusterInfo `json:"cluster_info"` // only reported by master
	StbInfos    []StbInfo    `json:"stb_infos"`
	VgroupInfos []VgroupInfo `json:"vgroup_infos"` // only reported by master
	GrantInfo   *GrantInfo   `json:"grant_info"`   // only reported by master
	DnodeInfo   DnodeInfo    `json:"dnode_info"`
	DiskInfos   DiskInfo     `json:"disk_infos"`
	LogInfos    LogInfo      `json:"log_infos"`
}

type ClusterInfo struct {
	FirstEp          string  `json:"first_ep"`
	FirstEpDnodeID   int     `json:"first_ep_dnode_id"`
	Version          string  `json:"version"`
	MasterUptime     float32 `json:"master_uptime"`
	MonitorInterval  int     `json:"monitor_interval"`
	DbsTotal         int     `json:"dbs_total"`
	TbsTotal         int64   `json:"tbs_total"` // changed to bigint since TS-3003
	StbsTotal        int     `json:"stbs_total"`
	VgroupsTotal     int     `json:"vgroups_total"`
	VgroupsAlive     int     `json:"vgroups_alive"`
	VnodesTotal      int     `json:"vnodes_total"`
	VnodesAlive      int     `json:"vnodes_alive"`
	ConnectionsTotal int     `json:"connections_total"`
	TopicsTotal      int     `json:"topics_total"`
	StreamsTotal     int     `json:"streams_total"`
	Dnodes           []Dnode `json:"dnodes"`
	Mnodes           []Mnode `json:"mnodes"`
}

var dnodeEpLen = strconv.Itoa(255)

var CreateClusterInfoSql = "create table if not exists cluster_info (" +
	"ts timestamp, " +
	"first_ep binary(134), " +
	"first_ep_dnode_id int, " +
	"version binary(12), " +
	"master_uptime float, " +
	"monitor_interval int, " +
	"dbs_total int, " +
	"tbs_total bigint, " + // changed to bigint since TS-3003
	"stbs_total int, " +
	"dnodes_total int, " +
	"dnodes_alive int, " +
	"mnodes_total int, " +
	"mnodes_alive int, " +
	"vgroups_total int, " +
	"vgroups_alive int, " +
	"vnodes_total int, " +
	"vnodes_alive int, " +
	"connections_total int, " +
	"topics_total int, " +
	"streams_total int, " +
	"protocol int " +
	") tags (cluster_id nchar(32))"

type Dnode struct {
	DnodeID int    `json:"dnode_id"`
	DnodeEp string `json:"dnode_ep"`
	Status  string `json:"status"`
}

var CreateDnodeSql = "create table if not exists d_info (" +
	"ts timestamp, " +
	"status binary(10)" +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type Mnode struct {
	MnodeID int    `json:"mnode_id"`
	MnodeEp string `json:"mnode_ep"`
	Role    string `json:"role"`
}

var CreateMnodeSql = "create table if not exists m_info (" +
	"ts timestamp, " +
	"role binary(10)" +
	") tags (mnode_id int, mnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type DnodeInfo struct {
	Uptime                float32 `json:"uptime"`
	CPUEngine             float32 `json:"cpu_engine"`
	CPUSystem             float32 `json:"cpu_system"`
	CPUCores              float32 `json:"cpu_cores"`
	MemEngine             int     `json:"mem_engine"`
	MemSystem             int     `json:"mem_system"`
	MemTotal              int     `json:"mem_total"`
	DiskEngine            int64   `json:"disk_engine"`
	DiskUsed              int64   `json:"disk_used"`
	DiskTotal             int64   `json:"disk_total"`
	NetIn                 float32 `json:"net_in"`
	NetOut                float32 `json:"net_out"`
	IoRead                float32 `json:"io_read"`
	IoWrite               float32 `json:"io_write"`
	IoReadDisk            float32 `json:"io_read_disk"`
	IoWriteDisk           float32 `json:"io_write_disk"`
	ReqSelect             int     `json:"req_select"`
	ReqSelectRate         float32 `json:"req_select_rate"`
	ReqInsert             int     `json:"req_insert"`
	ReqInsertSuccess      int     `json:"req_insert_success"`
	ReqInsertRate         float32 `json:"req_insert_rate"`
	ReqInsertBatch        int     `json:"req_insert_batch"`
	ReqInsertBatchSuccess int     `json:"req_insert_batch_success"`
	ReqInsertBatchRate    float32 `json:"req_insert_batch_rate"`
	Errors                int     `json:"errors"`
	VnodesNum             int     `json:"vnodes_num"`
	Masters               int     `json:"masters"`
	HasMnode              int8    `json:"has_mnode"`
	HasQnode              int8    `json:"has_qnode"`
	HasSnode              int8    `json:"has_snode"`
	HasBnode              int8    `json:"has_bnode"`
}

var CreateDnodeInfoSql = "create table if not exists dnodes_info (" +
	"ts timestamp, " +
	"uptime float, " +
	"cpu_engine float, " +
	"cpu_system float, " +
	"cpu_cores float, " +
	"mem_engine int, " +
	"mem_system int, " +
	"mem_total int, " +
	"disk_engine bigint, " +
	"disk_used bigint, " +
	"disk_total bigint, " +
	"net_in float, " +
	"net_out float, " +
	"io_read float, " +
	"io_write float, " +
	"io_read_disk float, " +
	"io_write_disk float, " +
	"req_select int, " +
	"req_select_rate float, " +
	"req_insert int, " +
	"req_insert_success int, " +
	"req_insert_rate float, " +
	"req_insert_batch int, " +
	"req_insert_batch_success int, " +
	"req_insert_batch_rate float, " +
	"errors int, " +
	"vnodes_num int, " +
	"masters int, " +
	"has_mnode int, " +
	"has_qnode int, " +
	"has_snode int, " +
	"has_bnode int " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type DiskInfo struct {
	Datadir []DataDir `json:"datadir"`
	Logdir  LogDir    `json:"logdir"`
	Tempdir TempDir   `json:"tempdir"`
}

type DataDir struct {
	Name  string          `json:"name"`
	Level int             `json:"level"`
	Avail decimal.Decimal `json:"avail"`
	Used  decimal.Decimal `json:"used"`
	Total decimal.Decimal `json:"total"`
}

var CreateDataDirSql = "create table if not exists data_dir (" +
	"ts timestamp, " +
	"name nchar(200), " +
	"`level` int, " +
	"avail bigint, " +
	"used bigint, " +
	"total bigint" +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type LogDir struct {
	Name  string          `json:"name"`
	Avail decimal.Decimal `json:"avail"`
	Used  decimal.Decimal `json:"used"`
	Total decimal.Decimal `json:"total"`
}

var CreateLogDirSql = "create table if not exists log_dir (" +
	"ts timestamp, " +
	"name nchar(200), " +
	"avail bigint, " +
	"used bigint, " +
	"total bigint" +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type TempDir struct {
	Name  string          `json:"name"`
	Avail decimal.Decimal `json:"avail"`
	Used  decimal.Decimal `json:"used"`
	Total decimal.Decimal `json:"total"`
}

var CreateTempDirSql = "create table if not exists temp_dir(" +
	"ts timestamp, " +
	"name nchar(200), " +
	"avail bigint, " +
	"used bigint, " +
	"total bigint " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type StbInfo struct {
	StbName      string `json:"stb_name"`
	DataBaseName string `json:"database_name"`
}

type VgroupInfo struct {
	VgroupID     int     `json:"vgroup_id"`
	DatabaseName string  `json:"database_name"`
	TablesNum    int64   `json:"tables_num"`
	Status       string  `json:"status"`
	Vnodes       []Vnode `json:"vnodes"`
}

var CreateVgroupsInfoSql = "create table if not exists vgroups_info (" +
	"ts timestamp, " +
	"vgroup_id int, " +
	"database_name binary(33), " +
	"tables_num bigint, " + // changed to bigint since TS-3003
	"status binary(512) " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type Vnode struct {
	DnodeID   int    `json:"dnode_id"`
	VnodeRole string `json:"vnode_role"`
}

var CreateVnodeRoleSql = "create table if not exists vnodes_role (" +
	"ts timestamp, " +
	"vnode_role binary(10) " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type LogInfo struct {
	Summary []Summary `json:"summary"`
}

type Log struct {
	Ts      string `json:"ts"`
	Level   string `json:"level"`
	Content string `json:"content"`
}

type Summary struct {
	Level string `json:"level"`
	Total int    `json:"total"`
}

var CreateSummarySql = "create table if not exists log_summary(" +
	"ts timestamp, " +
	"error int, " +
	"info int, " +
	"debug int, " +
	"trace int " +
	") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))"

type GrantInfo struct {
	ExpireTime      int64 `json:"expire_time"`
	TimeseriesUsed  int64 `json:"timeseries_used"`
	TimeseriesTotal int64 `json:"timeseries_total"`
}

var CreateGrantInfoSql = "create table if not exists grants_info(" +
	"ts timestamp, " +
	"expire_time bigint, " +
	"timeseries_used bigint, " +
	"timeseries_total bigint " +
	") tags (cluster_id nchar(32))"

var CreateKeeperSql = "create table if not exists keeper_monitor (" +
	"ts timestamp, " +
	"cpu float, " +
	"mem float, " +
	"total_reports int " +
	") tags (identify nchar(50))"
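The json struct tags above are what bind taosd's report payload to these types; a trimmed-down, runnable illustration of the mapping (the payload values are invented):

package main

import (
	"encoding/json"
	"fmt"
)

// report is a cut-down stand-in for the Report struct above.
type report struct {
	Ts        string `json:"ts"`
	DnodeID   int    `json:"dnode_id"`
	ClusterID string `json:"cluster_id"`
}

func main() {
	payload := []byte(`{"ts":"2023-01-01T00:00:00Z","dnode_id":1,"cluster_id":"123"}`)
	var r report
	if err := json.Unmarshal(payload, &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r) // {Ts:2023-01-01T00:00:00Z DnodeID:1 ClusterID:123}
}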
@ -0,0 +1,113 @@
package api

import (
	"net/http"
	"sort"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/taosdata/taoskeeper/process"
	"github.com/taosdata/taoskeeper/util/pool"
)

type Zabbix struct {
	processor  *process.Processor
	floatGroup []*process.Metric
	strGroup   []*process.Metric
}

func NewZabbix(processor *process.Processor) *Zabbix {
	z := &Zabbix{processor: processor}
	z.processorMetrics()
	return z
}

type zabbixMetric struct {
	Data []*ZMetric `json:"data"`
}

type ZMetric struct {
	Metric string      `json:"{#METRIC}"`
	Key    string      `json:"key"`
	Value  interface{} `json:"value"`
}

const (
	FloatType = iota + 1
	StringType
)

func (z *Zabbix) Init(c gin.IRouter) {
	api := c.Group("zabbix")
	api.GET("float", z.getFloat)
	api.GET("string", z.getString)
}

func (z *Zabbix) getFloat(c *gin.Context) {
	z.returnData(c, FloatType)
}

func (z *Zabbix) getString(c *gin.Context) {
	z.returnData(c, StringType)
}

func (z *Zabbix) returnData(c *gin.Context, valueType int) {
	var metrics []*process.Metric
	switch valueType {
	case FloatType:
		metrics = z.floatGroup
	case StringType:
		metrics = z.strGroup
	}
	var d zabbixMetric
	b := pool.BytesPoolGet()
	defer pool.BytesPoolPut(b)
	for _, metric := range metrics {
		values := metric.GetValue()
		for _, value := range values {
			label := z.sortLabel(value.Label)
			b.Reset()
			b.WriteString(metric.FQName)
			if len(label) > 0 {
				b.WriteByte(',')
				b.WriteString(label)
			}
			metricName := b.String()
			d.Data = append(d.Data, &ZMetric{
				Metric: metricName,
				Key:    metricName,
				Value:  value.Value,
			})
		}
	}
	c.JSON(http.StatusOK, d)
}

func (z *Zabbix) sortLabel(labels map[string]string) string {
	if len(labels) == 0 {
		return ""
	}
	result := make([]string, 0, len(labels))
	b := pool.BytesPoolGet()
	defer pool.BytesPoolPut(b)
	for k, v := range labels {
		b.Reset()
		b.WriteString(k)
		b.WriteByte('=')
		b.WriteString(v)
		result = append(result, b.String())
	}
	sort.Strings(result)
	return strings.Join(result, "_")
}

func (z *Zabbix) processorMetrics() {
	metrics := z.processor.GetMetric()
	for _, metric := range metrics {
		if metric.Type == process.Gauge || metric.Type == process.Counter {
			z.floatGroup = append(z.floatGroup, metric)
		} else if metric.Type == process.Info {
			z.strGroup = append(z.strGroup, metric)
		}
	}
}
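The two endpoints above emit Zabbix low-level-discovery style JSON: each entry carries the metric FQName plus the sorted, '_'-joined k=v labels as both {#METRIC} and key. A small sketch of the wire format (the metric name is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

type zMetric struct {
	Metric string      `json:"{#METRIC}"`
	Key    string      `json:"key"`
	Value  interface{} `json:"value"`
}

func main() {
	name := "taos_cluster_info_dnodes_total,cluster_id=123" // FQName + "," + sorted labels
	out, _ := json.Marshal(map[string][]zMetric{"data": {{Metric: name, Key: name, Value: 2}}})
	fmt.Println(string(out))
	// {"data":[{"{#METRIC}":"taos_cluster_info_dnodes_total,cluster_id=123","key":"taos_cluster_info_dnodes_total,cluster_id=123","value":2}]}
}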
@ -0,0 +1,461 @@
package cmd

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/taosdata/taoskeeper/db"
	"github.com/taosdata/taoskeeper/infrastructure/config"
	"github.com/taosdata/taoskeeper/infrastructure/log"
	"github.com/taosdata/taoskeeper/util"
	"github.com/taosdata/taoskeeper/util/pool"
)

var logger = log.GetLogger("CMD")

var MAX_SQL_LEN = 1000000

type Command struct {
	fromTime time.Time
	client   *http.Client
	conn     *db.Connector
	username string
	password string
	url      *url.URL
}

func NewCommand(conf *config.Config) *Command {
	client := &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
			DisableCompression:    true,
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}

	conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl)
	if err != nil {
		logger.Errorf("init db connect error, msg:%s", err)
		panic(err)
	}

	imp := &Command{
		client:   client,
		conn:     conn,
		username: conf.TDengine.Username,
		password: conf.TDengine.Password,
		url: &url.URL{
			Scheme:   "http",
			Host:     fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port),
			Path:     "/influxdb/v1/write",
			RawQuery: fmt.Sprintf("db=%s&precision=ms", conf.Metrics.Database.Name),
		},
	}
	return imp
}

func (cmd *Command) Process(conf *config.Config) {
	if len(conf.Transfer) > 0 && len(conf.Drop) > 0 {
		logger.Errorf("transfer and drop can't be set at the same time")
		return
	}

	if len(conf.Transfer) > 0 && conf.Transfer != "old_taosd_metric" {
		logger.Errorf("transfer only supports old_taosd_metric")
		return
	}

	if conf.Transfer == "old_taosd_metric" {
		cmd.ProcessTransfer(conf)
		return
	}

	if len(conf.Drop) > 0 && conf.Drop != "old_taosd_metric_stables" {
		logger.Errorf("drop only supports old_taosd_metric_stables")
		return
	}

	if conf.Drop == "old_taosd_metric_stables" {
		cmd.ProcessDrop(conf)
		return
	}
}

func (cmd *Command) ProcessTransfer(conf *config.Config) {
	fromTime, err := time.Parse("2006-01-02T15:04:05Z07:00", conf.FromTime)
	if err != nil {
		logger.Errorf("parse fromTime error, msg:%s", err)
		return
	}
	cmd.fromTime = fromTime

	funcs := []func() error{
		cmd.TransferTaosdClusterBasicInfo,
		cmd.TransferTaosdClusterInfo,
		cmd.TransferTaosdVgroupsInfo,
		cmd.TransferTaosdDnodesInfo,
		cmd.TransferTaosdDnodesStatus,
		cmd.TransferTaosdDnodesLogDirs1,
		cmd.TransferTaosdDnodesLogDirs2,
		cmd.TransferTaosdDnodesDataDirs,
		cmd.TransferTaosdMnodesInfo,
		cmd.TransferTaosdVnodesInfo,
	}
	wg := sync.WaitGroup{}
	wg.Add(len(funcs))

	for i := range funcs {
		index := i
		err := pool.GoroutinePool.Submit(func() {
			defer wg.Done()
			if err := funcs[index](); err != nil {
				logger.Errorf("transfer task %d error, msg:%s", index, err)
			}
		})

		if err != nil {
			panic(err)
		}
	}

	wg.Wait()
	logger.Info("transfer all old taosd metric success!!")
}

func (cmd *Command) TransferTaosdClusterInfo() error {
	sql := "select a.cluster_id, master_uptime * 3600 * 24 as cluster_uptime, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, topics_total, streams_total, b.expire_time as grants_expire_time, b.timeseries_used as grants_timeseries_used, b.timeseries_total as grants_timeseries_total, a.ts from cluster_info a, grants_info b where a.ts = b.ts and a.cluster_id = b.cluster_id and"
	dstTable := "taosd_cluster_info"
	return cmd.TransferTableToDst(sql, dstTable, 1)
}

func (cmd *Command) TransferTaosdVgroupsInfo() error {
	sql := "select cluster_id, vgroup_id, database_name, tables_num, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from vgroups_info a where "
	dstTable := "taosd_vgroups_info"
	return cmd.TransferTableToDst(sql, dstTable, 3)
}

func (cmd *Command) TransferTaosdDnodesInfo() error {
	sql := "select a.cluster_id, a.dnode_id, a.dnode_ep, uptime * 3600 * 24 as uptime, cpu_engine, cpu_system, cpu_cores, mem_engine, mem_system as mem_free, mem_total, disk_used, disk_total, disk_engine, net_in as system_net_in, net_out as system_net_out, io_read, io_write, io_read_disk, io_write_disk, vnodes_num, masters, has_mnode, has_qnode, has_snode, has_bnode, errors, b.error as error_log_count, b.info as info_log_count, b.debug as debug_log_count, b.trace as trace_log_count, a.ts as ts from dnodes_info a, log_summary b where a.ts = b.ts and a.dnode_id = b.dnode_id and a.dnode_ep = b.dnode_ep and "
	dstTable := "taosd_dnodes_info"
	return cmd.TransferTableToDst(sql, dstTable, 3)
}

func (cmd *Command) TransferTaosdDnodesStatus() error {
	sql := "select cluster_id, dnode_id, dnode_ep, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from d_info a where "
	dstTable := "taosd_dnodes_status"
	return cmd.TransferTableToDst(sql, dstTable, 3)
}

func (cmd *Command) TransferTaosdDnodesLogDirs1() error {
	sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from log_dir a where "
	dstTable := "taosd_dnodes_log_dirs"
	return cmd.TransferTableToDst(sql, dstTable, 4)
}

func (cmd *Command) TransferTaosdDnodesLogDirs2() error {
	sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from temp_dir a where "
	dstTable := "taosd_dnodes_log_dirs"
	return cmd.TransferTableToDst(sql, dstTable, 4)
}

func (cmd *Command) TransferTaosdDnodesDataDirs() error {
	sql := "select cluster_id, dnode_id, dnode_ep, name as data_dir_name, `level` as data_dir_level, avail, used, total, ts from data_dir a where "
	dstTable := "taosd_dnodes_data_dirs"
	return cmd.TransferTableToDst(sql, dstTable, 5)
}

func (cmd *Command) TransferTaosdMnodesInfo() error {
	sql := "select cluster_id, mnode_id, mnode_ep, CASE role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from m_info a where "
	dstTable := "taosd_mnodes_info"
	return cmd.TransferTableToDst(sql, dstTable, 3)
}

func (cmd *Command) TransferTaosdVnodesInfo() error {
	sql := "select cluster_id, 0 as vgroup_id, 'UNKNOWN' as database_name, dnode_id, CASE vnode_role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from vnodes_role a where "
	dstTable := "taosd_vnodes_info"
	return cmd.TransferTableToDst(sql, dstTable, 4)
}

func (cmd *Command) ProcessDrop(conf *config.Config) {
	var dropStableList = []string{
		"log_dir",
		"dnodes_info",
		"data_dir",
		"log_summary",
		"m_info",
		"vnodes_role",
		"cluster_info",
		"temp_dir",
		"grants_info",
		"vgroups_info",
		"d_info",
		"taosadapter_system_cpu_percent",
		"taosadapter_restful_http_request_in_flight",
		"taosadapter_restful_http_request_summary_milliseconds",
		"taosadapter_restful_http_request_fail",
		"taosadapter_system_mem_percent",
		"taosadapter_restful_http_request_total",
	}
	ctx := context.Background()
	logger.Infof("use database:%s", conf.Metrics.Database.Name)

	for _, stable := range dropStableList {
		if _, err := cmd.conn.Exec(ctx, "DROP STABLE IF EXISTS "+stable, util.GetQidOwn()); err != nil {
			logger.Errorf("drop stable %s, error:%s", stable, err)
			panic(err)
		}
	}
	logger.Info("drop old taosd metric stables success!!")
}

func (cmd *Command) TransferDataToDest(data *db.Data, dstTable string, tagNum int) {
	var buf bytes.Buffer

	if len(data.Data) < 1 {
		return
	}

	for _, row := range data.Data {
		// get one row here
		buf.WriteString(dstTable)

		// write tags
		var tag string
		for j := 0; j < tagNum; j++ {
			switch v := row[j].(type) {
			case int:
				tag = fmt.Sprint(v)
			case int32:
				tag = fmt.Sprint(v)
			case int64:
				tag = fmt.Sprint(v)
			case string:
				tag = v
			default:
				panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j]))
			}

			if tag != "" {
				buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], util.EscapeInfluxProtocol(tag)))
			} else {
				buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], "unknown"))
				logger.Errorf("tag value is empty, tag_name:%s", data.Head[j])
			}
		}
		buf.WriteString(" ")

		// write metrics
		for j := tagNum; j < len(row)-1; j++ {
			switch v := row[j].(type) {
			case int:
				buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v)))
			case int32:
				buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v)))
			case int64:
				buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v)))
			case float32:
				buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(float64(v), 'f', -1, 64)))
			case float64:
				buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(v, 'f', -1, 64)))
			default:
				panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j]))
			}

			if j != len(row)-2 {
				buf.WriteString(",")
			}
		}

		// write timestamp
		buf.WriteString(" ")
		buf.WriteString(fmt.Sprintf("%v", row[len(row)-1].(time.Time).UnixMilli()))
		buf.WriteString("\n")

		if buf.Len() >= MAX_SQL_LEN {
			if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
				logger.Tracef("buf:%v", buf.String())
			}
			err := cmd.lineWriteBody(&buf)
			if err != nil {
				logger.Errorf("insert data error, msg:%s", err)
				panic(err)
			}
			buf.Reset()
		}
	}

	if buf.Len() > 0 {
		if logger.Logger.IsLevelEnabled(logrus.TraceLevel) {
			logger.Tracef("buf:%v", buf.String())
		}
		err := cmd.lineWriteBody(&buf)
		if err != nil {
			logger.Errorf("insert data error, msg:%s", err)
			panic(err)
		}
	}
}

// taosd_cluster_basic, migrated from the old cluster_info stable
func (cmd *Command) TransferTaosdClusterBasicInfo() error {
	ctx := context.Background()

	endTime := time.Now()
	delta := time.Hour * 24 * 10

	var createTableSql = "create stable if not exists taosd_cluster_basic " +
		"(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " +
		"tags (cluster_id varchar(50))"

	if _, err := cmd.conn.Exec(ctx, createTableSql, util.GetQidOwn()); err != nil {
		logger.Errorf("create taosd_cluster_basic error, msg:%s", err)
		return err
	}

	logger.Tracef("fromTime:%d", cmd.fromTime.UnixMilli())

	for current := cmd.fromTime; current.Before(endTime); current = current.Add(delta) {
		querySql := fmt.Sprintf("select cluster_id, first_ep, first_ep_dnode_id, `version` as cluster_version, ts from cluster_info where ts > %d and ts <= %d",
			current.UnixMilli(), current.Add(delta).UnixMilli())
		logger.Tracef("query sql:%s", querySql)
		data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn())
		if err != nil {
			logger.Errorf("query cluster_info error, msg:%s", err)
			return err
		}

		// transfer data to the new table; only this table needs insert statements
		var buf bytes.Buffer

		// split the rows into one group per cluster
		result := make(map[string][][]interface{})
		for _, row := range data.Data {
			key := row[0].(string) // use the first column (cluster_id) as the key
			result[key] = append(result[key], row)
		}

		// migrate the data cluster by cluster, one tag set at a time
		for _, dataByCluster := range result {
			buf.Reset()

			for _, row := range dataByCluster {
				if len(buf.Bytes()) == 0 {
					sql := fmt.Sprintf(
						"insert into taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values ",
						row[0].(string), row[0].(string))

					buf.WriteString(sql)
				}

				sql := fmt.Sprintf(
					"(%d, '%s', %d, '%s')",
					row[4].(time.Time).UnixMilli(), row[1].(string), row[2].(int32), row[3].(string))
				buf.WriteString(sql)

				if buf.Len() >= MAX_SQL_LEN {
					rowsAffected, err := cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn())
					if err != nil {
						logger.Errorf("insert taosd_cluster_basic error, msg:%s", err)
						return err
					}
					if rowsAffected <= 0 {
						logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected)
					}
					buf.Reset()
				}
			}

			if buf.Len() > 0 {
				rowsAffected, err := cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn())
				if err != nil {
					logger.Errorf("insert taosd_cluster_basic error, msg:%s", err)
					return err
				}
				if rowsAffected <= 0 {
					logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected)
				}
			}
		}
	}

	logger.Info("transfer stable taosd_cluster_basic success!!")
	return nil
}

// generic helper: query an old stable in time windows and forward the rows to dstTable
func (cmd *Command) TransferTableToDst(sql string, dstTable string, tagNum int) error {
	ctx := context.Background()

	endTime := time.Now()
	delta := time.Hour * 24 * 10

	logger.Tracef("fromTime:%d", cmd.fromTime.UnixMilli())

	for current := cmd.fromTime; current.Before(endTime); current = current.Add(delta) {
		querySql := fmt.Sprintf(sql+" a.ts > %d and a.ts <= %d",
			current.UnixMilli(), current.Add(delta).UnixMilli())
		logger.Tracef("query sql:%s", querySql)
		data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn())
		if err != nil {
			logger.Errorf("query old metric data error, msg:%s", err)
			return err
		}

		// transfer the rows to the destination table via the line-protocol endpoint
		cmd.TransferDataToDest(data, dstTable, tagNum)
	}

	logger.Info("transfer stable " + dstTable + " success!!")
	return nil
}

func (cmd *Command) lineWriteBody(buf *bytes.Buffer) error {
	header := map[string][]string{
		"Connection": {"keep-alive"},
	}

	req := &http.Request{
		Method:     http.MethodPost,
		URL:        cmd.url,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     header,
		Host:       cmd.url.Host,
	}
	req.SetBasicAuth(cmd.username, cmd.password)

	req.Body = io.NopCloser(buf)
	resp, err := cmd.client.Do(req)

	if err != nil {
		logger.Errorf("writing metrics exception, msg:%s", err)
		return err
	}

	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body))
	}
	return nil
}
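For orientation, each record TransferDataToDest writes to the /influxdb/v1/write endpoint above is one line-protocol row: the destination stable as measurement, comma-separated tags, a space, f64-suffixed field values, a space, and a millisecond timestamp (matching precision=ms in the URL). A hypothetical example for taosd_dnodes_status:

taosd_dnodes_status,cluster_id=123,dnode_id=1,dnode_ep=host1:6030 status=1.000000f64 1704067200000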