Merge pull request #16264 from taosdata/3.0

release: merge from 3.0 to main
This commit is contained in:
Shengliang Guan 2022-08-21 23:55:42 +08:00 committed by GitHub
commit cd7e6b1674
197 changed files with 8972 additions and 4876 deletions

View File

@ -43,6 +43,7 @@ def pre_test(){
cd ${WKC}
git reset --hard
git clean -fxd
rm -rf examples/rust/
git remote prune origin
git fetch
'''

View File

@ -1,38 +1,8 @@
IF (EXISTS /var/lib/taos/dnode/dnodeCfg.json)
INSTALL(CODE "MESSAGE(\"The default data directory /var/lib/taos contains old data of tdengine 2.x, please clear it before installing!\")")
ELSEIF (EXISTS C:/TDengine/data/dnode/dnodeCfg.json)
INSTALL(CODE "MESSAGE(\"The default data directory C:/TDengine/data contains old data of tdengine 2.x, please clear it before installing!\")")
ELSEIF (TD_LINUX)
IF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
ELSEIF (TD_WINDOWS)
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/go DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/nodejs DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector)
# INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .)
INSTALL(CODE "IF (NOT EXISTS ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg)
execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg)
ENDIF ()")
INSTALL(FILES ${TD_SOURCE_DIR}/include/client/taos.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/include/util/taoserror.h DESTINATION include)
INSTALL(FILES ${TD_SOURCE_DIR}/include/libs/function/taosudf.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos_static.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
IF (BUILD_TOOLS)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .)
ENDIF ()
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)
ENDIF ()
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")

View File

@ -1,12 +0,0 @@
# rust-bindings
ExternalProject_Add(rust-bindings
GIT_REPOSITORY https://github.com/songtianyi/tdengine-rust-bindings.git
GIT_TAG 7ed7a97
SOURCE_DIR "${TD_SOURCE_DIR}/examples/rust"
BINARY_DIR "${TD_SOURCE_DIR}/examples/rust"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
GIT_TAG 3d21433
GIT_TAG abed566
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE

View File

@ -105,11 +105,6 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# rust-bindings
if(${RUST_BINDINGS})
cat("${TD_SUPPORT_DIR}/rust-bindings_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${RUST_BINDINGS})
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -140,24 +135,6 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
# clear submodule
execute_process(COMMAND git submodule deinit -f tools/taos-tools
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git rm --cached tools/taos-tools
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git submodule deinit -f tools/taosadapter
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git rm --cached tools/taosadapter
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git submodule deinit -f tools/taosws-rs
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git rm --cached tools/taosws-rs
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git submodule deinit -f examples/rust
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
execute_process(COMMAND git rm --cached examples/rust
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
# ================================================================================================
# Build
@ -354,9 +331,11 @@ endif(${BUILD_WITH_TRAFT})
# LIBUV
if(${BUILD_WITH_UV})
if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Windows")
MESSAGE("Windows need set no-sign-compare")
add_compile_options(-Wno-sign-compare)
if (TD_WINDOWS)
# There is no GetHostNameW function on win7.
file(READ "libuv/src/win/util.c" LIBUV_WIN_UTIL_CONTENT)
string(REPLACE "if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE" "DWORD nSize = UV_MAXHOSTNAMESIZE;\n if (GetComputerNameW(buf, &nSize" LIBUV_WIN_UTIL_CONTENT "${LIBUV_WIN_UTIL_CONTENT}")
file(WRITE "libuv/src/win/util.c" "${LIBUV_WIN_UTIL_CONTENT}")
endif ()
add_subdirectory(libuv EXCLUDE_FROM_ALL)
endif(${BUILD_WITH_UV})

View File

@ -4,7 +4,8 @@ sidebar_label: Documentation Home
slug: /
---
TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It's written mainly for architects, developers and system administrators.
TDengine is an open source, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It's written mainly for architects, developers and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.

View File

@ -3,7 +3,7 @@ title: Introduction
toc_max_heading_level: 2
---
TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation.
TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the system complexity and cost of development and operation.
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
@ -31,25 +31,21 @@ For more details on features, please read through the entire documentation.
## Competitive Advantages
Time-series data is structured, not transactional, and is rarely deleted or updated. TDengine makes full use of [these characteristics of time series data](https://tdengine.com/2019/07/09/86.html) to build its own innovative storage engine and computing engine to differentiate itself from other time series databases, with the following advantages.
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages.
- **[High Performance](https://tdengine.com/fast)**: With an innovatively designed and purpose-built storage engine, TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage costs and compute costs.
- **High-Performance**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while outperforming other time-series databases for data ingestion, querying and data compression.
- **[Scalable](https://tdengine.com/scalable)**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source.
- **Simplified Solution**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to better handle time-series. Keeping NoSQL developers in mind, TDengine also supports convenient and flexible, schemaless data ingestion.
- **Cloud Native**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
- **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler, cost-effective and easier to maintain.
- **Ease of Use**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
- **Seamless Integration**: Without a single line of code, TDengine provide seamless, configurable integration with third-party tools such as Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, etc. More third-party tools are being integrated.
- **Easy Data Analytics**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine's running status can be monitored via Grafana or other DevOps tools.
- **Open Source**: TDengine's core modules, including its cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, and a REST API, there are zero learning costs.
- **Interactive Console**: TDengine provides convenient console access to the database, through a CLI, to run ad hoc queries, maintain the database, or manage the cluster, without any programming.
With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly 2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly 3: With its simple architecture and zero management, the operation and maintenance costs are reduced.
With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly; 2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly; 3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
## Technical Ecosystem
This is how TDengine would be situated, in a typical time-series data processing platform:

View File

@ -2,7 +2,7 @@
title: Concepts
---
In order to explain the basic concepts and provide some sample code, the TDengine documentation uses smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase 2. There are multiple smart meters, and 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
In order to explain the basic concepts and provide some sample code, the TDengine documentation uses smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase; 2. There are multiple smart meters; 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
<div className="center-table">
<table>

View File

@ -2,14 +2,16 @@
title: Data Model
---
The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)".
## Create Database
The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others, which determine how you create and configure the database. For example, the days to keep data, the number of replicas, the data block size, and whether data updates are allowed are configurable parameters determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are many parameters that can be configured, such as the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in a single data file, and so on. Below is an example of the SQL statement to create a database.
```sql
CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 VGROUPS 100 WAL 1;
CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1;
```
In the above SQL statement:

View File

@ -61,20 +61,20 @@ In summary, records across subtables can be aggregated by a simple query on thei
In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location.
```
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
=============================================================
222.000000000 | California.LosAngeles |
219.200000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.002136s)
taos> SELECT AVG(voltage), location FROM meters GROUP BY location;
avg(voltage) | location |
===============================================================================================
219.200000000 | California.SanFrancisco |
221.666666667 | California.LosAngeles |
Query OK, 2 rows in database (0.005995s)
```
### Example 2
In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2.
In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current from meters whose groupId is 2.
```
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
taos> SELECT count(*), max(current) FROM meters where groupId = 2;
count(*) | max(current) |
==================================
5 | 13.4 |
@ -88,40 +88,41 @@ Join queries are only allowed between subtables of the same STable. In [Select](
In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001.
```
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
ts | sum(current) |
taos> SELECT _wstart, sum(current) FROM d1001 INTERVAL(10s);
_wstart | sum(current) |
======================================================
2018-10-03 14:38:00.000 | 10.300000191 |
2018-10-03 14:38:10.000 | 24.900000572 |
Query OK, 2 row(s) in set (0.000883s)
Query OK, 2 rows in database (0.003139s)
```
Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California.
```
taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
ts | sum(current) |
taos> SELECT _wstart, SUM(current) FROM meters where location like "California%" INTERVAL(1s);
_wstart | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
2018-10-03 14:38:05.000 | 32.900000572 |
2018-10-03 14:38:05.000 | 23.699999809 |
2018-10-03 14:38:06.000 | 11.500000000 |
2018-10-03 14:38:15.000 | 12.600000381 |
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
2018-10-03 14:38:16.000 | 34.400000572 |
Query OK, 5 rows in database (0.007413s)
```
Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds.
```
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
taos> SELECT _wstart, SUM(current) FROM meters INTERVAL(1s, 500a);
_wstart | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
2018-10-03 14:38:03.500 | 10.199999809 |
2018-10-03 14:38:04.500 | 10.300000191 |
2018-10-03 14:38:05.500 | 13.399999619 |
2018-10-03 14:38:06.500 | 11.500000000 |
2018-10-03 14:38:14.500 | 12.600000381 |
2018-10-03 14:38:16.500 | 34.400000572 |
Query OK, 6 rows in database (0.005515s)
```
In many use cases, it's hard to align the timestamps of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with the same time interval, and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling.
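For applications that consume these aligned results programmatically, the same down-sampling query can be issued through the C client (`taos.h`). The sketch below is illustrative only: it assumes a local TDengine server with the default root account and that the sample `power` database and `meters` super table from this chapter have already been created and populated.
```c
#include <stdio.h>
#include "taos.h"

int main() {
  // Connect to the local server; the "power" database is assumed to exist.
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect to server\n");
    return -1;
  }

  // Down-sample: per-second sum of current, one aligned time window per row.
  TAOS_RES *res = taos_query(conn, "SELECT _wstart, SUM(current) FROM meters INTERVAL(1s)");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed, reason: %s\n", taos_errstr(res));
    taos_free_result(res);
    taos_close(conn);
    return -1;
  }

  TAOS_FIELD *fields = taos_fetch_fields(res);
  int32_t     numOfFields = taos_field_count(res);
  char        buf[1024];
  TAOS_ROW    row;
  while ((row = taos_fetch_row(res)) != NULL) {
    taos_print_row(buf, row, fields, numOfFields);  // format one window per line
    printf("%s\n", buf);
  }

  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```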

View File

@ -130,7 +130,7 @@ The configuration parameters in the URL are as follows:
- charset: The character set used by the client, the default value is the system character set.
- locale: Client locale, by default, use the system's current locale.
- timezone: The time zone used by the client, the default value is the system's current time zone.
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
- batchErrorIgnore: true: When executing statement executeBatch, if there is a SQL execution failure in the middle, the following SQL will continue to be executed. false: No more statements after the failed SQL are executed. The default value is: false.
For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).

View File

@ -0,0 +1,275 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "taos.h"
static int running = 1;
static char dbName[64] = "tmqdb";
static char stbName[64] = "stb";
static char topicName[64] = "topicname";
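// msg_process: print the topic, database and vgroup of one polled message, dump every row it contains, and return the number of rows.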
static int32_t msg_process(TAOS_RES* msg) {
char buf[1024];
int32_t rows = 0;
const char* topicName = tmq_get_topic_name(msg);
const char* dbName = tmq_get_db_name(msg);
int32_t vgroupId = tmq_get_vgroup_id(msg);
printf("topic: %s\n", topicName);
printf("db: %s\n", dbName);
printf("vgroup id: %d\n", vgroupId);
while (1) {
TAOS_ROW row = taos_fetch_row(msg);
if (row == NULL) break;
TAOS_FIELD* fields = taos_fetch_fields(msg);
int32_t numOfFields = taos_field_count(msg);
int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg);
rows++;
taos_print_row(buf, row, fields, numOfFields);
printf("row content: %s\n", buf);
}
return rows;
}
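// init_env: create the tmqdb database, the stb super table and four child tables, then insert a few sample rows.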
static int32_t init_env() {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
TAOS_RES* pRes;
// drop database if exists
printf("create database\n");
pRes = taos_query(pConn, "drop database if exists tmqdb");
if (taos_errno(pRes) != 0) {
printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
// create database
pRes = taos_query(pConn, "create database tmqdb");
if (taos_errno(pRes) != 0) {
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
// create super table
printf("create super table\n");
pRes = taos_query(
pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
if (taos_errno(pRes) != 0) {
printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
// create sub tables
printf("create sub tables\n");
pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ctb1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ctb2, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ctb3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
// insert data
printf("insert data into sub tables\n");
pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb2, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
}
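// create_topic: open a connection and create the topic "topicname" as a query over tmqdb.stb.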
int32_t create_topic() {
printf("create topic\n");
TAOS_RES* pRes;
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
pRes = taos_query(pConn, "use tmqdb");
if (taos_errno(pRes) != 0) {
printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
}
void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param);
}
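// build_consumer: configure a TMQ consumer (auto commit, consumer group, connection credentials, offset reset) and create it; returns NULL if any option is rejected.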
tmq_t* build_consumer() {
tmq_conf_res_t code;
tmq_conf_t* conf = tmq_conf_new();
code = tmq_conf_set(conf, "enable.auto.commit", "true");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "group.id", "cgrpName");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "client.id", "user defined name");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "td.connect.user", "root");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
if (TMQ_CONF_OK != code) return NULL;
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
tmq_conf_destroy(conf);
return tmq;
}
tmq_list_t* build_topic_list() {
tmq_list_t* topicList = tmq_list_new();
int32_t code = tmq_list_append(topicList, "topicname");
if (code) {
return NULL;
}
return topicList;
}
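// basic_consume_loop: poll messages until none arrives within the timeout; process and free each one, then report totals.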
void basic_consume_loop(tmq_t* tmq) {
int32_t totalRows = 0;
int32_t msgCnt = 0;
int32_t timeout = 5000;
while (running) {
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
if (tmqmsg) {
msgCnt++;
totalRows += msg_process(tmqmsg);
taos_free_result(tmqmsg);
} else {
break;
}
}
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
}
int main(int argc, char* argv[]) {
int32_t code;
if (init_env() < 0) {
return -1;
}
if (create_topic() < 0) {
return -1;
}
tmq_t* tmq = build_consumer();
if (NULL == tmq) {
fprintf(stderr, "%% build_consumer() fail!\n");
return -1;
}
tmq_list_t* topic_list = build_topic_list();
if (NULL == topic_list) {
return -1;
}
if ((code = tmq_subscribe(tmq, topic_list))) {
fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
}
tmq_list_destroy(topic_list);
basic_consume_loop(tmq);
code = tmq_consumer_close(tmq);
if (code) {
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
} else {
fprintf(stderr, "%% Consumer closed\n");
}
return 0;
}

View File

@ -28,8 +28,7 @@ function runConsumer() {
console.log(msg.topicPartition);
console.log(msg.block);
console.log(msg.fields)
// fixme(@xiaolei): commented temp, should be fixed.
//consumer.commit(msg);
consumer.commit(msg);
console.log(`=======consumer ${i} done`)
}

View File

@ -4,7 +4,7 @@
"main": "index.js",
"license": "MIT",
"dependencies": {
"@tdengine/client": "^3.0.0",
"@tdengine/client": "^3.0.1",
"@tdengine/rest": "^3.0.0"
}
}

View File

@ -7,12 +7,13 @@ import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import PkgListV3 from "/components/PkgListV3";
The complete TDengine package includes the server (taosd), taosAdapter for integrating with third-party systems and providing a RESTful interface, the application driver (taosc), the command-line program (CLI, taos), and several tools. Currently the server taosd and taosAdapter can only be installed and run on Linux; support for Windows, macOS and other systems will follow. The application driver taosc and the TDengine CLI can be installed and run on Windows or Linux. Besides connectors for multiple languages, TDengine also provides a [RESTful interface](../../reference/rest-api/) through [taosAdapter](../../reference/taosadapter/).
You can [try TDengine right away in Docker](../../get-started/docker/). If you want to contribute code to TDengine or are interested in its internals, please visit the [TDengine GitHub repository](https://github.com/taosdata/TDengine) to download the source code and build and install from source.
For convenience, the standard server package includes taos, taosd, taosAdapter, taosdump, taosBenchmark, the TDinsight installation script, and sample code; if you only need the server program and C/C++ client connection support, you can download just the Lite package.
The complete TDengine package includes the server (taosd), taosAdapter for integrating with third-party systems and providing a RESTful interface, the application driver (taosc), the command-line program (CLI, taos), and several tools. Currently taosAdapter can only be installed and run on Linux; support for Windows, macOS and other systems will follow. Besides connectors for multiple languages, TDengine also provides a [RESTful interface](../../reference/rest-api/) through [taosAdapter](../../reference/taosadapter/).
On Linux, the open-source edition of TDengine provides installation packages in deb and rpm format; choose whichever suits your environment. The deb package supports Debian/Ubuntu and derivatives, and the rpm package supports CentOS/RHEL/SUSE and derivatives. A tar.gz package is also provided for enterprise users, and installation over the network via `apt-get` is supported. TDengine also provides an installation package for Windows x64. You can also [try it right away in Docker](../../get-started/docker/). Note that the rpm and deb packages do not include taosdump or the TDinsight installation script; these tools are obtained by installing the taosTool package. If you want to contribute code to TDengine or are interested in its internals, please visit the [TDengine GitHub repository](https://github.com/taosdata/TDengine) to download the source code and build and install from source.
For convenience, the standard server package includes taosd, taosAdapter, taosc, taos, taosdump, taosBenchmark, the TDinsight installation script, and sample code; if you only need the server program and C/C++ client connection support, you can download just the Lite package.
On Linux, the open-source edition of TDengine provides installation packages in deb and rpm format; choose whichever suits your environment. The deb package supports Debian/Ubuntu and derivatives, and the rpm package supports CentOS/RHEL/SUSE and derivatives. A tar.gz package is also provided for enterprise users, and installation over the network via `apt-get` is supported. Note that the rpm and deb packages do not include taosdump or the TDinsight installation script; these tools are obtained by installing the taosTool package. TDengine also provides an installation package for Windows x64.
## Installation
@ -205,7 +206,7 @@ Query OK, 2 row(s) in set (0.003128s)
## Experience Insert Speed with taosBenchmark
Start the TDengine service and run `taosBenchmark` (formerly named `taosdemo`) in a Linux terminal:
Start the TDengine service and run `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal:
```bash
taosBenchmark

View File

@ -11,10 +11,10 @@ TDengine uses a relational-like data model, so databases and tables must be created. Therefore, for
Different types of data collection points often have different data characteristics, including collection frequency, retention period, number of replicas, data block size, and whether data updates are allowed. So that TDengine can work with maximum efficiency in every scenario, it is recommended to create tables with different data characteristics in different databases, because each database can be configured with its own storage policy. When creating a database, besides the standard SQL options, you can also specify parameters such as retention period, number of replicas, cache size, time precision, maximum and minimum number of records per file block, whether compression is enabled, and the number of days covered by a single data file. For example:
```sql
CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 VGROUPS 100 WAL 1;
CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1;
```
The statement above creates a database named power. Its data is kept for 365 days (data older than 365 days is deleted automatically), a new data file is created every 10 days, the write memory pool of each VNODE is 16 MB, the database has 100 vgroups, and writes to the database are recorded in the WAL but FSYNC is not executed. For detailed syntax and parameters, see [Database Management](/taos-sql/database).
The statement above creates a database named power. Its data is kept for 365 days (data older than 365 days is deleted automatically), a new data file is created every 10 days, the write memory pool of each VNODE is 16 MB, and writes to the database are recorded in the WAL but FSYNC is not executed. For detailed syntax and parameters, see [Database Management](/taos-sql/database).
After creating a database, use the SQL command `USE` to switch to it, for example:

View File

@ -54,20 +54,20 @@ Query OK, 2 row(s) in set (0.001100s)
In the TAOS shell, find the average voltage collected by all smart meters in California, grouped by location.
```
taos> SELECT AVG(voltage) FROM meters GROUP BY location;
avg(voltage) | location |
=============================================================
222.000000000 | California.LosAngeles |
219.200000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.002136s)
taos> SELECT AVG(voltage), location FROM meters GROUP BY location;
avg(voltage) | location |
===============================================================================================
219.200000000 | California.SanFrancisco |
221.666666667 | California.LosAngeles |
Query OK, 2 rows in database (0.005995s)
```
### Example 2
In the TAOS shell, find the number of records and the maximum current over the past 24 hours for all smart meters whose groupId is 2.
In the TAOS shell, find the number of records and the maximum current for all smart meters whose groupId is 2.
```
taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
taos> SELECT count(*), max(current) FROM meters where groupId = 2;
count(*) | max(current) |
==================================
5 | 13.4 |
@ -81,40 +81,41 @@ Query OK, 1 row(s) in set (0.002136s)
In IoT scenarios, it is often necessary to aggregate the collected data by time range through down sampling. TDengine provides the simple `INTERVAL` keyword, which makes queries by time window extremely easy. For example, to sum the current collected by smart meter d1001 every 10 seconds:
```
taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
ts | sum(current) |
taos> SELECT _wstart, sum(current) FROM d1001 INTERVAL(10s);
_wstart | sum(current) |
======================================================
2018-10-03 14:38:00.000 | 10.300000191 |
2018-10-03 14:38:10.000 | 24.900000572 |
Query OK, 2 row(s) in set (0.000883s)
Query OK, 2 rows in database (0.003139s)
```
Down sampling also works on super tables, for example, to sum the current collected by all smart meters in California every second:
```
taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
ts | sum(current) |
taos> SELECT _wstart, SUM(current) FROM meters where location like "California%" INTERVAL(1s);
_wstart | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
2018-10-03 14:38:05.000 | 32.900000572 |
2018-10-03 14:38:05.000 | 23.699999809 |
2018-10-03 14:38:06.000 | 11.500000000 |
2018-10-03 14:38:15.000 | 12.600000381 |
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
2018-10-03 14:38:16.000 | 34.400000572 |
Query OK, 5 rows in database (0.007413s)
```
Down sampling also supports a time offset, for example, to sum the current collected by all smart meters every second, with each time window starting at a 500 millisecond boundary:
```
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
taos> SELECT _wstart, SUM(current) FROM meters INTERVAL(1s, 500a);
_wstart | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
2018-10-03 14:38:03.500 | 10.199999809 |
2018-10-03 14:38:04.500 | 10.300000191 |
2018-10-03 14:38:05.500 | 13.399999619 |
2018-10-03 14:38:06.500 | 11.500000000 |
2018-10-03 14:38:14.500 | 12.600000381 |
2018-10-03 14:38:16.500 | 34.400000572 |
Query OK, 6 rows in database (0.005515s)
```
In IoT scenarios, it is hard to synchronize the timestamps at which different data collection points sample data, yet many analysis algorithms (such as FFT) require the data to be strictly aligned at equal time intervals. In many systems, applications have to handle this themselves, but with TDengine's down sampling it is solved easily.

View File

@ -724,7 +724,6 @@ consumer.close();
</TabItem>
<TabItem value="Go" label="Go">
```go
@ -769,6 +768,7 @@ consumer.Unsubscribe();
// close the consumer
consumer.Close();
```
</TabItem>
</Tabs>
@ -807,300 +807,9 @@ SHOW SUBSCRIPTIONS;
The complete sample code for each language is shown below.
<Tabs defaultValue="java" groupId="lang">
<TabItem label="C" value="c">
```c
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "taos.h"
static int running = 1;
static char dbName[64] = "tmqdb";
static char stbName[64] = "stb";
static char topicName[64] = "topicname";
static int32_t msg_process(TAOS_RES* msg) {
char buf[1024];
int32_t rows = 0;
const char* topicName = tmq_get_topic_name(msg);
const char* dbName = tmq_get_db_name(msg);
int32_t vgroupId = tmq_get_vgroup_id(msg);
printf("topic: %s\n", topicName);
printf("db: %s\n", dbName);
printf("vgroup id: %d\n", vgroupId);
while (1) {
TAOS_ROW row = taos_fetch_row(msg);
if (row == NULL) break;
TAOS_FIELD* fields = taos_fetch_fields(msg);
int32_t numOfFields = taos_field_count(msg);
int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg);
const char* tbName = tmq_get_table_name(msg);
rows++;
taos_print_row(buf, row, fields, numOfFields);
printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf);
}
return rows;
}
static int32_t init_env() {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
TAOS_RES* pRes;
// drop database if exists
printf("create database\n");
pRes = taos_query(pConn, "drop database if exists tmqdb");
if (taos_errno(pRes) != 0) {
printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
// create database
pRes = taos_query(pConn, "create database tmqdb");
if (taos_errno(pRes) != 0) {
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
// create super table
printf("create super table\n");
pRes = taos_query(
pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
if (taos_errno(pRes) != 0) {
printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
// create sub tables
printf("create sub tables\n");
pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table ctb1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
if (taos_errno(pRes) != 0) {
printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
// insert data
printf("insert data into sub tables\n");
pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
}
int32_t create_topic() {
printf("create topic\n");
TAOS_RES* pRes;
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
pRes = taos_query(pConn, "use tmqdb");
if (taos_errno(pRes) != 0) {
printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
taos_close(pConn);
return 0;
}
void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param);
}
tmq_t* build_consumer() {
tmq_conf_res_t code;
tmq_conf_t* conf = tmq_conf_new();
code = tmq_conf_set(conf, "enable.auto.commit", "true");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "group.id", "cgrpName");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "client.id", "user defined name");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "td.connect.user", "root");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "msg.with.table.name", "true");
if (TMQ_CONF_OK != code) return NULL;
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
tmq_conf_destroy(conf);
return tmq;
}
tmq_list_t* build_topic_list() {
tmq_list_t* topicList = tmq_list_new();
int32_t code = tmq_list_append(topicList, "topicname");
if (code) {
return NULL;
}
return topicList;
}
void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
int32_t code;
if ((code = tmq_subscribe(tmq, topicList))) {
fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
return;
}
int32_t totalRows = 0;
int32_t msgCnt = 0;
int32_t timeout = 5000;
while (running) {
TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
if (tmqmsg) {
msgCnt++;
totalRows += msg_process(tmqmsg);
taos_free_result(tmqmsg);
/*} else {*/
/*break;*/
}
}
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
}
int main(int argc, char* argv[]) {
int32_t code;
if (init_env() < 0) {
return -1;
}
if (create_topic() < 0) {
return -1;
}
tmq_t* tmq = build_consumer();
if (NULL == tmq) {
fprintf(stderr, "%% build_consumer() fail!\n");
return -1;
}
tmq_list_t* topic_list = build_topic_list();
if (NULL == topic_list) {
return -1;
}
basic_consume_loop(tmq, topic_list);
code = tmq_unsubscribe(tmq);
if (code) {
fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
} else {
fprintf(stderr, "%% unsubscribe\n");
}
code = tmq_consumer_close(tmq);
if (code) {
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
} else {
fprintf(stderr, "%% Consumer closed\n");
}
return 0;
}
```
[View the source code](https://github.com/taosdata/TDengine/blob/develop/examples/c/tmq.c)
<CDemo />
</TabItem>
<TabItem label="Java" value="java">
@ -1116,22 +825,7 @@ int main(int argc, char* argv[]) {
</TabItem>
<TabItem label="Python" value="Python">
```python
import taos
from taos.tmq import TaosConsumer
import taos
from taos.tmq import *
consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
for msg in consumer:
for row in msg:
print(row)
```
[View the source code](https://github.com/taosdata/TDengine/blob/develop/docs/examples/python/tmq_example.py)
<Python />
</TabItem>
<TabItem label="Node.JS" value="Node.JS">

View File

@ -1,3 +1,3 @@
```c
{{#include docs/examples/c/subscribe_demo.c}}
```
{{#include docs/examples/c/tmq_example.c}}
```

View File

@ -1,3 +1,3 @@
```py
{{#include docs/examples/python/subscribe_demo.py}}
```
{{#include docs/examples/python/tmq_example.py}}
```

View File

@ -110,7 +110,7 @@ alter_table_option: {
The following modifications can be performed on a regular table:
1. ADD COLUMN: add a column.
2. DROP COLUMN: drop a column.
3. ODIFY COLUMN: modify a column definition; if the column is of a variable-length type, this command can be used to change its width (it can only be increased, not decreased).
3. MODIFY COLUMN: modify a column definition; if the column is of a variable-length type, this command can be used to change its width (it can only be increased, not decreased).
4. RENAME COLUMN: rename a column.
### Add a Column
@ -195,4 +195,4 @@ SHOW CREATE TABLE tb_name;
```
DESCRIBE [db_name.]tb_name;
```
```

View File

@ -131,7 +131,7 @@ The configuration parameters in the URL are as follows:
- charset: the character set used by the client; the default is the system character set.
- locale: the client locale; the default is the system's current locale.
- timezone: the time zone used by the client; the default is the system's current time zone.
- batchfetch: true: pull result sets in batches when executing queries; false: pull result sets row by row. The default is false. Batch pulling fetches a batch of data at a time and can significantly improve query performance when the result set is large.
- batchfetch: true: pull result sets in batches when executing queries; false: pull result sets row by row. The default is true. Batch pulling fetches a batch of data at a time and can significantly improve query performance when the result set is large.
- batchErrorIgnore: true: when executing executeBatch on a Statement, if one SQL statement fails, the remaining statements continue to execute; false: no statements after the failed one are executed. The default is false.
For more on using the JDBC native connection, see the [video tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).

View File

@ -25,10 +25,11 @@ curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
$ docker exec -it tdengine taos
taos> show databases;
name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
====================================================================================================================================================================================================================================================================================
log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
Query OK, 1 row(s) in set (0.002843s)
name |
=================================
information_schema |
performance_schema |
Query OK, 2 rows in database (0.033802s)
```
Because the TDengine server running in the container uses the container's hostname to establish connections, it is relatively complicated to access TDengine inside the container from outside using the taos shell or the various connectors (such as JDBC-JNI). Therefore, the method above is the simplest way to access the TDengine service in a container and is suitable for simple scenarios. For more complex scenarios where you want to use the taos shell or the connectors to access the TDengine service in a container from outside, see the next section.
@ -45,10 +46,11 @@ docker run -d --name tdengine --network host tdengine/tdengine
$ taos
taos> show dnodes;
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | myhost:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | |
Query OK, 1 row(s) in set (0.003233s)
id | endpoint | vnodes | support_vnodes | status | create_time | note |
=================================================================================================================================================
1 | vm98:6030 | 0 | 32 | ready | 2022-08-19 14:50:05.337 | |
Query OK, 1 rows in database (0.010654s)
```
## Start TDengine with a Specified hostname and port
@ -59,12 +61,13 @@ Query OK, 1 row(s) in set (0.003233s)
docker run -d \
--name tdengine \
-e TAOS_FQDN=tdengine \
-p 6030-6049:6030-6049 \
-p 6030-6049:6030-6049/udp \
-p 6030:6030 \
-p 6041-6049:6041-6049 \
-p 6041-6049:6041-6049/udp \
tdengine/tdengine
```
The command above starts a TDengine service in the container, listening on the hostname tdengine, and maps the container's ports 6030-6049 to the host's ports 6030-6049 (both TCP and UDP must be mapped). If that port range is already occupied on the host, modify the command to specify a free port range on the host. If `rpcForceTcp` is set to `1`, only the TCP ports need to be mapped.
The command above starts a TDengine service in the container, listening on the hostname tdengine, maps the container's port 6030 to the host's port 6030 (TCP; only host port 6030 can be mapped), and maps the container's ports 6041-6049 to the host's ports 6041-6049 (both TCP and UDP must be mapped). If that port range is already occupied on the host, modify the command to specify a free port range on the host.
Next, make sure the hostname "tdengine" is resolvable in `/etc/hosts`.
@ -103,9 +106,9 @@ taos -h tdengine -P 6030
3. Start the TDengine client in another container on the same network
```shell
docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos
docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine --entrypoint=taos tdengine/tdengine
# or
#docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine
#docker run --rm -it --network td-net --entrypoint=taos tdengine/tdengine -h tdengine
```
## Start a Client Application in a Container
@ -115,7 +118,7 @@ taos -h tdengine -P 6030
```docker
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
ENV TDENGINE_VERSION=3.0.0.0
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
@ -129,6 +132,14 @@ RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_
The following is an example Go application:
* Create a Go module project:
```bash
go mod init app
```
* Create main.go:
```go
/*
* In this test program, we'll create a database and insert 4 records then select out.
@ -212,12 +223,18 @@ func checkErr(err error, prompt string) {
}
```
The complete dockerfile is as follows:
* Update the Go module:
```docker
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=2.4.0.0
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
```bash
go mod tidy
```
The complete dockerfile is as follows:
```dockerfile
FROM golang:1.19.0-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@ -232,8 +249,8 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
ENV TDENGINE_VERSION=3.0.0.0
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@ -248,113 +265,112 @@ CMD ["app"]
We now have `main.go`, `go.mod`, `go.sum`, and `app.dockerfile`, so we can build the application and start it on the `td-net` network:
```shell
$ docker build -t app -f app.dockerfile
$ docker run --rm --network td-net app -h tdengine -p 6030
$ docker build -t app -f app.dockerfile .
$ docker run --rm --network td-net app app -h tdengine -p 6030
============= args parse result: =============
hostName: tdengine
serverPort: 6030
usr: root
password: taosdata
================================================
2022-01-17 15:56:55.48 +0000 UTC 0
2022-01-17 15:56:56.48 +0000 UTC 1
2022-01-17 15:56:57.48 +0000 UTC 2
2022-01-17 15:56:58.48 +0000 UTC 3
2022-01-17 15:58:01.842 +0000 UTC 0
2022-01-17 15:58:02.842 +0000 UTC 1
2022-01-17 15:58:03.842 +0000 UTC 2
2022-01-17 15:58:04.842 +0000 UTC 3
2022-01-18 01:43:48.029 +0000 UTC 0
2022-01-18 01:43:49.029 +0000 UTC 1
2022-01-18 01:43:50.029 +0000 UTC 2
2022-01-18 01:43:51.029 +0000 UTC 3
2022-08-19 07:43:51.68 +0000 UTC 0
2022-08-19 07:43:52.68 +0000 UTC 1
2022-08-19 07:43:53.68 +0000 UTC 2
2022-08-19 07:43:54.68 +0000 UTC 3
```
## Start a TDengine Cluster with docker-compose
1. The following docker-compose file starts a TDengine cluster with 2 replicas, 2 mnodes, 2 dnodes and 1 arbitrator.
1. The following docker-compose file starts a three-node TDengine cluster.
```docker
version: "3"
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
td-1:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
```
```yml
version: "3"
services:
td-1:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
td-3:
image: tdengine/tdengine:$VERSION
environment:
TAOS_FQDN: "td-3"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td3:/var/lib/taos/
- taoslog-td3:/var/log/taos/
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
taosdata-td3:
taoslog-td3:
```
:::note
- The `VERSION` environment variable sets the tdengine image tag
- `TAOS_FIRST_EP` must be set on a newly created instance so that it can join the TDengine cluster; if high availability is required, `TAOS_SECOND_EP` needs to be set as well
- `TAOS_REPLICA` sets the default number of database replicas; its value range is [1,3]
In a dual-replica environment, an arbitrator is recommended; configure it with TAOS_ARBITRATOR
:::
* The `VERSION` environment variable sets the tdengine image tag
* `TAOS_FIRST_EP` must be set on a newly created instance so that it can join the TDengine cluster; if high availability is required, `TAOS_SECOND_EP` needs to be set as well
:::
2. Start the cluster
```shell
$ VERSION=2.4.0.0 docker-compose up -d
Creating network "test_default" with the default driver
Creating volume "test_taosdata-td1" with default driver
Creating volume "test_taoslog-td1" with default driver
Creating volume "test_taosdata-td2" with default driver
Creating volume "test_taoslog-td2" with default driver
Creating test_td-1_1 ... done
Creating test_arbitrator_1 ... done
Creating test_td-2_1 ... done
```
```shell
$ VERSION=3.0.0.0 docker-compose up -d
Creating network "test-docker_default" with the default driver
Creating volume "test-docker_taosdata-td1" with default driver
Creating volume "test-docker_taoslog-td1" with default driver
Creating volume "test-docker_taosdata-td2" with default driver
Creating volume "test-docker_taoslog-td2" with default driver
Creating volume "test-docker_taosdata-td3" with default driver
Creating volume "test-docker_taoslog-td3" with default driver
Creating test-docker_td-3_1 ... done
Creating test-docker_td-1_1 ... done
Creating test-docker_td-2_1 ... done
```
3. Check the node status
```shell
$ docker-compose ps
Name Command State Ports
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
test_arbitrator_1 /usr/bin/entrypoint.sh tar ... Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
```
```shell
docker-compose ps
Name Command State Ports
-------------------------------------------------------------------
test-docker_td-1_1 /tini -- /usr/bin/entrypoi ... Up
test-docker_td-2_1 /tini -- /usr/bin/entrypoi ... Up
test-docker_td-3_1 /tini -- /usr/bin/entrypoi ... Up
```
4. Use the taos shell to view the dnodes
```shell
$ docker-compose exec td-1 taos -s "show dnodes"
```shell
taos> show dnodes
id | end_point | vnodes | cores | status | role | create_time | offline reason |
======================================================================================================================================
1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | |
2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | |
0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - |
Query OK, 3 row(s) in set (0.000811s)
```
$ docker-compose exec td-1 taos -s "show dnodes"
taos> show dnodes
id | endpoint | vnodes | support_vnodes | status | create_time | note |
=================================================================================================================================================
1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | |
2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | |
3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | |
Query OK, 3 rows in database (0.021262s)
```
## taosAdapter
@ -362,93 +378,80 @@ password: taosdata
2. For flexible deployment, taosAdapter can also be started in a separate container:
```docker
services:
# ...
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
```
To deploy multiple taosAdapter instances for higher throughput and high availability, the recommended configuration is to use a reverse proxy such as nginx as a unified access entry point. For specific configuration methods, refer to the official nginx documentation. Here is an example:
```yml
version: "3"
networks:
inter:
api:
services:
arbitrator:
image: tdengine/tdengine:$VERSION
command: tarbitrator
networks:
- inter
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
TAOS_NUM_OF_MNODES: "2"
TAOS_REPLICA: "2"
TAOS_ARBITRATOR: arbitrator:6042
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
command: taosadapter
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TAOS_SECOND_EP: "td-2"
deploy:
replicas: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
- api
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
```

The compose file above uses the 2.x arbitrator-based setup; a 3.0 version of the same stack follows:

```yml
version: "3"

networks:
  inter:

services:
td-1:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-1"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td1:/var/lib/taos/
- taoslog-td1:/var/log/taos/
td-2:
image: tdengine/tdengine:$VERSION
networks:
- inter
environment:
TAOS_FQDN: "td-2"
TAOS_FIRST_EP: "td-1"
volumes:
- taosdata-td2:/var/lib/taos/
- taoslog-td2:/var/log/taos/
adapter:
image: tdengine/tdengine:$VERSION
entrypoint: "taosadapter"
networks:
- inter
environment:
TAOS_FIRST_EP: "td-1"
TAOS_SECOND_EP: "td-2"
deploy:
replicas: 4
nginx:
image: nginx
depends_on:
- adapter
networks:
- inter
ports:
- 6041:6041
- 6044:6044/udp
command: [
"sh",
"-c",
"while true;
do curl -s http://adapter:6041/-/ping >/dev/null && break;
done;
printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
> /etc/nginx/conf.d/rest.conf;
printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
>> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
nginx -g 'daemon off;'",
]
volumes:
taosdata-td1:
taoslog-td1:
taosdata-td2:
taoslog-td2:
```
## Deploying with docker swarm
@ -457,50 +460,46 @@ password: taosdata
The docker-compose file can follow the previous section. The commands below start TDengine with docker swarm:
```shell
$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos
$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos
Creating network taos_inter
Creating network taos_api
Creating service taos_arbitrator
Creating service taos_nginx
Creating service taos_td-1
Creating service taos_td-2
Creating service taos_adapter
Creating service taos_nginx
```
Check and manage the services:
```shell
$ docker stack ps taos
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago
3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago
100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago
pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago
tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago
rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago
i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago
lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago
o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
q5m1oxs589cp taos_td-2.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
$ docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0
3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0
d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0
9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0
ID NAME MODE REPLICAS IMAGE PORTS
ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0
crmhdjw6vxw0 taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0
rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0
```
From the output above, you can see two dnodes, … taosAdapter instances, and one nginx reverse-proxy service.
Next, we can scale down the number of taosAdapter services:
```shell
$ docker service scale taos_adapter=1
taos_adapter scaled to 1
overall progress: 1 out of 1 tasks
1/1: running [==================================================>]
overall progress: 1 out of 1 tasks
1/1: running [==================================================>]
verify: Service converged
$ docker service ls -f name=taos_adapter
ID NAME MODE REPLICAS IMAGE PORTS
561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0
ID NAME MODE REPLICAS IMAGE PORTS
ozuklorgl8bs taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0
```
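The same command can scale the service back up when load increases; a quick sketch, assuming the stack is still deployed under the name `taos`:

```shell
# Restore taosAdapter to 4 replicas and confirm the replica count.
docker service scale taos_adapter=4
docker service ls -f name=taos_adapter
```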

View File

@ -25,7 +25,7 @@ taosKeeper 安装方式:
<!-- taosKeeper must be run from an operating-system terminal. The tool supports two configuration methods: [command-line arguments](#命令行参数启动) and a [configuration file](#配置文件启动). Command-line arguments take precedence over configuration-file parameters. -->
taosKeeper must be run from an operating-system terminal. The tool supports [starting with a configuration file](#配置文件启动).
**Before running taosKeeper, make sure the TDengine cluster and taosAdapter are already running correctly.** In addition, monitoring must be enabled in TDengine; for details see [TDengine monitoring configuration](../config/#监控相关).
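A minimal sketch of starting taosKeeper from a terminal, assuming the binary is installed as `taoskeeper` and that it accepts a TOML configuration file via `-c` (the path is only illustrative):

```shell
# Assumption: -c points taoskeeper at its TOML configuration file.
taoskeeper -c /etc/taos/keeper.toml
```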
<!--
### Starting with command-line arguments
@ -93,7 +93,7 @@ taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产
```shell
$ taos
#
# As in the example above, the log database is used as the storage location for monitoring data
> use log;
> select * from cluster_info limit 1;
```

View File

@ -1,4 +0,0 @@
---
sidebar_label: taosX
title: Using taosX to replicate data between clusters
---

View File

@ -26,5 +26,3 @@ TDengine 集群中的时序数据的副本数是与数据库关联的,一个
The number of nodes in a TDengine cluster must be greater than or equal to the number of replicas; otherwise an error is reported when tables are created.
When the nodes of a TDengine cluster are deployed on different physical machines and multiple replicas are configured, the system is highly reliable without any additional software or tools. TDengine Enterprise can additionally place replicas in different data centers to achieve geo-redundant disaster recovery.
Another disaster-recovery approach is to use `taosX` to replicate the data of one TDengine cluster to another TDengine cluster located in a physically separate data center. For details on how to use it, see the [taosX reference manual](../../reference/taosX).
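Because the replica count is a per-database property, it is chosen when the database is created; a sketch using the taos CLI (the database name is illustrative):

```shell
# REPLICA 3 requires at least three dnodes in the cluster.
taos -s "CREATE DATABASE power REPLICA 3"
```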

View File

@ -193,7 +193,7 @@ docker run -d \
As shown in the figure above, select the `TDengine` data source in Query and enter the SQL statement to run in the query box below. Details:
- INPUT SQL: enter the statement to query (the result set of this SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where from, to, and interval are built-in variables of the TDengine plugin that represent the query range and time interval taken from the Grafana panel. Besides the built-in variables, custom template variables are also supported.
- INPUT SQL: enter the statement to query (the result set of this SQL statement should be two columns and multiple rows), for example: `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`, where from, to, and interval are built-in variables of the TDengine plugin that represent the query range and time interval taken from the Grafana panel. Besides the built-in variables, custom template variables are also supported.
- ALIAS BY: sets an alias for the current query.
- GENERATE SQL: clicking this button automatically substitutes the variables and generates the final statement to be executed.
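Before pasting a statement into the panel, it can help to run it once in the taos CLI with the Grafana built-in variables replaced by literals; a sketch that assumes monitoring is enabled so that `log.dnodes_info` exists:

```shell
# $from/$to/$interval are replaced by a concrete range and interval for this test run.
taos -s "select _wstart, avg(mem_system) from log.dnodes_info where ts >= now-1h and ts < now interval(1m)"
```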
@ -205,7 +205,11 @@ docker run -d \
### Importing a Dashboard
On the data source configuration page, you can import the TDinsight dashboard for this data source as a monitoring visualization tool for the TDengine cluster. The dashboard has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). For other installation methods and related usage instructions, see the [TDinsight user manual](/reference/tdinsight/).
On the data source configuration page, you can import the TDinsight dashboard for this data source as a monitoring visualization tool for the TDengine cluster. If the TDengine server is version 3.0, select `TDinsight for 3.x` for the import.
![TDengine Database Grafana plugin import dashboard](./import_dashboard.webp)
The dashboard adapted for TDengine 2.* has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). For other installation methods and related usage instructions, see the [TDinsight user manual](/reference/tdinsight/).
Other dashboards that use TDengine as a data source can be [searched here](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Below is an incomplete list:

Binary file not shown.


View File

@ -98,10 +98,9 @@ int32_t create_stream() {
/*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
/*const char* sql = "select sum(k) from tu1 interval(10m)";*/
/*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
pRes =
taos_query(pConn,
"create stream stream1 trigger max_delay 10s into outstb as select _wstart, sum(k) from st1 partition "
"by tbname session(ts, 10s) ");
pRes = taos_query(pConn,
"create stream stream1 trigger max_delay 10s watermark 10s into outstb as select _wstart start, "
"count(k) from st1 partition by tbname interval(20s) ");
if (taos_errno(pRes) != 0) {
printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
return -1;

examples/rust/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
/target
Cargo.lock

examples/rust/Cargo.toml Normal file
View File

@ -0,0 +1,18 @@
[package]
name = "rust"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
taos = "*"
[dev-dependencies]
chrono = "0.4"
itertools = "0.10.3"
pretty_env_logger = "0.4.0"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1", features = ["full"] }
anyhow = "1"

View File

@ -0,0 +1,80 @@
use anyhow::Result;
use serde::Deserialize;
use taos::*;
#[tokio::main]
async fn main() -> Result<()> {
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
taos.exec_many([
"drop database if exists test",
"create database test keep 36500",
"use test",
"create table tb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint,
c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned,
c10 float, c11 double, c12 varchar(100), c13 nchar(100)) tags(t1 varchar(100))",
])
.await?;
let mut stmt = Stmt::init(&taos)?;
stmt.prepare(
"insert into ? using tb1 tags(?) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
)?;
stmt.set_tbname("d0")?;
stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
let params = vec![
ColumnView::from_millis_timestamp(vec![164000000000]),
ColumnView::from_bools(vec![true]),
ColumnView::from_tiny_ints(vec![i8::MAX]),
ColumnView::from_small_ints(vec![i16::MAX]),
ColumnView::from_ints(vec![i32::MAX]),
ColumnView::from_big_ints(vec![i64::MAX]),
ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
ColumnView::from_unsigned_ints(vec![u32::MAX]),
ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
ColumnView::from_floats(vec![f32::MAX]),
ColumnView::from_doubles(vec![f64::MAX]),
ColumnView::from_varchar(vec!["ABC"]),
ColumnView::from_nchar(vec!["涛思数据"]),
];
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
assert_eq!(rows, 1);
#[derive(Debug, Deserialize)]
#[allow(dead_code)]
struct Row {
ts: String,
c1: bool,
c2: i8,
c3: i16,
c4: i32,
c5: i64,
c6: u8,
c7: u16,
c8: u32,
c9: u64,
c10: Option<f32>,
c11: f64,
c12: String,
c13: String,
t1: serde_json::Value,
}
let rows: Vec<Row> = taos
.query("select * from tb1")
.await?
.deserialize()
.try_collect()
.await?;
let row = &rows[0];
dbg!(&row);
assert_eq!(row.c5, i64::MAX);
assert_eq!(row.c8, u32::MAX);
assert_eq!(row.c9, u64::MAX);
assert_eq!(row.c10.unwrap(), f32::MAX);
// assert_eq!(row.c11, f64::MAX);
assert_eq!(row.c12, "ABC");
assert_eq!(row.c13, "涛思数据");
Ok(())
}

View File

@ -0,0 +1,74 @@
use anyhow::Result;
use serde::Deserialize;
use taos::*;
#[tokio::main]
async fn main() -> Result<()> {
let taos = TaosBuilder::from_dsn("taos://")?.build()?;
taos.exec_many([
"drop database if exists test_bindable",
"create database test_bindable keep 36500",
"use test_bindable",
"create table tb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint,
c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned,
c10 float, c11 double, c12 varchar(100), c13 nchar(100))",
])
.await?;
let mut stmt = Stmt::init(&taos)?;
stmt.prepare("insert into tb1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")?;
let params = vec![
ColumnView::from_millis_timestamp(vec![0]),
ColumnView::from_bools(vec![true]),
ColumnView::from_tiny_ints(vec![i8::MAX]),
ColumnView::from_small_ints(vec![i16::MAX]),
ColumnView::from_ints(vec![i32::MAX]),
ColumnView::from_big_ints(vec![i64::MAX]),
ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
ColumnView::from_unsigned_ints(vec![u32::MAX]),
ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
ColumnView::from_floats(vec![f32::MAX]),
ColumnView::from_doubles(vec![f64::MAX]),
ColumnView::from_varchar(vec!["ABC"]),
ColumnView::from_nchar(vec!["涛思数据"]),
];
let rows = stmt.bind(&params)?.add_batch()?.execute()?;
assert_eq!(rows, 1);
#[derive(Debug, Deserialize)]
#[allow(dead_code)]
struct Row {
ts: String,
c1: bool,
c2: i8,
c3: i16,
c4: i32,
c5: i64,
c6: u8,
c7: u16,
c8: u32,
c9: u64,
c10: Option<f32>,
c11: f64,
c12: String,
c13: String,
}
let rows: Vec<Row> = taos
.query("select * from tb1")
.await?
.deserialize()
.try_collect()
.await?;
let row = &rows[0];
dbg!(&row);
assert_eq!(row.c5, i64::MAX);
assert_eq!(row.c8, u32::MAX);
assert_eq!(row.c9, u64::MAX);
assert_eq!(row.c10.unwrap(), f32::MAX);
// assert_eq!(row.c11, f64::MAX);
assert_eq!(row.c12, "ABC");
assert_eq!(row.c13, "涛思数据");
Ok(())
}

View File

@ -0,0 +1,106 @@
use std::time::Duration;
use chrono::{DateTime, Local};
use taos::*;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let dsn = "taos://";
let opts = PoolBuilder::new()
.max_size(5000) // max connections
.max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
.min_idle(Some(1000)) // minimal idle connections
.connection_timeout(Duration::from_secs(2));
let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
let taos = pool.get()?;
let db = "query";
// prepare database
taos.exec_many([
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
])
.await?;
let inserted = taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))",
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
assert_eq!(inserted, 6);
loop {
let count: usize = taos
.query_one("select count(*) from `meters`")
.await?
.unwrap_or_default();
if count >= 6 {
break;
} else {
println!("waiting for data");
}
}
let mut result = taos.query("select tbname, * from `meters`").await?;
for field in result.fields() {
println!("got field: {}", field.name());
}
// Query option 1, use rows stream.
let mut rows = result.rows();
let mut nrows = 0;
while let Some(row) = rows.try_next().await? {
for (col, (name, value)) in row.enumerate() {
println!(
"[{}] got value in col {} (named `{:>8}`): {}",
nrows, col, name, value
);
}
nrows += 1;
}
// Query options 2, use deserialization with serde.
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
tbname: String,
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
groupid: i32,
// binary/varchar to String
location: String,
}
let records: Vec<Record> = taos
.query("select tbname, * from `meters`")
.await?
.deserialize()
.try_collect()
.await?;
dbg!(result.summary());
assert_eq!(records.len(), 6);
dbg!(records);
Ok(())
}

View File

@ -0,0 +1,103 @@
use std::time::Duration;
use chrono::{DateTime, Local};
use taos::*;
// Query options 2, use deserialization with serde.
#[derive(Debug, serde::Deserialize)]
#[allow(dead_code)]
struct Record {
// deserialize timestamp to chrono::DateTime<Local>
ts: DateTime<Local>,
// float to f32
current: Option<f32>,
// int to i32
voltage: Option<i32>,
phase: Option<f32>,
}
async fn prepare(taos: Taos) -> anyhow::Result<()> {
let inserted = taos.exec_many([
// create child table
"CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
"INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
assert_eq!(inserted, 6);
Ok(())
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "debug");
pretty_env_logger::init();
let dsn = "taos://localhost:6030";
let builder = TaosBuilder::from_dsn(dsn)?;
let taos = builder.build()?;
let db = "tmq";
// prepare database
taos.exec_many([
"DROP TOPIC IF EXISTS tmq_meters".to_string(),
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))".to_string(),
// create topic for subscription
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
.await?;
let task = tokio::spawn(prepare(taos));
tokio::time::sleep(Duration::from_secs(1)).await;
// subscribe
let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
let mut consumer = tmq.build()?;
consumer.subscribe(["tmq_meters"]).await?;
{
let mut stream = consumer.stream();
while let Some((offset, message)) = stream.try_next().await? {
// get information from offset
// the topic
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
println!("* in vgroup id {vgroup_id} of topic {topic}\n");
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
}
}
consumer.commit(offset).await?;
}
}
consumer.unsubscribe().await;
task.await??;
Ok(())
}

View File

@ -0,0 +1,3 @@
fn main() {
println!("Hello, world!");
}

View File

@ -60,6 +60,7 @@ enum {
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__GET_RES,
STREAM_INPUT__CHECKPOINT,
STREAM_INPUT__DESTROY,
};
typedef enum EStreamType {

View File

@ -38,22 +38,18 @@ typedef struct STagVal STagVal;
typedef struct STag STag;
// bitmap
#define N1(n) ((1 << (n)) - 1)
#define BIT1_SIZE(n) (((n)-1) / 8 + 1)
#define BIT2_SIZE(n) (((n)-1) / 4 + 1)
#define SET_BIT1(p, i, v) \
do { \
(p)[(i) / 8] &= N1((i) % 8); \
(p)[(i) / 8] |= (((uint8_t)(v)) << (((i) % 8))); \
} while (0)
const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
{0b00000000, 0b00000100, 0b00001000, 2},
{0b00000000, 0b00010000, 0b00100000, 4},
{0b00000000, 0b01000000, 0b10000000, 6}};
#define GET_BIT1(p, i) (((p)[(i) / 8] >> ((i) % 8)) & ((uint8_t)1))
#define SET_BIT2(p, i, v) \
do { \
p[(i) / 4] &= N1((i) % 4 * 2); \
(p)[(i) / 4] |= (((uint8_t)(v)) << (((i) % 4) * 2)); \
} while (0)
#define GET_BIT2(p, i) (((p)[(i) / 4] >> (((i) % 4) * 2)) & ((uint8_t)3))
#define N1(n) ((((uint8_t)1) << (n)) - 1)
#define BIT1_SIZE(n) ((((n)-1) >> 3) + 1)
#define BIT2_SIZE(n) ((((n)-1) >> 2) + 1)
#define SET_BIT1(p, i, v) ((p)[(i) >> 3] = (p)[(i) >> 3] & N1((i)&7) | (((uint8_t)(v)) << ((i)&7)))
#define GET_BIT1(p, i) (((p)[(i) >> 3] >> ((i)&7)) & ((uint8_t)1))
#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)])
#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3))
// STSchema
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
@ -171,7 +167,7 @@ struct SColVal {
#pragma pack(push, 1)
struct STagVal {
// char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta
// char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta
union {
int16_t cid;
char *pKey;

View File

@ -139,7 +139,6 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
bool tsc);
void taosCleanupCfg();
void taosCfgDynamicOptions(const char *option, const char *value);
void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);
struct SConfig *taosGetCfg();

View File

@ -2555,10 +2555,14 @@ typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
int64_t ntbUid;
SArray* colIdList; // SArray<int16_t>
} SCheckAlterInfo;
} STqCheckInfo;
int32_t tEncodeSCheckAlterInfo(SEncoder* pEncoder, const SCheckAlterInfo* pInfo);
int32_t tDecodeSCheckAlterInfo(SDecoder* pDecoder, SCheckAlterInfo* pInfo);
int32_t tEncodeSTqCheckInfo(SEncoder* pEncoder, const STqCheckInfo* pInfo);
int32_t tDecodeSTqCheckInfo(SDecoder* pDecoder, STqCheckInfo* pInfo);
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
} STqDelCheckInfoReq;
typedef struct {
int32_t vgId;
@ -2659,6 +2663,10 @@ typedef struct {
SEpSet epSet;
} SVgEpSet;
typedef struct {
int32_t padding;
} SRSmaExecMsg;
typedef struct {
int64_t suid;
int8_t level;

View File

@ -188,7 +188,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp)
TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp)
TD_DEF_MSG_TYPE(TDMT_VND_MQ_COMMIT_OFFSET, "vnode-commit-offset", STqOffset, STqOffset)
TD_DEF_MSG_TYPE(TDMT_VND_CHECK_ALTER_INFO, "vnode-alter-check-info", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ADD_CHECK_INFO, "vnode-add-check-info", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DELETE_CHECK_INFO, "vnode-delete-check-info", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TOPIC, "vnode-alter-topic", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_TOPIC, "vnode-drop-topic", NULL, NULL)
@ -201,6 +202,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_EXEC_RSMA, "vnode-exec-rsma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp)
TD_DEF_MSG_TYPE(TDMT_VND_BATCH_DEL, "batch-delete", SBatchDeleteReq, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)

View File

@ -105,7 +105,7 @@ typedef enum ENodeType {
QUERY_NODE_COLUMN_REF,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR,
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIF_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
@ -198,7 +198,7 @@ typedef enum ENodeType {
QUERY_NODE_QUERY,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN,
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
QUERY_NODE_LOGIC_PLAN_JOIN,
QUERY_NODE_LOGIC_PLAN_AGG,
QUERY_NODE_LOGIC_PLAN_PROJECT,
@ -215,7 +215,7 @@ typedef enum ENodeType {
QUERY_NODE_LOGIC_PLAN,
// physical plan node
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,

View File

@ -428,6 +428,9 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
char* nodesGetFillModeString(EFillMode mode);
int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);
const char* operatorTypeStr(EOperatorType type);
const char* logicConditionTypeStr(ELogicConditionType type);
#ifdef __cplusplus
}
#endif

View File

@ -53,6 +53,7 @@ enum {
TASK_SCHED_STATUS__WAITING,
TASK_SCHED_STATUS__ACTIVE,
TASK_SCHED_STATUS__FAILED,
TASK_SCHED_STATUS__DROPPING,
};
enum {
@ -127,6 +128,10 @@ typedef struct {
int8_t type;
} SStreamCheckpoint;
typedef struct {
int8_t type;
} SStreamTaskDestroy;
typedef struct {
int8_t type;
SSDataBlock* pBlock;
@ -211,7 +216,6 @@ typedef struct {
void* vnode;
FTbSink* tbSinkFunc;
STSchema* pTSchema;
SHashObj* pHash; // groupId to tbuid
} STaskSinkTb;
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
@ -515,7 +519,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLen);
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen);
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);

View File

@ -26,11 +26,15 @@ extern "C" {
extern bool gRaftDetailLog;
#define SYNC_RESP_TTL_MS 10000000
#define SYNC_SPEED_UP_HB_TIMER 400
#define SYNC_SPEED_UP_AFTER_MS (1000 * 20)
#define SYNC_SLOW_DOWN_RANGE 100
#define SYNC_MAX_READ_RANGE 10
#define SYNC_RESP_TTL_MS 10000000
#define SYNC_SPEED_UP_HB_TIMER 400
#define SYNC_SPEED_UP_AFTER_MS (1000 * 20)
#define SYNC_SLOW_DOWN_RANGE 100
#define SYNC_MAX_READ_RANGE 2
#define SYNC_MAX_PROGRESS_WAIT_MS 4000
#define SYNC_MAX_START_TIME_RANGE_MS (1000 * 20)
#define SYNC_MAX_RECV_TIME_RANGE_MS 1200
#define SYNC_ADD_QUORUM_COUNT 3
#define SYNC_MAX_BATCH_SIZE 1
#define SYNC_INDEX_BEGIN 0

View File

@ -423,6 +423,7 @@ typedef struct SyncAppendEntriesReply {
SyncTerm privateTerm;
bool success;
SyncIndex matchIndex;
int64_t startTime;
} SyncAppendEntriesReply;
SyncAppendEntriesReply* syncAppendEntriesReplyBuild(int32_t vgId);

View File

@ -291,6 +291,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_STREAM_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F1)
#define TSDB_CODE_MND_INVALID_STREAM_OPTION TAOS_DEF_ERROR_CODE(0, 0x03F2)
#define TSDB_CODE_MND_STREAM_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03F3)
#define TSDB_CODE_MND_STREAM_TASK_DROPPED TAOS_DEF_ERROR_CODE(0, 0x03F4)
// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
@ -614,6 +615,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154)
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3157)
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)

View File

@ -132,15 +132,14 @@ typedef enum EOperatorType {
OP_TYPE_DIV,
OP_TYPE_REM,
// unary arithmetic operator
OP_TYPE_MINUS,
OP_TYPE_ASSIGN,
OP_TYPE_MINUS = 20,
// bitwise operator
OP_TYPE_BIT_AND,
OP_TYPE_BIT_AND = 30,
OP_TYPE_BIT_OR,
// binary comparison operator
OP_TYPE_GREATER_THAN,
OP_TYPE_GREATER_THAN = 40,
OP_TYPE_GREATER_EQUAL,
OP_TYPE_LOWER_THAN,
OP_TYPE_LOWER_EQUAL,
@ -153,7 +152,7 @@ typedef enum EOperatorType {
OP_TYPE_MATCH,
OP_TYPE_NMATCH,
// unary comparison operator
OP_TYPE_IS_NULL,
OP_TYPE_IS_NULL = 100,
OP_TYPE_IS_NOT_NULL,
OP_TYPE_IS_TRUE,
OP_TYPE_IS_FALSE,
@ -163,8 +162,11 @@ typedef enum EOperatorType {
OP_TYPE_IS_NOT_UNKNOWN,
// json operator
OP_TYPE_JSON_GET_VALUE,
OP_TYPE_JSON_CONTAINS
OP_TYPE_JSON_GET_VALUE = 150,
OP_TYPE_JSON_CONTAINS,
// internal operator
OP_TYPE_ASSIGN = 200
} EOperatorType;
#define OP_TYPE_CALC_MAX OP_TYPE_BIT_OR

View File

@ -1,7 +1,57 @@
@echo off
goto %1
:needAdmin
if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json (
echo The default data directory C:/TDengine/data contains old data of tdengine 2.x, please clear it before installing!
)
set source_dir=%2
set source_dir=%source_dir:/=\\%
set binary_dir=%3
set binary_dir=%binary_dir:/=\\%
set osType=%4
set verNumber=%5
set tagert_dir=C:\\TDengine
if not exist %tagert_dir% (
mkdir %tagert_dir%
)
if not exist %tagert_dir%\\cfg (
mkdir %tagert_dir%\\cfg
)
if not exist %tagert_dir%\\include (
mkdir %tagert_dir%\\include
)
if not exist %tagert_dir%\\driver (
mkdir %tagert_dir%\\driver
)
if not exist C:\\TDengine\\cfg\\taos.cfg (
copy %source_dir%\\packaging\\cfg\\taos.cfg %tagert_dir%\\cfg\\taos.cfg > nul
)
if exist %binary_dir%\\test\\cfg\\taosadapter.toml (
if not exist %tagert_dir%\\cfg\\taosadapter.toml (
copy %binary_dir%\\test\\cfg\\taosadapter.toml %tagert_dir%\\cfg\\taosadapter.toml > nul
)
)
copy %source_dir%\\include\\client\\taos.h %tagert_dir%\\include > nul
copy %source_dir%\\include\\util\\taoserror.h %tagert_dir%\\include > nul
copy %source_dir%\\include\\libs\\function\\taosudf.h %tagert_dir%\\include > nul
copy %binary_dir%\\build\\lib\\taos.lib %tagert_dir%\\driver > nul
copy %binary_dir%\\build\\lib\\taos_static.lib %tagert_dir%\\driver > nul
copy %binary_dir%\\build\\lib\\taos.dll %tagert_dir%\\driver > nul
copy %binary_dir%\\build\\bin\\taos.exe %tagert_dir% > nul
copy %binary_dir%\\build\\bin\\taosd.exe %tagert_dir% > nul
copy %binary_dir%\\build\\bin\\udfd.exe %tagert_dir% > nul
if exist %binary_dir%\\build\\bin\\taosBenchmark.exe (
copy %binary_dir%\\build\\bin\\taosBenchmark.exe %tagert_dir% > nul
)
if exist %binary_dir%\\build\\bin\\taosadapter.exe (
copy %binary_dir%\\build\\bin\\taosadapter.exe %tagert_dir% > nul
)
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)&& echo To start/stop TDengine with administrator privileges: sc start/stop taosd &goto :eof
:hasAdmin
cp -f C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32
copy /y C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32 > nul
sc query "taosd" >nul || sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND

View File

@ -664,7 +664,9 @@ function install_TDengine() {
## ==============================Main program starts from here============================
echo source directory: $1
echo binary directory: $2
if [ "$osType" != "Darwin" ]; then
if [ -x ${data_dir}/dnode/dnodeCfg.json ]; then
echo -e "\033[44;31;5mThe default data directory ${data_dir} contains old data of tdengine 2.x, please clear it before installing!\033[0m"
elif [ "$osType" != "Darwin" ]; then
if [ -x ${bin_dir}/${clientName} ]; then
update_TDengine
else

View File

@ -123,7 +123,7 @@ void createNewTable(TAOS* pConn, int32_t index) {
}
taos_free_result(pRes);
for(int32_t i = 0; i < 100000; i += 20) {
for(int32_t i = 0; i < 3280; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@ -679,30 +679,28 @@ TEST(testCase, projection_query_tables) {
TAOS_RES* pRes = taos_query(pConn, "use abc1");
taos_free_result(pRes);
pRes = taos_query(pConn, "explain verbose true select _wstart,count(*),a from st1 partition by a interval(1s)");
printResult(pRes);
// pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
// if (taos_errno(pRes) != 0) {
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
// }
// taos_free_result(pRes);
//
// pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
// if (taos_errno(pRes) != 0) {
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
// }
// taos_free_result(pRes);
//
// pRes = taos_query(pConn, "create table tu using st1 tags(1)");
// if (taos_errno(pRes) != 0) {
// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
// }
// taos_free_result(pRes);
//
// for(int32_t i = 0; i < 1; ++i) {
// printf("create table :%d\n", i);
// createNewTable(pConn, i);
// }
pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table tu using st1 tags(1)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
for(int32_t i = 0; i < 2; ++i) {
printf("create table :%d\n", i);
createNewTable(pConn, i);
}
//
// pRes = taos_query(pConn, "select * from tu");
// if (taos_errno(pRes) != 0) {

View File

@ -88,7 +88,7 @@ static const SSysDbTableSchema userDBSchema[] = {
{.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "retention", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
{.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},

View File

@ -1343,12 +1343,14 @@ SSDataBlock* createDataBlock() {
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
if (pBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pBlock->pDataBlock = taosArrayInit(4, sizeof(SColumnInfoData));
if (pBlock->pDataBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pBlock);
return NULL;
}
return pBlock;
@ -1423,6 +1425,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
}
void colDataDestroy(SColumnInfoData* pColData) {
if(!pColData) return;
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
taosMemoryFreeClear(pColData->varmeta.offset);
} else {

View File

@ -75,7 +75,7 @@ int32_t tsMonitorMaxLogs = 100;
bool tsMonitorComp = false;
// telem
bool tsEnableTelem = false;
bool tsEnableTelem = true;
int32_t tsTelemInterval = 86400;
char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com";
uint16_t tsTelemPort = 80;
@ -166,7 +166,22 @@ int32_t tsTtlPushInterval = 86400;
int32_t tsGrantHBInterval = 60;
#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) { return 0; }
int32_t taosSetTfsCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "dataDir");
memset(tsDataDir, 0, PATH_MAX);
int32_t size = taosArrayGetSize(pItem->array);
tsDiskCfgNum = 1;
tstrncpy(tsDiskCfg[0].dir, pItem->str, TSDB_FILENAME_LEN);
tsDiskCfg[0].level = 0;
tsDiskCfg[0].primary = 1;
tstrncpy(tsDataDir, pItem->str, PATH_MAX);
if (taosMulMkDir(tsDataDir) != 0) {
uError("failed to create dataDir:%s", tsDataDir);
return -1;
}
return 0;
}
#else
int32_t taosSetTfsCfg(SConfig *pCfg);
#endif

View File

@ -4262,7 +4262,6 @@ int32_t tDeserializeSServerStatusRsp(void *buf, int32_t bufLen, SServerStatusRsp
tDecoderClear(&decoder);
return 0;
}
int32_t tEncodeSMqOffset(SEncoder *encoder, const SMqOffset *pOffset) {
if (tEncodeI32(encoder, pOffset->vgId) < 0) return -1;
if (tEncodeI64(encoder, pOffset->offset) < 0) return -1;
@ -4300,7 +4299,6 @@ int32_t tDecodeSMqCMCommitOffsetReq(SDecoder *decoder, SMqCMCommitOffsetReq *pRe
tEndDecode(decoder);
return 0;
}
int32_t tSerializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -5590,7 +5588,6 @@ int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
return 0;
}
#if 1
int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
if (pVal->type == TMQ_OFFSET__RESET_NONE) {
snprintf(buf, maxLen, "offset(reset to none)");
@ -5609,7 +5606,6 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
}
return 0;
}
#endif
bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
if (pLeft->type == pRight->type) {
@ -5643,7 +5639,7 @@ int32_t tDecodeSTqOffset(SDecoder *pDecoder, STqOffset *pOffset) {
return 0;
}
int32_t tEncodeSCheckAlterInfo(SEncoder *pEncoder, const SCheckAlterInfo *pInfo) {
int32_t tEncodeSTqCheckInfo(SEncoder *pEncoder, const STqCheckInfo *pInfo) {
if (tEncodeCStr(pEncoder, pInfo->topic) < 0) return -1;
if (tEncodeI64(pEncoder, pInfo->ntbUid) < 0) return -1;
int32_t sz = taosArrayGetSize(pInfo->colIdList);
@ -5655,7 +5651,7 @@ int32_t tEncodeSCheckAlterInfo(SEncoder *pEncoder, const SCheckAlterInfo *pInfo)
return pEncoder->pos;
}
int32_t tDecodeSCheckAlterInfo(SDecoder *pDecoder, SCheckAlterInfo *pInfo) {
int32_t tDecodeSTqCheckInfo(SDecoder *pDecoder, STqCheckInfo *pInfo) {
if (tDecodeCStrTo(pDecoder, pInfo->topic) < 0) return -1;
if (tDecodeI64(pDecoder, &pInfo->ntbUid) < 0) return -1;
int32_t sz;

View File

@ -392,10 +392,10 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
getStatics_i64},
{TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", 0, 0, tsCompressFloat, tsDecompressFloat, getStatics_f},
{TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", 0, 0, tsCompressDouble, tsDecompressDouble, getStatics_d},
{TSDB_DATA_TYPE_VARCHAR, 6, 0, "VARCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_bin},
{TSDB_DATA_TYPE_VARCHAR, 6, 1, "VARCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_bin},
{TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", INT64_MIN, INT64_MAX, tsCompressTimestamp,
tsDecompressTimestamp, getStatics_i64},
{TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr},
{TSDB_DATA_TYPE_NCHAR, 5, 1, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr},
{TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", 0, UINT8_MAX, tsCompressTinyint, tsDecompressTinyint,
getStatics_u8},
{TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", 0, UINT16_MAX, tsCompressSmallint,

View File

@ -225,7 +225,8 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ADD_CHECK_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE_CHECK_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DEPLOY_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DROP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -338,6 +338,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_QUERY, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_CONTINUE, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_EXEC_RSMA, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@ -361,7 +362,8 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_COMMIT_OFFSET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ADD_CHECK_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE_CHECK_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_DEL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -636,6 +636,7 @@ typedef struct {
int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj);
int32_t tDecodeSStreamObj(SDecoder* pDecoder, SStreamObj* pObj);
void tFreeStreamObj(SStreamObj* pObj);
typedef struct {
char streamName[TSDB_STREAM_FNAME_LEN];

View File

@ -34,6 +34,7 @@ int32_t mndCheckCreateStbReq(SMCreateStbReq *pCreate);
SDbObj *mndAcquireDbByStb(SMnode *pMnode, const char *stbName);
int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreate, SDbObj *pDb);
int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb);
void mndFreeStb(SStbObj *pStb);
void mndExtractDbNameFromStbFullName(const char *stbFullName, char *dst);
void mndExtractTbNameFromStbFullName(const char *stbFullName, char *dst, int32_t dstSize);

View File

@ -116,6 +116,25 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) {
return 0;
}
void tFreeStreamObj(SStreamObj *pStream) {
taosMemoryFree(pStream->sql);
taosMemoryFree(pStream->ast);
taosMemoryFree(pStream->physicalPlan);
if (pStream->outputSchema.nCols) taosMemoryFree(pStream->outputSchema.pSchema);
int32_t sz = taosArrayGetSize(pStream->tasks);
for (int32_t i = 0; i < sz; i++) {
SArray *pLevel = taosArrayGetP(pStream->tasks, i);
int32_t taskSz = taosArrayGetSize(pLevel);
for (int32_t j = 0; j < taskSz; j++) {
SStreamTask *pTask = taosArrayGetP(pLevel, j);
tFreeSStreamTask(pTask);
}
taosArrayDestroy(pLevel);
}
taosArrayDestroy(pStream->tasks);
}
SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
if (pVgEpNew == NULL) return NULL;

View File

@ -15,10 +15,10 @@
#define _DEFAULT_SOURCE
#include "mndOffset.h"
#include "mndPrivilege.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndStb.h"
#include "mndTopic.h"
@ -305,7 +305,7 @@ int32_t mndDropOffsetByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
sdbRelease(pSdb, pOffset);
}
return code;
return code;
}
int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic) {

View File

@ -424,6 +424,8 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
}
mndAddTaskToTaskSet(taskSourceLevel, pTask);
pTask->triggerParam = 0;
// source
pTask->taskLevel = TASK_LEVEL__SOURCE;

View File

@ -489,7 +489,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
smaObj.uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN);
ASSERT(smaObj.uid != 0);
char resultTbName[TSDB_TABLE_FNAME_LEN + 16] = {0};
snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "%s_td_tsma_rst_tb",pCreate->name);
snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "%s_td_tsma_rst_tb", pCreate->name);
memcpy(smaObj.dstTbName, resultTbName, TSDB_TABLE_FNAME_LEN);
smaObj.dstTbUid = mndGenerateUid(smaObj.dstTbName, TSDB_TABLE_FNAME_LEN);
smaObj.stbUid = pStb->uid;
@ -530,7 +530,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
streamObj.sourceDbUid = pDb->uid;
streamObj.targetDbUid = pDb->uid;
streamObj.version = 1;
streamObj.sql = pCreate->sql;
streamObj.sql = strdup(pCreate->sql);
streamObj.smaId = smaObj.uid;
streamObj.watermark = pCreate->watermark;
streamObj.trigger = STREAM_TRIGGER_WINDOW_CLOSE;
@ -585,6 +585,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
return -1;
}
if (pAst != NULL) nodesDestroyNode(pAst);
nodesDestroyNode((SNode *)pPlan);
int32_t code = -1;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
@ -609,6 +610,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
code = 0;
_OVER:
tFreeStreamObj(&streamObj);
mndDestroySmaObj(&smaObj);
mndTransDrop(pTrans);
return code;

View File

@ -266,6 +266,15 @@ _OVER:
return pRow;
}
void mndFreeStb(SStbObj *pStb) {
taosArrayDestroy(pStb->pFuncs);
taosMemoryFreeClear(pStb->pColumns);
taosMemoryFreeClear(pStb->pTags);
taosMemoryFreeClear(pStb->comment);
taosMemoryFreeClear(pStb->pAst1);
taosMemoryFreeClear(pStb->pAst2);
}
static int32_t mndStbActionInsert(SSdb *pSdb, SStbObj *pStb) {
mTrace("stb:%s, perform insert action, row:%p", pStb->name, pStb);
return 0;
@ -273,12 +282,7 @@ static int32_t mndStbActionInsert(SSdb *pSdb, SStbObj *pStb) {
static int32_t mndStbActionDelete(SSdb *pSdb, SStbObj *pStb) {
mTrace("stb:%s, perform delete action, row:%p", pStb->name, pStb);
taosArrayDestroy(pStb->pFuncs);
taosMemoryFreeClear(pStb->pColumns);
taosMemoryFreeClear(pStb->pTags);
taosMemoryFreeClear(pStb->comment);
taosMemoryFreeClear(pStb->pAst1);
taosMemoryFreeClear(pStb->pAst2);
mndFreeStb(pStb);
return 0;
}
@ -1145,7 +1149,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p
return 0;
}
int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t suid, col_id_t colId) {
static int32_t mndCheckAlterColForTopic(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
@ -1154,7 +1158,7 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
if (pIter == NULL) break;
mDebug("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s",
pTopic->name, stbname, suid, colId, pTopic->subType, pTopic->sql);
pTopic->name, stbFullName, suid, colId, pTopic->subType, pTopic->sql);
if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
sdbRelease(pSdb, pTopic);
continue;
@ -1192,20 +1196,66 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
}
return 0;
}
static int32_t mndCheckAlterColForStream(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
SStreamObj *pStream = NULL;
pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
if (pIter == NULL) break;
SNode *pAst = NULL;
if (nodesStringToNode(pStream->ast, &pAst) != 0) {
ASSERT(0);
return -1;
}
SNodeList *pNodeList = NULL;
nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
SNode *pNode = NULL;
FOREACH(pNode, pNodeList) {
SColumnNode *pCol = (SColumnNode *)pNode;
if (pCol->tableId != suid) {
mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
goto NEXT;
}
if (pCol->colId > 0 && pCol->colId == colId) {
sdbRelease(pSdb, pStream);
nodesDestroyNode(pAst);
terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
mError("stream:%s, check colId:%d conflicted", pStream->name, pCol->colId);
return -1;
}
mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
}
NEXT:
sdbRelease(pSdb, pStream);
nodesDestroyNode(pAst);
}
return 0;
}
static int32_t mndCheckAlterColForTSma(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
SSmaObj *pSma = NULL;
pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma);
if (pIter == NULL) break;
mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name, stbname,
suid, colId, pSma->sql);
mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name,
stbFullName, suid, colId, pSma->sql);
SNode *pAst = NULL;
if (nodesStringToNode(pSma->ast, &pAst) != 0) {
terrno = TSDB_CODE_SDB_INVALID_DATA_CONTENT;
mError("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d failed since parse AST err",
pSma->name, stbname, suid, colId);
pSma->name, stbFullName, suid, colId);
return -1;
}
@ -1218,7 +1268,7 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
if ((pCol->tableId != suid) && (pSma->stbUid != suid)) {
mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
goto NEXT2;
goto NEXT;
}
if ((pCol->colId) > 0 && (pCol->colId == colId)) {
sdbRelease(pSdb, pSma);
@ -1230,11 +1280,24 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
}
NEXT2:
NEXT:
sdbRelease(pSdb, pSma);
nodesDestroyNode(pAst);
}
return 0;
}
int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
if (mndCheckAlterColForTopic(pMnode, stbFullName, suid, colId) < 0) {
return -1;
}
if (mndCheckAlterColForStream(pMnode, stbFullName, suid, colId) < 0) {
return -1;
}
if (mndCheckAlterColForTSma(pMnode, stbFullName, suid, colId) < 0) {
return -1;
}
return 0;
}
@ -1930,6 +1993,98 @@ _OVER:
return code;
}
static int32_t mndCheckDropStbForTopic(SMnode *pMnode, const char *stbFullName, int64_t suid) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
SMqTopicObj *pTopic = NULL;
pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic);
if (pIter == NULL) break;
if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) {
if (pTopic->stbUid == suid) {
sdbRelease(pSdb, pTopic);
return -1;
}
}
if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
sdbRelease(pSdb, pTopic);
continue;
}
SNode *pAst = NULL;
if (nodesStringToNode(pTopic->ast, &pAst) != 0) {
ASSERT(0);
return -1;
}
SNodeList *pNodeList = NULL;
nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
SNode *pNode = NULL;
FOREACH(pNode, pNodeList) {
SColumnNode *pCol = (SColumnNode *)pNode;
if (pCol->tableId == suid) {
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
return -1;
} else {
goto NEXT;
}
}
NEXT:
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
}
return 0;
}
static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName, int64_t suid) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
SStreamObj *pStream = NULL;
pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
if (pIter == NULL) break;
if (pStream->smaId != 0) {
sdbRelease(pSdb, pStream);
continue;
}
if (pStream->targetStbUid == suid) {
sdbRelease(pSdb, pStream);
return -1;
}
SNode *pAst = NULL;
if (nodesStringToNode(pStream->ast, &pAst) != 0) {
ASSERT(0);
return -1;
}
SNodeList *pNodeList = NULL;
nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
SNode *pNode = NULL;
FOREACH(pNode, pNodeList) {
SColumnNode *pCol = (SColumnNode *)pNode;
if (pCol->tableId == suid) {
sdbRelease(pSdb, pStream);
nodesDestroyNode(pAst);
return -1;
} else {
goto NEXT;
}
}
NEXT:
sdbRelease(pSdb, pStream);
nodesDestroyNode(pAst);
}
return 0;
}
static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
@ -1971,6 +2126,16 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
goto _OVER;
}
if (mndCheckDropStbForTopic(pMnode, dropReq.name, pStb->uid) < 0) {
terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED;
goto _OVER;
}
if (mndCheckDropStbForStream(pMnode, dropReq.name, pStb->uid) < 0) {
terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
goto _OVER;
}
code = mndDropStb(pMnode, pReq, pDb, pStb);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;

View File

@ -167,6 +167,9 @@ static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream) {
static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream) {
mTrace("stream:%s, perform delete action", pStream->name);
taosWLockLatch(&pStream->lock);
tFreeStreamObj(pStream);
taosWUnLockLatch(&pStream->lock);
return 0;
}
@ -493,10 +496,17 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
stbObj.uid = pStream->targetStbUid;
if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER;
if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) {
mndFreeStb(&stbObj);
goto _OVER;
}
tFreeSMCreateStbReq(&createReq);
mndFreeStb(&stbObj);
return 0;
_OVER:
tFreeSMCreateStbReq(&createReq);
mndReleaseStb(pMnode, pStb);
mndReleaseDb(pMnode, pDb);
return -1;
@ -715,6 +725,7 @@ _OVER:
mndReleaseDb(pMnode, pDb);
tFreeSCMCreateStreamReq(&createStreamReq);
tFreeStreamObj(&streamObj);
return code;
}

View File

@ -356,31 +356,44 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
taosArrayPush(pConsumerEp->vgs, &pRebVg->pVgEp);
pRebVg->newConsumerId = pConsumerEp->consumerId;
taosArrayPush(pOutput->rebVgs, pRebVg);
mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId,
mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 " (second scan) (not enough)", pRebVg->pVgEp->vgId,
pConsumerEp->consumerId);
}
}
ASSERT(pIter == NULL);
// 7. handle unassigned vg
if (taosHashGetSize(pOutput->pSub->consumerHash) != 0) {
// if has consumer, assign all left vg
while (1) {
SMqConsumerEp *pConsumerEp = NULL;
pRemovedIter = taosHashIterate(pHash, pRemovedIter);
if (pRemovedIter == NULL) break;
pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter);
ASSERT(pIter);
if (pRemovedIter == NULL) {
if (pIter != NULL) {
taosHashCancelIterate(pOutput->pSub->consumerHash, pIter);
pIter = NULL;
}
break;
}
while (1) {
pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter);
ASSERT(pIter);
pConsumerEp = (SMqConsumerEp *)pIter;
ASSERT(pConsumerEp->consumerId > 0);
if (taosArrayGetSize(pConsumerEp->vgs) == minVgCnt) {
break;
}
}
pRebVg = (SMqRebOutputVg *)pRemovedIter;
SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
ASSERT(pConsumerEp->consumerId > 0);
taosArrayPush(pConsumerEp->vgs, &pRebVg->pVgEp);
pRebVg->newConsumerId = pConsumerEp->consumerId;
if (pRebVg->newConsumerId == pRebVg->oldConsumerId) {
mInfo("mq rebalance: skip vg %d for same consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId,
mInfo("mq rebalance: skip vg %d for same consumer:%" PRId64 " (second scan)", pRebVg->pVgEp->vgId,
pConsumerEp->consumerId);
continue;
}
taosArrayPush(pOutput->rebVgs, pRebVg);
mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId,
mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 " (second scan) (unassigned)", pRebVg->pVgEp->vgId,
pConsumerEp->consumerId);
}
} else {
@ -571,7 +584,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
/*ASSERT(pTopic);*/
if (pTopic == NULL) {
mError("rebalance %s failed since topic %s was dropped, abort", pRebInfo->key, topic);
mError("mq rebalance %s failed since topic %s not exist, abort", pRebInfo->key, topic);
continue;
}
taosRLockLatch(&pTopic->lock);
@ -601,7 +614,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
// TODO replace assert with error check
if (mndPersistRebResult(pMnode, pMsg, &rebOutput) < 0) {
mError("persist rebalance output error, possibly vnode splitted or dropped");
mError("mq rebalance persist rebalance output error, possibly vnode splitted or dropped");
}
taosArrayDestroy(pRebInfo->lostConsumers);
taosArrayDestroy(pRebInfo->newConsumers);

View File

@ -57,7 +57,8 @@ int32_t mndInitTopic(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_TOPIC, mndProcessCreateTopicReq);
mndSetMsgHandle(pMnode, TDMT_MND_DROP_TOPIC, mndProcessDropTopicReq);
mndSetMsgHandle(pMnode, TDMT_VND_DROP_TOPIC_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_CHECK_ALTER_INFO_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_ADD_CHECK_INFO_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_DELETE_CHECK_INFO_RSP, mndTransProcessRsp);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndRetrieveTopic);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndCancelGetNextTopic);
@ -450,7 +451,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
if (topicObj.ntbUid != 0) {
SCheckAlterInfo info;
STqCheckInfo info;
memcpy(info.topic, topicObj.name, TSDB_TOPIC_FNAME_LEN);
info.ntbUid = topicObj.ntbUid;
info.colIdList = topicObj.ntbColIds;
@ -470,7 +471,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
// encoder check alter info
int32_t len;
int32_t code;
tEncodeSize(tEncodeSCheckAlterInfo, &info, len, code);
tEncodeSize(tEncodeSTqCheckInfo, &info, len, code);
if (code < 0) {
sdbRelease(pSdb, pVgroup);
mndTransDrop(pTrans);
@ -481,7 +482,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
SEncoder encoder;
tEncoderInit(&encoder, abuf, len);
if (tEncodeSCheckAlterInfo(&encoder, &info) < 0) {
if (tEncodeSTqCheckInfo(&encoder, &info) < 0) {
sdbRelease(pSdb, pVgroup);
mndTransDrop(pTrans);
return -1;
@ -493,7 +494,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
action.pCont = buf;
action.contLen = sizeof(SMsgHead) + len;
action.msgType = TDMT_VND_CHECK_ALTER_INFO;
action.msgType = TDMT_VND_ADD_CHECK_INFO;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(buf);
sdbRelease(pSdb, pVgroup);
@ -659,12 +660,14 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
mDebug("trans:%d, used to drop topic:%s", pTrans->id, pTopic->name);
#if 0
if (mndDropOffsetByTopic(pMnode, pTrans, dropReq.name) < 0) {
ASSERT(0);
mndTransDrop(pTrans);
mndReleaseTopic(pMnode, pTopic);
return -1;
}
#endif
// TODO check if rebalancing
if (mndDropSubByTopic(pMnode, pTrans, dropReq.name) < 0) {
@ -675,6 +678,37 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
return -1;
}
if (pTopic->ntbUid != 0) {
// broadcast to all vnode
void *pIter = NULL;
SVgObj *pVgroup = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
if (!mndVgroupInDb(pVgroup, pTopic->dbUid)) {
sdbRelease(pSdb, pVgroup);
continue;
}
void *buf = taosMemoryCalloc(1, sizeof(SMsgHead) + TSDB_TOPIC_FNAME_LEN);
void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
((SMsgHead *)buf)->vgId = htonl(pVgroup->vgId);
memcpy(abuf, pTopic->name, TSDB_TOPIC_FNAME_LEN);
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
action.pCont = buf;
action.contLen = sizeof(SMsgHead) + TSDB_TOPIC_FNAME_LEN;
action.msgType = TDMT_VND_DELETE_CHECK_INFO;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(buf);
sdbRelease(pSdb, pVgroup);
mndTransDrop(pTrans);
return -1;
}
}
}
int32_t code = mndDropTopic(pMnode, pTrans, pReq, pTopic);
mndReleaseTopic(pMnode, pTopic);

View File

@ -509,6 +509,7 @@ int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) {
pVgroup->replica = 1;
if (mndGetAvailableDnode(pMnode, pDb, pVgroup, pArray) != 0) return -1;
taosArrayDestroy(pArray);
mInfo("db:%s, sma vgId:%d is alloced", pDb->name, pVgroup->vgId);
return 0;
@ -1862,4 +1863,4 @@ _OVER:
#endif
}
bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; }
bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; }

View File

@ -24,6 +24,7 @@ target_sources(
"src/meta/metaCommit.c"
"src/meta/metaEntry.c"
"src/meta/metaSnapshot.c"
"src/meta/metaCache.c"
# sma
"src/sma/smaEnv.c"

View File

@ -63,6 +63,7 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId);
int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen);
int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list);
int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list);
int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray* list);
void *vnodeGetIdx(SVnode *pVnode);
void *vnodeGetIvtIdx(SVnode *pVnode);
@ -91,9 +92,11 @@ typedef struct SMetaEntry SMetaEntry;
void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
void metaReaderClear(SMetaReader *pReader);
int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags);
int32_t metaReadNext(SMetaReader *pReader);
const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *tagVal);
const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
typedef struct SMetaFltParam {
tb_uid_t suid;

View File

@ -23,8 +23,9 @@
extern "C" {
#endif
typedef struct SMetaIdx SMetaIdx;
typedef struct SMetaDB SMetaDB;
typedef struct SMetaIdx SMetaIdx;
typedef struct SMetaDB SMetaDB;
typedef struct SMetaCache SMetaCache;
// metaDebug ==================
// clang-format off
@ -60,6 +61,12 @@ static FORCE_INLINE tb_uid_t metaGenerateUid(SMeta* pMeta) { return tGenIdPI64()
// metaTable ==================
int metaHandleEntry(SMeta* pMeta, const SMetaEntry* pME);
// metaCache ==================
int32_t metaCacheOpen(SMeta* pMeta);
void metaCacheClose(SMeta* pMeta);
int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo);
int32_t metaCacheDrop(SMeta* pMeta, int64_t uid);
struct SMeta {
TdThreadRwlock lock;
@ -84,6 +91,8 @@ struct SMeta {
TTB* pStreamDb;
SMetaIdx* pIdx;
SMetaCache* pCache;
};
typedef struct {
@ -92,6 +101,12 @@ typedef struct {
} STbDbKey;
#pragma pack(push, 1)
typedef struct {
tb_uid_t suid;
int64_t version;
int32_t skmVer;
} SUidIdxVal;
typedef struct {
tb_uid_t uid;
int32_t sver;

View File

@ -57,9 +57,10 @@ typedef struct {
void *tmrHandle; // shared by all fetch tasks
} SSmaMgmt;
#define SMA_ENV_LOCK(env) (&(env)->lock)
#define SMA_ENV_TYPE(env) ((env)->type)
#define SMA_ENV_STAT(env) ((env)->pStat)
#define SMA_ENV_LOCK(env) (&(env)->lock)
#define SMA_ENV_TYPE(env) ((env)->type)
#define SMA_ENV_STAT(env) ((env)->pStat)
#define SMA_RSMA_STAT(sma) ((SRSmaStat *)SMA_ENV_STAT((SSmaEnv *)(sma)->pRSmaEnv))
struct STSmaStat {
int8_t state; // ETsdbSmaStat
@ -86,15 +87,17 @@ struct SQTaskFWriter {
};
struct SRSmaStat {
SSma *pSma;
int64_t commitAppliedVer; // vnode applied version for async commit
int64_t refId; // shared by fetch tasks
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
int8_t triggerStat; // shared by fetch tasks
int8_t commitStat; // 0 not in committing, 1 in committing
SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo;
SHashObj *iRsmaInfoHash; // key: stbUid, value: SRSmaInfo; immutable rsmaInfoHash
SSma *pSma;
int64_t commitAppliedVer; // vnode applied version for async commit
int64_t refId; // shared by fetch tasks
volatile int64_t qBufSize; // queue buffer size
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
int8_t triggerStat; // shared by fetch tasks
int8_t commitStat; // 0 not in committing, 1 in committing
int8_t execStat; // 0 not in exec , 1 in exec
SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
SHashObj *infoHash; // key: suid, value: SRSmaInfo
SHashObj *fetchHash; // key: suid, value: L1 or L2 or L1|L2
};
struct SSmaStat {
@ -105,34 +108,40 @@ struct SSmaStat {
T_REF_DECLARE()
};
#define SMA_TSMA_STAT(s) (&(s)->tsmaStat)
#define SMA_RSMA_STAT(s) (&(s)->rsmaStat)
#define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash)
#define RSMA_IMU_INFO_HASH(r) ((r)->iRsmaInfoHash)
#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
#define RSMA_REF_ID(r) ((r)->refId)
#define RSMA_FS_LOCK(r) (&(r)->lock)
#define SMA_STAT_TSMA(s) (&(s)->tsmaStat)
#define SMA_STAT_RSMA(s) (&(s)->rsmaStat)
#define RSMA_INFO_HASH(r) ((r)->infoHash)
#define RSMA_FETCH_HASH(r) ((r)->fetchHash)
#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
#define RSMA_REF_ID(r) ((r)->refId)
#define RSMA_FS_LOCK(r) (&(r)->lock)
struct SRSmaInfoItem {
int8_t level;
int8_t triggerStat;
int32_t maxDelay;
tmr_h tmrId;
int8_t level;
int8_t triggerStat;
uint16_t interval; // second
int32_t maxDelay;
tmr_h tmrId;
};
struct SRSmaInfo {
STSchema *pTSchema;
int64_t suid;
int64_t refId; // refId of SRSmaStat
int8_t delFlag;
uint64_t delFlag : 1;
uint64_t lastReceived : 63; // second
T_REF_DECLARE()
SRSmaInfoItem items[TSDB_RETENTION_L2];
void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t
void *iTaskInfo[TSDB_RETENTION_L2]; // immutable
STaosQueue *queue; // buffer queue of SubmitReq
STaosQall *qall; // buffer qall of SubmitReq
void *iTaskInfo[TSDB_RETENTION_L2]; // immutable qTaskInfo_t
STaosQueue *iQueue; // immutable buffer queue of SubmitReq
STaosQall *iQall; // immutable buffer qall of SubmitReq
};
#define RSMA_INFO_HEAD_LEN 32
#define RSMA_INFO_HEAD_LEN offsetof(SRSmaInfo, items)
#define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1)
#define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1)
#define RSMA_INFO_QTASK(r, i) ((r)->taskInfo[i])
@ -161,6 +170,12 @@ enum {
RSMA_RESTORE_SYNC = 2,
};
typedef enum {
RSMA_EXEC_OVERFLOW = 1, // triggered by queue buf overflow
RSMA_EXEC_TIMEOUT = 2, // triggered by timer
RSMA_EXEC_COMMIT = 3, // triggered by commit
} ERsmaExecType;
void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
@ -228,12 +243,13 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) {
void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName);
void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName);
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc);
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type);
int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
int32_t tdProcessRSmaRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer);

View File

@ -117,16 +117,15 @@ typedef struct {
struct STQ {
SVnode* pVnode;
char* path;
SHashObj* pushMgr; // consumerId -> STqHandle*
SHashObj* handles; // subKey -> STqHandle
SHashObj* pAlterInfo; // topic -> SAlterCheckInfo
SHashObj* pPushMgr; // consumerId -> STqHandle*
SHashObj* pHandle; // subKey -> STqHandle
SHashObj* pCheckInfo; // topic -> SAlterCheckInfo
STqOffsetStore* pOffsetStore;
TDB* pMetaStore;
TDB* pMetaDB;
TTB* pExecStore;
TTB* pAlterInfoStore;
TTB* pCheckStore;
SStreamMeta* pStreamMeta;
};
@ -155,6 +154,9 @@ int32_t tqMetaClose(STQ* pTq);
int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle);
int32_t tqMetaDeleteHandle(STQ* pTq, const char* key);
int32_t tqMetaRestoreHandle(STQ* pTq);
int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen);
int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key);
int32_t tqMetaRestoreCheckInfo(STQ* pTq);
typedef struct {
int32_t size;

View File

@ -45,7 +45,7 @@ typedef struct SBlockIdx SBlockIdx;
typedef struct SBlock SBlock;
typedef struct SBlockL SBlockL;
typedef struct SColData SColData;
typedef struct SBlockDataHdr SBlockDataHdr;
typedef struct SDiskDataHdr SDiskDataHdr;
typedef struct SBlockData SBlockData;
typedef struct SDelFile SDelFile;
typedef struct SHeadFile SHeadFile;
@ -61,7 +61,11 @@ typedef struct SRowIter SRowIter;
typedef struct STsdbFS STsdbFS;
typedef struct SRowMerger SRowMerger;
typedef struct STsdbReadSnap STsdbReadSnap;
typedef struct SBlockInfo SBlockInfo;
typedef struct SSmaInfo SSmaInfo;
typedef struct SBlockCol SBlockCol;
#define TSDB_FILE_DLMT ((uint32_t)0xF00AFA0F)
#define TSDB_MAX_SUBBLOCKS 8
#define TSDB_FHDR_SIZE 512
@ -113,10 +117,14 @@ int32_t tPutBlock(uint8_t *p, void *ph);
int32_t tGetBlock(uint8_t *p, void *ph);
int32_t tBlockCmprFn(const void *p1, const void *p2);
bool tBlockHasSma(SBlock *pBlock);
// SBlockL
int32_t tPutBlockL(uint8_t *p, void *ph);
int32_t tGetBlockL(uint8_t *p, void *ph);
// SBlockIdx
int32_t tPutBlockIdx(uint8_t *p, void *ph);
int32_t tGetBlockIdx(uint8_t *p, void *ph);
int32_t tCmprBlockIdx(void const *lhs, void const *rhs);
int32_t tCmprBlockL(void const *lhs, void const *rhs);
// SColdata
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn);
void tColDataReset(SColData *pColData);
@ -131,20 +139,25 @@ int32_t tGetColData(uint8_t *p, SColData *pColData);
#define tBlockDataLastRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1)
#define tBlockDataFirstKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataFirstRow(PBLOCKDATA))
#define tBlockDataLastKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataLastRow(PBLOCKDATA))
int32_t tBlockDataInit(SBlockData *pBlockData);
int32_t tBlockDataCreate(SBlockData *pBlockData);
void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear);
int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema);
int32_t tBlockDataInitEx(SBlockData *pBlockData, SBlockData *pBlockDataFrom);
void tBlockDataReset(SBlockData *pBlockData);
int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema);
int32_t tBlockDataCorrectSchema(SBlockData *pBlockData, SBlockData *pBlockDataFrom);
void tBlockDataClearData(SBlockData *pBlockData);
void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear);
int32_t tBlockDataAddColData(SBlockData *pBlockData, int32_t iColData, SColData **ppColData);
int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema);
int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData);
int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest);
int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid);
void tBlockDataClear(SBlockData *pBlockData);
SColData *tBlockDataGetColDataByIdx(SBlockData *pBlockData, int32_t idx);
void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColData);
int32_t tPutBlockData(uint8_t *p, SBlockData *pBlockData);
int32_t tGetBlockData(uint8_t *p, SBlockData *pBlockData);
int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest);
int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData);
int32_t tBlockDataAddColData(SBlockData *pBlockData, int32_t iColData, SColData **ppColData);
int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut, int32_t *szOut, uint8_t *aBuf[],
int32_t aBufN[]);
int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]);
// SDiskDataHdr
int32_t tPutDiskDataHdr(uint8_t *p, void *ph);
int32_t tGetDiskDataHdr(uint8_t *p, void *ph);
// SDelIdx
int32_t tPutDelIdx(uint8_t *p, void *ph);
int32_t tGetDelIdx(uint8_t *p, void *ph);
@ -168,13 +181,25 @@ void tsdbFidKeyRange(int32_t fid, int32_t minutes, int8_t precision, TSKEY *m
int32_t tsdbFidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int64_t now);
int32_t tsdbBuildDeleteSkyline(SArray *aDelData, int32_t sidx, int32_t eidx, SArray *aSkyline);
void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg);
int32_t tPutColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg);
int32_t tGetColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg);
int32_t tsdbCmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t nOut,
int32_t *szOut, uint8_t **ppBuf);
int32_t tsdbDecmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t szOut,
uint8_t **ppBuf);
int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol, uint8_t **ppOut, int32_t nOut,
uint8_t **ppBuf);
int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData,
uint8_t **ppBuf);
int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck);
// tsdbMemTable ==============================================================================================
// SMemTable
int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
void tsdbMemTableDestroy(SMemTable *pMemTable);
void tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData);
void tsdbRefMemTable(SMemTable *pMemTable);
void tsdbUnrefMemTable(SMemTable *pMemTable);
int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
void tsdbMemTableDestroy(SMemTable *pMemTable);
STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid);
void tsdbRefMemTable(SMemTable *pMemTable);
void tsdbUnrefMemTable(SMemTable *pMemTable);
SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable);
// STbDataIter
int32_t tsdbTbDataIterCreate(STbData *pTbData, TSDBKEY *pFrom, int8_t backward, STbDataIter **ppIter);
void *tsdbTbDataIterDestroy(STbDataIter *pIter);
@ -223,33 +248,33 @@ int32_t tsdbFSUpsertDelFile(STsdbFS *pFS, SDelFile *pDelFile);
int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet);
int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync);
int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter);
int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx, uint8_t **ppBuf);
int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *pMapData, uint8_t **ppBuf, SBlockIdx *pBlockIdx);
int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2,
SBlockIdx *pBlockIdx, SBlock *pBlock, int8_t cmprAlg);
int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx);
int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *pMapData, SBlockIdx *pBlockIdx);
int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL);
int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
int8_t cmprAlg, int8_t toLast);
int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo);
// SDataFReader
int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet);
int32_t tsdbDataFReaderClose(SDataFReader **ppReader);
int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppBuf);
int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *pMapData, uint8_t **ppBuf);
int32_t tsdbReadColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int16_t *aColId, int32_t nCol,
SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2);
int32_t tsdbReadBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, SBlockData *pBlockData,
uint8_t **ppBuf1, uint8_t **ppBuf2);
int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg, uint8_t **ppBuf);
int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx);
int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *pMapData);
int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL);
int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg);
int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData);
int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData);
// SDelFWriter
int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb);
int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync);
int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, uint8_t **ppBuf, SDelIdx *pDelIdx);
int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx, uint8_t **ppBuf);
int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx);
int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx);
int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter);
// SDelFReader
int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb, uint8_t **ppBuf);
int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb);
int32_t tsdbDelFReaderClose(SDelFReader **ppReader);
int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData, uint8_t **ppBuf);
int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf);
int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData);
int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx);
// tsdbRead.c ==============================================================================================
int32_t tsdbTakeReadSnap(STsdb *pTsdb, STsdbReadSnap **ppSnap);
void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap);
@ -260,7 +285,7 @@ void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap);
// tsdbCache
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(SLRUCache *pCache);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, STSRow *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **h);
@ -277,13 +302,6 @@ size_t tsdbCacheGetCapacity(SVnode *pVnode);
int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema);
// structs =======================
typedef struct {
int minFid;
int midFid;
int maxFid;
TSKEY minKey;
} SRtn;
struct STsdbFS {
SDelFile *pDelFile;
SArray *aDFileSet; // SArray<SDFileSet>
@ -298,6 +316,7 @@ struct STsdb {
SMemTable *imem;
STsdbFS fs;
SLRUCache *lruCache;
TdThreadMutex lruMutex;
};
struct TSDBKEY {
@ -311,30 +330,23 @@ struct SMemSkipListNode {
SMemSkipListNode *forwards[0];
};
typedef struct SMemSkipList {
uint32_t seed;
int64_t size;
uint32_t seed;
int8_t maxLevel;
int8_t level;
SMemSkipListNode *pHead;
SMemSkipListNode *pTail;
} SMemSkipList;
struct SDelDataInfo {
tb_uid_t suid;
tb_uid_t uid;
};
struct STbData {
tb_uid_t suid;
tb_uid_t uid;
TSKEY minKey;
TSKEY maxKey;
int64_t minVersion;
int64_t maxVersion;
int32_t maxSkmVer;
SDelData *pHead;
SDelData *pTail;
SMemSkipList sl;
STbData *next;
};
struct SMemTable {
@ -344,11 +356,13 @@ struct SMemTable {
volatile int32_t nRef;
TSKEY minKey;
TSKEY maxKey;
int64_t minVersion;
int64_t maxVersion;
int64_t nRow;
int64_t nDel;
SArray *aTbData; // SArray<STbData*>
struct {
int32_t nTbData;
int32_t nBucket;
STbData **aBucket;
};
};
struct TSDBROW {
@ -379,63 +393,51 @@ struct SMapData {
uint8_t *pData;
};
typedef struct {
struct SBlockCol {
int16_t cid;
int8_t type;
int8_t smaOn;
int8_t flag; // HAS_NONE|HAS_NULL|HAS_VALUE
int32_t offset;
int32_t szBitmap; // bitmap size
int32_t szOffset; // size of offset, only for variant-length data type
int32_t szValue; // compressed column value size
int8_t flag; // HAS_NONE|HAS_NULL|HAS_VALUE
int32_t szOrigin; // original column value size (only save for variant data type)
} SBlockCol;
int32_t szBitmap; // bitmap size, 0 only for flag == HAS_VAL
int32_t szOffset; // offset size, 0 only for non-variant-length type
int32_t szValue; // value size, 0 when flag == (HAS_NULL | HAS_NONE)
int32_t offset;
};
typedef struct {
int32_t nRow;
int8_t cmprAlg;
int64_t offset; // block data offset
int32_t szBlockCol; // SBlockCol size
int32_t szVersion; // VERSION size
int32_t szTSKEY; // TSKEY size
int32_t szBlock; // total block size
int64_t sOffset; // sma offset
int32_t nSma; // sma size
} SSubBlock;
struct SBlockInfo {
int64_t offset; // block data offset
int32_t szBlock;
int32_t szKey;
};
struct SSmaInfo {
int64_t offset;
int32_t size;
};
struct SBlock {
TSDBKEY minKey;
TSDBKEY maxKey;
int64_t minVersion;
int64_t maxVersion;
int32_t nRow;
int8_t last;
int8_t hasDup;
int8_t nSubBlock;
SSubBlock aSubBlock[TSDB_MAX_SUBBLOCKS];
TSDBKEY minKey;
TSDBKEY maxKey;
int64_t minVer;
int64_t maxVer;
int32_t nRow;
int8_t hasDup;
int8_t nSubBlock;
SBlockInfo aSubBlock[TSDB_MAX_SUBBLOCKS];
SSmaInfo smaInfo;
};
struct SBlockL {
struct {
int64_t uid;
int64_t version;
TSKEY ts;
} minKey;
struct {
int64_t uid;
int64_t version;
TSKEY ts;
} maxKey;
int64_t minVer;
int64_t maxVer;
int32_t nRow;
int8_t cmprAlg;
int64_t offset;
int32_t szBlock;
int32_t szBlockCol;
int32_t szUid;
int32_t szVer;
int32_t szTSKEY;
int64_t suid;
int64_t minUid;
int64_t maxUid;
TSKEY minKey;
TSKEY maxKey;
int64_t minVer;
int64_t maxVer;
int32_t nRow;
SBlockInfo bInfo;
};
struct SColData {
@ -450,10 +452,17 @@ struct SColData {
uint8_t *pData;
};
// (SBlockData){.suid = 0, .uid = 0}: block data not initialized
// (SBlockData){.suid = suid, .uid = uid}: block data for ONE child table in .data file
// (SBlockData){.suid = suid, .uid = 0}: block data for N child tables in .last file
// (SBlockData){.suid = 0, .uid = uid}: block data for 1 normal table in .last/.data file
struct SBlockData {
int32_t nRow;
int64_t *aVersion;
TSKEY *aTSKEY;
int64_t suid; // 0 means normal table block data, otherwise child table block data
int64_t uid; // 0 means block data in .last file, otherwise in .data file
int32_t nRow; // number of rows
int64_t *aUid; // uids of each row, only exist in block data in .last file (uid == 0)
int64_t *aVersion; // versions of each row
TSKEY *aTSKEY; // timestamp of each row
SArray *aIdx; // SArray<int32_t>
SArray *aColData; // SArray<SColData>
};
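The comment above SBlockData spells out how the (suid, uid) pair encodes where a block belongs. A tiny hypothetical helper, not part of this patch, that makes the four cases explicit:

// Hypothetical helper illustrating the (suid, uid) convention described in
// the comment above SBlockData; for illustration only.
static const char *blockDataKind(int64_t suid, int64_t uid) {
  if (suid == 0 && uid == 0) return "not initialized";
  if (suid != 0 && uid != 0) return "one child table, .data file";
  if (suid != 0 && uid == 0) return "N child tables, .last file";
  return "one normal table, .last/.data file";  // suid == 0 && uid != 0
}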
@ -491,13 +500,18 @@ struct SDelIdx {
int64_t size;
};
#pragma pack(push, 1)
struct SBlockDataHdr {
struct SDiskDataHdr {
uint32_t delimiter;
uint32_t fmtVer;
int64_t suid;
int64_t uid;
int32_t szUid;
int32_t szVer;
int32_t szKey;
int32_t szBlkCol;
int32_t nRow;
int8_t cmprAlg;
};
#pragma pack(pop)
struct SDelFile {
volatile int32_t nRef;
@ -527,6 +541,7 @@ struct SLastFile {
int64_t commitID;
int64_t size;
int64_t offset;
};
struct SSmaFile {
@ -561,6 +576,8 @@ struct SDelFWriter {
STsdb *pTsdb;
SDelFile fDel;
TdFilePtr pWriteH;
uint8_t *aBuf[1];
};
struct SDataFWriter {
@ -576,6 +593,8 @@ struct SDataFWriter {
SDataFile fData;
SLastFile fLast;
SSmaFile fSma;
uint8_t *aBuf[4];
};
struct STsdbReadSnap {

View File

@ -65,6 +65,7 @@ struct SVBufPool {
SVBufPool* next;
SVnode* pVnode;
volatile int32_t nRef;
TdThreadSpinlock lock;
int64_t size;
uint8_t* ptr;
SVBufPoolNode* pTail;

View File

@ -130,6 +130,14 @@ int metaTtlSmaller(SMeta* pMeta, uint64_t time, SArray* uidList);
int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg);
int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);
typedef struct SMetaInfo {
int64_t uid;
int64_t suid;
int64_t version;
int32_t skmVer;
} SMetaInfo;
int32_t metaGetInfo(SMeta* pMeta, int64_t uid, SMetaInfo* pInfo);
// tsdb
int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg);
int tsdbClose(STsdb** pTsdb);
@ -155,13 +163,16 @@ int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId);
int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver);
// tq-mq
int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen);
int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen);
// tq-stream
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* data, int64_t ver);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
@ -188,6 +199,7 @@ int32_t smaAsyncCommit(SSma* pSma);
int32_t smaAsyncPostCommit(SSma* pSma);
int32_t smaDoRetention(SSma* pSma, int64_t now);
int32_t smaProcessFetch(SSma* pSma, void* pMsg);
int32_t smaProcessExec(SSma* pSma, void* pMsg);
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
@ -357,6 +369,7 @@ struct SSma {
void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);
enum {
SNAP_DATA_CFG = 0,
SNAP_DATA_META = 1,
SNAP_DATA_TSDB = 2,
SNAP_DATA_DEL = 3,

View File

@ -0,0 +1,206 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "meta.h"
#define META_CACHE_BASE_BUCKET 1024
// (uid , suid) : child table
// (uid, 0) : normal table
// (suid, suid) : super table
typedef struct SMetaCacheEntry SMetaCacheEntry;
struct SMetaCacheEntry {
SMetaCacheEntry* next;
SMetaInfo info;
};
struct SMetaCache {
int32_t nEntry;
int32_t nBucket;
SMetaCacheEntry** aBucket;
};
int32_t metaCacheOpen(SMeta* pMeta) {
int32_t code = 0;
SMetaCache* pCache = NULL;
pCache = (SMetaCache*)taosMemoryMalloc(sizeof(SMetaCache));
if (pCache == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
pCache->nEntry = 0;
pCache->nBucket = META_CACHE_BASE_BUCKET;
pCache->aBucket = (SMetaCacheEntry**)taosMemoryCalloc(pCache->nBucket, sizeof(SMetaCacheEntry*));
if (pCache->aBucket == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pCache);
goto _err;
}
pMeta->pCache = pCache;
_exit:
return code;
_err:
metaError("vgId:%d meta open cache failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
return code;
}
void metaCacheClose(SMeta* pMeta) {
if (pMeta->pCache) {
for (int32_t iBucket = 0; iBucket < pMeta->pCache->nBucket; iBucket++) {
SMetaCacheEntry* pEntry = pMeta->pCache->aBucket[iBucket];
while (pEntry) {
SMetaCacheEntry* tEntry = pEntry->next;
taosMemoryFree(pEntry);
pEntry = tEntry;
}
}
taosMemoryFree(pMeta->pCache->aBucket);
taosMemoryFree(pMeta->pCache);
pMeta->pCache = NULL;
}
}
static int32_t metaRehashCache(SMetaCache* pCache, int8_t expand) {
int32_t code = 0;
int32_t nBucket;
if (expand) {
nBucket = pCache->nBucket * 2;
} else {
nBucket = pCache->nBucket / 2;
}
SMetaCacheEntry** aBucket = (SMetaCacheEntry**)taosMemoryCalloc(nBucket, sizeof(SMetaCacheEntry*));
if (aBucket == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
// rehash
for (int32_t iBucket = 0; iBucket < pCache->nBucket; iBucket++) {
SMetaCacheEntry* pEntry = pCache->aBucket[iBucket];
while (pEntry) {
SMetaCacheEntry* pTEntry = pEntry->next;
pEntry->next = aBucket[TABS(pEntry->info.uid) % nBucket];
aBucket[TABS(pEntry->info.uid) % nBucket] = pEntry;
pEntry = pTEntry;
}
}
// final set
taosMemoryFree(pCache->aBucket);
pCache->nBucket = nBucket;
pCache->aBucket = aBucket;
_exit:
return code;
}
int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo) {
int32_t code = 0;
// ASSERT(metaIsWLocked(pMeta));
// search
SMetaCache* pCache = pMeta->pCache;
int32_t iBucket = TABS(pInfo->uid) % pCache->nBucket;
SMetaCacheEntry** ppEntry = &pCache->aBucket[iBucket];
while (*ppEntry && (*ppEntry)->info.uid != pInfo->uid) {
ppEntry = &(*ppEntry)->next;
}
if (*ppEntry) { // update
ASSERT(pInfo->suid == (*ppEntry)->info.suid);
if (pInfo->version > (*ppEntry)->info.version) {
(*ppEntry)->info.version = pInfo->version;
(*ppEntry)->info.skmVer = pInfo->skmVer;
}
} else { // insert
if (pCache->nEntry >= pCache->nBucket) {
code = metaRehashCache(pCache, 1);
if (code) goto _exit;
iBucket = TABS(pInfo->uid) % pCache->nBucket;
}
SMetaCacheEntry* pEntryNew = (SMetaCacheEntry*)taosMemoryMalloc(sizeof(*pEntryNew));
if (pEntryNew == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
pEntryNew->info = *pInfo;
pEntryNew->next = pCache->aBucket[iBucket];
pCache->aBucket[iBucket] = pEntryNew;
pCache->nEntry++;
}
_exit:
return code;
}
int32_t metaCacheDrop(SMeta* pMeta, int64_t uid) {
int32_t code = 0;
SMetaCache* pCache = pMeta->pCache;
int32_t iBucket = TABS(uid) % pCache->nBucket;
SMetaCacheEntry** ppEntry = &pCache->aBucket[iBucket];
while (*ppEntry && (*ppEntry)->info.uid != uid) {
ppEntry = &(*ppEntry)->next;
}
SMetaCacheEntry* pEntry = *ppEntry;
if (pEntry) {
*ppEntry = pEntry->next;
taosMemoryFree(pEntry);
pCache->nEntry--;
if (pCache->nEntry < pCache->nBucket / 4 && pCache->nBucket > META_CACHE_BASE_BUCKET) {
code = metaRehashCache(pCache, 0);
if (code) goto _exit;
}
} else {
code = TSDB_CODE_NOT_FOUND;
}
_exit:
return code;
}
int32_t metaCacheGet(SMeta* pMeta, int64_t uid, SMetaInfo* pInfo) {
int32_t code = 0;
SMetaCache* pCache = pMeta->pCache;
int32_t iBucket = TABS(uid) % pCache->nBucket;
SMetaCacheEntry* pEntry = pCache->aBucket[iBucket];
while (pEntry && pEntry->info.uid != uid) {
pEntry = pEntry->next;
}
if (pEntry) {
*pInfo = pEntry->info;
} else {
code = TSDB_CODE_NOT_FOUND;
}
return code;
}
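The new cache keeps one chained-hash bucket array keyed by uid, doubling the bucket count once the entry count reaches it and halving it again when occupancy drops below a quarter (never below META_CACHE_BASE_BUCKET). A standalone, insert-only sketch of that grow policy follows, with a tiny base bucket count so the resize is visible; names and sizes are illustrative, not vnode code.

// Standalone sketch: the same open-chaining scheme as SMetaCache above.
// Buckets double once the entry count reaches the bucket count; the real
// code also shrinks on drop when occupancy falls below a quarter.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BASE_BUCKET 4  // META_CACHE_BASE_BUCKET is 1024 in the real code

typedef struct Entry {
  struct Entry *next;
  int64_t       uid;
} Entry;

typedef struct {
  int32_t nEntry;
  int32_t nBucket;
  Entry **aBucket;
} Cache;

static void rehash(Cache *c, int32_t nBucket) {
  Entry **aBucket = calloc(nBucket, sizeof(Entry *));
  for (int32_t i = 0; i < c->nBucket; i++) {
    Entry *e = c->aBucket[i];
    while (e) {  // re-chain every entry into its new bucket
      Entry  *next = e->next;
      int32_t b = (int32_t)(llabs(e->uid) % nBucket);
      e->next = aBucket[b];
      aBucket[b] = e;
      e = next;
    }
  }
  free(c->aBucket);
  c->aBucket = aBucket;
  c->nBucket = nBucket;
}

static void insertUid(Cache *c, int64_t uid) {
  if (c->nEntry >= c->nBucket) rehash(c, c->nBucket * 2);  // grow at load factor 1
  int32_t b = (int32_t)(llabs(uid) % c->nBucket);
  Entry  *e = malloc(sizeof(Entry));
  e->uid = uid;
  e->next = c->aBucket[b];
  c->aBucket[b] = e;
  c->nEntry++;
}

int main(void) {
  Cache c = {.nEntry = 0, .nBucket = BASE_BUCKET,
             .aBucket = calloc(BASE_BUCKET, sizeof(Entry *))};
  for (int64_t uid = 1; uid <= 10; uid++) insertUid(&c, uid);
  printf("entries:%d buckets:%d\n", c.nEntry, c.nBucket);  // buckets grew to 16
  return 0;
}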

View File

@ -73,7 +73,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
}
// open pUidIdx
ret = tdbTbOpen("uid.idx", sizeof(tb_uid_t), sizeof(int64_t), uidIdxKeyCmpr, pMeta->pEnv, &pMeta->pUidIdx);
ret = tdbTbOpen("uid.idx", sizeof(tb_uid_t), sizeof(SUidIdxVal), uidIdxKeyCmpr, pMeta->pEnv, &pMeta->pUidIdx);
if (ret < 0) {
metaError("vgId:%d, failed to open meta uid idx since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
@ -87,7 +87,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
}
// open pCtbIdx
ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), 0, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx);
ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), -1, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx);
if (ret < 0) {
metaError("vgId:%d, failed to open meta child table index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
@ -143,6 +143,13 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
goto _err;
}
int32_t code = metaCacheOpen(pMeta);
if (code) {
terrno = code;
metaError("vgId:%d, failed to open meta cache since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
}
metaDebug("vgId:%d, meta is opened", TD_VID(pVnode));
*ppMeta = pMeta;
@ -169,6 +176,7 @@ _err:
int metaClose(SMeta *pMeta) {
if (pMeta) {
if (pMeta->pCache) metaCacheClose(pMeta);
if (pMeta->pIdx) metaCloseIdx(pMeta);
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);

View File

@ -53,6 +53,89 @@ _err:
return -1;
}
// int metaGetTableEntryByUidTest(void* meta, SArray *uidList) {
//
// SArray* readerList = taosArrayInit(taosArrayGetSize(uidList), sizeof(SMetaReader));
// SArray* uidVersion = taosArrayInit(taosArrayGetSize(uidList), sizeof(STbDbKey));
// SMeta *pMeta = meta;
// int64_t version;
// SHashObj *uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
//
// int64_t stt1 = taosGetTimestampUs();
// for(int i = 0; i < taosArrayGetSize(uidList); i++) {
// void* ppVal = NULL;
// int vlen = 0;
// uint64_t * uid = taosArrayGet(uidList, i);
// // query uid.idx
// if (tdbTbGet(pMeta->pUidIdx, uid, sizeof(*uid), &ppVal, &vlen) < 0) {
// continue;
// }
// version = *(int64_t *)ppVal;
//
// STbDbKey tbDbKey = {.version = version, .uid = *uid};
// taosArrayPush(uidVersion, &tbDbKey);
// taosHashPut(uHash, uid, sizeof(int64_t), ppVal, sizeof(int64_t));
// }
// int64_t stt2 = taosGetTimestampUs();
// qDebug("metaGetTableEntryByUidTest1 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt2-stt1);
//
// TBC *pCur = NULL;
// tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
// tdbTbcMoveToFirst(pCur);
// void *pKey = NULL;
// int kLen = 0;
//
// while(1){
// SMetaReader pReader = {0};
// int32_t ret = tdbTbcNext(pCur, &pKey, &kLen, &pReader.pBuf, &pReader.szBuf);
// if (ret < 0) break;
// STbDbKey *tmp = (STbDbKey*)pKey;
// int64_t *ver = (int64_t*)taosHashGet(uHash, &tmp->uid, sizeof(int64_t));
// if(ver == NULL || *ver != tmp->version) continue;
// taosArrayPush(readerList, &pReader);
// }
// tdbTbcClose(pCur);
//
// taosArrayClear(readerList);
// int64_t stt3 = taosGetTimestampUs();
// qDebug("metaGetTableEntryByUidTest2 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt3-stt2);
// for(int i = 0; i < taosArrayGetSize(uidVersion); i++) {
// SMetaReader pReader = {0};
//
// STbDbKey *tbDbKey = taosArrayGet(uidVersion, i);
// // query table.db
// if (tdbTbGet(pMeta->pTbDb, tbDbKey, sizeof(STbDbKey), &pReader.pBuf, &pReader.szBuf) < 0) {
// continue;
// }
// taosArrayPush(readerList, &pReader);
// }
// int64_t stt4 = taosGetTimestampUs();
// qDebug("metaGetTableEntryByUidTest3 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt4-stt3);
//
// for(int i = 0; i < taosArrayGetSize(readerList); i++){
// SMetaReader* pReader = taosArrayGet(readerList, i);
// metaReaderInit(pReader, meta, 0);
// // decode the entry
// tDecoderInit(&pReader->coder, pReader->pBuf, pReader->szBuf);
//
// if (metaDecodeEntry(&pReader->coder, &pReader->me) < 0) {
// }
// metaReaderClear(pReader);
// }
// int64_t stt5 = taosGetTimestampUs();
// qDebug("metaGetTableEntryByUidTest4 rows:%d, cost:%ld us", taosArrayGetSize(readerList), stt5-stt4);
// return 0;
// }
bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid) {
// query uid.idx
if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) {
return false;
}
return true;
}
int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
SMeta *pMeta = pReader->pMeta;
int64_t version;
@ -63,7 +146,7 @@ int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
return -1;
}
version = *(int64_t *)pReader->pBuf;
version = ((SUidIdxVal *)pReader->pBuf)[0].version;
return metaGetTableEntryByVersion(pReader, version, uid);
}
@ -160,7 +243,7 @@ int metaTbCursorNext(SMTbCursor *pTbCur) {
tDecoderClear(&pTbCur->mr.coder);
metaGetTableEntryByVersion(&pTbCur->mr, *(int64_t *)pTbCur->pVal, *(tb_uid_t *)pTbCur->pKey);
metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey);
if (pTbCur->mr.me.type == TSDB_SUPER_TABLE) {
continue;
}
@ -185,7 +268,7 @@ _query:
goto _err;
}
version = *(int64_t *)pData;
version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData);
SMetaEntry me = {0};
@ -429,18 +512,65 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) {
}
int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sver, STSchema **ppTSchema) {
int32_t code = 0;
STSchema *pTSchema = NULL;
SSkmDbKey skmDbKey = {.uid = suid ? suid : uid, .sver = sver};
int32_t code = 0;
void *pData = NULL;
int nData = 0;
SSkmDbKey skmDbKey;
if (sver <= 0) {
SMetaInfo info;
if (metaGetInfo(pMeta, suid ? suid : uid, &info) == 0) {
sver = info.skmVer;
} else {
TBC *pSkmDbC = NULL;
int c;
// query
skmDbKey.uid = suid ? suid : uid;
skmDbKey.sver = INT32_MAX;
tdbTbcOpen(pMeta->pSkmDb, &pSkmDbC, NULL);
metaRLock(pMeta);
if (tdbTbcMoveTo(pSkmDbC, &skmDbKey, sizeof(skmDbKey), &c) < 0) {
metaULock(pMeta);
tdbTbcClose(pSkmDbC);
code = TSDB_CODE_NOT_FOUND;
goto _exit;
}
ASSERT(c);
if (c < 0) {
tdbTbcMoveToPrev(pSkmDbC);
}
const void *pKey = NULL;
int32_t nKey = 0;
tdbTbcGet(pSkmDbC, &pKey, &nKey, NULL, NULL);
if (((SSkmDbKey *)pKey)->uid != skmDbKey.uid) {
metaULock(pMeta);
tdbTbcClose(pSkmDbC);
code = TSDB_CODE_NOT_FOUND;
goto _exit;
}
sver = ((SSkmDbKey *)pKey)->sver;
metaULock(pMeta);
tdbTbcClose(pSkmDbC);
}
}
ASSERT(sver > 0);
skmDbKey.uid = suid ? suid : uid;
skmDbKey.sver = sver;
metaRLock(pMeta);
if (tdbTbGet(pMeta->pSkmDb, &skmDbKey, sizeof(skmDbKey), &pData, &nData) < 0) {
code = TSDB_CODE_NOT_FOUND;
if (tdbTbGet(pMeta->pSkmDb, &skmDbKey, sizeof(SSkmDbKey), &pData, &nData) < 0) {
metaULock(pMeta);
goto _err;
code = TSDB_CODE_NOT_FOUND;
goto _exit;
}
metaULock(pMeta);
@ -462,15 +592,13 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
SSchema *pSchema = pSchemaWrapper->pSchema + i;
tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes);
}
pTSchema = tdGetSchemaFromBuilder(&sb);
STSchema *pTSchema = tdGetSchemaFromBuilder(&sb);
tdDestroyTSchemaBuilder(&sb);
*ppTSchema = pTSchema;
taosMemoryFree(pSchemaWrapper->pSchema);
return code;
_err:
*ppTSchema = NULL;
_exit:
return code;
}
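When sver <= 0, metaGetTbTSchemaEx above resolves the newest schema version by seeking the skm db to (uid, INT32_MAX) and stepping back one record. A standalone sketch of that seek-then-step-back trick over a sorted array standing in for the TDB B-tree (hypothetical names, not the meta code):

// Sketch: find the newest (largest) sver recorded for a uid in a store
// ordered by (uid, sver), by seeking to (uid, INT32_MAX) and stepping back.
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t uid; int32_t sver; } SkmKey;

static int cmp(SkmKey a, SkmKey b) {
  if (a.uid != b.uid) return a.uid < b.uid ? -1 : 1;
  if (a.sver != b.sver) return a.sver < b.sver ? -1 : 1;
  return 0;
}

// index of the first key >= target (like the cursor landing at/after it)
static int lowerBound(const SkmKey *keys, int n, SkmKey target) {
  int i = 0;
  while (i < n && cmp(keys[i], target) < 0) i++;
  return i;
}

int main(void) {
  SkmKey keys[] = {{100, 1}, {100, 2}, {100, 5}, {200, 1}};
  int    n = sizeof(keys) / sizeof(keys[0]);

  SkmKey target = {.uid = 100, .sver = INT32_MAX};
  int    i = lowerBound(keys, n, target);
  if (i > 0 && keys[i - 1].uid == target.uid) {
    printf("latest sver for uid %lld is %d\n", (long long)target.uid, keys[i - 1].sver);  // 5
  } else {
    printf("uid not found\n");
  }
  return 0;
}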
@ -749,9 +877,8 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) {
#endif
const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) {
ASSERT(pEntry->type == TSDB_CHILD_TABLE);
STag *tag = (STag *)pEntry->ctbEntry.pTags;
const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
STag *tag = (STag *)pTag;
if (type == TSDB_DATA_TYPE_JSON) {
return tag;
}
@ -853,6 +980,9 @@ int32_t metaFilterTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
break;
}
}
if (p->suid != pKey->suid) {
break;
}
first = false;
if (p != NULL) {
int32_t cmp = (*param->filterFunc)(p->data, pKey->data, pKey->type);
@ -888,3 +1018,75 @@ END:
return ret;
}
int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) {
SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid);
SHashObj *uHash = NULL;
size_t len = taosArrayGetSize(uidList); // len > 0 means uids were already provided
if (len > 0) {
uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
for (int i = 0; i < len; i++) {
int64_t *uid = taosArrayGet(uidList, i);
taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i));
}
}
while (1) {
tb_uid_t id = metaCtbCursorNext(pCur);
if (id == 0) {
break;
}
if (len > 0 && taosHashGet(uHash, &id, sizeof(int64_t)) == NULL) {
continue;
} else if (len == 0) {
taosArrayPush(uidList, &id);
}
taosHashPut(tags, &id, sizeof(int64_t), pCur->pVal, pCur->vLen);
}
taosHashCleanup(uHash);
metaCloseCtbCursor(pCur);
return TSDB_CODE_SUCCESS;
}
int32_t metaCacheGet(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo);
int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo) {
int32_t code = 0;
void *pData = NULL;
int nData = 0;
metaRLock(pMeta);
// search cache
if (metaCacheGet(pMeta, uid, pInfo) == 0) {
metaULock(pMeta);
goto _exit;
}
// search TDB
if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) {
// not found
metaULock(pMeta);
code = TSDB_CODE_NOT_FOUND;
goto _exit;
}
metaULock(pMeta);
pInfo->uid = uid;
pInfo->suid = ((SUidIdxVal *)pData)->suid;
pInfo->version = ((SUidIdxVal *)pData)->version;
pInfo->skmVer = ((SUidIdxVal *)pData)->skmVer;
// upsert the cache
metaWLock(pMeta);
metaCacheUpsert(pMeta, pInfo);
metaULock(pMeta);
_exit:
tdbFree(pData);
return code;
}
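metaGetInfo above is a plain cache-aside read: probe the cache under the read lock, fall back to uid.idx on a miss, then re-lock for write and upsert the result. A minimal sketch of that flow with trivial stand-ins for the cache and the store (locking omitted; hypothetical names):

// Sketch of the cache-aside flow in metaGetInfo: probe the cache, fall back
// to the backing store on a miss, then populate the cache. Locks and TDB
// access are replaced by trivial stand-ins.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t uid, suid, version; int32_t skmVer; } Info;

static Info gStore[] = {{.uid = 100, .suid = 0, .version = 7, .skmVer = 2}};  // "uid.idx"
static Info gCache;                // one-slot cache
static bool gCacheValid = false;

static bool cacheGet(int64_t uid, Info *out) {
  if (gCacheValid && gCache.uid == uid) { *out = gCache; return true; }
  return false;
}
static void cacheUpsert(const Info *info) { gCache = *info; gCacheValid = true; }
static bool storeGet(int64_t uid, Info *out) {
  for (size_t i = 0; i < sizeof(gStore) / sizeof(gStore[0]); i++)
    if (gStore[i].uid == uid) { *out = gStore[i]; return true; }
  return false;
}

static int getInfo(int64_t uid, Info *out) {
  if (cacheGet(uid, out)) return 0;   // fast path: cache hit
  if (!storeGet(uid, out)) return -1; // not found anywhere
  cacheUpsert(out);                   // cache for the next caller
  return 0;
}

int main(void) {
  Info info;
  int  rc = getInfo(100, &info);  // miss: loaded from the store, then cached
  printf("first lookup rc:%d version:%lld\n", rc, (long long)info.version);
  rc = getInfo(100, &info);       // hit: served from the cache
  printf("second lookup rc:%d (cache hit)\n", rc);
  return 0;
}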

View File

@ -28,9 +28,9 @@ int32_t metaCreateTSma(SMeta *pMeta, int64_t version, SSmaCfg *pCfg) {
int vLen = 0;
const void *pKey = NULL;
const void *pVal = NULL;
void * pBuf = NULL;
void *pBuf = NULL;
int32_t szBuf = 0;
void * p = NULL;
void *p = NULL;
SMetaReader mr = {0};
// validate req
@ -83,8 +83,8 @@ int32_t metaDropTSma(SMeta *pMeta, int64_t indexUid) {
static int metaSaveSmaToDB(SMeta *pMeta, const SMetaEntry *pME) {
STbDbKey tbDbKey;
void * pKey = NULL;
void * pVal = NULL;
void *pKey = NULL;
void *pVal = NULL;
int kLen = 0;
int vLen = 0;
SEncoder coder = {0};
@ -130,7 +130,8 @@ _err:
}
static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) {
return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &pME->version, sizeof(int64_t), &pMeta->txn);
SUidIdxVal uidIdxVal = {.suid = pME->smaEntry.tsma->indexUid, .version = pME->version, .skmVer = 0};
return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &uidIdxVal, sizeof(uidIdxVal), &pMeta->txn);
}
static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME) {

View File

@ -27,6 +27,23 @@ static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry);
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type);
static void metaGetEntryInfo(const SMetaEntry *pEntry, SMetaInfo *pInfo) {
pInfo->uid = pEntry->uid;
pInfo->version = pEntry->version;
if (pEntry->type == TSDB_SUPER_TABLE) {
pInfo->suid = pEntry->uid;
pInfo->skmVer = pEntry->stbEntry.schemaRow.version;
} else if (pEntry->type == TSDB_CHILD_TABLE) {
pInfo->suid = pEntry->ctbEntry.suid;
pInfo->skmVer = 0;
} else if (pEntry->type == TSDB_NORMAL_TABLE) {
pInfo->suid = 0;
pInfo->skmVer = pEntry->ntbEntry.schemaRow.version;
} else {
ASSERT(0);
}
}
static int metaUpdateMetaRsp(tb_uid_t uid, char *tbName, SSchemaWrapper *pSchema, STableMetaRsp *pMetaRsp) {
pMetaRsp->pSchemas = taosMemoryMalloc(pSchema->nCols * sizeof(SSchema));
if (NULL == pMetaRsp->pSchemas) {
@ -171,22 +188,22 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
void *pBuf = NULL;
int32_t szBuf = 0;
void *p = NULL;
SMetaReader mr = {0};
// validate req
metaReaderInit(&mr, pMeta, 0);
if (metaGetTableEntryByName(&mr, pReq->name) == 0) {
// TODO: just for pass case
#if 0
terrno = TSDB_CODE_TDB_STB_ALREADY_EXIST;
metaReaderClear(&mr);
return -1;
#else
metaReaderClear(&mr);
return 0;
#endif
void *pData = NULL;
int nData = 0;
if (tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData) == 0) {
tb_uid_t uid = *(tb_uid_t *)pData;
tdbFree(pData);
SMetaInfo info;
metaGetInfo(pMeta, uid, &info);
if (info.uid == info.suid) {
return 0;
} else {
terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST;
return -1;
}
}
metaReaderClear(&mr);
// set structs
me.version = version;
@ -265,8 +282,8 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq, SArray *tb
// drop super table
_drop_super_table:
tdbTbGet(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pData, &nData);
tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = *(int64_t *)pData, .uid = pReq->suid}, sizeof(STbDbKey),
&pMeta->txn);
tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = ((SUidIdxVal *)pData)[0].version, .uid = pReq->suid},
sizeof(STbDbKey), &pMeta->txn);
tdbTbDelete(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pMeta->txn);
tdbTbDelete(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn);
tdbTbDelete(pMeta->pSuidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn);
@ -309,7 +326,7 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
return -1;
}
oversion = *(int64_t *)pData;
oversion = ((SUidIdxVal *)pData)[0].version;
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, &pMeta->txn);
ret = tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = pReq->suid, .version = oversion}), sizeof(STbDbKey), &c);
@ -336,15 +353,11 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
metaSaveToSkmDb(pMeta, &nStbEntry);
}
// if (oStbEntry.stbEntry.schemaTag.sver != pReq->schemaTag.sver) {
// // change tag schema
// }
// update table.db
metaSaveToTbDb(pMeta, &nStbEntry);
// update uid index
tdbTbcUpsert(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &version, sizeof(version), 0);
metaUpdateUidIdx(pMeta, &nStbEntry);
if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf);
metaULock(pMeta);
@ -503,7 +516,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
SDecoder dc = {0};
rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData);
int64_t version = *(int64_t *)pData;
int64_t version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData);
@ -517,7 +530,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
int tLen = 0;
if (tdbTbGet(pMeta->pUidIdx, &e.ctbEntry.suid, sizeof(tb_uid_t), &tData, &tLen) == 0) {
version = *(int64_t *)tData;
version = ((SUidIdxVal *)tData)[0].version;
STbDbKey tbDbKey = {.uid = e.ctbEntry.suid, .version = version};
if (tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &tData, &tLen) == 0) {
SDecoder tdc = {0};
@ -556,6 +569,8 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
--pMeta->pVnode->config.vndStats.numOfSTables;
}
metaCacheDrop(pMeta, uid);
tDecoderClear(&dc);
tdbFree(pData);
@ -594,7 +609,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
ASSERT(c == 0);
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
oversion = *(int64_t *)pData;
oversion = ((SUidIdxVal *)pData)[0].version;
// search table.db
TBC *pTbDbc = NULL;
@ -708,7 +723,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
// save to table db
metaSaveToTbDb(pMeta, &entry);
tdbTbcUpsert(pUidIdxc, &entry.uid, sizeof(tb_uid_t), &version, sizeof(version), 0);
metaUpdateUidIdx(pMeta, &entry);
metaSaveToSkmDb(pMeta, &entry);
@ -764,7 +779,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
ASSERT(c == 0);
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
oversion = *(int64_t *)pData;
oversion = ((SUidIdxVal *)pData)[0].version;
// search table.db
TBC *pTbDbc = NULL;
@ -784,8 +799,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
/* get stbEntry*/
tdbTbGet(pMeta->pUidIdx, &ctbEntry.ctbEntry.suid, sizeof(tb_uid_t), &pVal, &nVal);
tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = *(int64_t *)pVal}), sizeof(STbDbKey),
(void **)&stbEntry.pBuf, &nVal);
tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = ((SUidIdxVal *)pVal)[0].version}),
sizeof(STbDbKey), (void **)&stbEntry.pBuf, &nVal);
tdbFree(pVal);
tDecoderInit(&dc2, stbEntry.pBuf, nVal);
metaDecodeEntry(&dc2, &stbEntry);
@ -859,12 +874,16 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
metaSaveToTbDb(pMeta, &ctbEntry);
// save to uid.idx
tdbTbUpsert(pMeta->pUidIdx, &ctbEntry.uid, sizeof(tb_uid_t), &version, sizeof(version), &pMeta->txn);
metaUpdateUidIdx(pMeta, &ctbEntry);
if (iCol == 0) {
metaUpdateTagIdx(pMeta, &ctbEntry);
}
SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid};
tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags,
((STag *)(ctbEntry.ctbEntry.pTags))->len, &pMeta->txn);
tDecoderClear(&dc1);
tDecoderClear(&dc2);
if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
@ -914,7 +933,7 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
ASSERT(c == 0);
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
oversion = *(int64_t *)pData;
oversion = ((SUidIdxVal *)pData)[0].version;
// search table.db
TBC *pTbDbc = NULL;
@ -959,7 +978,7 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
// save to table db
metaSaveToTbDb(pMeta, &entry);
tdbTbcUpsert(pUidIdxc, &entry.uid, sizeof(tb_uid_t), &version, sizeof(version), 0);
metaUpdateUidIdx(pMeta, &entry);
metaULock(pMeta);
tdbTbcClose(pTbDbc);
@ -1042,7 +1061,14 @@ _err:
}
static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) {
return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &pME->version, sizeof(int64_t), &pMeta->txn);
// upsert cache
SMetaInfo info;
metaGetEntryInfo(pME, &info);
metaCacheUpsert(pMeta, &info);
SUidIdxVal uidIdxVal = {.suid = info.suid, .version = info.version, .skmVer = info.skmVer};
return tdbTbUpsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &uidIdxVal, sizeof(uidIdxVal), &pMeta->txn);
}
static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME) {
@ -1062,7 +1088,9 @@ static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME) {
static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) {
SCtbIdxKey ctbIdxKey = {.suid = pME->ctbEntry.suid, .uid = pME->uid};
return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), NULL, 0, &pMeta->txn);
return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), pME->ctbEntry.pTags,
((STag *)(pME->ctbEntry.pTags))->len, &pMeta->txn);
}
int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, int32_t nTagData, int8_t type, tb_uid_t uid,
@ -1118,7 +1146,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
return -1;
}
tbDbKey.uid = pCtbEntry->ctbEntry.suid;
tbDbKey.version = *(int64_t *)pData;
tbDbKey.version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pData, &nData);
tDecoderInit(&dc, pData, nData);

View File

@ -83,8 +83,7 @@ int32_t smaBegin(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv);
int8_t rsmaTriggerStat =
atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED, TASK_TRIGGER_STAT_ACTIVE);
@ -123,7 +122,7 @@ static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma) {
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
// step 1: set rsma stat paused
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
@ -289,8 +288,7 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(SMA_ENV_STAT(pSmaEnv));
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
// cleanup outdated qtaskinfo files
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
@ -299,10 +297,9 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
}
/**
* @brief Rsma async commit implementation
* @brief Rsma async commit implementation (only does some necessary lightweight tasks)
* 1) set rsma stat TASK_TRIGGER_STAT_PAUSED
* 2) Wait for all running fetch tasks to finish fetching and putting submitMsg into level 2/3 wQueue (blocking level 1 write)
* 3)
*
* @param pSma
* @return int32_t
@ -314,7 +311,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
@ -336,28 +333,55 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
}
// step 3: swap rsmaInfoHash and iRsmaInfoHash
/**
* @brief step 3: consume the SubmitReq in buffer
* 1) This is a high-cost task and should not originally be put in asyncPreCommit.
* 2) But if put in asyncCommit, it would trigger frequent taskInfo cloning.
*/
nLoops = 0;
smaInfo("vgId:%d, start to wait for rsma qtask free, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
int8_t old;
while (1) {
old = atomic_val_compare_exchange_8(&pRSmaStat->execStat, 0, 1);
if (old == 0) break;
if (++nLoops > 1000) {
sched_yield();
nLoops = 0;
smaDebug("vgId:%d, wait for rsma qtask free, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
}
}
smaInfo("vgId:%d, end to wait for rsma qtask free, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_COMMIT) < 0) {
atomic_store_8(&pRSmaStat->execStat, 0);
return TSDB_CODE_FAILED;
}
// step 4: swap queue/qall and iQueue/iQall
// lock
taosWLockLatch(SMA_ENV_LOCK(pEnv));
ASSERT(RSMA_INFO_HASH(pRSmaStat));
ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat));
RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat);
RSMA_INFO_HASH(pRSmaStat) =
taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
void *pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL);
if (!RSMA_INFO_HASH(pRSmaStat)) {
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr());
return TSDB_CODE_FAILED;
while (pIter) {
SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
TSWAP(pInfo->iQall, pInfo->qall);
TSWAP(pInfo->iQueue, pInfo->queue);
TSWAP(pInfo->iTaskInfo[0], pInfo->taskInfo[0]);
TSWAP(pInfo->iTaskInfo[1], pInfo->taskInfo[1]);
pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter);
}
atomic_store_64(&pRSmaStat->qBufSize, 0);
atomic_store_8(&pRSmaStat->execStat, 0);
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
// step 4: others
// step 5: others
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
return TSDB_CODE_SUCCESS;
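The step-3 handshake above claims a one-byte busy flag with a compare-and-swap and yields the CPU while waiting. A minimal standalone sketch of the same acquire pattern, using standard C11 atomics and POSIX sched_yield rather than TDengine's atomic wrappers (acquireExecFlag is a made-up name, not the committed code):
#include <sched.h>
#include <stdatomic.h>
/* Spin until the exec flag flips from 0 (free) to 1 (owned), yielding the CPU
 * periodically so a long-running executor thread is not starved. */
static void acquireExecFlag(atomic_schar *execStat) {
  int nLoops = 0;
  for (;;) {
    signed char expected = 0;
    if (atomic_compare_exchange_weak(execStat, &expected, 1)) return; /* acquired */
    if (++nLoops > 1000) {
      sched_yield();
      nLoops = 0;
    }
  }
}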
@ -375,17 +399,18 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv);
// perform persist task for qTaskInfo
tdRSmaPersistExecImpl(pRSmaStat, RSMA_IMU_INFO_HASH(pRSmaStat));
// perform persist task for qTaskInfo operator
if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) {
return TSDB_CODE_FAILED;
}
return TSDB_CODE_SUCCESS;
}
/**
* @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsmaInfoHash not empty.
* @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsma infoHash not empty.
*
* @param pSma
* @return int32_t
@ -396,65 +421,66 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SArray *rsmaDeleted = NULL;
// step 1: merge rsmaInfoHash and iRsmaInfoHash
// step 1: merge qTaskInfo and iQTaskInfo
// lock
taosWLockLatch(SMA_ENV_LOCK(pEnv));
#if 0
if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
// just switch the hash pointer if rsmaInfoHash is empty
if (taosHashGetSize(RSMA_IMU_INFO_HASH(pRSmaStat)) > 0) {
SHashObj *infoHash = RSMA_INFO_HASH(pRSmaStat);
RSMA_INFO_HASH(pRSmaStat) = RSMA_IMU_INFO_HASH(pRSmaStat);
RSMA_IMU_INFO_HASH(pRSmaStat) = infoHash;
}
} else {
#endif
#if 1
void *pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL);
void *pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL);
while (pIter) {
tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) {
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
if (refVal == 0) {
tdFreeRSmaInfo(pSma, pRSmaInfo, true);
smaDebug(
"vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for "
"table:%" PRIi64,
SMA_VID(pSma), *pSuid);
} else {
smaDebug(
"vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
"table:%" PRIi64,
SMA_VID(pSma), refVal, *pSuid);
tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
if (refVal == 0) {
if (!rsmaDeleted) {
if ((rsmaDeleted = taosArrayInit(1, sizeof(tb_uid_t)))) {
taosArrayPush(rsmaDeleted, pSuid);
}
}
pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
continue;
} else {
smaDebug(
"vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
"table:%" PRIi64,
SMA_VID(pSma), refVal, *pSuid);
}
taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma),
*pSuid);
} else {
// free the resources
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
tdFreeRSmaInfo(pSma, pRSmaInfo, false);
smaDebug("vgId:%d, rsma async post commit, free rsma info since already COW for table:%" PRIi64, SMA_VID(pSma),
*pSuid);
pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter);
continue;
}
pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
}
#endif
// }
if (pRSmaInfo->taskInfo[0]) {
if (pRSmaInfo->iTaskInfo[0]) {
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pRSmaInfo->iTaskInfo[0];
tdFreeRSmaInfo(pSma, pRSmaInfo, true);
pRSmaInfo->iTaskInfo[0] = NULL;
}
} else {
TSWAP(pRSmaInfo->taskInfo[0], pRSmaInfo->iTaskInfo[0]);
}
taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat));
RSMA_IMU_INFO_HASH(pRSmaStat) = NULL;
taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), *pSuid);
pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter);
}
for (int32_t i = 0; i < taosArrayGetSize(rsmaDeleted); ++i) {
tb_uid_t *pSuid = taosArrayGet(rsmaDeleted, i);
void *pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t));
if ((pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
tdFreeRSmaInfo(pSma, pRSmaInfo, true);
smaDebug(
"vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for "
"table:%" PRIi64,
SMA_VID(pSma), *pSuid);
}
taosHashRemove(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t));
}
taosArrayDestroy(rsmaDeleted);
// TODO: remove suid in files?
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
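The loop above cannot call taosHashRemove while taosHashIterate is still walking the table, so entries flagged for deletion are first recorded in rsmaDeleted and removed afterwards. A simplified sketch of that collect-then-remove idiom, reusing names from the snippet with error handling omitted (removeDeletedRSmaInfos is a made-up helper, not the committed code):
/* Collect keys flagged for deletion while iterating the hash, then remove
 * them in a second pass so the live iterator is never invalidated. */
static void removeDeletedRSmaInfos(SSma *pSma, SHashObj *pHash) {
  SArray *deleted = taosArrayInit(4, sizeof(tb_uid_t));
  void   *pIter = taosHashIterate(pHash, NULL);
  while (pIter) {
    SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
    if (RSMA_INFO_IS_DEL(pInfo) && T_REF_VAL_GET(pInfo) == 0) {
      taosArrayPush(deleted, taosHashGetKey(pIter, NULL));
    }
    pIter = taosHashIterate(pHash, pIter);
  }
  for (int32_t i = 0; i < taosArrayGetSize(deleted); ++i) {
    tb_uid_t *pSuid = taosArrayGet(deleted, i);
    void     *pEntry = taosHashGet(pHash, pSuid, sizeof(tb_uid_t));
    if (pEntry) tdFreeRSmaInfo(pSma, *(SRSmaInfo **)pEntry, true);
    taosHashRemove(pHash, pSuid, sizeof(tb_uid_t));
  }
  taosArrayDestroy(deleted);
}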

View File

@ -171,7 +171,7 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
if (!pRSmaInfo) return 0;
int ref = T_REF_INC(pRSmaInfo);
smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
return 0;
@ -228,7 +228,12 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
RSMA_INFO_HASH(pRSmaStat) = taosHashInit(
RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (!RSMA_INFO_HASH(pRSmaStat)) {
taosMemoryFreeClear(*pSmaStat);
return TSDB_CODE_FAILED;
}
RSMA_FETCH_HASH(pRSmaStat) = taosHashInit(
RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (!RSMA_FETCH_HASH(pRSmaStat)) {
return TSDB_CODE_FAILED;
}
} else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
@ -264,8 +269,6 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
atomic_store_8(RSMA_TRIGGER_STAT(pStat), TASK_TRIGGER_STAT_CANCELLED);
// step 2: destroy the rsma info and associated fetch tasks
// TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready.
#if 1
if (taosHashGetSize(RSMA_INFO_HASH(pStat)) > 0) {
void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL);
while (infoHash) {
@ -274,10 +277,12 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash);
}
}
#endif
taosHashCleanup(RSMA_INFO_HASH(pStat));
// step 3: wait all triggered fetch tasks finished
// step 3: destroy the rsma fetch hash
taosHashCleanup(RSMA_FETCH_HASH(pStat));
// step 4: wait all triggered fetch tasks finished
int32_t nLoops = 0;
while (1) {
if (T_REF_VAL_GET((SSmaStat *)pStat) == 0) {
@ -293,7 +298,7 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
}
}
// step 4: free pStat
// step 5: free pStat
taosMemoryFreeClear(pStat);
}
}
@ -318,9 +323,9 @@ void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) {
int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
if (pSmaStat) {
if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
tdDestroyTSmaStat(SMA_TSMA_STAT(pSmaStat));
tdDestroyTSmaStat(SMA_STAT_TSMA(pSmaStat));
} else if (smaType == TSDB_SMA_TYPE_ROLLUP) {
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSmaStat);
SRSmaStat *pRSmaStat = &pSmaStat->rsmaStat;
int32_t vid = SMA_VID(pRSmaStat->pSma);
int64_t refId = RSMA_REF_ID(pRSmaStat);
if (taosRemoveRef(smaMgmt.rsetId, RSMA_REF_ID(pRSmaStat)) < 0) {

View File

@ -15,8 +15,10 @@
#include "sma.h"
#define RSMA_QTASKINFO_BUFSIZE 32768
#define RSMA_QTASKINFO_BUFSIZE (32768)
#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid
#define RSMA_QTASKEXEC_BUFSIZE (1048576)
#define RSMA_SUBMIT_BATCH_SIZE (1024)
SSmaMgmt smaMgmt = {
.inited = 0,
@ -27,17 +29,20 @@ SSmaMgmt smaMgmt = {
#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
typedef struct SRSmaExecQItem SRSmaExecQItem;
static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo,
int8_t idx);
static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, tb_uid_t suid,
int8_t level);
static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int32_t inputType, SRSmaInfo *pInfo,
ERsmaExecType type, int8_t level);
static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid);
static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
static void tdFreeRSmaSubmitItems(SArray *pItems);
static int32_t tdRSmaConsumeAndFetch(SSma *pSma, int64_t suid, int8_t level, SArray *pSubmitArr);
static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
int64_t suid, int8_t blkType);
int64_t suid);
static void tdRSmaFetchTrigger(void *param, void *tmrId);
static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level);
static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile);
@ -76,6 +81,11 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};
struct SRSmaExecQItem {
void *pRSmaInfo;
void *qall;
};
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@ -139,6 +149,18 @@ void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) {
if (isDeepFree) {
taosMemoryFreeClear(pInfo->pTSchema);
}
if (isDeepFree) {
if (pInfo->queue) taosCloseQueue(pInfo->queue);
if (pInfo->qall) taosFreeQall(pInfo->qall);
if (pInfo->iQueue) taosCloseQueue(pInfo->iQueue);
if (pInfo->iQall) taosFreeQall(pInfo->iQall);
pInfo->queue = NULL;
pInfo->qall = NULL;
pInfo->iQueue = NULL;
pInfo->iQall = NULL;
}
taosMemoryFree(pInfo);
}
@ -179,7 +201,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pRSmaInfo->taskInfo[i]) {
if ((qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true) < 0)) {
if (((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true)) < 0)) {
tdReleaseRSmaInfo(pSma, pRSmaInfo);
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " level %d since %s", SMA_VID(pSma), *suid, i,
terrstr());
@ -351,6 +373,19 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
goto _err;
}
pRSmaInfo->pTSchema = pTSchema;
if (!(pRSmaInfo->queue = taosOpenQueue())) {
goto _err;
}
if (!(pRSmaInfo->qall = taosAllocateQall())) {
goto _err;
}
if (!(pRSmaInfo->iQueue = taosOpenQueue())) {
goto _err;
}
if (!(pRSmaInfo->iQall = taosAllocateQall())) {
goto _err;
}
pRSmaInfo->suid = suid;
pRSmaInfo->refId = RSMA_REF_ID(pStat);
T_REF_INIT_VAL(pRSmaInfo, 1);
@ -419,8 +454,7 @@ int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) {
return TSDB_CODE_SUCCESS;
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv);
SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, pReq->suid);
@ -528,6 +562,14 @@ void *tdUidStoreFree(STbUidStore *pStore) {
return NULL;
}
/**
* @brief The SubmitReq for rsma L2/L3 is inserted directly by the tsdbInsertData method rather than through the write
* queue, as the queue would be freed when the Vnode is closed; thus a lock should be used if there is a race condition.
* @param pTsdb
* @param version
* @param pReq
* @return int32_t
*/
static int32_t tdProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) {
if (!pReq) {
terrno = TSDB_CODE_INVALID_PTR;
@ -535,7 +577,7 @@ static int32_t tdProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) {
}
SSubmitReq *pSubmitReq = (SSubmitReq *)pReq;
// TODO: spin lock for race condition
if (tsdbInsertData(pTsdb, version, pSubmitReq, NULL) < 0) {
return TSDB_CODE_FAILED;
}
@ -569,17 +611,6 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) {
return 0;
}
static void tdDestroySDataBlockArray(SArray *pArray) {
// TODO
#if 0
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SSDataBlock *pDataBlock = taosArrayGet(pArray, i);
blockDestroyInner(pDataBlock);
}
#endif
taosArrayDestroy(pArray);
}
/**
* @brief retention of rsma1/rsma2
*
@ -605,7 +636,7 @@ _end:
}
static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
int64_t suid, int8_t blkType) {
int64_t suid) {
SArray *pResList = taosArrayInit(1, POINTER_BYTES);
if (pResList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -615,7 +646,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
while (1) {
uint64_t ts;
int32_t code = qExecTaskOpt(taskInfo, pResList, &ts);
if (code < 0) {
if (code < 0) {
if (code == TSDB_CODE_QRY_IN_EXEC) {
break;
} else {
@ -637,8 +668,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
} else {
smaDebug("vgId:%d, rsma %" PRIi8 " data fetched", SMA_VID(pSma), pItem->level);
}
#if 1
#if 0
char flag[10] = {0};
snprintf(flag, 10, "level %" PRIi8, pItem->level);
blockDebugShowDataBlocks(pResList, flag);
@ -662,10 +692,9 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
goto _err;
}
taosMemoryFreeClear(pReq);
smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%" PRIi64,
SMA_VID(pSma), suid, pItem->level, output->info.version);
}
}
@ -677,34 +706,113 @@ _err:
return TSDB_CODE_FAILED;
}
static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, tb_uid_t suid,
int8_t level) {
/**
* @brief Copy msg to rsmaQueueBuffer for batch processing
*
* @param pSma
* @param pMsg
* @param inputType
* @param pInfo
* @param suid
* @return int32_t
*/
static int32_t tdExecuteRSmaImplAsync(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo,
tb_uid_t suid) {
const SSubmitReq *pReq = (const SSubmitReq *)pMsg;
void *qItem = taosAllocateQitem(pReq->header.contLen, DEF_QITEM);
if (!qItem) {
return TSDB_CODE_FAILED;
}
memcpy(qItem, pMsg, pReq->header.contLen);
taosWriteQitem(pInfo->queue, qItem);
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
int64_t bufSize = atomic_add_fetch_64(&pRSmaStat->qBufSize, pReq->header.contLen);
// smooth consumption: sleep in proportion to how far the buffer exceeds one exec batch
int32_t n = bufSize / RSMA_QTASKEXEC_BUFSIZE;
if (n > 1) {
if (n > 10) {
n = 10;
}
taosMsleep(n << 4);
if (n > 2) {
smaWarn("vgId:%d, pInfo->queue itemSize:%d, memSize:%" PRIi64 ", sleep %d ms", SMA_VID(pSma),
taosQueueItemSize(pInfo->queue), taosQueueMemorySize(pInfo->queue), n << 4);
} else {
smaDebug("vgId:%d, pInfo->queue itemSize:%d, memSize:%" PRIi64 ", sleep %d ms", SMA_VID(pSma),
taosQueueItemSize(pInfo->queue), taosQueueMemorySize(pInfo->queue), n << 4);
}
}
return TSDB_CODE_SUCCESS;
}
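tdExecuteRSmaImplAsync above applies back-pressure by sleeping for a time proportional to how much data has piled up in the queue, capped so ingestion is never stalled for long. The policy in isolation might look like the following sketch (throttleRSmaProducer is a made-up name, not the committed code):
/* Pause the producer in proportion to how far the buffered bytes exceed one
 * execution batch, capping the penalty at roughly 160 ms. */
static void throttleRSmaProducer(int64_t bufSize) {
  int64_t n = bufSize / RSMA_QTASKEXEC_BUFSIZE;
  if (n > 1) {
    if (n > 10) n = 10;
    taosMsleep((int32_t)(n << 4)); /* 32, 48, ... 160 ms */
  }
}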
static int32_t tdRsmaPrintSubmitReq(SSma *pSma, SSubmitReq *pReq) {
SSubmitMsgIter msgIter = {0};
SSubmitBlkIter blkIter = {0};
STSRow *row = NULL;
if (tInitSubmitMsgIter(pReq, &msgIter) < 0) return -1;
while (true) {
SSubmitBlk *pBlock = NULL;
if (tGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
if (pBlock == NULL) break;
tInitSubmitBlkIter(&msgIter, pBlock, &blkIter);
while ((row = tGetSubmitBlkNext(&blkIter)) != NULL) {
smaDebug("vgId:%d, numOfRows:%d, suid:%" PRIi64 ", uid:%" PRIi64 ", version:%" PRIi64 ", ts:%" PRIi64,
SMA_VID(pSma), msgIter.numOfRows, msgIter.suid, msgIter.uid, pReq->version, row->ts);
}
}
return 0;
}
/**
* @brief sync mode
*
* @param pSma
* @param pMsg
* @param msgSize
* @param inputType
* @param pInfo
* @param type
* @param level
* @return int32_t
*/
static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int32_t inputType, SRSmaInfo *pInfo,
ERsmaExecType type, int8_t level) {
int32_t idx = level - 1;
if (!pInfo || !RSMA_INFO_QTASK(pInfo, idx)) {
smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
void *qTaskInfo = (type == RSMA_EXEC_COMMIT) ? RSMA_INFO_IQTASK(pInfo, idx) : RSMA_INFO_QTASK(pInfo, idx);
if (!qTaskInfo) {
smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level,
pInfo->suid);
return TSDB_CODE_SUCCESS;
}
if (!pInfo->pTSchema) {
smaWarn("vgId:%d, no schema to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
smaWarn("vgId:%d, no schema to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, pInfo->suid);
return TSDB_CODE_FAILED;
}
smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level,
RSMA_INFO_QTASK(pInfo, idx), suid);
RSMA_INFO_QTASK(pInfo, idx), pInfo->suid);
if (qSetMultiStreamInput(RSMA_INFO_QTASK(pInfo, idx), pMsg, 1, inputType) < 0) { // INPUT__DATA_SUBMIT
#if 0
for (int32_t i = 0; i < msgSize; ++i) {
SSubmitReq *pReq = *(SSubmitReq **)((char *)pMsg + i * sizeof(void *));
smaDebug("vgId:%d, [%d][%d] version %" PRIi64, SMA_VID(pSma), msgSize, i, pReq->version);
tdRsmaPrintSubmitReq(pSma, pReq);
}
#endif
if (qSetMultiStreamInput(qTaskInfo, pMsg, msgSize, inputType) < 0) {
smaError("vgId:%d, rsma %" PRIi8 " qSetStreamInput failed since %s", SMA_VID(pSma), level, tstrerror(terrno));
return TSDB_CODE_FAILED;
}
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx);
tdRSmaFetchAndSubmitResult(pSma, RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid,
STREAM_INPUT__DATA_SUBMIT);
atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
if (smaMgmt.tmrHandle) {
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
tdRSmaFetchAndSubmitResult(pSma, qTaskInfo, pItem, pInfo->pTSchema, pInfo->suid);
return TSDB_CODE_SUCCESS;
}
@ -739,51 +847,20 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
if (!pRSmaInfo->taskInfo[0]) {
if (tdCloneRSmaInfo(pSma, pRSmaInfo) < 0) {
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
}
tdRefRSmaInfo(pSma, pRSmaInfo);
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
ASSERT(pRSmaInfo->suid == suid);
return pRSmaInfo;
}
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
if (RSMA_COMMIT_STAT(pStat) == 0) { // return NULL if not in committing stat
return NULL;
}
// clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat
SRSmaInfo *pCowRSmaInfo = NULL;
// lock
taosWLockLatch(SMA_ENV_LOCK(pEnv));
if (!(pCowRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)))) { // 2-phase lock
void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
if (iRSmaInfo) {
SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo;
if (pIRSmaInfo && !RSMA_INFO_IS_DEL(pIRSmaInfo)) {
if (tdCloneRSmaInfo(pSma, &pCowRSmaInfo, pIRSmaInfo) < 0) {
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
return NULL;
}
smaDebug("vgId:%d, clone rsma info succeed for suid:%" PRIu64, SMA_VID(pSma), suid);
if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) {
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
return NULL;
}
}
}
} else {
pCowRSmaInfo = *(SRSmaInfo **)pCowRSmaInfo;
ASSERT(!pCowRSmaInfo);
}
if (pCowRSmaInfo) {
tdRefRSmaInfo(pSma, pCowRSmaInfo);
}
// unlock
taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
return pCowRSmaInfo;
return NULL;
}
static FORCE_INLINE void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
@ -792,22 +869,90 @@ static FORCE_INLINE void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
}
}
static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
/**
* @brief async mode
*
* @param pSma
* @param pMsg
* @param inputType
* @param suid
* @return int32_t
*/
static int32_t tdExecuteRSmaAsync(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, suid);
if (!pRSmaInfo) {
smaError("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
smaDebug("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
return TSDB_CODE_SUCCESS;
}
if (inputType == STREAM_INPUT__DATA_SUBMIT) {
tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo, suid, TSDB_RETENTION_L1);
tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo, suid, TSDB_RETENTION_L2);
if (tdExecuteRSmaImplAsync(pSma, pMsg, inputType, pRSmaInfo, suid) < 0) {
tdReleaseRSmaInfo(pSma, pRSmaInfo);
return TSDB_CODE_FAILED;
}
if (smaMgmt.tmrHandle) {
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pRSmaInfo, 0);
if (pItem->level > 0) {
atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
}
pItem = RSMA_INFO_ITEM(pRSmaInfo, 1);
if (pItem->level > 0) {
atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
}
}
} else {
ASSERT(0);
}
tdReleaseRSmaInfo(pSma, pRSmaInfo);
return TSDB_CODE_SUCCESS;
}
static int32_t tdRSmaExecCheck(SSma *pSma) {
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
int64_t bufSize = atomic_load_64(&pRSmaStat->qBufSize);
if (bufSize < RSMA_QTASKEXEC_BUFSIZE) {
smaDebug("vgId:%d, bufSize is %d but has no chance to exec as less than %d", SMA_VID(pSma), bufSize,
RSMA_QTASKEXEC_BUFSIZE);
return TSDB_CODE_SUCCESS;
}
if (atomic_val_compare_exchange_8(&pRSmaStat->execStat, 0, 1) == 1) {
smaDebug("vgId:%d, bufSize is %d but has no chance to exec as qTaskInfo occupied by another task", SMA_VID(pSma),
bufSize);
return TSDB_CODE_SUCCESS;
}
smaDebug("vgId:%d, bufSize is %d and has chance to exec as qTaskInfo is free now", SMA_VID(pSma), bufSize);
SRSmaExecMsg fetchMsg;
int32_t contLen = sizeof(SMsgHead);
void *pBuf = rpcMallocCont(0 + contLen);
((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
((SMsgHead *)pBuf)->contLen = sizeof(SMsgHead);
SRpcMsg rpcMsg = {
.code = 0,
.msgType = TDMT_VND_EXEC_RSMA,
.pCont = pBuf,
.contLen = contLen,
};
if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) {
smaError("vgId:%d, failed to put rsma exec msg into query-queue since %s", SMA_VID(pSma), terrstr());
goto _err;
}
smaDebug("vgId:%d, success to put rsma fetch msg into query-queue", SMA_VID(pSma));
return TSDB_CODE_SUCCESS;
_err:
atomic_store_8(&pRSmaStat->execStat, 0);
return TSDB_CODE_FAILED;
}
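The check above compresses to: skip if the buffer is still small, give up if another thread already owns the executor, otherwise post a header-only message so a query thread drains the buffers. A condensed sketch of the hand-off step (postRSmaExecRequest is a made-up name; failure handling is reduced to releasing the busy flag):
/* Post a header-only message to the vnode's query queue to ask another
 * thread to drain the rsma buffers; release the busy flag if enqueue fails. */
static int32_t postRSmaExecRequest(SSma *pSma, int8_t *execStat) {
  int32_t   contLen = sizeof(SMsgHead);
  SMsgHead *pHead = rpcMallocCont(contLen);
  if (!pHead) {
    atomic_store_8(execStat, 0);
    return TSDB_CODE_FAILED;
  }
  pHead->vgId = SMA_VID(pSma);
  pHead->contLen = contLen;
  SRpcMsg rpcMsg = {.msgType = TDMT_VND_EXEC_RSMA, .pCont = pHead, .contLen = contLen};
  if (tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg) != 0) {
    atomic_store_8(execStat, 0); /* give the flag back so a later submit can retry */
    return TSDB_CODE_FAILED;
  }
  return TSDB_CODE_SUCCESS;
}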
int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
if (!pEnv) {
@ -826,45 +971,62 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
tdFetchSubmitReqSuids(pMsg, &uidStore);
if (uidStore.suid != 0) {
tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid);
tdExecuteRSmaAsync(pSma, pMsg, inputType, uidStore.suid);
void *pIter = taosHashIterate(uidStore.uidHash, NULL);
while (pIter) {
tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid);
tdExecuteRSmaAsync(pSma, pMsg, inputType, *pTbSuid);
pIter = taosHashIterate(uidStore.uidHash, pIter);
}
tdUidStoreDestory(&uidStore);
tdRSmaExecCheck(pSma);
}
}
return TSDB_CODE_SUCCESS;
}
/**
* @brief retrieve rsma meta and init
*
* @param pSma
* @param nTables number of tables of rsma
* @return int32_t
*/
static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
SVnode *pVnode = pSma->pVnode;
SVnode *pVnode = pSma->pVnode;
SArray *suidList = NULL;
STbUidStore uidStore = {0};
SMetaReader mr = {0};
SArray *suidList = taosArrayInit(1, sizeof(tb_uid_t));
if (tsdbGetStbIdList(SMA_META(pSma), 0, suidList) < 0) {
taosArrayDestroy(suidList);
if (!(suidList = taosArrayInit(1, sizeof(tb_uid_t)))) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
if (vnodeGetStbIdList(pSma->pVnode, 0, suidList) < 0) {
smaError("vgId:%d, failed to restore rsma env since get stb id list error: %s", TD_VID(pVnode), terrstr());
return TSDB_CODE_FAILED;
goto _err;
}
int64_t arrSize = taosArrayGetSize(suidList);
if (nTables) {
*nTables = arrSize;
}
if (arrSize == 0) {
if (nTables) {
*nTables = 0;
}
taosArrayDestroy(suidList);
smaDebug("vgId:%d, no need to restore rsma env since empty stb id list", TD_VID(pVnode));
return TSDB_CODE_SUCCESS;
}
SMetaReader mr = {0};
int64_t nRsmaTables = 0;
metaReaderInit(&mr, SMA_META(pSma), 0);
if (!(uidStore.tbUids = taosArrayInit(1024, sizeof(tb_uid_t)))) {
goto _err;
}
for (int64_t i = 0; i < arrSize; ++i) {
tb_uid_t suid = *(tb_uid_t *)taosArrayGet(suidList, i);
smaDebug("vgId:%d, rsma restore, suid is %" PRIi64, TD_VID(pVnode), suid);
@ -877,6 +1039,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
ASSERT(mr.me.type == TSDB_SUPER_TABLE);
ASSERT(mr.me.uid == suid);
if (TABLE_IS_ROLLUP(mr.me.flags)) {
++nRsmaTables;
SRSmaParam *param = &mr.me.stbEntry.rsmaParam;
for (int i = 0; i < TSDB_RETENTION_L2; ++i) {
smaDebug("vgId:%d, rsma restore, table:%" PRIi64 " level:%d, maxdelay:%" PRIi64 " watermark:%" PRIi64
@ -887,17 +1050,40 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
smaError("vgId:%d, rsma restore env failed for %" PRIi64 " since %s", TD_VID(pVnode), suid, terrstr());
goto _err;
}
// reload all ctbUids for suid
uidStore.suid = suid;
if (vnodeGetCtbIdList(pVnode, suid, uidStore.tbUids) < 0) {
smaError("vgId:%d, rsma restore, get ctb idlist failed for %" PRIi64 " since %s", TD_VID(pVnode), suid,
terrstr());
goto _err;
}
if (tdUpdateTbUidList(pVnode->pSma, &uidStore) < 0) {
smaError("vgId:%d, rsma restore, update tb uid list failed for %" PRIi64 " since %s", TD_VID(pVnode), suid,
terrstr());
goto _err;
}
taosArrayClear(uidStore.tbUids);
smaDebug("vgId:%d, rsma restore env success for %" PRIi64, TD_VID(pVnode), suid);
}
}
metaReaderClear(&mr);
taosArrayDestroy(suidList);
tdUidStoreDestory(&uidStore);
if (nTables) {
*nTables = nRsmaTables;
}
return TSDB_CODE_SUCCESS;
_err:
metaReaderClear(&mr);
taosArrayDestroy(suidList);
tdUidStoreDestory(&uidStore);
return TSDB_CODE_FAILED;
}
@ -1230,7 +1416,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
}
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pRSmaInfo, i);
qTaskInfo_t taskInfo = RSMA_INFO_IQTASK(pRSmaInfo, i);
if (!taskInfo) {
smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d qTaskInfo is NULL", vid, pRSmaInfo->suid, i + 1);
continue;
@ -1388,7 +1574,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
}
_end:
// taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
}
@ -1400,7 +1586,7 @@ _end:
* @param level
* @return int32_t
*/
int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
SRSmaFetchMsg fetchMsg = {.suid = pInfo->suid, .level = level};
int32_t ret = 0;
int32_t contLen = 0;
@ -1427,7 +1613,7 @@ int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
.code = 0,
.msgType = TDMT_VND_FETCH_RSMA,
.pCont = pBuf,
.contLen = contLen,
.contLen = contLen + sizeof(SMsgHead),
};
if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) {
@ -1452,13 +1638,11 @@ _err:
* @return int32_t
*/
int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
SRSmaFetchMsg req = {0};
SDecoder decoder = {0};
void *pBuf = NULL;
SRSmaInfo *pInfo = NULL;
SRSmaInfoItem *pItem = NULL;
SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
SRSmaFetchMsg req = {0};
SDecoder decoder = {0};
void *pBuf = NULL;
SRSmaStat *pRSmaStat = NULL;
if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP;
goto _err;
@ -1472,37 +1656,265 @@ int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
goto _err;
}
pInfo = tdAcquireRSmaInfoBySuid(pSma, req.suid);
if (!pInfo) {
if (terrno == TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_RSMA_EMPTY_INFO;
pRSmaStat = SMA_RSMA_STAT(pSma);
if (atomic_val_compare_exchange_8(&pRSmaStat->execStat, 0, 1) == 0) {
SArray *pSubmitArr = NULL;
if (!(pSubmitArr = taosArrayInit(RSMA_SUBMIT_BATCH_SIZE, POINTER_BYTES))) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
atomic_store_8(&pRSmaStat->execStat, 0);
goto _err;
}
smaWarn("vgId:%d, failed to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8 " since %s", SMA_VID(pSma),
req.suid, req.level, terrstr());
goto _err;
tdRSmaConsumeAndFetch(pSma, req.suid, req.level, pSubmitArr);
atomic_store_8(&pRSmaStat->execStat, 0);
taosArrayDestroy(pSubmitArr);
} else {
int8_t level = req.level;
int8_t *val = taosHashGet(RSMA_FETCH_HASH(pRSmaStat), &req.suid, sizeof(req.suid));
if (val) {
level |= (*val);
}
ASSERT(level >= 1 && level <= 3);
taosHashPut(RSMA_FETCH_HASH(pRSmaStat), &req.suid, sizeof(req.suid), &level, sizeof(level));
}
pItem = RSMA_INFO_ITEM(pInfo, req.level - 1);
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, req.level - 1);
if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
goto _err;
}
if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid, STREAM_INPUT__DATA_BLOCK) < 0) {
goto _err;
}
tdCleanupStreamInputDataBlock(taskInfo);
tdReleaseRSmaInfo(pSma, pInfo);
tDecoderClear(&decoder);
smaDebug("vgId:%d, success to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), req.suid,
req.level);
return TSDB_CODE_SUCCESS;
_err:
tdReleaseRSmaInfo(pSma, pInfo);
tDecoderClear(&decoder);
smaError("vgId:%d, failed to process rsma fetch msg since %s", SMA_VID(pSma), terrstr());
return TSDB_CODE_FAILED;
}
static void tdFreeRSmaSubmitItems(SArray *pItems) {
for (int32_t i = 0; i < taosArrayGetSize(pItems); ++i) {
taosFreeQitem(*(void **)taosArrayGet(pItems, i));
}
}
static int32_t tdRSmaConsumeAndFetch(SSma *pSma, int64_t suid, int8_t level, SArray *pSubmitArr) {
SRSmaInfo *pInfo = tdAcquireRSmaInfoBySuid(pSma, suid);
if (!pInfo) {
return TSDB_CODE_SUCCESS;
}
// step 1: consume submit req
int64_t qMemSize = 0;
if ((qMemSize = taosQueueMemorySize(pInfo->queue)) > 0) {
taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
atomic_fetch_sub_64(&pRSmaStat->qBufSize, qMemSize);
taosArrayClear(pSubmitArr);
while (1) {
void *msg = NULL;
taosGetQitem(pInfo->qall, (void **)&msg);
if (msg) {
if (taosArrayPush(pSubmitArr, &msg) < 0) {
tdFreeRSmaSubmitItems(pSubmitArr);
goto _err;
}
} else {
break;
}
}
int32_t size = taosArrayGetSize(pSubmitArr);
if (size > 0) {
for (int32_t i = 1; i <= TSDB_RETENTION_L2; ++i) {
if (tdExecuteRSmaImpl(pSma, pSubmitArr->pData, size, STREAM_INPUT__MERGED_SUBMIT, pInfo, RSMA_EXEC_TIMEOUT, i) <
0) {
tdFreeRSmaSubmitItems(pSubmitArr);
goto _err;
}
}
tdFreeRSmaSubmitItems(pSubmitArr);
}
}
// step 2: fetch rsma result
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
for (int8_t i = 1; i <= TSDB_RETENTION_L2; ++i) {
if (level & i) {
qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, i - 1);
if (!taskInfo) {
continue;
}
if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
goto _err;
}
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, i - 1);
if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, suid) < 0) {
tdCleanupStreamInputDataBlock(taskInfo);
goto _err;
}
tdCleanupStreamInputDataBlock(taskInfo);
}
}
_end:
tdReleaseRSmaInfo(pSma, pInfo);
return TSDB_CODE_SUCCESS;
_err:
tdReleaseRSmaInfo(pSma, pInfo);
return TSDB_CODE_FAILED;
}
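tdRSmaConsumeAndFetch first detaches everything queued for the suid and flattens it into an array before executing. That drain step in isolation might look like this sketch (drainRSmaQueue is a made-up name, and the STaosQueue/STaosQall parameter types are assumptions about the handle types used above):
/* Drain every queued item into a flat array so the executor can be fed one
 * merged batch instead of one message at a time. */
static int32_t drainRSmaQueue(STaosQueue *pQueue, STaosQall *pQall, SArray *pBatch) {
  taosReadAllQitems(pQueue, pQall); /* detaches all queued items under the queue's lock */
  taosArrayClear(pBatch);
  while (1) {
    void *msg = NULL;
    taosGetQitem(pQall, (void **)&msg);
    if (!msg) break;
    if (taosArrayPush(pBatch, &msg) < 0) return TSDB_CODE_FAILED;
  }
  return TSDB_CODE_SUCCESS;
}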
/**
* @brief Batch-consume the SubmitReq messages buffered for all rsma infos and execute the corresponding tasks
*
* @param pSma
* @param type
* @return int32_t
*/
int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SHashObj *infoHash = NULL;
SArray *pSubmitQArr = NULL;
SArray *pSubmitArr = NULL;
bool isFetchAll = false;
if (!pRSmaStat || !(infoHash = RSMA_INFO_HASH(pRSmaStat))) {
terrno = TSDB_CODE_RSMA_INVALID_STAT;
goto _err;
}
if (type == RSMA_EXEC_OVERFLOW) {
taosRLockLatch(SMA_ENV_LOCK(pEnv));
if (atomic_load_64(&pRSmaStat->qBufSize) < RSMA_QTASKEXEC_BUFSIZE) {
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return TSDB_CODE_SUCCESS;
}
taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
}
if (!(pSubmitQArr = taosArrayInit(taosHashGetSize(infoHash), sizeof(SRSmaExecQItem)))) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
if (!(pSubmitArr = taosArrayInit(RSMA_SUBMIT_BATCH_SIZE, POINTER_BYTES))) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
// step 1: rsma exec - consume data in buffer queue for all suids
SRSmaExecQItem qItem = {0};
void *pIter = taosHashIterate(infoHash, NULL); // infoHash has r/w lock
if (type == RSMA_EXEC_OVERFLOW) {
while (pIter) {
SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
if (taosQueueItemSize(pInfo->queue)) {
taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock
qItem.qall = &pInfo->qall;
qItem.pRSmaInfo = pIter;
taosArrayPush(pSubmitQArr, &qItem);
}
ASSERT(taosQueueItemSize(pInfo->queue) == 0);
pIter = taosHashIterate(infoHash, pIter);
}
} else if (type == RSMA_EXEC_COMMIT) {
while (pIter) {
SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
if (taosQueueItemSize(pInfo->iQueue)) {
taosReadAllQitems(pInfo->iQueue, pInfo->iQall);
qItem.qall = &pInfo->iQall;
qItem.pRSmaInfo = pIter;
taosArrayPush(pSubmitQArr, &qItem);
}
ASSERT(taosQueueItemSize(pInfo->iQueue) == 0);
pIter = taosHashIterate(infoHash, pIter);
}
} else {
ASSERT(0);
}
atomic_store_64(&pRSmaStat->qBufSize, 0);
int32_t qSize = taosArrayGetSize(pSubmitQArr);
for (int32_t i = 0; i < qSize; ++i) {
SRSmaExecQItem *pItem = taosArrayGet(pSubmitQArr, i);
while (1) {
void *msg = NULL;
taosGetQitem(*(STaosQall **)pItem->qall, (void **)&msg);
if (msg) {
if (taosArrayPush(pSubmitArr, &msg) < 0) {
tdFreeRSmaSubmitItems(pSubmitArr);
goto _err;
}
} else {
break;
}
}
int32_t size = taosArrayGetSize(pSubmitArr);
if (size > 0) {
SRSmaInfo *pInfo = *(SRSmaInfo **)pItem->pRSmaInfo;
for (int32_t i = 1; i <= TSDB_RETENTION_L2; ++i) {
if (tdExecuteRSmaImpl(pSma, pSubmitArr->pData, size, STREAM_INPUT__MERGED_SUBMIT, pInfo, type, i) < 0) {
tdFreeRSmaSubmitItems(pSubmitArr);
goto _err;
}
}
tdFreeRSmaSubmitItems(pSubmitArr);
taosArrayClear(pSubmitArr);
}
}
// step 2: rsma fetch - consume data in buffer queue for suids triggered by timer
if (taosHashGetSize(RSMA_FETCH_HASH(pRSmaStat)) <= 0) {
goto _end;
}
pIter = taosHashIterate(RSMA_FETCH_HASH(pRSmaStat), NULL);
if (pIter) {
tdRSmaConsumeAndFetch(pSma, *(int64_t *)taosHashGetKey(pIter, NULL), *(int8_t *)pIter, pSubmitArr);
while ((pIter = taosHashIterate(RSMA_FETCH_HASH(pRSmaStat), pIter))) {
tdRSmaConsumeAndFetch(pSma, *(int64_t *)taosHashGetKey(pIter, NULL), *(int8_t *)pIter, pSubmitArr);
}
}
_end:
taosArrayDestroy(pSubmitArr);
taosArrayDestroy(pSubmitQArr);
return TSDB_CODE_SUCCESS;
_err:
taosArrayDestroy(pSubmitArr);
taosArrayDestroy(pSubmitQArr);
return TSDB_CODE_FAILED;
}
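The only difference between the RSMA_EXEC_OVERFLOW and RSMA_EXEC_COMMIT passes above is which pair of queues gets drained. A small selector makes that explicit (rsmaPickQueues is a made-up name and the queue handle types are assumed):
/* Overflow and timer-driven passes drain the live queues; the commit pass
 * drains the immutable queues that were swapped out in async pre-commit. */
static void rsmaPickQueues(SRSmaInfo *pInfo, ERsmaExecType type, STaosQueue **ppQueue, STaosQall **ppQall) {
  if (type == RSMA_EXEC_COMMIT) {
    *ppQueue = pInfo->iQueue;
    *ppQall = pInfo->iQall;
  } else {
    *ppQueue = pInfo->queue;
    *ppQall = pInfo->qall;
  }
}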
/**
* @brief exec rsma level 1 data, fetch results of level 2/3 and submit
*
* @param pSma
* @param pMsg
* @return int32_t
*/
int32_t smaProcessExec(SSma *pSma, void *pMsg) {
SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP;
goto _err;
}
smaDebug("vgId:%d, begin to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_OVERFLOW) < 0) {
goto _err;
}
atomic_store_8(&pRSmaStat->execStat, 0);
smaDebug("vgId:%d, success to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
return TSDB_CODE_SUCCESS;
_err:
atomic_store_8(&pRSmaStat->execStat, 0);
smaError("vgId:%d, failed to process rsma exec msg by TID:%p since %s", SMA_VID(pSma), (void *)taosGetSelfPthreadId(),
terrstr());
return TSDB_CODE_FAILED;
}

View File

@ -139,7 +139,6 @@ static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppBuf)
smaInfo("vgId:%d, vnode snapshot rsma read qtaskinfo, size:%" PRIi64, SMA_VID(pSma), size);
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppBuf);
pHdr->type = SNAP_DATA_QTASK;
pHdr->size = size;
@ -279,7 +278,8 @@ int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWrit
TdFilePtr qTaskF = taosCreateFile(qTaskInfoFullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (!qTaskF) {
code = TAOS_SYSTEM_ERROR(errno);
smaError("vgId:%d, rsma snapshot writer open %s failed since %s", TD_VID(pSma->pVnode), qTaskInfoFullName, tstrerror(code));
smaError("vgId:%d, rsma snapshot writer open %s failed since %s", TD_VID(pSma->pVnode), qTaskInfoFullName,
tstrerror(code));
goto _err;
}
qWriter->pWriteH = qTaskF;
@ -309,7 +309,7 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
if (rollback) {
// TODO: rsma1/rsma2
// qtaskinfo
if(pWriter->pQTaskFWriter) {
if (pWriter->pQTaskFWriter) {
taosRemoveFile(pWriter->pQTaskFWriter->fname);
}
} else {

View File

@ -175,7 +175,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
}
tdRefSmaStat(pSma, pStat);
pTsmaStat = SMA_TSMA_STAT(pStat);
pTsmaStat = SMA_STAT_TSMA(pStat);
if (!pTsmaStat->pTSma) {
STSma *pTSma = metaGetSmaInfoByIndex(SMA_META(pSma), indexUid);

View File

@ -350,49 +350,45 @@ _err:
}
/**
* @brief pTSchema is shared
* @brief Clone qTaskInfo of SRSmaInfo
*
* @param pSma
* @param pDest
* @param pSrc
* @param pInfo
* @return int32_t
*/
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc) {
SVnode *pVnode = pSma->pVnode;
int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
SRSmaParam *param = NULL;
if (!pSrc) {
*pDest = NULL;
if (!pInfo) {
return TSDB_CODE_SUCCESS;
}
SMetaReader mr = {0};
metaReaderInit(&mr, SMA_META(pSma), 0);
smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid);
if (metaGetTableEntryByUid(&mr, pSrc->suid) < 0) {
smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid,
smaDebug("vgId:%d, rsma clone qTaskInfo for suid:%" PRIi64, SMA_VID(pSma), pInfo->suid);
if (metaGetTableEntryByUid(&mr, pInfo->suid) < 0) {
smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid,
terrstr());
goto _err;
}
ASSERT(mr.me.type == TSDB_SUPER_TABLE);
ASSERT(mr.me.uid == pSrc->suid);
ASSERT(mr.me.uid == pInfo->suid);
if (TABLE_IS_ROLLUP(mr.me.flags)) {
param = &mr.me.stbEntry.rsmaParam;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (tdCloneQTaskInfo(pSma, pSrc->iTaskInfo[i], pSrc->taskInfo[i], param, pSrc->suid, i) < 0) {
if (tdCloneQTaskInfo(pSma, pInfo->taskInfo[i], pInfo->iTaskInfo[i], param, pInfo->suid, i) < 0) {
goto _err;
}
}
smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid);
smaDebug("vgId:%d, rsma clone env success for %" PRIi64, SMA_VID(pSma), pInfo->suid);
} else {
terrno = TSDB_CODE_RSMA_INVALID_SCHEMA;
goto _err;
}
metaReaderClear(&mr);
*pDest = pSrc; // pointer copy
return TSDB_CODE_SUCCESS;
_err:
*pDest = NULL;
metaReaderClear(&mr);
smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid, terrstr());
smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid, terrstr());
return TSDB_CODE_FAILED;
}

View File

@ -60,11 +60,11 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
pTq->path = strdup(path);
pTq->pVnode = pVnode;
pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
pTq->pPushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
pTq->pAlterInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
pTq->pCheckInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
if (tqMetaOpen(pTq) < 0) {
ASSERT(0);
@ -85,9 +85,9 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
void tqClose(STQ* pTq) {
if (pTq) {
tqOffsetClose(pTq->pOffsetStore);
taosHashCleanup(pTq->handles);
taosHashCleanup(pTq->pushMgr);
taosHashCleanup(pTq->pAlterInfo);
taosHashCleanup(pTq->pHandle);
taosHashCleanup(pTq->pPushMgr);
taosHashCleanup(pTq->pCheckInfo);
taosMemoryFree(pTq->path);
tqMetaClose(pTq);
streamMetaClose(pTq->pStreamMeta);
@ -183,7 +183,12 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
return 0;
}
int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver) {
static FORCE_INLINE bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) {
return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG &&
pLeft->val.version <= pRight->val.version;
}
int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
STqOffset offset = {0};
SDecoder decoder;
tDecoderInit(&decoder, msg, msgLen);
@ -199,19 +204,24 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve
} else if (offset.val.type == TMQ_OFFSET__LOG) {
tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:log) version:%" PRId64, offset.subKey,
TD_VID(pTq->pVnode), offset.val.version);
if (offset.val.version + 1 == version) {
offset.val.version += 1;
}
} else {
ASSERT(0);
}
/*STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey);*/
/*if (pOffset != NULL) {*/
/*if (pOffset->val.type == TMQ_OFFSET__LOG && pOffset->val.version < offset.val.version) {*/
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey);
if (pOffset != NULL && tqOffsetLessOrEqual(&offset, pOffset)) {
return 0;
}
if (tqOffsetWrite(pTq->pOffsetStore, &offset) < 0) {
ASSERT(0);
return -1;
}
if (offset.val.type == TMQ_OFFSET__LOG) {
STqHandle* pHandle = taosHashGet(pTq->handles, offset.subKey, strlen(offset.subKey));
STqHandle* pHandle = taosHashGet(pTq->pHandle, offset.subKey, strlen(offset.subKey));
if (pHandle) {
if (walRefVer(pHandle->pRef, offset.val.version) < 0) {
ASSERT(0);
@ -220,6 +230,8 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve
}
}
// rsp
/*}*/
/*}*/
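The new tqOffsetLessOrEqual guard makes offset commits monotonic: a stale or duplicate commit is dropped instead of rewinding the stored position. The decision in isolation could be written as the following sketch (shouldPersistOffset is a made-up helper, not the committed code):
/* Persist a log offset only if it moves forward relative to what is stored. */
static bool shouldPersistOffset(const STqOffset *pNew, const STqOffset *pStored) {
  if (pStored == NULL) return true;
  if (pNew->val.type != TMQ_OFFSET__LOG || pStored->val.type != TMQ_OFFSET__LOG) return true;
  return pNew->val.version > pStored->val.version;
}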
@ -229,15 +241,15 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve
int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) {
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->pAlterInfo, pIter);
pIter = taosHashIterate(pTq->pCheckInfo, pIter);
if (pIter == NULL) break;
SCheckAlterInfo* pCheck = (SCheckAlterInfo*)pIter;
STqCheckInfo* pCheck = (STqCheckInfo*)pIter;
if (pCheck->ntbUid == tbUid) {
int32_t sz = taosArrayGetSize(pCheck->colIdList);
for (int32_t i = 0; i < sz; i++) {
int16_t forbidColId = *(int16_t*)taosArrayGet(pCheck->colIdList, i);
if (forbidColId == colId) {
taosHashCancelIterate(pTq->pAlterInfo, pIter);
taosHashCancelIterate(pTq->pCheckInfo, pIter);
return -1;
}
}
@ -289,7 +301,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SWalCkHead* pCkHead = NULL;
// 1.find handle
STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey));
STqHandle* pHandle = taosHashGet(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
/*ASSERT(pHandle);*/
if (pHandle == NULL) {
tqError("tmq poll: no consumer handle for consumer:%" PRId64 ", in vgId:%d, subkey %s", consumerId,
@ -478,10 +490,10 @@ OVER:
return code;
}
int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg;
int32_t code = taosHashRemove(pTq->handles, pReq->subKey, strlen(pReq->subKey));
int32_t code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
ASSERT(code == 0);
tqOffsetDelete(pTq->pOffsetStore, pReq->subKey);
@ -492,27 +504,43 @@ int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
return 0;
}
int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen) {
SCheckAlterInfo info = {0};
SDecoder decoder;
int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
STqCheckInfo info = {0};
SDecoder decoder;
tDecoderInit(&decoder, msg, msgLen);
if (tDecodeSCheckAlterInfo(&decoder, &info) < 0) {
if (tDecodeSTqCheckInfo(&decoder, &info) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
tDecoderClear(&decoder);
if (taosHashPut(pTq->pAlterInfo, info.topic, strlen(info.topic), &info, sizeof(SCheckAlterInfo)) < 0) {
if (taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
if (tqMetaSaveCheckInfo(pTq, info.topic, msg, msgLen) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
return 0;
}
int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
if (taosHashRemove(pTq->pCheckInfo, msg, strlen(msg)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
if (tqMetaDeleteCheckInfo(pTq, msg) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
return 0;
}
int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SMqRebVgReq req = {0};
tDecodeSMqRebVgReq(msg, &req);
// todo lock
STqHandle* pHandle = taosHashGet(pTq->handles, req.subKey, strlen(req.subKey));
STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
if (pHandle == NULL) {
if (req.oldConsumerId != -1) {
tqError("vgId:%d, build new consumer handle %s for consumer %d, but old consumerId is %ld", req.vgId, req.subKey,
@ -579,7 +607,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
taosArrayDestroy(tbUidList);
}
taosHashPut(pTq->handles, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
// TODO
@ -600,8 +628,6 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
}
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
int32_t code = 0;
if (pTask->taskLevel == TASK_LEVEL__AGG) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
}
@ -612,8 +638,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
pTask->outputQueue = streamQueueOpen();
if (pTask->inputQueue == NULL || pTask->outputQueue == NULL) {
code = -1;
goto FAIL;
return -1;
}
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
@ -658,44 +683,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
streamSetupTrigger(pTask);
tqInfo("deploy stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId,
tqInfo("expand stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId,
pTask->selfChildId);
FAIL:
if (pTask->inputQueue) streamQueueClose(pTask->inputQueue);
if (pTask->outputQueue) streamQueueClose(pTask->outputQueue);
// TODO free executor
return code;
return 0;
}
int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) {
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
//
return streamMetaAddSerializedTask(pTq->pStreamMeta, msg, msgLen);
#if 0
SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
if (pTask == NULL) {
return -1;
}
SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
if (tDecodeSStreamTask(&decoder, pTask) < 0) {
ASSERT(0);
goto FAIL;
}
tDecoderClear(&decoder);
if (tqExpandTask(pTq, pTask) < 0) {
goto FAIL;
}
taosHashPut(pTq->pStreamTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*));
return 0;
FAIL:
if (pTask) taosMemoryFree(pTask);
return -1;
#endif
return streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen);
}
int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
@ -817,7 +812,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
}
}
int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) {
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
return streamMetaRemoveTask(pTq->pStreamMeta, pReq->taskId);

View File

@ -43,6 +43,185 @@ int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
return 0;
}
int32_t tqMetaOpen(STQ* pTq) {
if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaDB) < 0) {
ASSERT(0);
return -1;
}
if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pExecStore) < 0) {
ASSERT(0);
return -1;
}
if (tdbTbOpen("tq.check.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pCheckStore) < 0) {
ASSERT(0);
return -1;
}
if (tqMetaRestoreHandle(pTq) < 0) {
return -1;
}
if (tqMetaRestoreCheckInfo(pTq) < 0) {
return -1;
}
return 0;
}
int32_t tqMetaClose(STQ* pTq) {
if (pTq->pExecStore) {
tdbTbClose(pTq->pExecStore);
}
if (pTq->pCheckStore) {
tdbTbClose(pTq->pCheckStore);
}
tdbClose(pTq->pMetaDB);
return 0;
}
int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen) {
TXN txn;
if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}
if (tdbBegin(pTq->pMetaDB, &txn) < 0) {
return -1;
}
if (tdbTbUpsert(pTq->pExecStore, key, strlen(key), value, vLen, &txn) < 0) {
return -1;
}
if (tdbCommit(pTq->pMetaDB, &txn) < 0) {
return -1;
}
return 0;
}
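Every write to the tq metadata store above follows the same txn-open/begin/upsert/commit sequence. A condensed version that returns error codes instead of asserting (tqMetaPutKV is a made-up name; the transaction is simply abandoned on error rather than rolled back, which the real code would need to handle):
/* Generic shape of the single-write transactions used by the tq metadata store. */
static int32_t tqMetaPutKV(STQ* pTq, const char* key, const void* value, int32_t vLen) {
  TXN txn;
  if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
                 TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
    return -1;
  }
  if (tdbBegin(pTq->pMetaDB, &txn) < 0) return -1;
  if (tdbTbUpsert(pTq->pCheckStore, key, (int)strlen(key), value, vLen, &txn) < 0) return -1;
  return tdbCommit(pTq->pMetaDB, &txn);
}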
int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key) {
TXN txn;
if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
ASSERT(0);
}
if (tdbBegin(pTq->pMetaDB, &txn) < 0) {
ASSERT(0);
}
if (tdbTbDelete(pTq->pCheckStore, key, (int)strlen(key), &txn) < 0) {
/*ASSERT(0);*/
}
if (tdbCommit(pTq->pMetaDB, &txn) < 0) {
ASSERT(0);
}
return 0;
}
int32_t tqMetaRestoreCheckInfo(STQ* pTq) {
TBC* pCur = NULL;
if (tdbTbcOpen(pTq->pCheckStore, &pCur, NULL) < 0) {
ASSERT(0);
return -1;
}
void* pKey = NULL;
int kLen = 0;
void* pVal = NULL;
int vLen = 0;
SDecoder decoder;
tdbTbcMoveToFirst(pCur);
while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
STqCheckInfo info;
tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
if (tDecodeSTqCheckInfo(&decoder, &info) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
tDecoderClear(&decoder);
if (taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo)) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
}
tdbTbcClose(pCur);
return 0;
}
int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
int32_t code;
int32_t vlen;
tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);
ASSERT(code == 0);
tqDebug("tq save %s(%d) consumer %" PRId64 " vgId:%d", pHandle->subKey, strlen(pHandle->subKey), pHandle->consumerId,
TD_VID(pTq->pVnode));
void* buf = taosMemoryCalloc(1, vlen);
if (buf == NULL) {
ASSERT(0);
}
SEncoder encoder;
tEncoderInit(&encoder, buf, vlen);
if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
ASSERT(0);
}
TXN txn;
if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
ASSERT(0);
}
if (tdbBegin(pTq->pMetaDB, &txn) < 0) {
ASSERT(0);
}
if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) {
ASSERT(0);
}
if (tdbCommit(pTq->pMetaDB, &txn) < 0) {
ASSERT(0);
}
tEncoderClear(&encoder);
taosMemoryFree(buf);
return 0;
}
int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
TXN txn;
if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
ASSERT(0);
}
if (tdbBegin(pTq->pMetaDB, &txn) < 0) {
ASSERT(0);
}
if (tdbTbDelete(pTq->pExecStore, key, (int)strlen(key), &txn) < 0) {
/*ASSERT(0);*/
}
if (tdbCommit(pTq->pMetaDB, &txn) < 0) {
ASSERT(0);
}
return 0;
}
int32_t tqMetaRestoreHandle(STQ* pTq) {
TBC* pCur = NULL;
if (tdbTbcOpen(pTq->pExecStore, &pCur, NULL) < 0) {
@ -93,101 +272,10 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, TD_VID(pTq->pVnode));
taosHashPut(pTq->handles, pKey, kLen, &handle, sizeof(STqHandle));
taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
}
tdbTbcClose(pCur);
return 0;
}
int32_t tqMetaOpen(STQ* pTq) {
if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaStore) < 0) {
ASSERT(0);
return -1;
}
if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaStore, &pTq->pExecStore) < 0) {
ASSERT(0);
return -1;
}
if (tqMetaRestoreHandle(pTq) < 0) {
return -1;
}
return 0;
}
int32_t tqMetaClose(STQ* pTq) {
if (pTq->pExecStore) {
tdbTbClose(pTq->pExecStore);
}
tdbClose(pTq->pMetaStore);
return 0;
}
int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
int32_t code;
int32_t vlen;
tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);
ASSERT(code == 0);
tqDebug("tq save %s(%d) consumer %" PRId64 " vgId:%d", pHandle->subKey, strlen(pHandle->subKey), pHandle->consumerId,
TD_VID(pTq->pVnode));
void* buf = taosMemoryCalloc(1, vlen);
if (buf == NULL) {
ASSERT(0);
}
SEncoder encoder;
tEncoderInit(&encoder, buf, vlen);
if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
ASSERT(0);
}
TXN txn;
if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
ASSERT(0);
}
if (tdbBegin(pTq->pMetaStore, &txn) < 0) {
ASSERT(0);
}
if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) {
ASSERT(0);
}
if (tdbCommit(pTq->pMetaStore, &txn) < 0) {
ASSERT(0);
}
tEncoderClear(&encoder);
taosMemoryFree(buf);
return 0;
}
int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
TXN txn;
if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
ASSERT(0);
}
if (tdbBegin(pTq->pMetaStore, &txn) < 0) {
ASSERT(0);
}
if (tdbTbDelete(pTq->pExecStore, key, (int)strlen(key), &txn) < 0) {
/*ASSERT(0);*/
}
if (tdbCommit(pTq->pMetaStore, &txn) < 0) {
ASSERT(0);
}
return 0;
}

View File

@ -341,7 +341,7 @@ FAIL:
return -1;
}
void tqReaderSetColIdList(STqReader* pReadHandle, SArray* pColIdList) { pReadHandle->pColIdList = pColIdList; }
void tqReaderSetColIdList(STqReader* pReader, SArray* pColIdList) { pReader->pColIdList = pColIdList; }
int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
if (pReader->tbIdHash) {
@ -394,7 +394,7 @@ int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) {
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
void* pIter = NULL;
while (1) {
pIter = taosHashIterate(pTq->handles, pIter);
pIter = taosHashIterate(pTq->pHandle, pIter);
if (pIter == NULL) break;
STqHandle* pExec = (STqHandle*)pIter;
if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {

View File

@ -231,34 +231,35 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
ASSERT(pTask->tbSink.pTSchema);
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
SSubmitReq* pReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
pTask->tbSink.stbFullName, &deleteReq);
SSubmitReq* submitReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
pTask->tbSink.stbFullName, &deleteReq);
tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);
int32_t code;
int32_t len;
tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
if (code < 0) {
//
ASSERT(0);
}
SEncoder encoder;
void* buf = rpcMallocCont(len + sizeof(SMsgHead));
void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
tEncoderInit(&encoder, abuf, len);
tEncodeSBatchDeleteReq(&encoder, &deleteReq);
tEncoderClear(&encoder);
((SMsgHead*)buf)->vgId = pVnode->config.vgId;
if (taosArrayGetSize(deleteReq.deleteReqs) != 0) {
int32_t code;
int32_t len;
tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
if (code < 0) {
//
ASSERT(0);
}
SEncoder encoder;
void* serializedDeleteReq = rpcMallocCont(len + sizeof(SMsgHead));
void* abuf = POINTER_SHIFT(serializedDeleteReq, sizeof(SMsgHead));
tEncoderInit(&encoder, abuf, len);
tEncodeSBatchDeleteReq(&encoder, &deleteReq);
tEncoderClear(&encoder);
((SMsgHead*)serializedDeleteReq)->vgId = pVnode->config.vgId;
SRpcMsg msg = {
.msgType = TDMT_VND_BATCH_DEL,
.pCont = buf,
.pCont = serializedDeleteReq,
.contLen = len + sizeof(SMsgHead),
};
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
rpcFreeCont(serializedDeleteReq);
tqDebug("failed to put into write-queue since %s", terrstr());
}
}
@ -268,11 +269,12 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
// build write msg
SRpcMsg msg = {
.msgType = TDMT_VND_SUBMIT,
.pCont = pReq,
.contLen = ntohl(pReq->length),
.pCont = submitReq,
.contLen = ntohl(submitReq->length),
};
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
rpcFreeCont(submitReq);
tqDebug("failed to put into write-queue since %s", terrstr());
}
}
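The change above frees the rpc continuation when the enqueue fails, since ownership transfers to the queue only on success. Stated as a tiny helper (vnodeSendToWriteQOrFree is a made-up name, not the committed code):
/* Hand a message to the write queue; if the queue refuses it, the sender
 * still owns the continuation and must free it to avoid a leak. */
static void vnodeSendToWriteQOrFree(SVnode* pVnode, SRpcMsg* pMsg) {
  if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, pMsg) != 0) {
    rpcFreeCont(pMsg->pCont);
    pMsg->pCont = NULL;
  }
}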

View File

@ -165,9 +165,9 @@ int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback) {
STQ* pTq = pWriter->pTq;
if (rollback) {
ASSERT(0);
tdbAbort(pWriter->pTq->pMetaDB, &pWriter->txn);
} else {
code = tdbCommit(pWriter->pTq->pMetaStore, &pWriter->txn);
code = tdbCommit(pWriter->pTq->pMetaDB, &pWriter->txn);
if (code) goto _err;
}

View File

@ -33,16 +33,21 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
taosLRUCacheSetStrictCapacity(pCache, true);
taosThreadMutexInit(&pTsdb->lruMutex, NULL);
_err:
pTsdb->lruCache = pCache;
return code;
}
void tsdbCloseCache(SLRUCache *pCache) {
void tsdbCloseCache(STsdb *pTsdb) {
SLRUCache *pCache = pTsdb->lruCache;
if (pCache) {
taosLRUCacheEraseUnrefEntries(pCache);
taosLRUCacheCleanup(pCache);
taosThreadMutexDestroy(&pTsdb->lruMutex);
}
}
@ -261,14 +266,14 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
}
for (++iCol; iCol < nCol; ++iCol) {
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
if (keyTs >= tTsVal->ts) {
SColVal *tColVal = &tTsVal->colVal;
SLastCol *tTsVal1 = (SLastCol *)taosArrayGet(pLast, iCol);
if (keyTs >= tTsVal1->ts) {
SColVal *tColVal = &tTsVal1->colVal;
SColVal colVal = {0};
tTSRowGetVal(row, pTSchema, iCol, &colVal);
if (colVal.isNone || colVal.isNull) {
if (keyTs == tTsVal->ts && !tColVal->isNone && !tColVal->isNull) {
if (keyTs == tTsVal1->ts && !tColVal->isNone && !tColVal->isNull) {
invalidate = true;
break;
@ -279,6 +284,7 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
}
}
_invalidate:
taosMemoryFreeClear(pTSchema);
taosLRUCacheRelease(pCache, h, invalidate);
@ -317,7 +323,7 @@ static int32_t getTableDelDataFromDelIdx(SDelFReader *pDelReader, SDelIdx *pDelI
int32_t code = 0;
if (pDelIdx) {
code = tsdbReadDelData(pDelReader, pDelIdx, aDelData, NULL);
code = tsdbReadDelData(pDelReader, pDelIdx, aDelData);
}
return code;
@ -388,8 +394,7 @@ static int32_t getTableDelIdx(SDelFReader *pDelFReader, tb_uid_t suid, tb_uid_t
SDelIdx idx = {.suid = suid, .uid = uid};
// tMapDataReset(&delIdxMap);
// code = tsdbReadDelIdx(pDelFReader, &delIdxMap, NULL);
code = tsdbReadDelIdx(pDelFReader, pDelIdxArray, NULL);
code = tsdbReadDelIdx(pDelFReader, pDelIdxArray);
if (code) goto _err;
// code = tMapDataSearch(&delIdxMap, &idx, tGetDelIdx, tCmprDelIdx, pDelIdx);
@ -405,6 +410,178 @@ _err:
return code;
}
typedef enum {
SFSLASTNEXTROW_FS,
SFSLASTNEXTROW_FILESET,
SFSLASTNEXTROW_BLOCKDATA,
SFSLASTNEXTROW_BLOCKROW
} SFSLASTNEXTROWSTATES;
typedef struct {
SFSLASTNEXTROWSTATES state; // [input]
STsdb *pTsdb; // [input]
SBlockIdx *pBlockIdxExp; // [input]
STSchema *pTSchema; // [input]
int32_t nFileSet;
int32_t iFileSet;
SArray *aDFileSet;
SDataFReader *pDataFReader;
SArray *aBlockL;
SBlockL *pBlockL;
SBlockData *pBlockDataL;
SBlockData blockDataL;
int32_t nRow;
int32_t iRow;
TSDBROW row;
/*
SArray *aBlockIdx;
SBlockIdx *pBlockIdx;
SMapData blockMap;
int32_t nBlock;
int32_t iBlock;
SBlock block;
*/
} SFSLastNextRowIter;
static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
SFSLastNextRowIter *state = (SFSLastNextRowIter *)iter;
int32_t code = 0;
switch (state->state) {
case SFSLASTNEXTROW_FS:
// state->aDFileSet = state->pTsdb->pFS->cState->aDFileSet;
state->nFileSet = taosArrayGetSize(state->aDFileSet);
state->iFileSet = state->nFileSet;
state->pBlockDataL = NULL;
case SFSLASTNEXTROW_FILESET: {
SDFileSet *pFileSet = NULL;
_next_fileset:
if (--state->iFileSet >= 0) {
pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet);
} else {
if (state->pBlockDataL) {
tBlockDataDestroy(state->pBlockDataL, 1);
state->pBlockDataL = NULL;
}
*ppRow = NULL;
return code;
}
code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet);
if (code) goto _err;
if (!state->aBlockL) {
state->aBlockL = taosArrayInit(0, sizeof(SBlockIdx));
} else {
taosArrayClear(state->aBlockL);
}
code = tsdbReadBlockL(state->pDataFReader, state->aBlockL);
if (code) goto _err;
// SBlockL *pBlockL = (SBlockL *)taosArrayGet(state->aBlockL, state->iBlockL);
state->pBlockL = taosArraySearch(state->aBlockL, state->pBlockIdxExp, tCmprBlockL, TD_EQ);
if (!state->pBlockL) {
goto _next_fileset;
}
int64_t suid = state->pBlockL->suid;
int64_t uid = state->pBlockL->maxUid;
if (!state->pBlockDataL) {
state->pBlockDataL = &state->blockDataL;
}
code = tBlockDataInit(state->pBlockDataL, suid, suid ? 0 : uid, state->pTSchema);
if (code) goto _err;
}
case SFSLASTNEXTROW_BLOCKDATA:
code = tsdbReadLastBlock(state->pDataFReader, state->pBlockL, state->pBlockDataL);
if (code) goto _err;
state->nRow = state->blockDataL.nRow;
state->iRow = state->nRow - 1;
if (!state->pBlockDataL->uid) {
while (state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) {
--state->iRow;
}
}
state->state = SFSLASTNEXTROW_BLOCKROW;
case SFSLASTNEXTROW_BLOCKROW:
if (state->pBlockDataL->uid) {
if (state->iRow >= 0) {
state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow);
*ppRow = &state->row;
if (--state->iRow < 0) {
state->state = SFSLASTNEXTROW_FILESET;
}
}
} else {
if (state->iRow >= 0 && state->pBlockIdxExp->uid == state->pBlockDataL->aUid[state->iRow]) {
state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow);
*ppRow = &state->row;
if (--state->iRow < 0 || state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) {
state->state = SFSLASTNEXTROW_FILESET;
}
}
}
return code;
default:
ASSERT(0);
break;
}
_err:
if (state->pDataFReader) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
if (state->aBlockL) {
taosArrayDestroy(state->aBlockL);
state->aBlockL = NULL;
}
if (state->pBlockDataL) {
tBlockDataDestroy(state->pBlockDataL, 1);
state->pBlockDataL = NULL;
}
*ppRow = NULL;
return code;
}
int32_t clearNextRowFromFSLast(void *iter) {
SFSLastNextRowIter *state = (SFSLastNextRowIter *)iter;
int32_t code = 0;
if (!state) {
return code;
}
if (state->pDataFReader) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
if (state->aBlockL) {
taosArrayDestroy(state->aBlockL);
state->aBlockL = NULL;
}
if (state->pBlockDataL) {
tBlockDataDestroy(state->pBlockDataL, 1);
state->pBlockDataL = NULL;
}
return code;
}
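
getNextRowFromFSLast above is written as a resumable state machine: the switch cases intentionally fall through so a single call can run setup, block loading and row extraction in one pass, while later calls resume at SFSLASTNEXTROW_BLOCKROW. A minimal standalone illustration of the same fall-through iterator shape (states and data here are invented):

```c
/* Minimal resumable iterator using the same fall-through switch idea as
 * getNextRowFromFSLast; the states and data are invented for illustration. */
#include <stdio.h>

typedef enum { ST_OPEN, ST_BLOCK, ST_ROW, ST_DONE } iter_state_t;

typedef struct {
  iter_state_t state;
  int          rows[3];
  int          iRow;
} row_iter_t;

static int next_row(row_iter_t *it, int **ppRow) {
  switch (it->state) {
    case ST_OPEN:                     /* one-time setup, then fall through */
      it->rows[0] = 10; it->rows[1] = 20; it->rows[2] = 30;
      /* fall through */
    case ST_BLOCK:
      it->iRow = 2;                   /* newest row last, iterate backwards */
      it->state = ST_ROW;
      /* fall through */
    case ST_ROW:
      if (it->iRow >= 0) {
        *ppRow = &it->rows[it->iRow--];
        if (it->iRow < 0) it->state = ST_DONE;
        return 0;
      }
      /* fall through */
    case ST_DONE:
      *ppRow = NULL;
      return 0;
  }
  return 0;
}

int main(void) {
  row_iter_t it = {ST_OPEN, {0}, 0};
  int *p = NULL;
  do { next_row(&it, &p); if (p) printf("row %d\n", *p); } while (p);
  return 0;
}
```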
typedef enum SFSNEXTROWSTATES {
SFSNEXTROW_FS,
SFSNEXTROW_FILESET,
@ -451,9 +628,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
if (--state->iFileSet >= 0) {
pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet);
} else {
// tBlockDataClear(&state->blockData, 1);
// tBlockDataDestroy(&state->blockData, 1);
if (state->pBlockData) {
tBlockDataClear(state->pBlockData, 1);
tBlockDataDestroy(state->pBlockData, 1);
state->pBlockData = NULL;
}
@ -465,13 +642,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
if (code) goto _err;
// tMapDataReset(&state->blockIdxMap);
// code = tsdbReadBlockIdx(state->pDataFReader, &state->blockIdxMap, NULL);
if (!state->aBlockIdx) {
state->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
} else {
taosArrayClear(state->aBlockIdx);
}
code = tsdbReadBlockIdx(state->pDataFReader, state->aBlockIdx, NULL);
code = tsdbReadBlockIdx(state->pDataFReader, state->aBlockIdx);
if (code) goto _err;
/* if (state->pBlockIdx) { */
@ -487,8 +663,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
tMapDataReset(&state->blockMap);
code = tsdbReadBlock(state->pDataFReader, state->pBlockIdx, &state->blockMap, NULL);
/* code = tsdbReadBlock(state->pDataFReader, &state->blockIdx, &state->blockMap, NULL); */
code = tsdbReadBlock(state->pDataFReader, state->pBlockIdx, &state->blockMap);
if (code) goto _err;
state->nBlock = state->blockMap.nItem;
@ -497,7 +672,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
if (!state->pBlockData) {
state->pBlockData = &state->blockData;
tBlockDataInit(&state->blockData);
tBlockDataCreate(&state->blockData);
}
}
case SFSNEXTROW_BLOCKDATA:
@ -510,7 +685,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetBlock);
/* code = tsdbReadBlockData(state->pDataFReader, &state->blockIdx, &block, &state->blockData, NULL, NULL); */
code = tsdbReadBlockData(state->pDataFReader, state->pBlockIdx, &block, state->pBlockData, NULL, NULL);
code = tsdbReadDataBlock(state->pDataFReader, &block, state->pBlockData);
if (code) goto _err;
state->nRow = state->blockData.nRow;
@ -555,8 +730,8 @@ _err:
state->aBlockIdx = NULL;
}
if (state->pBlockData) {
// tBlockDataClear(&state->blockData, 1);
tBlockDataClear(state->pBlockData, 1);
// tBlockDataDestroy(&state->blockData, 1);
tBlockDataDestroy(state->pBlockData, 1);
state->pBlockData = NULL;
}
@ -582,8 +757,8 @@ int32_t clearNextRowFromFS(void *iter) {
state->aBlockIdx = NULL;
}
if (state->pBlockData) {
// tBlockDataClear(&state->blockData, 1);
tBlockDataClear(state->pBlockData, 1);
// tBlockDataDestroy(&state->blockData, 1);
tBlockDataDestroy(state->pBlockData, 1);
state->pBlockData = NULL;
}
@ -725,18 +900,19 @@ typedef struct {
SArray *pSkyline;
int64_t iSkyline;
SBlockIdx idx;
SMemNextRowIter memState;
SMemNextRowIter imemState;
SFSNextRowIter fsState;
TSDBROW memRow, imemRow, fsRow;
SBlockIdx idx;
SMemNextRowIter memState;
SMemNextRowIter imemState;
SFSLastNextRowIter fsLastState;
SFSNextRowIter fsState;
TSDBROW memRow, imemRow, fsLastRow, fsRow;
TsdbNextRowState input[3];
TsdbNextRowState input[4];
STsdbReadSnap *pReadSnap;
STsdb *pTsdb;
} CacheNextRowIter;
static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb) {
static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema) {
int code = 0;
tb_uid_t suid = getTableSuidByUid(uid, pTsdb);
@ -745,12 +921,12 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
STbData *pMem = NULL;
if (pIter->pReadSnap->pMem) {
tsdbGetTbDataFromMemTable(pIter->pReadSnap->pMem, suid, uid, &pMem);
pMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pMem, suid, uid);
}
STbData *pIMem = NULL;
if (pIter->pReadSnap->pIMem) {
tsdbGetTbDataFromMemTable(pIter->pReadSnap->pIMem, suid, uid, &pIMem);
pIMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pIMem, suid, uid);
}
pIter->pTsdb = pTsdb;
@ -763,7 +939,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
if (pDelFile) {
SDelFReader *pDelFReader;
code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb, NULL);
code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb);
if (code) goto _err;
code = getTableDelIdx(pDelFReader, suid, uid, &delIdx);
@ -782,6 +958,12 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->idx = (SBlockIdx){.suid = suid, .uid = uid};
pIter->fsLastState.state = (SFSLASTNEXTROWSTATES) SFSNEXTROW_FS;
pIter->fsLastState.pTsdb = pTsdb;
pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
pIter->fsLastState.pBlockIdxExp = &pIter->idx;
pIter->fsLastState.pTSchema = pTSchema;
pIter->fsState.state = SFSNEXTROW_FS;
pIter->fsState.pTsdb = pTsdb;
pIter->fsState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
@ -789,7 +971,9 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->input[0] = (TsdbNextRowState){&pIter->memRow, true, false, &pIter->memState, getNextRowFromMem, NULL};
pIter->input[1] = (TsdbNextRowState){&pIter->imemRow, true, false, &pIter->imemState, getNextRowFromMem, NULL};
pIter->input[2] =
pIter->input[2] = (TsdbNextRowState){&pIter->fsLastRow, false, true, &pIter->fsLastState, getNextRowFromFSLast,
clearNextRowFromFSLast};
pIter->input[3] =
(TsdbNextRowState){&pIter->fsRow, false, true, &pIter->fsState, getNextRowFromFS, clearNextRowFromFS};
if (pMem) {
@ -814,7 +998,7 @@ _err:
static int32_t nextRowIterClose(CacheNextRowIter *pIter) {
int code = 0;
for (int i = 0; i < 3; ++i) {
for (int i = 0; i < 4; ++i) {
if (pIter->input[i].nextRowClearFn) {
pIter->input[i].nextRowClearFn(pIter->input[i].iter);
}
@ -826,7 +1010,6 @@ static int32_t nextRowIterClose(CacheNextRowIter *pIter) {
tsdbUntakeReadSnap(pIter->pTsdb, pIter->pReadSnap);
return code;
_err:
return code;
}
@ -835,7 +1018,7 @@ _err:
static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) {
int code = 0;
for (int i = 0; i < 3; ++i) {
for (int i = 0; i < 4; ++i) {
if (pIter->input[i].next && !pIter->input[i].stop) {
code = pIter->input[i].nextRowFn(pIter->input[i].iter, &pIter->input[i].pRow);
if (code) goto _err;
@ -847,18 +1030,18 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) {
}
}
if (pIter->input[0].stop && pIter->input[1].stop && pIter->input[2].stop) {
if (pIter->input[0].stop && pIter->input[1].stop && pIter->input[2].stop && pIter->input[3].stop) {
*ppRow = NULL;
return code;
}
// select maxpoint(s) from mem, imem, fs
TSDBROW *max[3] = {0};
int iMax[3] = {-1, -1, -1};
// select maxpoint(s) from mem, imem, fs and last
TSDBROW *max[4] = {0};
int iMax[4] = {-1, -1, -1, -1};
int nMax = 0;
TSKEY maxKey = TSKEY_MIN;
for (int i = 0; i < 3; ++i) {
for (int i = 0; i < 4; ++i) {
if (!pIter->input[i].stop && pIter->input[i].pRow != NULL) {
TSDBKEY key = TSDBROW_KEY(pIter->input[i].pRow);
@ -876,13 +1059,13 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) {
}
// delete detection
TSDBROW *merge[3] = {0};
int iMerge[3] = {-1, -1, -1};
TSDBROW *merge[4] = {0};
int iMerge[4] = {-1, -1, -1, -1};
int nMerge = 0;
for (int i = 0; i < nMax; ++i) {
TSDBKEY maxKey = TSDBROW_KEY(max[i]);
TSDBKEY maxKey1 = TSDBROW_KEY(max[i]);
bool deleted = tsdbKeyDeleted(&maxKey, pIter->pSkyline, &pIter->iSkyline);
bool deleted = tsdbKeyDeleted(&maxKey1, pIter->pSkyline, &pIter->iSkyline);
if (!deleted) {
iMerge[nMerge] = iMax[i];
merge[nMerge++] = max[i];
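
The widened merge above now scans four inputs (mem, imem, last file, data file), keeps every source whose head row carries the newest key, and then runs delete detection on those candidates. A small standalone sketch of the "collect all sources tied at the maximum key" step, with plain timestamp arrays standing in for the row sources:

```c
/* Sketch of picking the newest key across several ordered row sources, as the
 * 4-way merge above does for mem, imem, last file and data file. The sources
 * here are plain arrays of timestamps, purely for illustration. */
#include <stdio.h>
#include <stdint.h>

#define N_SRC 4
#define TS_MIN INT64_MIN

int main(void) {
  /* head (newest not-yet-consumed) timestamp of each source */
  int64_t head[N_SRC]  = {100, 105, 105, 90};
  int     alive[N_SRC] = {1, 1, 1, 0};   /* 0 == source exhausted */

  int64_t maxKey = TS_MIN;
  int     iMax[N_SRC], nMax = 0;

  for (int i = 0; i < N_SRC; i++) {
    if (!alive[i]) continue;
    if (head[i] > maxKey) {              /* strictly newer: restart the winner list */
      maxKey = head[i];
      nMax = 0;
      iMax[nMax++] = i;
    } else if (head[i] == maxKey) {      /* tie: these sources advance together */
      iMax[nMax++] = i;
    }
  }

  printf("newest key %lld from source %d (%d tied)\n",
         (long long)maxKey, iMax[0], nMax);
  return 0;
}
```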
@ -918,7 +1101,7 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppRo
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
nextRowIterOpen(&iter, uid, pTsdb);
nextRowIterOpen(&iter, uid, pTsdb, pTSchema);
do {
TSDBROW *pRow = NULL;
@ -1015,7 +1198,7 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
nextRowIterOpen(&iter, uid, pTsdb);
nextRowIterOpen(&iter, uid, pTsdb, pTSchema);
do {
TSDBROW *pRow = NULL;
@ -1100,29 +1283,40 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUH
// getTableCacheKeyS(uid, "lr", key, &keyLen);
getTableCacheKey(uid, 0, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (h) {
} else {
STSRow *pRow = NULL;
bool dup = false; // which is always false for now
code = mergeLastRow(uid, pTsdb, &dup, &pRow);
// if table's empty or error, return code of -1
if (code < 0 || pRow == NULL) {
if (!dup && pRow) {
taosMemoryFree(pRow);
}
*handle = NULL;
return 0;
}
_taos_lru_deleter_t deleter = deleteTableCacheLastrow;
LRUStatus status =
taosLRUCacheInsert(pCache, key, keyLen, pRow, TD_ROW_LEN(pRow), deleter, NULL, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
if (!h) {
taosThreadMutexLock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
if (!h) {
STSRow *pRow = NULL;
bool dup = false; // which is always false for now
code = mergeLastRow(uid, pTsdb, &dup, &pRow);
// if table's empty or error, return code of -1
if (code < 0 || pRow == NULL) {
if (!dup && pRow) {
taosMemoryFree(pRow);
}
taosThreadMutexUnlock(&pTsdb->lruMutex);
*handle = NULL;
return 0;
}
_taos_lru_deleter_t deleter = deleteTableCacheLastrow;
LRUStatus status =
taosLRUCacheInsert(pCache, key, keyLen, pRow, TD_ROW_LEN(pRow), deleter, NULL, TAOS_LRU_PRIORITY_LOW);
if (status != TAOS_LRU_STATUS_OK) {
code = -1;
}
taosThreadMutexUnlock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
} else {
taosThreadMutexUnlock(&pTsdb->lruMutex);
}
}
*handle = h;
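
The rewritten lookup above rechecks the LRU cache while holding lruMutex, so only one thread pays for mergeLastRow and the others pick up the freshly inserted entry. A toy sketch of that check/lock/re-check/fill pattern follows; the one-slot cache and names are invented, and memory-ordering details are elided since the real cache lookup is itself synchronized inside the LRU.

```c
/* Sketch of the double-checked "lookup, lock, lookup again, fill" pattern used
 * above, with a one-slot toy cache instead of the real LRU. Names are invented;
 * a production version would use an atomic pointer or an internally locked
 * lookup for the first check. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static int *cached_value = NULL;          /* NULL means "not cached yet" */

static int *expensive_merge(void) {       /* stands in for mergeLastRow() */
  int *v = malloc(sizeof(int));
  if (v) *v = 42;
  return v;
}

static int *get_cached(void) {
  int *h = cached_value;                  /* first, cheap lookup */
  if (h == NULL) {
    pthread_mutex_lock(&cache_mutex);
    h = cached_value;                     /* re-check under the lock */
    if (h == NULL) {
      h = expensive_merge();              /* only one thread pays this cost */
      cached_value = h;
    }
    pthread_mutex_unlock(&cache_mutex);
  }
  return h;
}

int main(void) {
  printf("%d\n", *get_cached());
  printf("%d\n", *get_cached());          /* second call hits the cache */
  free(cached_value);
  return 0;
}
```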

File diff suppressed because it is too large.

View File

@ -576,10 +576,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
fSet.pHeadF->nRef = 0;
fSet.pHeadF->commitID = pSet->pHeadF->commitID;
fSet.pHeadF->size = pSet->pHeadF->size;
fSet.pHeadF->offset = pSet->pHeadF->offset;
*fSet.pHeadF = *pSet->pHeadF;
// data
fSet.pDataF = (SDataFile *)taosMemoryMalloc(sizeof(SDataFile));
@ -587,9 +584,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
fSet.pDataF->nRef = 0;
fSet.pDataF->commitID = pSet->pDataF->commitID;
fSet.pDataF->size = pSet->pDataF->size;
*fSet.pDataF = *pSet->pDataF;
// data
fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
@ -597,9 +592,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
fSet.pLastF->nRef = 0;
fSet.pLastF->commitID = pSet->pLastF->commitID;
fSet.pLastF->size = pSet->pLastF->size;
*fSet.pLastF = *pSet->pLastF;
// last
fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
@ -607,9 +600,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
fSet.pSmaF->nRef = 0;
fSet.pSmaF->commitID = pSet->pSmaF->commitID;
fSet.pSmaF->size = pSet->pSmaF->size;
*fSet.pSmaF = *pSet->pSmaF;
if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;

View File

@ -58,6 +58,7 @@ int32_t tPutLastFile(uint8_t *p, SLastFile *pLastFile) {
n += tPutI64v(p ? p + n : p, pLastFile->commitID);
n += tPutI64v(p ? p + n : p, pLastFile->size);
n += tPutI64v(p ? p + n : p, pLastFile->offset);
return n;
}
@ -67,6 +68,7 @@ static int32_t tGetLastFile(uint8_t *p, SLastFile *pLastFile) {
n += tGetI64v(p + n, &pLastFile->commitID);
n += tGetI64v(p + n, &pLastFile->size);
n += tGetI64v(p + n, &pLastFile->offset);
return n;
}
@ -186,11 +188,16 @@ int32_t tPutDFileSet(uint8_t *p, SDFileSet *pSet) {
n += tPutI32v(p ? p + n : p, pSet->diskId.level);
n += tPutI32v(p ? p + n : p, pSet->diskId.id);
n += tPutI32v(p ? p + n : p, pSet->fid);
// data
n += tPutHeadFile(p ? p + n : p, pSet->pHeadF);
n += tPutDataFile(p ? p + n : p, pSet->pDataF);
n += tPutLastFile(p ? p + n : p, pSet->pLastF);
n += tPutSmaFile(p ? p + n : p, pSet->pSmaF);
// last
n += tPutU8(p ? p + n : p, 1); // for future compatibility
n += tPutLastFile(p ? p + n : p, pSet->pLastF);
return n;
}
@ -200,11 +207,17 @@ int32_t tGetDFileSet(uint8_t *p, SDFileSet *pSet) {
n += tGetI32v(p + n, &pSet->diskId.level);
n += tGetI32v(p + n, &pSet->diskId.id);
n += tGetI32v(p + n, &pSet->fid);
// data
n += tGetHeadFile(p + n, pSet->pHeadF);
n += tGetDataFile(p + n, pSet->pDataF);
n += tGetLastFile(p + n, pSet->pLastF);
n += tGetSmaFile(p + n, pSet->pSmaF);
// last
uint8_t nLast;
n += tGetU8(p + n, &nLast);
n += tGetLastFile(p + n, pSet->pLastF);
return n;
}
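
The extra byte written before the last-file record ("for future compatibility") acts as a small count/version field, so the on-disk SDFileSet layout can later carry additional last-file entries without breaking existing readers. A sketch of the same idea with an invented record layout and fixed-width fields instead of the varint encoders:

```c
/* Sketch of encoding a count byte ahead of a repeated record so the format can
 * grow later, mirroring the "for future compatibility" byte added above.
 * put_u8/put_i64 and the record layout are invented for this example. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int put_u8(uint8_t *p, uint8_t v)   { if (p) *p = v; return 1; }
static int put_i64(uint8_t *p, int64_t v)  { if (p) memcpy(p, &v, 8); return 8; }
static int get_u8(const uint8_t *p, uint8_t *v)  { *v = *p; return 1; }
static int get_i64(const uint8_t *p, int64_t *v) { memcpy(v, p, 8); return 8; }

/* encode: [count:u8][size:i64] ; pass p == NULL to just measure the size */
static int put_record(uint8_t *p, int64_t size) {
  int n = 0;
  n += put_u8(p ? p + n : NULL, 1);        /* one entry today, room for more later */
  n += put_i64(p ? p + n : NULL, size);
  return n;
}

static int get_record(const uint8_t *p, int64_t *size) {
  int n = 0;
  uint8_t cnt;
  n += get_u8(p + n, &cnt);                /* readers learn how many entries follow */
  n += get_i64(p + n, size);
  return n;
}

int main(void) {
  uint8_t buf[16];
  int64_t size = 0;
  int len = put_record(buf, 4096);
  get_record(buf, &size);
  printf("len=%d size=%lld\n", len, (long long)size);
  return 0;
}
```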

View File

@ -15,6 +15,7 @@
#include "tsdb.h"
#define MEM_MIN_HASH 1024
#define SL_MAX_LEVEL 5
#define SL_NODE_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l)*2)
@ -45,12 +46,12 @@ int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable) {
pMemTable->nRef = 1;
pMemTable->minKey = TSKEY_MAX;
pMemTable->maxKey = TSKEY_MIN;
pMemTable->minVersion = VERSION_MAX;
pMemTable->maxVersion = VERSION_MIN;
pMemTable->nRow = 0;
pMemTable->nDel = 0;
pMemTable->aTbData = taosArrayInit(128, sizeof(STbData *));
if (pMemTable->aTbData == NULL) {
pMemTable->nTbData = 0;
pMemTable->nBucket = MEM_MIN_HASH;
pMemTable->aBucket = (STbData **)taosMemoryCalloc(pMemTable->nBucket, sizeof(STbData *));
if (pMemTable->aBucket == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pMemTable);
goto _err;
@ -68,37 +69,30 @@ _err:
void tsdbMemTableDestroy(SMemTable *pMemTable) {
if (pMemTable) {
vnodeBufPoolUnRef(pMemTable->pPool);
taosArrayDestroy(pMemTable->aTbData);
taosMemoryFree(pMemTable->aBucket);
taosMemoryFree(pMemTable);
}
}
static int32_t tbDataPCmprFn(const void *p1, const void *p2) {
STbData *pTbData1 = *(STbData **)p1;
STbData *pTbData2 = *(STbData **)p2;
static FORCE_INLINE STbData *tsdbGetTbDataFromMemTableImpl(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid) {
STbData *pTbData = pMemTable->aBucket[TABS(uid) % pMemTable->nBucket];
if (pTbData1->suid < pTbData2->suid) {
return -1;
} else if (pTbData1->suid > pTbData2->suid) {
return 1;
while (pTbData) {
if (pTbData->uid == uid) break;
pTbData = pTbData->next;
}
if (pTbData1->uid < pTbData2->uid) {
return -1;
} else if (pTbData1->uid > pTbData2->uid) {
return 1;
}
return 0;
return pTbData;
}
void tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData) {
STbData *pTbData = &(STbData){.suid = suid, .uid = uid};
STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid) {
STbData *pTbData;
taosRLockLatch(&pMemTable->latch);
void *p = taosArraySearch(pMemTable->aTbData, &pTbData, tbDataPCmprFn, TD_EQ);
pTbData = tsdbGetTbDataFromMemTableImpl(pMemTable, suid, uid);
taosRUnLockLatch(&pMemTable->latch);
*ppTbData = p ? *(STbData **)p : NULL;
return pTbData;
}
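
The memtable now resolves an STbData by hashing the table uid into a bucket and walking a short chain, rather than binary-searching a sorted array under the latch. A standalone sketch of the same lookup, assuming TABS behaves like an absolute-value macro:

```c
/* Standalone sketch of the bucketed lookup used by
 * tsdbGetTbDataFromMemTableImpl: hash the uid, walk the chain. The node type
 * and ABS64 macro here are illustrative, not the TDengine definitions. */
#include <stdint.h>
#include <stdio.h>

#define N_BUCKET 1024
#define ABS64(x) ((x) < 0 ? -(x) : (x))

typedef struct tb_node {
  int64_t         uid;
  struct tb_node *next;     /* chain of tables that share a bucket */
} tb_node_t;

static tb_node_t *bucket[N_BUCKET];

static tb_node_t *lookup(int64_t uid) {
  tb_node_t *p = bucket[ABS64(uid) % N_BUCKET];
  while (p && p->uid != uid) p = p->next;   /* expected chain length ~1 */
  return p;
}

int main(void) {
  static tb_node_t t = {.uid = 5001, .next = NULL};
  bucket[ABS64(t.uid) % N_BUCKET] = &t;
  printf("found: %d\n", lookup(5001) != NULL);
  return 0;
}
```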
int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgIter, SSubmitBlk *pBlock,
@ -108,29 +102,21 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgI
STbData *pTbData = NULL;
tb_uid_t suid = pMsgIter->suid;
tb_uid_t uid = pMsgIter->uid;
int32_t sverNew;
// check if table exists (todo: refact)
SMetaReader mr = {0};
// SMetaEntry me = {0};
metaReaderInit(&mr, pTsdb->pVnode->pMeta, 0);
if (metaGetTableEntryByUid(&mr, pMsgIter->uid) < 0) {
metaReaderClear(&mr);
code = TSDB_CODE_PAR_TABLE_NOT_EXIST;
SMetaInfo info;
code = metaGetInfo(pTsdb->pVnode->pMeta, uid, &info);
if (code) {
code = TSDB_CODE_TDB_TABLE_NOT_EXIST;
goto _err;
}
if (pRsp->tblFName) strcat(pRsp->tblFName, mr.me.name);
if (mr.me.type == TSDB_NORMAL_TABLE) {
sverNew = mr.me.ntbEntry.schemaRow.version;
} else {
tDecoderClear(&mr.coder);
metaGetTableEntryByUid(&mr, mr.me.ctbEntry.suid);
sverNew = mr.me.stbEntry.schemaRow.version;
if (info.suid != suid) {
code = TSDB_CODE_INVALID_MSG;
goto _err;
}
metaReaderClear(&mr);
pRsp->sver = sverNew;
if (info.suid) {
metaGetInfo(pTsdb->pVnode->pMeta, info.suid, &info);
}
pRsp->sver = info.skmVer;
// create/get STbData to op
code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData);
@ -157,7 +143,17 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
SVBufPool *pPool = pTsdb->pVnode->inUse;
TSDBKEY lastKey = {.version = version, .ts = eKey};
// check if table exists (todo)
// check if table exists
SMetaInfo info;
code = metaGetInfo(pTsdb->pVnode->pMeta, uid, &info);
if (code) {
code = TSDB_CODE_TDB_TABLE_NOT_EXIST;
goto _err;
}
if (info.suid != suid) {
code = TSDB_CODE_INVALID_MSG;
goto _err;
}
code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData);
if (code) {
@ -182,10 +178,6 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
pTbData->pTail = pDelData;
}
// update the state of pMemTable and other (todo)
pMemTable->minVersion = TMIN(pMemTable->minVersion, version);
pMemTable->maxVersion = TMAX(pMemTable->maxVersion, version);
pMemTable->nDel++;
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) {
@ -318,18 +310,44 @@ _exit:
return pIter->pRow;
}
static int32_t tsdbMemTableRehash(SMemTable *pMemTable) {
int32_t code = 0;
int32_t nBucket = pMemTable->nBucket * 2;
STbData **aBucket = (STbData **)taosMemoryCalloc(nBucket, sizeof(STbData *));
if (aBucket == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
for (int32_t iBucket = 0; iBucket < pMemTable->nBucket; iBucket++) {
STbData *pTbData = pMemTable->aBucket[iBucket];
while (pTbData) {
STbData *pNext = pTbData->next;
int32_t idx = TABS(pTbData->uid) % nBucket;
pTbData->next = aBucket[idx];
aBucket[idx] = pTbData;
pTbData = pNext;
}
}
taosMemoryFree(pMemTable->aBucket);
pMemTable->nBucket = nBucket;
pMemTable->aBucket = aBucket;
_exit:
return code;
}
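
tsdbGetOrCreateTbData grows the table once nTbData reaches nBucket, and tsdbMemTableRehash doubles the bucket count and re-links every chained entry, keeping the load factor at or below one. A simplified sketch of that doubling policy (types and the hash are stand-ins):

```c
/* Sketch of the doubling rehash used above: once the number of tables reaches
 * the bucket count, allocate twice as many buckets and re-link every node. */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

typedef struct node { int64_t uid; struct node *next; } node_t;

typedef struct {
  node_t **bucket;
  int32_t  nBucket;
  int32_t  nEntry;
} table_t;

static int rehash(table_t *t) {
  int32_t  nBucket = t->nBucket * 2;
  node_t **bucket  = calloc(nBucket, sizeof(node_t *));
  if (bucket == NULL) return -1;                 /* out of memory */

  for (int32_t i = 0; i < t->nBucket; i++) {     /* re-link every chained node */
    node_t *p = t->bucket[i];
    while (p) {
      node_t *next = p->next;
      int32_t idx = (int32_t)(llabs(p->uid) % nBucket);
      p->next = bucket[idx];
      bucket[idx] = p;
      p = next;
    }
  }
  free(t->bucket);
  t->bucket  = bucket;
  t->nBucket = nBucket;
  return 0;
}

static void maybe_grow(table_t *t) {
  if (t->nEntry >= t->nBucket) rehash(t);        /* load factor kept at <= 1 */
}

int main(void) {
  table_t t = {.bucket = calloc(4, sizeof(node_t *)), .nBucket = 4, .nEntry = 4};
  maybe_grow(&t);
  printf("buckets after growth: %d\n", t.nBucket);
  free(t.bucket);
  return 0;
}
```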
static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData) {
int32_t code = 0;
int32_t idx = 0;
STbData *pTbData = NULL;
STbData *pTbDataT = &(STbData){.suid = suid, .uid = uid};
int32_t code = 0;
// get
idx = taosArraySearchIdx(pMemTable->aTbData, &pTbDataT, tbDataPCmprFn, TD_GE);
if (idx >= 0) {
pTbData = (STbData *)taosArrayGetP(pMemTable->aTbData, idx);
if (tbDataPCmprFn(&pTbDataT, &pTbData) == 0) goto _exit;
}
STbData *pTbData = tsdbGetTbDataFromMemTableImpl(pMemTable, suid, uid);
if (pTbData) goto _exit;
// create
SVBufPool *pPool = pMemTable->pTsdb->pVnode->inUse;
@ -344,9 +362,6 @@ static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid
pTbData->uid = uid;
pTbData->minKey = TSKEY_MAX;
pTbData->maxKey = TSKEY_MIN;
pTbData->minVersion = VERSION_MAX;
pTbData->maxVersion = VERSION_MIN;
pTbData->maxSkmVer = -1;
pTbData->pHead = NULL;
pTbData->pTail = NULL;
pTbData->sl.seed = taosRand();
@ -365,21 +380,23 @@ static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid
SL_NODE_FORWARD(pTbData->sl.pTail, iLevel) = NULL;
}
void *p;
if (idx < 0) {
idx = taosArrayGetSize(pMemTable->aTbData);
taosWLockLatch(&pMemTable->latch);
if (pMemTable->nTbData >= pMemTable->nBucket) {
code = tsdbMemTableRehash(pMemTable);
if (code) {
taosWUnLockLatch(&pMemTable->latch);
goto _err;
}
}
taosWLockLatch(&pMemTable->latch);
p = taosArrayInsert(pMemTable->aTbData, idx, &pTbData);
int32_t idx = TABS(uid) % pMemTable->nBucket;
pTbData->next = pMemTable->aBucket[idx];
pMemTable->aBucket[idx] = pTbData;
pMemTable->nTbData++;
taosWUnLockLatch(&pMemTable->latch);
tsdbDebug("vgId:%d, add table data %p at idx:%d", TD_VID(pMemTable->pTsdb->pVnode), pTbData, idx);
if (p == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
_exit:
*ppTbData = pTbData;
return code;
@ -589,15 +606,9 @@ static int32_t tsdbInsertTableDataImpl(SMemTable *pMemTable, STbData *pTbData, i
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, pLastRow, pMemTable->pTsdb);
}
pTbData->minVersion = TMIN(pTbData->minVersion, version);
pTbData->maxVersion = TMAX(pTbData->maxVersion, version);
pTbData->maxSkmVer = TMAX(pTbData->maxSkmVer, pMsgIter->sversion);
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);
pMemTable->maxKey = TMAX(pMemTable->maxKey, pTbData->maxKey);
pMemTable->minVersion = TMIN(pMemTable->minVersion, pTbData->minVersion);
pMemTable->maxVersion = TMAX(pMemTable->maxVersion, pTbData->maxVersion);
pMemTable->nRow += nRow;
pRsp->numOfRows = nRow;
@ -622,3 +633,41 @@ void tsdbUnrefMemTable(SMemTable *pMemTable) {
tsdbMemTableDestroy(pMemTable);
}
}
static FORCE_INLINE int32_t tbDataPCmprFn(const void *p1, const void *p2) {
STbData *pTbData1 = *(STbData **)p1;
STbData *pTbData2 = *(STbData **)p2;
if (pTbData1->suid < pTbData2->suid) {
return -1;
} else if (pTbData1->suid > pTbData2->suid) {
return 1;
}
if (pTbData1->uid < pTbData2->uid) {
return -1;
} else if (pTbData1->uid > pTbData2->uid) {
return 1;
}
return 0;
}
SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable) {
SArray *aTbDataP = taosArrayInit(pMemTable->nTbData, sizeof(STbData *));
if (aTbDataP == NULL) goto _exit;
for (int32_t iBucket = 0; iBucket < pMemTable->nBucket; iBucket++) {
STbData *pTbData = pMemTable->aBucket[iBucket];
while (pTbData) {
taosArrayPush(aTbDataP, &pTbData);
pTbData = pTbData->next;
}
}
taosArraySort(aTbDataP, tbDataPCmprFn);
_exit:
return aTbDataP;
}

View File

@ -86,7 +86,7 @@ int tsdbClose(STsdb **pTsdb) {
if (*pTsdb) {
taosThreadRwlockDestroy(&(*pTsdb)->rwLock);
tsdbFSClose(*pTsdb);
tsdbCloseCache((*pTsdb)->lruCache);
tsdbCloseCache(*pTsdb);
taosMemoryFreeClear(*pTsdb);
}
return 0;

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.