diff --git a/Jenkinsfile2 b/Jenkinsfile2
index 754617f99f..12e806c87a 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -43,6 +43,7 @@ def pre_test(){
cd ${WKC}
git reset --hard
git clean -fxd
+ rm -rf examples/rust/
git remote prune origin
git fetch
'''
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 5639d212d7..989b69a89b 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -81,7 +81,7 @@ ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
- SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi")
+ SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /MANIFEST:NO")
# IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
# SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
@@ -92,6 +92,12 @@ IF (TD_WINDOWS)
IF (CMAKE_DEPFILE_FLAGS_CXX)
SET(CMAKE_DEPFILE_FLAGS_CXX "")
ENDIF ()
+ IF (CMAKE_C_FLAGS_DEBUG)
+ SET(CMAKE_C_FLAGS_DEBUG "" CACHE STRING "" FORCE)
+ ENDIF ()
+ IF (CMAKE_CXX_FLAGS_DEBUG)
+ SET(CMAKE_CXX_FLAGS_DEBUG "" CACHE STRING "" FORCE)
+ ENDIF ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMMON_FLAGS}")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
diff --git a/cmake/cmake.install b/cmake/cmake.install
index 4e3d0b166a..6dc6864975 100644
--- a/cmake/cmake.install
+++ b/cmake/cmake.install
@@ -1,38 +1,8 @@
-IF (EXISTS /var/lib/taos/dnode/dnodeCfg.json)
- INSTALL(CODE "MESSAGE(\"The default data directory /var/lib/taos contains old data of tdengine 2.x, please clear it before installing!\")")
-ELSEIF (EXISTS C:/TDengine/data/dnode/dnodeCfg.json)
- INSTALL(CODE "MESSAGE(\"The default data directory C:/TDengine/data contains old data of tdengine 2.x, please clear it before installing!\")")
-ELSEIF (TD_LINUX)
+IF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
ELSEIF (TD_WINDOWS)
- SET(CMAKE_INSTALL_PREFIX C:/TDengine)
-
- # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/go DESTINATION connector)
- # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/nodejs DESTINATION connector)
- # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector)
- # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector)
- # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .)
- INSTALL(CODE "IF (NOT EXISTS ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg)
- execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg)
- ENDIF ()")
- INSTALL(FILES ${TD_SOURCE_DIR}/include/client/taos.h DESTINATION include)
- INSTALL(FILES ${TD_SOURCE_DIR}/include/util/taoserror.h DESTINATION include)
- INSTALL(FILES ${TD_SOURCE_DIR}/include/libs/function/taosudf.h DESTINATION include)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos_static.lib DESTINATION driver)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
- INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
- INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .)
- INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .)
- IF (BUILD_TOOLS)
- INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .)
- ENDIF ()
-
- IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc)
- ENDIF ()
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.bat")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} :needAdmin ${TD_SOURCE_DIR} ${PROJECT_BINARY_DIR} Windows ${TD_VER_NUMBER})")
diff --git a/cmake/rust-bindings_CMakeLists.txt.in b/cmake/rust-bindings_CMakeLists.txt.in
deleted file mode 100644
index d16e86139b..0000000000
--- a/cmake/rust-bindings_CMakeLists.txt.in
+++ /dev/null
@@ -1,12 +0,0 @@
-
-# rust-bindings
-ExternalProject_Add(rust-bindings
- GIT_REPOSITORY https://github.com/songtianyi/tdengine-rust-bindings.git
- GIT_TAG 7ed7a97
- SOURCE_DIR "${TD_SOURCE_DIR}/examples/rust"
- BINARY_DIR "${TD_SOURCE_DIR}/examples/rust"
- CONFIGURE_COMMAND ""
- BUILD_COMMAND ""
- INSTALL_COMMAND ""
- TEST_COMMAND ""
- )
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index ed8216be91..f182beed33 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG 3d21433
+ GIT_TAG abed566
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 3a6eb3c25a..1751549680 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 53a0103
+ GIT_TAG d237772
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index b4e8825431..2dc7622f46 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -105,11 +105,6 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
-# rust-bindings
-if(${RUST_BINDINGS})
- cat("${TD_SUPPORT_DIR}/rust-bindings_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
-endif(${RUST_BINDINGS})
-
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -140,24 +135,6 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
-
-# clear submodule
-execute_process(COMMAND git submodule deinit -f tools/taos-tools
- WORKING_DIRECTORY "${TD_SOURCE_DIR}")
-execute_process(COMMAND git rm --cached tools/taos-tools
- WORKING_DIRECTORY "${TD_SOURCE_DIR}")
-execute_process(COMMAND git submodule deinit -f tools/taosadapter
- WORKING_DIRECTORY "${TD_SOURCE_DIR}")
-execute_process(COMMAND git rm --cached tools/taosadapter
- WORKING_DIRECTORY "${TD_SOURCE_DIR}")
-execute_process(COMMAND git submodule deinit -f tools/taosws-rs
- WORKING_DIRECTORY "${TD_SOURCE_DIR}")
-execute_process(COMMAND git rm --cached tools/taosws-rs
- WORKING_DIRECTORY "${TD_SOURCE_DIR}")
-execute_process(COMMAND git submodule deinit -f examples/rust
- WORKING_DIRECTORY "${TD_SOURCE_DIR}")
-execute_process(COMMAND git rm --cached examples/rust
- WORKING_DIRECTORY "${TD_SOURCE_DIR}")
# ================================================================================================
# Build
@@ -273,7 +250,7 @@ endif(${BUILD_WITH_NURAFT})
# pthread
if(${BUILD_PTHREAD})
- set(CMAKE_BUILD_TYPE release)
+ set(CMAKE_BUILD_TYPE debug)
add_definitions(-DPTW32_STATIC_LIB)
add_subdirectory(pthread EXCLUDE_FROM_ALL)
set_target_properties(libpthreadVC3 PROPERTIES OUTPUT_NAME pthread)
@@ -354,9 +331,11 @@ endif(${BUILD_WITH_TRAFT})
# LIBUV
if(${BUILD_WITH_UV})
- if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Windows")
- MESSAGE("Windows need set no-sign-compare")
- add_compile_options(-Wno-sign-compare)
+ if (TD_WINDOWS)
+ # There is no GetHostNameW function on win7.
+ file(READ "libuv/src/win/util.c" LIBUV_WIN_UTIL_CONTENT)
+ string(REPLACE "if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE" "DWORD nSize = UV_MAXHOSTNAMESIZE;\n if (GetComputerNameW(buf, &nSize" LIBUV_WIN_UTIL_CONTENT "${LIBUV_WIN_UTIL_CONTENT}")
+ file(WRITE "libuv/src/win/util.c" "${LIBUV_WIN_UTIL_CONTENT}")
endif ()
add_subdirectory(libuv EXCLUDE_FROM_ALL)
endif(${BUILD_WITH_UV})
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index f5b7f3e0f6..4c2f9b02b9 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -4,7 +4,8 @@ sidebar_label: Documentation Home
slug: /
---
-TDengine is a [high-performance](https://tdengine.com/fast), [scalable](https://tdengine.com/scalable) time series database with [SQL support](https://tdengine.com/sql-support). This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+
+TDengine is an open source, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index f6766f910f..23a79aa229 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -3,7 +3,7 @@ title: Introduction
toc_max_heading_level: 2
---
-TDengine is a high-performance, scalable time-series database with SQL support. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the complexity and cost of development and operation.
+TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/continuous-query), [data subscription](/develop/subscribe) and other functionalities to reduce the system complexity and cost of development and operation.
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
@@ -31,25 +31,21 @@ For more details on features, please read through the entire documentation.
## Competitive Advantages
-Time-series data is structured, not transactional, and is rarely deleted or updated. TDengine makes full use of [these characteristics of time series data](https://tdengine.com/2019/07/09/86.html) to build its own innovative storage engine and computing engine to differentiate itself from other time series databases, with the following advantages.
+By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages.
-- **[High Performance](https://tdengine.com/fast)**: With an innovatively designed and purpose-built storage engine, TDengine outperforms other time series databases in data ingestion and querying while significantly reducing storage costs and compute costs.
+- **High-Performance**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
-- **[Scalable](https://tdengine.com/scalable)**: TDengine provides out-of-box scalability and high-availability through its native distributed design. Nodes can be added through simple configuration to achieve greater data processing power. In addition, this feature is open source.
+- **Simplified Solution**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **[SQL Support](https://tdengine.com/sql-support)**: TDengine uses SQL as the query language, thereby reducing learning and migration costs, while adding SQL extensions to better handle time-series. Keeping NoSQL developers in mind, TDengine also supports convenient and flexible, schemaless data ingestion.
+- **Cloud Native**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
-- **All in One**: TDengine has built-in caching, stream processing and data subscription functions. It is no longer necessary to integrate Kafka/Redis/HBase/Spark or other software in some scenarios. It makes the system architecture much simpler, cost-effective and easier to maintain.
+- **Ease of Use**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
-- **Seamless Integration**: Without a single line of code, TDengine provide seamless, configurable integration with third-party tools such as Telegraf, Grafana, EMQX, Prometheus, StatsD, collectd, etc. More third-party tools are being integrated.
+- **Easy Data Analytics**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
-- **Zero Management**: Installation and cluster setup can be done in seconds. Data partitioning and sharding are executed automatically. TDengine’s running status can be monitored via Grafana or other DevOps tools.
+- **Open Source**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
-- **Zero Learning Costs**: With SQL as the query language and support for ubiquitous tools like Python, Java, C/C++, Go, Rust, and Node.js connectors, and a REST API, there are zero learning costs.
-
-- **Interactive Console**: TDengine provides convenient console access to the database, through a CLI, to run ad hoc queries, maintain the database, or manage the cluster, without any programming.
-
-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly 2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly 3: With its simple architecture and zero management, the operation and maintenance costs are reduced.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
## Technical Ecosystem
This is how TDengine would be situated, in a typical time-series data processing platform:
diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md
index 850f705146..e986c146b1 100644
--- a/docs/en/04-concept/index.md
+++ b/docs/en/04-concept/index.md
@@ -2,7 +2,7 @@
title: Concepts
---
-In order to explain the basic concepts and provide some sample code, the TDengine documentation smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase 2. There are multiple smart meters, and 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
+In order to explain the basic concepts and provide some sample code, the TDengine documentation smart meters as a typical time series use case. We assume the following: 1. Each smart meter collects three metrics i.e. current, voltage, and phase; 2. There are multiple smart meters; 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
diff --git a/docs/en/07-develop/02-model/index.mdx b/docs/en/07-develop/02-model/index.mdx
index b647c845d0..e200920a76 100644
--- a/docs/en/07-develop/02-model/index.mdx
+++ b/docs/en/07-develop/02-model/index.mdx
@@ -2,14 +2,16 @@
title: Data Model
---
-The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the STable (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
+The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
+
+Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)".
## Create Database
The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. For TDengine to operate with the best performance, we strongly recommend that you create and configure different databases for data with different characteristics. This allows you, for example, to set up different storage and retention policies. When creating a database, there are a lot of parameters that can be configured such as, the days to keep data, the number of replicas, the number of memory blocks, time precision, the minimum and maximum number of rows in each data block, whether compression is enabled, the time range of the data in single data file and so on. Below is an example of the SQL statement to create a database.
```sql
-CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 VGROUPS 100 WAL 1;
+CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1;
```
In the above SQL statement:
diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx
index a212fa9529..1dfcecc359 100644
--- a/docs/en/07-develop/04-query-data/index.mdx
+++ b/docs/en/07-develop/04-query-data/index.mdx
@@ -61,20 +61,20 @@ In summary, records across subtables can be aggregated by a simple query on thei
In TDengine CLI `taos`, use the SQL below to get the average voltage of all the meters in California grouped by location.
```
-taos> SELECT AVG(voltage) FROM meters GROUP BY location;
- avg(voltage) | location |
-=============================================================
- 222.000000000 | California.LosAngeles |
- 219.200000000 | California.SanFrancisco |
-Query OK, 2 row(s) in set (0.002136s)
+taos> SELECT AVG(voltage), location FROM meters GROUP BY location;
+ avg(voltage) | location |
+===============================================================================================
+ 219.200000000 | California.SanFrancisco |
+ 221.666666667 | California.LosAngeles |
+Query OK, 2 rows in database (0.005995s)
```
### Example 2
-In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current in the past 24 hours from meters whose groupId is 2.
+In TDengine CLI `taos`, use the SQL below to get the number of rows and the maximum current from meters whose groupId is 2.
```
-taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
+taos> SELECT count(*), max(current) FROM meters where groupId = 2;
count(*) | max(current) |
==================================
5 | 13.4 |
@@ -88,40 +88,41 @@ Join queries are only allowed between subtables of the same STable. In [Select](
In IoT use cases, down sampling is widely used to aggregate data by time range. The `INTERVAL` keyword in TDengine can be used to simplify the query by time window. For example, the SQL statement below can be used to get the sum of current every 10 seconds from meters table d1001.
```
-taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
- ts | sum(current) |
+taos> SELECT _wstart, sum(current) FROM d1001 INTERVAL(10s);
+ _wstart | sum(current) |
======================================================
2018-10-03 14:38:00.000 | 10.300000191 |
2018-10-03 14:38:10.000 | 24.900000572 |
-Query OK, 2 row(s) in set (0.000883s)
+Query OK, 2 rows in database (0.003139s)
```
Down sampling can also be used for STable. For example, the below SQL statement can be used to get the sum of current from all meters in California.
```
-taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
- ts | sum(current) |
+taos> SELECT _wstart, SUM(current) FROM meters where location like "California%" INTERVAL(1s);
+ _wstart | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
- 2018-10-03 14:38:05.000 | 32.900000572 |
+ 2018-10-03 14:38:05.000 | 23.699999809 |
2018-10-03 14:38:06.000 | 11.500000000 |
2018-10-03 14:38:15.000 | 12.600000381 |
- 2018-10-03 14:38:16.000 | 36.000000000 |
-Query OK, 5 row(s) in set (0.001538s)
+ 2018-10-03 14:38:16.000 | 34.400000572 |
+Query OK, 5 rows in database (0.007413s)
```
Down sampling also supports time offset. For example, the below SQL statement can be used to get the sum of current from all meters but each time window must start at the boundary of 500 milliseconds.
```
-taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
- ts | sum(current) |
+taos> SELECT _wstart, SUM(current) FROM meters INTERVAL(1s, 500a);
+ _wstart | sum(current) |
======================================================
- 2018-10-03 14:38:04.500 | 11.189999809 |
- 2018-10-03 14:38:05.500 | 31.900000572 |
- 2018-10-03 14:38:06.500 | 11.600000000 |
- 2018-10-03 14:38:15.500 | 12.300000381 |
- 2018-10-03 14:38:16.500 | 35.000000000 |
-Query OK, 5 row(s) in set (0.001521s)
+ 2018-10-03 14:38:03.500 | 10.199999809 |
+ 2018-10-03 14:38:04.500 | 10.300000191 |
+ 2018-10-03 14:38:05.500 | 13.399999619 |
+ 2018-10-03 14:38:06.500 | 11.500000000 |
+ 2018-10-03 14:38:14.500 | 12.600000381 |
+ 2018-10-03 14:38:16.500 | 34.400000572 |
+Query OK, 6 rows in database (0.005515s)
```
In many use cases, it's hard to align the timestamp of the data collected by each collection point. However, a lot of algorithms like FFT require the data to be aligned with same time interval and application programs have to handle this by themselves. In TDengine, it's easy to achieve the alignment using down sampling.
diff --git a/docs/en/07-develop/_sub_java.mdx b/docs/en/07-develop/_sub_java.mdx
index ae0ecd28e0..d14b5fd609 100644
--- a/docs/en/07-develop/_sub_java.mdx
+++ b/docs/en/07-develop/_sub_java.mdx
@@ -3,7 +3,9 @@
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
-:::note
-For now Java connector doesn't provide asynchronous subscription, but `TimerTask` can be used to achieve similar purpose.
-
-:::
\ No newline at end of file
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
+```
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
+```
\ No newline at end of file
diff --git a/docs/en/07-develop/_sub_rust.mdx b/docs/en/07-develop/_sub_rust.mdx
index afb8d79daa..0021666a70 100644
--- a/docs/en/07-develop/_sub_rust.mdx
+++ b/docs/en/07-develop/_sub_rust.mdx
@@ -1,3 +1,3 @@
-```rs
+```rust
{{#include docs/examples/rust/nativeexample/examples/subscribe_demo.rs}}
-```
\ No newline at end of file
+```
diff --git a/docs/en/14-reference/03-connector/java.mdx b/docs/en/14-reference/03-connector/java.mdx
index cbf7daa879..1ea033eab4 100644
--- a/docs/en/14-reference/03-connector/java.mdx
+++ b/docs/en/14-reference/03-connector/java.mdx
@@ -130,7 +130,7 @@ The configuration parameters in the URL are as follows:
- charset: The character set used by the client, the default value is the system character set.
- locale: Client locale, by default, use the system's current locale.
- timezone: The time zone used by the client, the default value is the system's current time zone.
-- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: false. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
+- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is: true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
- batchErrorIgnore:true: When executing statement executeBatch, if there is a SQL execution failure in the middle, the following SQL will continue to be executed. false: No more statements after the failed SQL are executed. The default value is: false.
For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
diff --git a/docs/examples/c/tmq_example.c b/docs/examples/c/tmq_example.c
new file mode 100644
index 0000000000..19adaad116
--- /dev/null
+++ b/docs/examples/c/tmq_example.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "taos.h"
+
+static int running = 1;
+static char dbName[64] = "tmqdb";
+static char stbName[64] = "stb";
+static char topicName[64] = "topicname";
+
+static int32_t msg_process(TAOS_RES* msg) {
+ char buf[1024];
+ int32_t rows = 0;
+
+ const char* topicName = tmq_get_topic_name(msg);
+ const char* dbName = tmq_get_db_name(msg);
+ int32_t vgroupId = tmq_get_vgroup_id(msg);
+
+ printf("topic: %s\n", topicName);
+ printf("db: %s\n", dbName);
+ printf("vgroup id: %d\n", vgroupId);
+
+ while (1) {
+ TAOS_ROW row = taos_fetch_row(msg);
+ if (row == NULL) break;
+
+ TAOS_FIELD* fields = taos_fetch_fields(msg);
+ int32_t numOfFields = taos_field_count(msg);
+ int32_t* length = taos_fetch_lengths(msg);
+ int32_t precision = taos_result_precision(msg);
+ rows++;
+ taos_print_row(buf, row, fields, numOfFields);
+ printf("row content: %s\n", buf);
+ }
+
+ return rows;
+}
+
+static int32_t init_env() {
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ TAOS_RES* pRes;
+ // drop database if exists
+ printf("create database\n");
+ pRes = taos_query(pConn, "drop database if exists tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create database
+ pRes = taos_query(pConn, "create database tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create super table
+ printf("create super table\n");
+ pRes = taos_query(
+ pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // create sub tables
+ printf("create sub tables\n");
+ pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table ctb1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ // insert data
+ printf("insert data into sub tables\n");
+ pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ taos_close(pConn);
+ return 0;
+}
+
+int32_t create_topic() {
+ printf("create topic\n");
+ TAOS_RES* pRes;
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ pRes = taos_query(pConn, "use tmqdb");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use tmqdb, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ taos_close(pConn);
+ return 0;
+}
+
+void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
+ printf("tmq_commit_cb_print() code: %d, tmq: %p, param: %p\n", code, tmq, param);
+}
+
+tmq_t* build_consumer() {
+ tmq_conf_res_t code;
+ tmq_conf_t* conf = tmq_conf_new();
+ code = tmq_conf_set(conf, "enable.auto.commit", "true");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "group.id", "cgrpName");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "client.id", "user defined name");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "td.connect.user", "root");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "td.connect.pass", "taosdata");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
+ if (TMQ_CONF_OK != code) return NULL;
+ code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
+ if (TMQ_CONF_OK != code) return NULL;
+
+ tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
+
+ tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+ tmq_conf_destroy(conf);
+ return tmq;
+}
+
+tmq_list_t* build_topic_list() {
+ tmq_list_t* topicList = tmq_list_new();
+ int32_t code = tmq_list_append(topicList, "topicname");
+ if (code) {
+ return NULL;
+ }
+ return topicList;
+}
+
+void basic_consume_loop(tmq_t* tmq) {
+ int32_t totalRows = 0;
+ int32_t msgCnt = 0;
+ int32_t timeout = 5000;
+ while (running) {
+ TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeout);
+ if (tmqmsg) {
+ msgCnt++;
+ totalRows += msg_process(tmqmsg);
+ taos_free_result(tmqmsg);
+ } else {
+ break;
+ }
+ }
+
+ fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
+}
+
+int main(int argc, char* argv[]) {
+ int32_t code;
+
+ if (init_env() < 0) {
+ return -1;
+ }
+
+ if (create_topic() < 0) {
+ return -1;
+ }
+
+ tmq_t* tmq = build_consumer();
+ if (NULL == tmq) {
+ fprintf(stderr, "%% build_consumer() fail!\n");
+ return -1;
+ }
+
+ tmq_list_t* topic_list = build_topic_list();
+ if (NULL == topic_list) {
+ return -1;
+ }
+
+ if ((code = tmq_subscribe(tmq, topic_list))) {
+ fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
+ }
+ tmq_list_destroy(topic_list);
+
+ basic_consume_loop(tmq);
+
+ code = tmq_consumer_close(tmq);
+ if (code) {
+ fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
+ } else {
+ fprintf(stderr, "%% Consumer closed\n");
+ }
+
+ return 0;
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
index b1e675cdf6..50e8b35771 100644
--- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
@@ -68,6 +68,7 @@ public class SubscribeDemo {
System.out.println(meter);
}
}
+ consumer.unsubscribe();
}
} catch (ClassNotFoundException | SQLException e) {
e.printStackTrace();
diff --git a/docs/examples/node/nativeexample/subscribe_demo.js b/docs/examples/node/nativeexample/subscribe_demo.js
index c4f7e6df84..53cbe55d26 100644
--- a/docs/examples/node/nativeexample/subscribe_demo.js
+++ b/docs/examples/node/nativeexample/subscribe_demo.js
@@ -48,4 +48,4 @@ try {
cursor.close();
conn.close();
}, 2000);
-}
\ No newline at end of file
+}
diff --git a/docs/examples/node/package.json b/docs/examples/node/package.json
index 36d3f016b5..d00d71d99f 100644
--- a/docs/examples/node/package.json
+++ b/docs/examples/node/package.json
@@ -4,7 +4,7 @@
"main": "index.js",
"license": "MIT",
"dependencies": {
- "@tdengine/client": "^3.0.0",
+ "@tdengine/client": "^3.0.1",
"@tdengine/rest": "^3.0.0"
}
}
diff --git a/docs/examples/python/tmq_example.py b/docs/examples/python/tmq_example.py
index 1f6da3d1b6..cee036454e 100644
--- a/docs/examples/python/tmq_example.py
+++ b/docs/examples/python/tmq_example.py
@@ -1,59 +1,6 @@
import taos
-from taos.tmq import *
-
-conn = taos.connect()
-
-# create database
-conn.execute("drop database if exists py_tmq")
-conn.execute("create database if not exists py_tmq vgroups 2")
-
-# create table and stables
-conn.select_db("py_tmq")
-conn.execute("create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")
-conn.execute("create table if not exists tb1 using stb1 tags(1)")
-conn.execute("create table if not exists tb2 using stb1 tags(2)")
-conn.execute("create table if not exists tb3 using stb1 tags(3)")
-
-# create topic
-conn.execute("drop topic if exists topic_ctb_column")
-conn.execute("create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1")
-
-# set consumer configure options
-conf = TaosTmqConf()
-conf.set("group.id", "tg2")
-conf.set("td.connect.user", "root")
-conf.set("td.connect.pass", "taosdata")
-conf.set("enable.auto.commit", "true")
-conf.set("msg.with.table.name", "true")
-
-def tmq_commit_cb_print(tmq, resp, offset, param=None):
- print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}")
-
-conf.set_auto_commit_cb(tmq_commit_cb_print, None)
-
-# build consumer
-tmq = conf.new_consumer()
-
-# build topic list
-topic_list = TaosTmqList()
-topic_list.append("topic_ctb_column")
-
-# subscribe consumer
-tmq.subscribe(topic_list)
-
-# check subscriptions
-sub_list = tmq.subscription()
-print("subscribed topics: ",sub_list)
-
-# start subscribe
-while 1:
- res = tmq.poll(1000)
- if res:
- topic = res.get_topic_name()
- vg = res.get_vgroup_id()
- db = res.get_db_name()
- print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}")
- for row in res:
- print(row)
- tb = res.get_table_name()
- print(f"from table: {tb}")
+from taos.tmq import TaosConsumer
+consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
+for msg in consumer:
+ for row in msg:
+ print(row)
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 64a7d419e1..79d5424ac2 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -4,7 +4,7 @@ sidebar_label: 文档首页
slug: /
---
-TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time-Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index 97322c68a2..a6ef2b94b6 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -3,7 +3,7 @@ title: 产品简介
toc_max_heading_level: 2
---
-TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
+TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md
index 814784b649..f0f09d4c7e 100644
--- a/docs/zh/05-get-started/01-docker.md
+++ b/docs/zh/05-get-started/01-docker.md
@@ -2,18 +2,15 @@
sidebar_label: Docker
title: 通过 Docker 快速体验 TDengine
---
-:::info
-如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考[TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
-:::
-本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。
+本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
## 启动 TDengine
如果已经安装了 docker, 只需执行下面的命令。
```shell
-docker run -d -p 6030:6030 -p 6041/6041 -p 6043-6049/6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
+docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index 63698aab50..c1a67f0182 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -5,17 +5,75 @@ title: 使用安装包立即开始
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
+import PkgListV3 from "/components/PkgListV3";
-:::info
-如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
+您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
-:::
+TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../reference/rest-api/)。
-在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。TDengine 也提供 Windows x64 平台的安装包。
+为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
+
+在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,rpm 和 deb 包不含 taosdump 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。
## 安装
+
+
+1. 从列表中下载获得 deb 安装包;
+
+2. 进入到安装包所在目录,执行如下的安装命令:
+
+```bash
+# 替换为下载的安装包版本
+sudo dpkg -i TDengine-server--Linux-x64.deb
+```
+
+
+
+
+
+1. 从列表中下载获得 rpm 安装包;
+
+2. 进入到安装包所在目录,执行如下的安装命令:
+
+```bash
+# 替换为下载的安装包版本
+sudo rpm -ivh TDengine-server--Linux-x64.rpm
+```
+
+
+
+
+
+1. 从列表中下载获得 tar.gz 安装包;
+
+2. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
+
+```bash
+# 替换为下载的安装包版本
+tar -zxvf TDengine-server--Linux-x64.tar.gz
+```
+
+解压后进入相应路径,执行
+
+```bash
+sudo ./install.sh
+```
+
+:::info
+install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
+:::
+
+
+
+
+
+1. 从列表中下载获得 exe 安装程序;
+
+2. 运行可执行程序来安装 TDengine。
+
+
可以使用 apt-get 工具从官方仓库安装。
@@ -29,6 +87,7 @@ echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" |
如果安装 Beta 版需要安装包仓库
```bash
+wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
```
@@ -44,58 +103,12 @@ sudo apt-get install tdengine
apt-get 方式只适用于 Debian 或 Ubuntu 系统
::::
-
-
-1. 从 [发布历史页面](../../releases) 下载获得 deb 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.deb;
-2. 进入到 TDengine-server-3.0.0.0-Linux-x64.deb 安装包所在目录,执行如下的安装命令:
-
-```bash
-sudo dpkg -i TDengine-server-3.0.0.0-Linux-x64.deb
-```
-
-
-
-
-
-1. 从 [发布历史页面](../../releases) 下载获得 rpm 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.rpm;
-2. 进入到 TDengine-server-3.0.0.0-Linux-x64.rpm 安装包所在目录,执行如下的安装命令:
-
-```bash
-sudo rpm -ivh TDengine-server-3.0.0.0-Linux-x64.rpm
-```
-
-
-
-
-
-1. 从 [发布历史页面](../../releases) 下载获得 tar.gz 安装包,例如 TDengine-server-3.0.0.0-Linux-x64.tar.gz;
-2. 进入到 TDengine-server-3.0.0.0-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
-
-```bash
-tar -zxvf TDengine-server-3.0.0.0-Linux-x64.tar.gz
-```
-
-解压后进入相应路径,执行
-
-```bash
-sudo ./install.sh
-```
+
:::info
-install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
-
+下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases)
:::
-
-
-
-
-1. 从 [发布历史页面](../../releases) 下载获得 exe 安装程序,例如 TDengine-server-3.0.0.0-Windows-x64.exe;
-2. 运行 TDengine-server-3.0.0.0-Windows-x64.exe 来安装 TDengine。
-
-
-
-
:::note
当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
@@ -193,7 +206,7 @@ Query OK, 2 row(s) in set (0.003128s)
## 使用 taosBenchmark 体验写入速度
-启动 TDengine 的服务,在 Linux 终端执行 `taosBenchmark` (曾命名为 `taosdemo`):
+启动 TDengine 的服务,在 Linux 或 windows 终端执行 `taosBenchmark` (曾命名为 `taosdemo`):
```bash
taosBenchmark
diff --git a/docs/zh/07-develop/02-model/index.mdx b/docs/zh/07-develop/02-model/index.mdx
index be545e8813..1609eb5362 100644
--- a/docs/zh/07-develop/02-model/index.mdx
+++ b/docs/zh/07-develop/02-model/index.mdx
@@ -11,10 +11,10 @@ TDengine 采用类关系型数据模型,需要建库、建表。因此对于
不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除 SQL 标准的选项外,还可以指定保留时长、副本数、缓存大小、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如:
```sql
-CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 VGROUPS 100 WAL 1;
+CREATE DATABASE power KEEP 365 DURATION 10 BUFFER 16 WAL_LEVEL 1;
```
-上述语句将创建一个名为 power 的库,这个库的数据将保留 365 天(超过 365 天将被自动删除),每 10 天一个数据文件,每个 VNODE 的写入内存池的大小为 16 MB,数据库的 VGROUPS 数量,对该数据库入会写 WAL 但不执行 FSYNC。详细的语法及参数请见 [数据库管理](/taos-sql/database) 章节。
+上述语句将创建一个名为 power 的库,这个库的数据将保留 365 天(超过 365 天将被自动删除),每 10 天一个数据文件,每个 VNODE 的写入内存池的大小为 16 MB,对该数据库入会写 WAL 但不执行 FSYNC。详细的语法及参数请见 [数据库管理](/taos-sql/database) 章节。
创建库之后,需要使用 SQL 命令 `USE` 将当前库切换过来,例如:
diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx
index 68f49d9f2b..2631d147a5 100644
--- a/docs/zh/07-develop/04-query-data/index.mdx
+++ b/docs/zh/07-develop/04-query-data/index.mdx
@@ -54,20 +54,20 @@ Query OK, 2 row(s) in set (0.001100s)
在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
```
-taos> SELECT AVG(voltage) FROM meters GROUP BY location;
- avg(voltage) | location |
-=============================================================
- 222.000000000 | California.LosAngeles |
- 219.200000000 | California.SanFrancisco |
-Query OK, 2 row(s) in set (0.002136s)
+taos> SELECT AVG(voltage), location FROM meters GROUP BY location;
+ avg(voltage) | location |
+===============================================================================================
+ 219.200000000 | California.SanFrancisco |
+ 221.666666667 | California.LosAngeles |
+Query OK, 2 rows in database (0.005995s)
```
### 示例二
-在 TAOS shell, 查找 groupId 为 2 的所有智能电表过去 24 小时的记录条数,电流的最大值。
+在 TAOS shell, 查找 groupId 为 2 的所有智能电表的记录条数,电流的最大值。
```
-taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
+taos> SELECT count(*), max(current) FROM meters where groupId = 2;
cunt(*) | max(current) |
==================================
5 | 13.4 |
@@ -81,40 +81,41 @@ Query OK, 1 row(s) in set (0.002136s)
物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval 让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每 10 秒钟求和
```
-taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
- ts | sum(current) |
+taos> SELECT _wstart, sum(current) FROM d1001 INTERVAL(10s);
+ _wstart | sum(current) |
======================================================
2018-10-03 14:38:00.000 | 10.300000191 |
2018-10-03 14:38:10.000 | 24.900000572 |
-Query OK, 2 row(s) in set (0.000883s)
+Query OK, 2 rows in database (0.003139s)
```
降采样操作也适用于超级表,比如:将加利福尼亚州所有智能电表采集的电流值每秒钟求和
```
-taos> SELECT SUM(current) FROM meters where location like "California%" INTERVAL(1s);
- ts | sum(current) |
+taos> SELECT _wstart, SUM(current) FROM meters where location like "California%" INTERVAL(1s);
+ _wstart | sum(current) |
======================================================
2018-10-03 14:38:04.000 | 10.199999809 |
- 2018-10-03 14:38:05.000 | 32.900000572 |
+ 2018-10-03 14:38:05.000 | 23.699999809 |
2018-10-03 14:38:06.000 | 11.500000000 |
2018-10-03 14:38:15.000 | 12.600000381 |
- 2018-10-03 14:38:16.000 | 36.000000000 |
-Query OK, 5 row(s) in set (0.001538s)
+ 2018-10-03 14:38:16.000 | 34.400000572 |
+Query OK, 5 rows in database (0.007413s)
```
降采样操作也支持时间偏移,比如:将所有智能电表采集的电流值每秒钟求和,但要求每个时间窗口从 500 毫秒开始
```
-taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
- ts | sum(current) |
+taos> SELECT _wstart, SUM(current) FROM meters INTERVAL(1s, 500a);
+ _wstart | sum(current) |
======================================================
- 2018-10-03 14:38:04.500 | 11.189999809 |
- 2018-10-03 14:38:05.500 | 31.900000572 |
- 2018-10-03 14:38:06.500 | 11.600000000 |
- 2018-10-03 14:38:15.500 | 12.300000381 |
- 2018-10-03 14:38:16.500 | 35.000000000 |
-Query OK, 5 row(s) in set (0.001521s)
+ 2018-10-03 14:38:03.500 | 10.199999809 |
+ 2018-10-03 14:38:04.500 | 10.300000191 |
+ 2018-10-03 14:38:05.500 | 13.399999619 |
+ 2018-10-03 14:38:06.500 | 11.500000000 |
+ 2018-10-03 14:38:14.500 | 12.600000381 |
+ 2018-10-03 14:38:16.500 | 34.400000572 |
+Query OK, 6 rows in database (0.005515s)
```
物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如 FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用 TDengine 的降采样操作就轻松解决。
diff --git a/docs/zh/07-develop/06-stream.md b/docs/zh/07-develop/06-stream.md
index ab4fdf9004..d5296582d5 100644
--- a/docs/zh/07-develop/06-stream.md
+++ b/docs/zh/07-develop/06-stream.md
@@ -4,8 +4,16 @@ description: "TDengine 流式计算将数据的写入、预处理、复杂分析
title: 流式计算
---
-在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。用户通常需要在时序数据库之外再搭建 Kafka、Flink、Spark 等流计算处理引擎,增加了用户的开发成本和维护成本。
-使用 TDengine 3.0 的流式计算引擎能够最大限度的减少对这些额外中间件的依赖,真正将数据的写入、预处理、长期存储、复杂分析、实时计算、实时报警触发等功能融为一体,并且,所有这些任务只需要使用 SQL 完成,极大降低了用户的学习成本、使用成本。
+在时序数据的处理中,经常要对原始数据进行清洗、预处理,再使用时序数据库进行长久的储存。在传统的时序数据解决方案中,常常需要部署 Kafka、Flink 等流处理系统。而流处理系统的复杂性,带来了高昂的开发与运维成本。
+
+TDengine 3.0 的流式计算引擎提供了实时处理写入的数据流的能力,使用 SQL 定义实时流变换,当数据被写入流的源表后,数据会被以定义的方式自动处理,并根据定义的触发模式向目的表推送结果。它提供了替代复杂流处理系统的轻量级解决方案,并能够在高吞吐的数据写入的情况下,提供毫秒级的计算结果延迟。
+
+流式计算可以包含数据过滤,标量函数计算(含UDF),以及窗口聚合(支持滑动窗口、会话窗口与状态窗口),可以以超级表、子表、普通表为源表,写入到目的超级表。在创建流时,目的超级表将被自动创建,随后新插入的数据会被流定义的方式处理并写入其中,通过 partition by 子句,可以以表名或标签划分 partition,不同的 partition 将写入到目的超级表的不同子表。
+
+TDengine 的流式计算能够支持分布在多个 vnode 中的超级表聚合;还能够处理乱序数据的写入:它提供了 watermark 机制以度量容忍数据乱序的程度,并提供了 ignore expired 配置项以决定乱序数据的处理策略——丢弃或者重新计算。
+
+详见 [流式计算](../../taos-sql/stream)
+
## 流式计算的创建
@@ -14,7 +22,7 @@ CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subq
stream_options: {
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
WATERMARK time
- IGNORE EXPIRED
+ IGNORE EXPIRED [0 | 1]
}
```
@@ -59,7 +67,7 @@ insert into d1004 values("2018-10-03 14:38:05.000", 10.80000, 223, 0.29000);
insert into d1004 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000);
```
-### 查询以观查结果
+### 查询以观察结果
```sql
taos> select start, end, max_current from current_stream_output_stb;
@@ -88,7 +96,7 @@ create stream power_stream into power_stream_output_stb as select ts, concat_ws(
参考示例一 [写入数据](#写入数据)
-### 查询以观查结果
+### 查询以观察结果
```sql
taos> select ts, meter_location, active_power, reactive_power from power_stream_output_stb;
ts | meter_location | active_power | reactive_power |
@@ -102,4 +110,4 @@ taos> select ts, meter_location, active_power, reactive_power from power_stream_
2018-10-03 14:38:16.800 | California.SanFrancisco.d1001 | 2588.728381186 | 829.240910475 |
2018-10-03 14:38:16.650 | California.SanFrancisco.d1002 | 2175.595991997 | 555.520860397 |
Query OK, 8 rows in database (0.014753s)
-```
\ No newline at end of file
+```
diff --git a/docs/zh/07-develop/07-tmq.md b/docs/zh/07-develop/07-tmq.md
deleted file mode 100644
index 25d468cad3..0000000000
--- a/docs/zh/07-develop/07-tmq.md
+++ /dev/null
@@ -1,249 +0,0 @@
----
-sidebar_label: 数据订阅
-description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。"
-title: 数据订阅
----
-
-import Tabs from "@theme/Tabs";
-import TabItem from "@theme/TabItem";
-import Java from "./_sub_java.mdx";
-import Python from "./_sub_python.mdx";
-import Go from "./_sub_go.mdx";
-import Rust from "./_sub_rust.mdx";
-import Node from "./_sub_node.mdx";
-import CSharp from "./_sub_cs.mdx";
-import CDemo from "./_sub_c.mdx";
-
-
-为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本。
-
-与 kafka 一样,你需要定义 topic, 但 TDengine 的 topic 是基于一个已经存在的超级表、子表或普通表的查询条件,即一个 SELECT 语句。你可以使用 SQL 对标签、表名、列、表达式等条件进行过滤,以及对数据进行标量函数与 UDF 计算(不包括数据聚合)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤与预处理交给 TDengine,而不是应用完成,有效的减少传输的数据量与应用的复杂度。
-
-消费者订阅 topic 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程、分布式地消费数据,提高消费速度。但不同消费者组中的消费者即使消费同一个topic, 并不共享消费进度。一个消费者可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的 vnode 上,也就是多个 shard 上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的ACK机制,在宕机、重启等复杂环境下确保 at least once 消费。
-
-为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。
-
-本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
-
-## 主要数据结构和API
-
-TMQ 的 API 中,与订阅相关的主要数据结构和API如下:
-
-```c
-typedef struct tmq_t tmq_t;
-typedef struct tmq_conf_t tmq_conf_t;
-typedef struct tmq_list_t tmq_list_t;
-
-typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
-
-DLL_EXPORT tmq_list_t *tmq_list_new();
-DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
-DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
-DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
-DLL_EXPORT const char *tmq_err2str(int32_t code);
-
-DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
-DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
-DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
-DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
-DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
-DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
-
-enum tmq_conf_res_t {
- TMQ_CONF_UNKNOWN = -2,
- TMQ_CONF_INVALID = -1,
- TMQ_CONF_OK = 0,
-};
-typedef enum tmq_conf_res_t tmq_conf_res_t;
-
-DLL_EXPORT tmq_conf_t *tmq_conf_new();
-DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
-DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
-DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
-```
-
-这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面C语言的示例代码。
-
-## 写入数据
-
-首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如:
-
-```sql
-drop database if exists tmqdb;
-create database tmqdb;
-create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16) tags(t1 int, t3 varchar(16));
-create table tmqdb.ctb0 using tmqdb.stb tags(0, "subtable0");
-create table tmqdb.ctb1 using tmqdb.stb tags(1, "subtable1");
-insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
-insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
-```
-
-## 创建topic:
-
-```sql
-create topic topicName as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1;
-```
-
-TMQ支持多种订阅类型:
-
-### 列订阅
-
-语法:CREATE TOPIC topic_name as subquery
-通过select语句订阅(包括select *,或select ts, c1等指定列描述订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合)
-
-- TOPIC一旦创建则schema确定
-- 被订阅或用于计算的column和tag不可被删除、修改
-- 若发生schema变更,新增的column不出现在结果中
-
-### 超级表订阅
-语法:CREATE TOPIC topic_name AS STABLE stbName
-
-与select * from stbName订阅的区别是:
-- 不会限制用户的schema变更
-- 返回的是非结构化的数据:返回数据的schema会随之超级表的schema变化而变化
-- 用户对于要处理的每一个数据块都可能有不同的schema,因此,必须重新获取schema
-- 返回数据不带有tag
-
-## 创建 consumer 以及consumer group
-
-对于consumer, 目前支持的config包括:
-
-| 参数名称 | 参数值 | 备注 |
-| ---------------------------- | ------------------------------ | ------------------------------------------------------ |
-| group.id | 最大长度:192 | |
-| enable.auto.commit | 合法值:true, false | |
-| auto.commit.interval.ms | | |
-| auto.offset.reset | 合法值:earliest, latest, none | |
-| td.connect.ip | 用于连接,同taos_connect的参数 | |
-| td.connect.user | 用于连接,同taos_connect的参数 | |
-| td.connect.pass | 用于连接,同taos_connect的参数 | |
-| td.connect.port | 用于连接,同taos_connect的参数 | |
-| enable.heartbeat.background | 合法值:true, false | 开启后台心跳,即consumer不会因为长时间不poll而认为离线 |
-| experimental.snapshot.enable | 合法值:true, false | 从wal开始消费,还是从tsbs开始消费 |
-| msg.with.table.name | 合法值:true, false | 从消息中能否解析表名 |
-
-```sql
-/* 根据需要,设置消费组(group.id)、自动提交(enable.auto.commit)、自动提交时间间隔(auto.commit.interval.ms)、用户名(td.connect.user)、密码(td.connect.pass)等参数 */
- tmq_conf_t* conf = tmq_conf_new();
- tmq_conf_set(conf, "enable.auto.commit", "true");
- tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
- tmq_conf_set(conf, "group.id", "cgrpName");
- tmq_conf_set(conf, "td.connect.user", "root");
- tmq_conf_set(conf, "td.connect.pass", "taosdata");
- tmq_conf_set(conf, "auto.offset.reset", "earliest");
- tmq_conf_set(conf, "experimental.snapshot.enable", "true");
- tmq_conf_set(conf, "msg.with.table.name", "true");
- tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
-
- tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
- tmq_conf_destroy(conf);
-```
-
-上述配置中包括consumer group ID,如果多个 consumer 指定的 consumer group ID一样,则自动形成一个consumer group,共享消费进度。
-
-
-## 创建 topic 列表
-
-单个consumer支持同时订阅多个topic。
-
-```sql
- tmq_list_t* topicList = tmq_list_new();
- tmq_list_append(topicList, "topicName");
-```
-
-## 启动订阅并开始消费
-
-```
- /* 启动订阅 */
- tmq_subscribe(tmq, topicList);
- tmq_list_destroy(topicList);
-
- /* 循环poll消息 */
- while (running) {
- TAOS_RES* tmqmsg = tmq_consumer_poll(tmq, timeOut);
- msg_process(tmqmsg);
- }
-```
-
-这里是一个 **while** 循环,每调用一次tmq_consumer_poll(),获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析API完成消息内容的解析。
-
-## 结束消费
-
-```sql
- /* 取消订阅 */
- tmq_unsubscribe(tmq);
-
- /* 关闭消费 */
- tmq_consumer_close(tmq);
-```
-
-## 删除topic
-
-如果不再需要,可以删除创建topic,但注意:只有没有被订阅的topic才能别删除。
-
-```sql
- /* 删除topic */
- drop topic topicName;
-```
-
-## 状态查看
-
-1、topics:查询已经创建的topic
-
-```sql
- show topics;
-```
-
-2、consumers:查询consumer的状态及其订阅的topic
-
-```sql
- show consumers;
-```
-
-3、subscriptions:查询consumer与vgroup之间的分配关系
-
-```sql
- show subscriptions;
-```
-
-## 示例代码
-
-本节展示各种语言的示例代码。
-
-
-
-
-```c
-{{#include examples/c/tmq.c}}
-```
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-```python
-{{#include docs/examples/python/tmq_example.py}}
-```
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
new file mode 100644
index 0000000000..da8bf5e20e
--- /dev/null
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -0,0 +1,839 @@
+---
+sidebar_label: 数据订阅
+description: "数据订阅与推送服务。写入到 TDengine 中的时序数据能够被自动推送到订阅客户端。"
+title: 数据订阅
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+import Java from "./_sub_java.mdx";
+import Python from "./_sub_python.mdx";
+import Go from "./_sub_go.mdx";
+import Rust from "./_sub_rust.mdx";
+import Node from "./_sub_node.mdx";
+import CSharp from "./_sub_cs.mdx";
+import CDemo from "./_sub_c.mdx";
+
+为了帮助应用实时获取写入 TDengine 的数据,或者以事件到达顺序处理数据,TDengine 提供了类似消息队列产品的数据订阅、消费接口。这样在很多场景下,采用 TDengine 的时序数据处理系统不再需要集成消息队列产品,比如 kafka, 从而简化系统设计的复杂度,降低运营维护成本。
+
+与 kafka 一样,你需要定义 *topic*, 但 TDengine 的 *topic* 是基于一个已经存在的超级表、子表或普通表的查询条件,即一个 `SELECT` 语句。你可以使用 SQL 对标签、表名、列、表达式等条件进行过滤,以及对数据进行标量函数与 UDF 计算(不包括数据聚合)。与其他消息队列软件相比,这是 TDengine 数据订阅功能的最大的优势,它提供了更大的灵活性,数据的颗粒度可以由应用随时调整,而且数据的过滤与预处理交给 TDengine,而不是应用完成,有效的减少传输的数据量与应用的复杂度。
+
+消费者订阅 *topic* 后,可以实时获得最新的数据。多个消费者可以组成一个消费者组 (consumer group), 一个消费者组里的多个消费者共享消费进度,便于多线程、分布式地消费数据,提高消费速度。但不同消费者组中的消费者即使消费同一个 topic, 并不共享消费进度。一个消费者可以订阅多个 topic。如果订阅的是超级表,数据可能会分布在多个不同的 vnode 上,也就是多个 shard 上,这样一个消费组里有多个消费者可以提高消费效率。TDengine 的消息队列提供了消息的 ACK 机制,在宕机、重启等复杂环境下确保 at least once 消费。
+
+为了实现上述功能,TDengine 会为 WAL (Write-Ahead-Log) 文件自动创建索引以支持快速随机访问,并提供了灵活可配置的文件切换与保留机制:用户可以按需指定 WAL 文件保留的时间以及大小(详见 create database 语句)。通过以上方式将 WAL 改造成了一个保留事件到达顺序的、可持久化的存储引擎(但由于 TSDB 具有远比 WAL 更高的压缩率,我们不推荐保留太长时间,一般来说,不超过几天)。 对于以 topic 形式创建的查询,TDengine 将对接 WAL 而不是 TSDB 作为其存储引擎。在消费时,TDengine 根据当前消费进度从 WAL 直接读取数据,并使用统一的查询引擎实现过滤、变换等操作,将数据推送给消费者。
+
+本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
+
+## 主要数据结构和 API
+
+不同语言下, TMQ 订阅相关的 API 及数据结构如下:
+
+
+
+
+```c
+typedef struct tmq_t tmq_t;
+typedef struct tmq_conf_t tmq_conf_t;
+typedef struct tmq_list_t tmq_list_t;
+
+typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
+
+DLL_EXPORT tmq_list_t *tmq_list_new();
+DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
+DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
+DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
+DLL_EXPORT const char *tmq_err2str(int32_t code);
+
+DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
+DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
+DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
+DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
+DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
+DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
+
+enum tmq_conf_res_t {
+ TMQ_CONF_UNKNOWN = -2,
+ TMQ_CONF_INVALID = -1,
+ TMQ_CONF_OK = 0,
+};
+typedef enum tmq_conf_res_t tmq_conf_res_t;
+
+DLL_EXPORT tmq_conf_t *tmq_conf_new();
+DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
+DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
+DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
+```
+
+这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
+
+
+
+
+```java
+void subscribe(Collection topics) throws SQLException;
+
+void unsubscribe() throws SQLException;
+
+Set subscription() throws SQLException;
+
+ConsumerRecords poll(Duration timeout) throws SQLException;
+
+void commitAsync();
+
+void commitAsync(OffsetCommitCallback callback);
+
+void commitSync() throws SQLException;
+
+void close() throws SQLException;
+```
+
+
+
+
+
+```python
+class TaosConsumer():
+ def __init__(self, *topics, **configs)
+
+ def __iter__(self)
+
+ def __next__(self)
+
+ def sync_next(self)
+
+ def subscription(self)
+
+ def unsubscribe(self)
+
+ def close(self)
+
+ def __del__(self)
+```
+
+
+
+
+
+```go
+func NewConsumer(conf *Config) (*Consumer, error)
+
+func (c *Consumer) Close() error
+
+func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error
+
+func (c *Consumer) FreeMessage(message unsafe.Pointer)
+
+func (c *Consumer) Poll(timeout time.Duration) (*Result, error)
+
+func (c *Consumer) Subscribe(topics []string) error
+
+func (c *Consumer) Unsubscribe() error
+```
+
+
+
+
+
+```rust
+impl TBuilder for TmqBuilder
+ fn from_dsn(dsn: D) -> Result
+ fn build(&self) -> Result
+
+impl AsAsyncConsumer for Consumer
+ async fn subscribe, I: IntoIterator- + Send>(
+ &mut self,
+ topics: I,
+ ) -> Result<(), Self::Error>;
+ fn stream(
+ &self,
+ ) -> Pin<
+ Box<
+ dyn '_
+ + Send
+ + futures::Stream<
+ Item = Result<(Self::Offset, MessageSet), Self::Error>,
+ >,
+ >,
+ >;
+ async fn commit(&self, offset: Self::Offset) -> Result<(), Self::Error>;
+
+ async fn unsubscribe(self);
+```
+
+可在 上查看详细 API 说明。
+
+
+
+
+
+```js
+function TMQConsumer(config)
+
+function subscribe(topic)
+
+function consume(timeout)
+
+function subscription()
+
+function unsubscribe()
+
+function commit(msg)
+
+function close()
+```
+
+
+
+
+
+```csharp
+ConsumerBuilder(IEnumerable> config)
+
+virtual IConsumer Build()
+
+Consumer(ConsumerBuilder builder)
+
+void Subscribe(IEnumerable topics)
+
+void Subscribe(string topic)
+
+ConsumeResult Consume(int millisecondsTimeout)
+
+List Subscription()
+
+void Unsubscribe()
+
+void Commit(ConsumeResult consumerResult)
+
+void Close()
+```
+
+
+
+
+## 写入数据
+
+首先完成建库、建一张超级表和多张子表操作,然后就可以写入数据了,比如:
+
+```sql
+DROP DATABASE IF EXISTS tmqdb;
+CREATE DATABASE tmqdb;
+CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16) TAGS(t1 INT, t3 VARCHAR(16));
+CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
+CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
+INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
+INSERT INTO tmqdb.ctb1 VALUES(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11');
+```
+
+## 创建 *topic*
+
+TDengine 使用 SQL 创建一个 topic:
+
+```sql
+CREATE TOPIC topic_name AS SELECT ts, c1, c2, c3 FROM tmqdb.stb WHERE c1 > 1;
+```
+
+TMQ 支持多种订阅类型:
+
+### 列订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name as subquery
+```
+
+通过 `SELECT` 语句订阅(包括 `SELECT *`,或 `SELECT ts, c1` 等指定列订阅,可以带条件过滤、标量函数计算,但不支持聚合函数、不支持时间窗口聚合)。需要注意的是:
+
+- 该类型 TOPIC 一旦创建则订阅数据的结构确定。
+- 被订阅或用于计算的列或标签不可被删除(`ALTER table DROP`)、修改(`ALTER table MODIFY`)。
+- 若发生表结构变更,新增的列不出现在结果中,若发生列删除则会报错。
+
+### 超级表订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name AS STABLE stb_name
+```
+
+与 `SELECT * from stbName` 订阅的区别是:
+
+- 不会限制用户的表结构变更。
+- 返回的是非结构化的数据:返回数据的结构会随之超级表的表结构变化而变化。
+- 用户对于要处理的每一个数据块都可能有不同的表结构。
+- 返回数据不包含标签。
+
+### 数据库订阅
+
+语法:
+
+```sql
+CREATE TOPIC topic_name [WITH META] AS DATABASE db_name;
+```
+
+通过该语句可创建一个包含数据库所有表数据的订阅,`WITH META` 可选择将数据库结构变动信息加入到订阅消息流,TMQ 将消费当前数据库下所有表结构的变动,包括超级表的创建与删除,列添加、删除或修改,子表的创建、删除及 TAG 变动信息等等。消费者可通过 API 来判断具体的消息类型。这一点也是与 Kafka 不同的地方。
+
+## 创建消费者 *consumer*
+
+消费者需要通过一系列配置选项创建,基础配置项如下表所示:
+
+| 参数名称 | 类型 | 参数说明 | 备注 |
+| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
+| `td.connect.ip` | string | 用于创建连接,同 `taos_connect` | |
+| `td.connect.user` | string | 用于创建连接,同 `taos_connect` | |
+| `td.connect.pass` | string | 用于创建连接,同 `taos_connect` |
+| `td.connect.port` | integer | 用于创建连接,同 `taos_connect` |
+| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 |
+| `client.id` | string | 客户端 ID | 最大长度:192。 |
+| `auto.offset.reset` | enum | 消费组订阅的初始位置 | 可选:`earliest`, `latest`, `none`(default) |
+| `enable.auto.commit` | boolean | 启用自动提交 | 合法值:`true`, `false`。 |
+| `auto.commit.interval.ms` | integer | 以毫秒为单位的自动提交时间间隔 |
+| `enable.heartbeat.background` | boolean | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | |
+| `experimental.snapshot.enable` | boolean | 从 WAL 开始消费,还是从 TSBS 开始消费 | |
+| `msg.with.table.name` | boolean | 是否允许从消息中解析表名 |
+
+对于不同编程语言,其设置方式如下:
+
+
+
+
+```c
+/* 根据需要,设置消费组 (group.id)、自动提交 (enable.auto.commit)、
+ 自动提交时间间隔 (auto.commit.interval.ms)、用户名 (td.connect.user)、密码 (td.connect.pass) 等参数 */
+tmq_conf_t* conf = tmq_conf_new();
+tmq_conf_set(conf, "enable.auto.commit", "true");
+tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
+tmq_conf_set(conf, "group.id", "cgrpName");
+tmq_conf_set(conf, "td.connect.user", "root");
+tmq_conf_set(conf, "td.connect.pass", "taosdata");
+tmq_conf_set(conf, "auto.offset.reset", "earliest");
+tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+tmq_conf_set(conf, "msg.with.table.name", "true");
+tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
+
+tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
+tmq_conf_destroy(conf);
+```
+
+
+
+
+对于 Java 程序,使用如下配置项:
+
+| 参数名称 | 类型 | 参数说明 |
+| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
+| `bootstrap.servers` | string | 连接地址,如 `localhost:6030` |
+| `value.deserializer` | string | 值解析方法,使用此方法应实现 `com.taosdata.jdbc.tmq.Deserializer` 接口或继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer` 类 |
+| `value.deserializer.encoding` | string | 指定字符串解析的字符集 | |
+
+需要注意:此处使用 `bootstrap.servers` 替代 `td.connect.ip` 和 `td.connect.port`,以提供与 Kafka 一致的接口。
+
+```java
+Properties properties = new Properties();
+properties.setProperty("enable.auto.commit", "true");
+properties.setProperty("auto.commit.interval.ms", "1000");
+properties.setProperty("group.id", "cgrpName");
+properties.setProperty("bootstrap.servers", "127.0.0.1:6030");
+properties.setProperty("td.connect.user", "root");
+properties.setProperty("td.connect.pass", "taosdata");
+properties.setProperty("auto.offset.reset", "earliest");
+properties.setProperty("msg.with.table.name", "true");
+properties.setProperty("value.deserializer", "com.taos.example.MetersDeserializer");
+
+TaosConsumer consumer = new TaosConsumer<>(properties);
+
+/* value deserializer definition. */
+import com.taosdata.jdbc.tmq.ReferenceDeserializer;
+
+public class MetersDeserializer extends ReferenceDeserializer {
+}
+```
+
+
+
+
+
+```go
+config := tmq.NewConfig()
+defer config.Destroy()
+err = config.SetGroupID("test")
+if err != nil {
+ panic(err)
+}
+err = config.SetAutoOffsetReset("earliest")
+if err != nil {
+ panic(err)
+}
+err = config.SetConnectIP("127.0.0.1")
+if err != nil {
+ panic(err)
+}
+err = config.SetConnectUser("root")
+if err != nil {
+ panic(err)
+}
+err = config.SetConnectPass("taosdata")
+if err != nil {
+ panic(err)
+}
+err = config.SetConnectPort("6030")
+if err != nil {
+ panic(err)
+}
+err = config.SetMsgWithTableName(true)
+if err != nil {
+ panic(err)
+}
+err = config.EnableHeartBeat()
+if err != nil {
+ panic(err)
+}
+err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) {
+ if result.ErrCode != 0 {
+ errStr := wrapper.TMQErr2Str(result.ErrCode)
+ err := errors.NewError(int(result.ErrCode), errStr)
+ panic(err)
+ }
+})
+if err != nil {
+ panic(err)
+}
+```
+
+
+
+
+
+```rust
+let mut dsn: Dsn = "taos://".parse()?;
+dsn.set("group.id", "group1");
+dsn.set("client.id", "test");
+dsn.set("auto.offset.reset", "earliest");
+
+let tmq = TmqBuilder::from_dsn(dsn)?;
+
+let mut consumer = tmq.build()?;
+```
+
+
+
+
+
+Python 使用以下配置项创建一个 Consumer 实例。
+
+| 参数名称 | 类型 | 参数说明 | 备注 |
+| :----------------------------: | :----: | -------------------------------------------------------- | ------------------------------------------- |
+| `td_connect_ip` | string | 用于创建连接,同 `taos_connect` | |
+| `td_connect_user` | string | 用于创建连接,同 `taos_connect` | |
+| `td_connect_pass` | string | 用于创建连接,同 `taos_connect` | |
+| `td_connect_port` | string | 用于创建连接,同 `taos_connect` | |
+| `group_id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 |
+| `client_id` | string | 客户端 ID | 最大长度:192。 |
+| `auto_offset_reset` | string | 消费组订阅的初始位置 | 可选:`earliest`, `latest`, `none`(default) |
+| `enable_auto_commit` | string | 启用自动提交 | 合法值:`true`, `false`。 |
+| `auto_commit_interval_ms` | string | 以毫秒为单位的自动提交时间间隔 | |
+| `enable_heartbeat_background` | string | 启用后台心跳,启用后即使长时间不 poll 消息也不会造成离线 | 合法值:`true`, `false` |
+| `experimental_snapshot_enable` | string | 从 WAL 开始消费,还是从 TSBS 开始消费 | 合法值:`true`, `false` |
+| `msg_with_table_name` | string | 是否允许从消息中解析表名 | 合法值:`true`, `false` |
+| `timeout` | int | 消费者拉去的超时时间 | |
+
+
+
+
+
+```js
+// 根据需要,设置消费组 (group.id)、自动提交 (enable.auto.commit)、
+// 自动提交时间间隔 (auto.commit.interval.ms)、用户名 (td.connect.user)、密码 (td.connect.pass) 等参数
+
+let consumer = taos.consumer({
+ 'enable.auto.commit': 'true',
+ 'auto.commit.interval.ms','1000',
+ 'group.id': 'tg2',
+ 'td.connect.user': 'root',
+ 'td.connect.pass': 'taosdata',
+ 'auto.offset.reset','earliest',
+ 'msg.with.table.name': 'true',
+ 'td.connect.ip','127.0.0.1',
+ 'td.connect.port','6030'
+ });
+```
+
+
+
+
+
+```csharp
+using TDengineTMQ;
+
+// 根据需要,设置消费组 (GourpId)、自动提交 (EnableAutoCommit)、
+// 自动提交时间间隔 (AutoCommitIntervalMs)、用户名 (TDConnectUser)、密码 (TDConnectPasswd) 等参数
+var cfg = new ConsumerConfig
+ {
+ EnableAutoCommit = "true"
+ AutoCommitIntervalMs = "1000"
+ GourpId = "TDengine-TMQ-C#",
+ TDConnectUser = "root",
+ TDConnectPasswd = "taosdata",
+ AutoOffsetReset = "earliest"
+ MsgWithTableName = "true",
+ TDConnectIp = "127.0.0.1",
+ TDConnectPort = "6030"
+ };
+
+var consumer = new ConsumerBuilder(cfg).Build();
+
+```
+
+
+
+
+
+上述配置中包括 consumer group ID,如果多个 consumer 指定的 consumer group ID 一样,则自动形成一个 consumer group,共享消费进度。
+
+## 订阅 *topics*
+
+一个 consumer 支持同时订阅多个 topic。
+
+
+
+
+```c
+// 创建订阅 topics 列表
+tmq_list_t* topicList = tmq_list_new();
+tmq_list_append(topicList, "topicName");
+// 启动订阅
+tmq_subscribe(tmq, topicList);
+tmq_list_destroy(topicList);
+
+```
+
+
+
+
+```java
+List topics = new ArrayList<>();
+topics.add("tmq_topic");
+consumer.subscribe(topics);
+```
+
+
+
+
+```go
+consumer, err := tmq.NewConsumer(config)
+if err != nil {
+ panic(err)
+}
+err = consumer.Subscribe([]string{"example_tmq_topic"})
+if err != nil {
+ panic(err)
+}
+```
+
+
+
+
+```rust
+consumer.subscribe(["tmq_meters"]).await?;
+```
+
+
+
+
+
+```python
+consumer = TaosConsumer('topic_ctb_column', group_id='vg2')
+```
+
+
+
+
+
+```js
+// 创建订阅 topics 列表
+let topics = ['topic_test']
+
+// 启动订阅
+consumer.subscribe(topics);
+```
+
+
+
+
+
+```csharp
+// 创建订阅 topics 列表
+List topics = new List();
+topics.add("tmq_topic");
+// 启动订阅
+consumer.Subscribe(topics);
+```
+
+
+
+
+
+## 消费
+
+以下代码展示了不同语言下如何对 TMQ 消息进行消费。
+
+
+
+
+```c
+// 消费数据
+while (running) {
+ TAOS_RES* msg = tmq_consumer_poll(tmq, timeOut);
+ msg_process(msg);
+}
+```
+
+这里是一个 **while** 循环,每调用一次 tmq_consumer_poll(),获取一个消息,该消息与普通查询返回的结果集完全相同,可以使用相同的解析 API 完成消息内容的解析。
+
+
+
+
+```java
+while(running){
+ ConsumerRecords meters = consumer.poll(Duration.ofMillis(100));
+ for (Meters meter : meters) {
+ processMsg(meter);
+ }
+}
+```
+
+
+
+
+
+```go
+for {
+ result, err := consumer.Poll(time.Second)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(result)
+ consumer.Commit(context.Background(), result.Message)
+ consumer.FreeMessage(result.Message)
+}
+```
+
+
+
+
+
+```rust
+{
+ let mut stream = consumer.stream();
+
+ while let Some((offset, message)) = stream.try_next().await? {
+ // get information from offset
+
+ // the topic
+ let topic = offset.topic();
+ // the vgroup id, like partition id in kafka.
+ let vgroup_id = offset.vgroup_id();
+ println!("* in vgroup id {vgroup_id} of topic {topic}\n");
+
+ if let Some(data) = message.into_data() {
+ while let Some(block) = data.fetch_raw_block().await? {
+ // one block for one table, get table name if needed
+ let name = block.table_name();
+ let records: Vec = block.deserialize().try_collect()?;
+ println!(
+ "** table: {}, got {} records: {:#?}\n",
+ name.unwrap(),
+ records.len(),
+ records
+ );
+ }
+ }
+ consumer.commit(offset).await?;
+ }
+}
+```
+
+
+
+
+```python
+for msg in consumer:
+ for row in msg:
+ print(row)
+```
+
+
+
+
+
+```js
+while(true){
+ msg = consumer.consume(200);
+ // process message(consumeResult)
+ console.log(msg.topicPartition);
+ console.log(msg.block);
+ console.log(msg.fields)
+}
+```
+
+
+
+
+
+```csharp
+// 消费数据
+while (true)
+{
+ var consumerRes = consumer.Consume(100);
+ // process ConsumeResult
+ ProcessMsg(consumerRes);
+ consumer.Commit(consumerRes);
+}
+```
+
+
+
+
+
+## 结束消费
+
+消费结束后,应当取消订阅。
+
+
+
+
+```c
+/* 取消订阅 */
+tmq_unsubscribe(tmq);
+
+/* 关闭消费者对象 */
+tmq_consumer_close(tmq);
+```
+
+
+
+
+```java
+/* 取消订阅 */
+consumer.unsubscribe();
+
+/* 关闭消费 */
+consumer.close();
+```
+
+
+
+
+
+```go
+consumer.Close()
+```
+
+
+
+
+
+```rust
+consumer.unsubscribe().await;
+```
+
+
+
+
+
+```py
+# 取消订阅
+consumer.unsubscribe()
+# 关闭消费
+consumer.close()
+```
+
+
+
+
+```js
+consumer.unsubscribe();
+consumer.close();
+```
+
+
+
+
+
+```csharp
+// 取消订阅
+consumer.Unsubscribe();
+
+// 关闭消费
+consumer.Close();
+```
+
+
+
+
+
+## 删除 *topic*
+
+如果不再需要订阅数据,可以删除 topic,需要注意:只有当前未在订阅中的 TOPIC 才能被删除。
+
+```sql
+/* 删除 topic */
+DROP TOPIC topic_name;
+```
+
+## 状态查看
+
+1、*topics*:查询已经创建的 topic
+
+```sql
+SHOW TOPICS;
+```
+
+2、consumers:查询 consumer 的状态及其订阅的 topic
+
+```sql
+SHOW CONSUMERS;
+```
+
+3、subscriptions:查询 consumer 与 vgroup 之间的分配关系
+
+```sql
+SHOW SUBSCRIPTIONS;
+```
+
+## 示例代码
+
+以下是各语言的完整示例代码。
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/zh/07-develop/_sub_c.mdx b/docs/zh/07-develop/_sub_c.mdx
index da492a0269..b0667268e9 100644
--- a/docs/zh/07-develop/_sub_c.mdx
+++ b/docs/zh/07-develop/_sub_c.mdx
@@ -1,3 +1,3 @@
```c
-{{#include docs/examples/c/subscribe_demo.c}}
-```
\ No newline at end of file
+{{#include docs/examples/c/tmq_example.c}}
+```
diff --git a/docs/zh/07-develop/_sub_java.mdx b/docs/zh/07-develop/_sub_java.mdx
index 9365941679..d14b5fd609 100644
--- a/docs/zh/07-develop/_sub_java.mdx
+++ b/docs/zh/07-develop/_sub_java.mdx
@@ -3,7 +3,9 @@
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
-:::note
-目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。
-
-:::
\ No newline at end of file
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
+```
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
+```
\ No newline at end of file
diff --git a/docs/zh/07-develop/_sub_python.mdx b/docs/zh/07-develop/_sub_python.mdx
index 490b76fca6..1309da5b41 100644
--- a/docs/zh/07-develop/_sub_python.mdx
+++ b/docs/zh/07-develop/_sub_python.mdx
@@ -1,3 +1,3 @@
```py
-{{#include docs/examples/python/subscribe_demo.py}}
-```
\ No newline at end of file
+{{#include docs/examples/python/tmq_example.py}}
+```
diff --git a/docs/zh/07-develop/_sub_rust.mdx b/docs/zh/07-develop/_sub_rust.mdx
index afb8d79daa..0021666a70 100644
--- a/docs/zh/07-develop/_sub_rust.mdx
+++ b/docs/zh/07-develop/_sub_rust.mdx
@@ -1,3 +1,3 @@
-```rs
+```rust
{{#include docs/examples/rust/nativeexample/examples/subscribe_demo.rs}}
-```
\ No newline at end of file
+```
diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md
index 8ac6ee3b87..628086f5a9 100644
--- a/docs/zh/12-taos-sql/01-data-type.md
+++ b/docs/zh/12-taos-sql/01-data-type.md
@@ -34,7 +34,7 @@ CREATE DATABASE db_name PRECISION 'ns';
| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 |
| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
-| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 655357] |
+| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 65535] |
| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
| 13 | BOOL | 1 | 布尔型,{true, false} |
diff --git a/docs/zh/12-taos-sql/03-table.md b/docs/zh/12-taos-sql/03-table.md
index 1e20f73541..0e104bb7b6 100644
--- a/docs/zh/12-taos-sql/03-table.md
+++ b/docs/zh/12-taos-sql/03-table.md
@@ -110,7 +110,7 @@ alter_table_option: {
对普通表可以进行如下修改操作
1. ADD COLUMN:添加列。
2. DROP COLUMN:删除列。
-3. ODIFY COLUMN:修改列定义,如果数据列的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。
+3. MODIFY COLUMN:修改列定义,如果数据列的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。
4. RENAME COLUMN:修改列名称。
### 增加列
@@ -195,4 +195,4 @@ SHOW CREATE TABLE tb_name;
```
DESCRIBE [db_name.]tb_name;
-```
\ No newline at end of file
+```
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 8aa6c43747..5312d7d2f3 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -103,7 +103,7 @@ SELECT d1001.* FROM d1001,d1003 WHERE d1001.ts = d1003.ts;
在超级表和子表的查询中可以指定 _标签列_,且标签列的值会与普通列的数据一起返回。
```sql
-ELECT location, groupid, current FROM d1001 LIMIT 2;
+SELECT location, groupid, current FROM d1001 LIMIT 2;
```
### 结果去重
diff --git a/docs/zh/12-taos-sql/13-tmq.md b/docs/zh/12-taos-sql/13-tmq.md
index 4d9c475a38..b05d2bf680 100644
--- a/docs/zh/12-taos-sql/13-tmq.md
+++ b/docs/zh/12-taos-sql/13-tmq.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 消息队列
-title: 消息队列
+sidebar_label: 数据订阅
+title: 数据订阅
---
TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用户的解决方案。
@@ -8,24 +8,17 @@ TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用
## 创建订阅主题
```sql
-CREATE TOPIC [IF NOT EXISTS] topic_name AS {subquery | DATABASE db_name | STABLE stb_name };
+CREATE TOPIC [IF NOT EXISTS] topic_name AS subquery;
```
-订阅主题包括三种:列订阅、超级表订阅和数据库订阅。
-**列订阅是**用 subquery 描述,支持过滤和标量函数和 UDF 标量函数,不支持 JOIN、GROUP BY、窗口切分子句、聚合函数和 UDF 聚合函数。列订阅规则如下:
+TOPIC 支持过滤和标量函数和 UDF 标量函数,不支持 JOIN、GROUP BY、窗口切分子句、聚合函数和 UDF 聚合函数。列订阅规则如下:
1. TOPIC 一旦创建则返回结果的字段确定
2. 被订阅或用于计算的列不可被删除、修改
3. 列可以新增,但新增的列不出现在订阅结果字段中
4. 对于 select \*,则订阅展开为创建时所有的列(子表、普通表为数据列,超级表为数据列加标签列)
-**超级表订阅和数据库订阅**规则如下:
-
-1. 被订阅主体的 schema 变更不受限
-2. 返回消息中 schema 是块级别的,每块的 schema 可能不一样
-3. 列变更后写入的数据若未落盘,将以写入时的 schema 返回
-4. 列变更后写入的数据若未已落盘,将以落盘时的 schema 返回
## 删除订阅主题
diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md
new file mode 100644
index 0000000000..d653c59a5c
--- /dev/null
+++ b/docs/zh/12-taos-sql/29-changes.md
@@ -0,0 +1,95 @@
+---
+sidebar_label: 3.0 版本语法变更
+title: 3.0 版本语法变更
+description: "TDengine 3.0 版本的语法变更说明"
+---
+
+## SQL 基本元素变更
+
+| # | **元素** | **差异性
** | **说明** |
+| - | :------- | :-------- | :------- |
+| 1 | VARCHAR | 新增 | BINARY类型的别名。
+| 2 | TIMESTAMP字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。
+| 3 | _ROWTS伪列 | 新增 | 表示时间戳主键。是_C0伪列的别名。
+| 4 | INFORMATION_SCHEMA | 新增 | 包含各种SCHEMA定义的系统数据库。
+| 5 | PERFORMANCE_SCHEMA | 新增 | 包含运行信息的系统数据库。
+| 6 | 连续查询 | 废除 | 不再支持连续查询。相关的各种语法和接口废除。
+| 7 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT的各个子句均全面支持符合语法语义的混合运算。
+| 8 | 标签运算 | 新增 |在查询中,标签列可以像普通列一样参与各种运算,用于各种子句。
+| 9 | 时间线子句和时间函数用于超级表查询 | 增强 |没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+
+## SQL 语句变更
+
+在 TDengine 中,普通表的数据模型中可使用以下数据类型。
+
+| # | **语句** | **差异性
** | **说明** |
+| - | :------- | :-------- | :------- |
+| 1 | ALTER ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 2 | ALTER ALL DNODES | 新增 | 修改所有DNODE的参数。
+| 3 | ALTER DATABASE | 调整 | 废除- QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。3.0.0版本STRICT暂不支持修改。
- BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。
- UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。
- CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。
- COMP:3.0版本暂不支持修改。
新增 - CACHEMODEL:表示是否在内存中缓存子表的最近数据。
- CACHESIZE:表示缓存子表最近数据的内存大小。
- WAL_FSYNC_PERIOD:代替原FSYNC参数。
- WAL_LEVEL:代替原WAL参数。
调整 - REPLICA:3.0.0版本暂不支持修改。
- KEEP:3.0版本新增支持带单位的设置方式。
+| 4 | ALTER STABLE | 调整 | 废除- CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。
新增 - RENAME TAG:代替原CHANGE TAG子句。
- COMMENT:修改超级表的注释。
+| 5 | ALTER TABLE | 调整 | 废除- CHANGE TAG:修改标签列的名称。3.0版本使用RENAME TAG代替。
新增 - RENAME TAG:代替原CHANGE TAG子句。
- COMMENT:修改表的注释。
- TTL:修改表的生命周期。
+| 6 | ALTER USER | 调整 | 废除- PRIVILEGE:修改用户权限。3.0版本使用GRANT和REVOKE来授予和回收权限。
新增 - ENABLE:启用或停用此用户。
- SYSINFO:修改用户是否可查看系统信息。
+| 7 | COMPACT VNODES | 暂不支持 | 整理指定VNODE的数据。3.0.0版本暂不支持。
+| 8 | CREATE ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 9 | CREATE DATABASE | 调整 | 废除- BLOCKS:VNODE使用的内存块数。3.0版本使用BUFFER来表示VNODE写入内存池的大小。
- CACHE:VNODE使用的内存块的大小。3.0版本使用BUFFER来表示VNODE写入内存池的大小。
- CACHELAST:缓存最新一行数据的模式。3.0版本用CACHEMODEL代替。
- DAYS:数据文件存储数据的时间跨度。3.0版本使用DURATION代替。
- FSYNC:当 WAL 设置为 2 时,执行 fsync 的周期。3.0版本使用WAL_FSYNC_PERIOD代替。
- QUORUM:写入需要的副本确认数。3.0版本使用STRICT来指定强一致还是弱一致。
- UPDATE:更新操作的支持模式。3.0版本所有数据库都支持部分列更新。
- WAL:WAL 级别。3.0版本使用WAL_LEVEL代替。
新增 - BUFFER:一个 VNODE 写入内存池大小。
- CACHEMODEL:表示是否在内存中缓存子表的最近数据。
- CACHESIZE:表示缓存子表最近数据的内存大小。
- DURATION:代替原DAYS参数。新增支持带单位的设置方式。
- PAGES:一个 VNODE 中元数据存储引擎的缓存页个数。
- PAGESIZE:一个 VNODE 中元数据存储引擎的页大小。
- RETENTIONS:表示数据的聚合周期和保存时长。
- STRICT:表示数据同步的一致性要求。
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表。
- VGROUPS:数据库中初始VGROUP的数目。
- WAL_FSYNC_PERIOD:代替原FSYNC参数。
- WAL_LEVEL:代替原WAL参数。
- WAL_RETENTION_PERIOD:wal文件的额外保留策略,用于数据订阅。
- WAL_RETENTION_SIZE:wal文件的额外保留策略,用于数据订阅。
- WAL_ROLL_PERIOD:wal文件切换时长。
- WAL_SEGMENT_SIZE:wal单个文件大小。
调整 - KEEP:3.0版本新增支持带单位的设置方式。
+| 10 | CREATE DNODE | 调整 | 新增主机名和端口号分开指定语法- CREATE DNODE dnode_host_name PORT port_val
+| 11 | CREATE INDEX | 新增 | 创建SMA索引。
+| 12 | CREATE MNODE | 新增 | 创建管理节点。
+| 13 | CREATE QNODE | 新增 | 创建查询节点。
+| 14 | CREATE STABLE | 调整 | 新增表参数语法COMMENT:表注释。
+| 15 | CREATE STREAM | 新增 | 创建流。
+| 16 | CREATE TABLE | 调整 | 新增表参数语法- COMMENT:表注释。
- WATERMARK:指定窗口的关闭时间。
- MAX_DELAY:用于控制推送计算结果的最大延迟。
- ROLLUP:指定的聚合函数,提供基于多层级的降采样聚合结果。
- SMA:提供基于数据块的自定义预计算功能。
- TTL:用来指定表的生命周期的参数。
+| 17 | CREATE TOPIC | 新增 | 创建订阅主题。
+| 18 | DROP ACCOUNT | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 19 | DROP CONSUMER GROUP | 新增 | 删除消费组。
+| 20 | DROP INDEX | 新增 | 删除索引。
+| 21 | DROP MNODE | 新增 | 创建管理节点。
+| 22 | DROP QNODE | 新增 | 创建查询节点。
+| 23 | DROP STREAM | 新增 | 删除流。
+| 24 | DROP TABLE | 调整 | 新增批量删除语法
+| 25 | DROP TOPIC | 新增 | 删除订阅主题。
+| 26 | EXPLAIN | 新增 | 查看查询语句的执行计划。
+| 27 | GRANT | 新增 | 授予用户权限。
+| 28 | KILL TRANSACTION | 新增 | 终止管理节点的事务。
+| 29 | KILL STREAM | 废除 | 终止连续查询。3.0版本不再支持连续查询,而是用更通用的流计算来代替。
+| 30 | MERGE VGROUP | 新增 | 合并VGROUP。
+| 31 | REVOKE | 新增 | 回收用户权限。
+| 32 | SELECT | 调整 | - SELECT关闭隐式结果列,输出列均需要由SELECT子句来指定。
- DISTINCT功能全面支持。2.x版本只支持对标签列去重,并且不可以和JOIN、GROUP BY等子句混用。
- JOIN功能增强。增加支持:JOIN后WHERE条件中有OR条件;JOIN后的多表运算;JOIN后的多表GROUP BY。
- FROM后子查询功能大幅增强。不限制子查询嵌套层数;支持子查询和UNION ALL混合使用;移除其他一些之前版本的语法限制。
- WHERE后可以使用任意的标量表达式。
- GROUP BY功能增强。支持任意标量表达式及其组合的分组。
- SESSION可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
- STATE_WINDOW可以用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
- ORDER BY功能大幅增强。不再必须和GROUP BY子句一起使用;不再有排序表达式个数的限制;增加支持NULLS FIRST/LAST语法功能;支持符合语法语义的任意表达式。
- 新增PARTITION BY语法。替代原来的GROUP BY tags。
+| 33 | SHOW ACCOUNTS | 废除 | 2.x中为企业版功能,3.0不再支持。语法暂时保留了,执行报“This statement is no longer supported”错误。
+| 34 | SHOW APPS |新增 | 显示接入集群的应用(客户端)信息。
+| 35 | SHOW CONSUMERS | 新增 | 显示当前数据库下所有活跃的消费者的信息。
+| 36 | SHOW DATABASES | 调整 | 3.0版本只显示数据库名。
+| 37 | SHOW FUNCTIONS | 调整 | 3.0版本只显示自定义函数名。
+| 38 | SHOW LICENCE | 新增 | 和SHOW GRANTS 命令等效。
+| 39 | SHOW INDEXES | 新增 | 显示已创建的索引。
+| 40 | SHOW LOCAL VARIABLES | 新增 | 显示当前客户端配置参数的运行值。
+| 41 | SHOW MODULES | 废除 | 显示当前系统中所安装的组件的信息。
+| 42 | SHOW QNODES | 新增 | 显示当前系统中QNODE的信息。
+| 43 | SHOW STABLES | 调整 | 3.0版本只显示超级表名。
+| 44 | SHOW STREAMS | 调整 | 2.x版本此命令显示系统中已创建的连续查询的信息。3.0版本废除了连续查询,用流代替。此命令显示已创建的流。
+| 45 | SHOW SUBSCRIPTIONS | 新增 | 显示当前数据库下的所有的订阅关系
+| 46 | SHOW TABLES | 调整 | 3.0版本只显示表名。
+| 47 | SHOW TABLE DISTRIBUTED | 新增 | 显示表的数据分布信息。代替2.x版本中的SELECT _block_dist() FROM { tb_name | stb_name }方式。
+| 48 | SHOW TOPICS | 新增 | 显示当前数据库下的所有订阅主题。
+| 49 | SHOW TRANSACTIONS | 新增 | 显示当前系统中正在执行的事务的信息。
+| 50 | SHOW DNODE VARIABLES | 新增 |显示指定DNODE的配置参数。
+| 51 | SHOW VNODES | 暂不支持 | 显示当前系统中VNODE的信息。3.0.0版本暂不支持。
+| 52 | SPLIT VGROUP | 新增 | 拆分VGROUP。
+| 53 | TRIM DATABASE | 新增 | 删除过期数据,并根据多级存储的配置归整数据。
+
+## SQL 函数变更
+
+| # | **函数** | ** 差异性
** | **说明** |
+| - | :------- | :-------- | :------- |
+| 1 | TWA | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 2 | IRATE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 3 | LEASTSQUARES | 增强 | 可以用于超级表了。
+| 4 | ELAPSED | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 5 | DIFF | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 6 | DERIVATIVE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 7 | CSUM | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 8 | MAVG | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 9 | SAMPLE | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 10 | STATECOUNT | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 11 | STATEDURATION | 增强 | 可以直接用于超级表了。没有PARTITION BY时,超级表的数据会被合并成一条时间线。
diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md
index 900fff1ba2..821679551c 100644
--- a/docs/zh/12-taos-sql/index.md
+++ b/docs/zh/12-taos-sql/index.md
@@ -3,7 +3,7 @@ title: TAOS SQL
description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容"
---
-本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。
+本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
diff --git a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx
index ba43aa30fd..4b9171c07d 100644
--- a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx
@@ -2,7 +2,7 @@
title: REST API
---
-为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
+为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见 [视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
:::note
与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。
@@ -20,8 +20,10 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安
下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号:
-```html
-curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
+```bash
+curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
+ -d "select name, ntables, status from information_schema.ins_databases;" \
+ h1.taosdata.com:6041/rest/sql
```
返回值结果如下表示验证通过:
@@ -35,188 +37,27 @@ curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.t
"VARCHAR",
64
],
- [
- "create_time",
- "TIMESTAMP",
- 8
- ],
- [
- "vgroups",
- "SMALLINT",
- 2
- ],
[
"ntables",
"BIGINT",
8
],
- [
- "replica",
- "TINYINT",
- 1
- ],
- [
- "strict",
- "VARCHAR",
- 4
- ],
- [
- "duration",
- "VARCHAR",
- 10
- ],
- [
- "keep",
- "VARCHAR",
- 32
- ],
- [
- "buffer",
- "INT",
- 4
- ],
- [
- "pagesize",
- "INT",
- 4
- ],
- [
- "pages",
- "INT",
- 4
- ],
- [
- "minrows",
- "INT",
- 4
- ],
- [
- "maxrows",
- "INT",
- 4
- ],
- [
- "comp",
- "TINYINT",
- 1
- ],
- [
- "precision",
- "VARCHAR",
- 2
- ],
[
"status",
"VARCHAR",
10
- ],
- [
- "retention",
- "VARCHAR",
- 60
- ],
- [
- "single_stable",
- "BOOL",
- 1
- ],
- [
- "cachemodel",
- "VARCHAR",
- 11
- ],
- [
- "cachesize",
- "INT",
- 4
- ],
- [
- "wal_level",
- "TINYINT",
- 1
- ],
- [
- "wal_fsync_period",
- "INT",
- 4
- ],
- [
- "wal_retention_period",
- "INT",
- 4
- ],
- [
- "wal_retention_size",
- "BIGINT",
- 8
- ],
- [
- "wal_roll_period",
- "INT",
- 4
- ],
- [
- "wal_seg_size",
- "BIGINT",
- 8
]
],
"data": [
[
"information_schema",
- null,
- null,
- 14,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- "ready",
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null
+ 16,
+ "ready"
],
[
"performance_schema",
- null,
- null,
- 3,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- "ready",
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null,
- null
+ 9,
+ "ready"
]
],
"rows": 2
@@ -231,21 +72,21 @@ http://:/rest/sql/[db_name]
参数说明:
-- fqnd: 集群中的任一台主机 FQDN 或 IP 地址
-- port: 配置文件中 httpPort 配置项,缺省为 6041
+- fqnd: 集群中的任一台主机 FQDN 或 IP 地址。
+- port: 配置文件中 httpPort 配置项,缺省为 6041。
- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。
例如:`http://h1.taos.com:6041/rest/sql/test` 是指向地址为 `h1.taos.com:6041` 的 URL,并将默认使用的数据库库名设置为 `test`。
HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。
-- [自定义身份认证信息](#自定义授权码)如下所示
+- [自定义身份认证信息](#自定义授权码)如下所示:
```text
Authorization: Taosd
```
-- Basic 身份认证信息如下所示
+- Basic 身份认证信息如下所示:
```text
Authorization: Basic
@@ -259,13 +100,13 @@ HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据
curl -L -H "Authorization: Basic " -d "" :/rest/sql/[db_name]
```
-或者
+或者,
```bash
curl -L -u username:password -d "" :/rest/sql/[db_name]
```
-其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`
+其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==`。
## HTTP 返回格式
@@ -282,27 +123,9 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
### HTTP body 结构
-
-
- 执行结果 |
- 说明 |
- 样例 |
-
-
- 正确执行 |
-
- code:(int)0 代表成功
-
-
- column_meta:([][3]any)列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)
-
-
- rows:(int)数据返回行数
-
-
- data:([][]any)具体数据内容
- |
-
+#### 正确执行
+
+样例:
```json
{
@@ -313,23 +136,16 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
}
```
- |
-
-
- 正确查询 |
-
- code:(int)0 代表成功
-
-
- column_meta:([][3]any) 列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)
-
-
- rows:(int)数据返回行数
-
-
- data:([][]any)具体数据内容
- |
-
+说明:
+
+- code:(`int`)0 代表成功。
+- column_meta:(`[1][3]any`)只返回 `[["affected_rows", "INT", 4]]`。
+- rows:(`int`)只返回 `1`。
+- data:(`[][]any`)返回受影响行数。
+
+#### 正确查询
+
+样例:
```json
{
@@ -385,17 +201,35 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
}
```
- |
-
-
- 错误 |
-
- code:(int)错误码
-
-
- desc:(string)错误描述
- |
-
+说明:
+
+- code:(`int`)0 代表成功。
+- column_meta:(`[][3]any`) 列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)。
+- rows:(`int`)数据返回行数。
+- data:(`[][]any`)具体数据内容(时间格式仅支持 RFC3339,结果集为 0 时区)。
+
+列类型使用如下字符串:
+
+- "NULL"
+- "BOOL"
+- "TINYINT"
+- "SMALLINT"
+- "INT"
+- "BIGINT"
+- "FLOAT"
+- "DOUBLE"
+- "VARCHAR"
+- "TIMESTAMP"
+- "NCHAR"
+- "TINYINT UNSIGNED"
+- "SMALLINT UNSIGNED"
+- "INT UNSIGNED"
+- "BIGINT UNSIGNED"
+- "JSON"
+
+#### 错误
+
+样例:
```json
{
@@ -404,30 +238,10 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
}
```
- |
-
-
+说明:
-### 说明
-
-- 时间格式仅支持 RFC3339,结果集为 0 时区
-- 列类型使用如下字符串:
- > "NULL"
- > "BOOL"
- > "TINYINT"
- > "SMALLINT"
- > "INT"
- > "BIGINT"
- > "FLOAT"
- > "DOUBLE"
- > "VARCHAR"
- > "TIMESTAMP"
- > "NCHAR"
- > "TINYINT UNSIGNED"
- > "SMALLINT UNSIGNED"
- > "INT UNSIGNED"
- > "BIGINT UNSIGNED"
- > "JSON"
+- code:(`int`)错误码。
+- desc:(`string`)错误描述。
## 自定义授权码
@@ -439,11 +253,9 @@ curl http://:/rest/login//
其中,`fqdn` 是 TDengine 数据库的 FQDN 或 IP 地址,`port` 是 TDengine 服务的端口号,`username` 为数据库用户名,`password` 为数据库密码,返回值为 JSON 格式,各字段含义如下:
-- status:请求结果的标志位
-
-- code:返回值代码
-
-- desc:授权码
+- status:请求结果的标志位。
+- code:返回值代码。
+- desc:授权码。
获取授权码示例:
diff --git a/docs/zh/14-reference/03-connector/_linux_install.mdx b/docs/zh/14-reference/03-connector/_linux_install.mdx
index eb7f683288..c3ddff53cd 100644
--- a/docs/zh/14-reference/03-connector/_linux_install.mdx
+++ b/docs/zh/14-reference/03-connector/_linux_install.mdx
@@ -1,10 +1,10 @@
-import PkgList from "/components/PkgList";
+import PkgListV3 from "/components/PkgListV3";
1. 下载客户端安装包
-
+
- [所有下载](https://www.taosdata.com/cn/all-downloads/)
+ [所有下载](../../releases)
2. 解压缩软件包
diff --git a/docs/zh/14-reference/03-connector/_windows_install.mdx b/docs/zh/14-reference/03-connector/_windows_install.mdx
index 755f96b2d7..9fdefa04c0 100644
--- a/docs/zh/14-reference/03-connector/_windows_install.mdx
+++ b/docs/zh/14-reference/03-connector/_windows_install.mdx
@@ -1,11 +1,10 @@
-import PkgList from "/components/PkgList";
+import PkgListV3 from "/components/PkgListV3";
1. 下载客户端安装包
-
-
- [所有下载](https://www.taosdata.com/cn/all-downloads/)
+
+ [所有下载](../../releases)
2. 执行安装程序,按提示选择默认值,完成安装
3. 安装路径
diff --git a/docs/zh/14-reference/03-connector/cpp.mdx b/docs/zh/14-reference/03-connector/cpp.mdx
index 3a8367ef33..bd5776d035 100644
--- a/docs/zh/14-reference/03-connector/cpp.mdx
+++ b/docs/zh/14-reference/03-connector/cpp.mdx
@@ -404,47 +404,3 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
**支持版本**
该功能接口从 2.3.0.0 版本开始支持。
-
-### 订阅和消费 API
-
-订阅 API 目前支持订阅一张或多张表,并通过定期轮询的方式不断获取写入表中的最新数据。
-
-- `TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval)`
-
- 该函数负责启动订阅服务,成功时返回订阅对象,失败时返回 `NULL`,其参数为:
-
- - taos:已经建立好的数据库连接
- - restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
- - topic:订阅的主题(即名称),此参数是订阅的唯一标识
- - sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
- - fp:收到查询结果时的回调函数(稍后介绍函数原型),只在异步调用时使用,同步调用时此参数应该传 `NULL`
- - param:调用回调函数时的附加参数,系统 API 将其原样传递到回调函数,不进行任何处理
- - interval:轮询周期,单位为毫秒。异步调用时,将根据此参数周期性的调用回调函数,为避免对系统性能造成影响,不建议将此参数设置的过小;同步调用时,如两次调用 `taos_consume()` 的间隔小于此周期,API 将会阻塞,直到时间间隔超过此周期。
-
-- `typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code)`
-
- 异步模式下,回调函数的原型,其参数为:
-
- - tsub:订阅对象
- - res:查询结果集,注意结果集中可能没有记录
- - param:调用 `taos_subscribe()` 时客户程序提供的附加参数
- - code:错误码
-
- :::note
- 在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。
-
- :::
-
-- `TAOS_RES *taos_consume(TAOS_SUB *tsub)`
-
- 同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用 `taos_consume()` 的间隔小于订阅的轮询周期,API 将会阻塞,直到时间间隔超过此周期。如果数据库有新记录到达,该 API 将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此 API。
-
- :::note
- 在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。
-
- :::
-
-- `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)`
-
- 取消订阅。 如参数 `keepProgress` 不为 0,API 会保留订阅的进度信息,后续调用 `taos_subscribe()` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。
-
diff --git a/docs/zh/14-reference/03-connector/java.mdx b/docs/zh/14-reference/03-connector/java.mdx
index c9d74dcaeb..183994313e 100644
--- a/docs/zh/14-reference/03-connector/java.mdx
+++ b/docs/zh/14-reference/03-connector/java.mdx
@@ -93,12 +93,12 @@ Maven 项目中,在 pom.xml 中添加以下依赖:
可以通过下载 TDengine 的源码,自己编译最新版本的 Java connector
```shell
-git clone https://github.com/taosdata/taos-connector-jdbc.git --branch 2.0
+git clone https://github.com/taosdata/taos-connector-jdbc.git
cd taos-connector-jdbc
mvn clean install -Dmaven.test.skip=true
```
-编译后,在 target 目录下会产生 taos-jdbcdriver-2.0.XX-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
+编译后,在 target 目录下会产生 taos-jdbcdriver-3.0.*-dist.jar 的 jar 包,并自动将编译的 jar 文件放在本地的 Maven 仓库中。
@@ -131,7 +131,7 @@ url 中的配置参数如下:
- charset:客户端使用的字符集,默认值为系统字符集。
- locale:客户端语言环境,默认值系统当前 locale。
- timezone:客户端使用的时区,默认值为系统当前时区。
-- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。
+- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:true。开启批量拉取同时获取一批数据在查询数据量较大时批量拉取可以有效的提升查询性能。
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败将继续执行下面的 SQL。false:不再执行失败 SQL 后的任何语句。默认值为:false。
JDBC 原生连接的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。
@@ -198,7 +198,7 @@ url 中的配置参数如下:
- user:登录 TDengine 用户名,默认值 'root'。
- password:用户登录密码,默认值 'taosdata'。
-- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
+- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。JDBC REST 连接支持批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。
- charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。
- batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。
- httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。
@@ -216,7 +216,7 @@ url 中的配置参数如下:
INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6);
```
-- 从 taos-jdbcdriver-2.0.36 开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
+- 如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);
:::
@@ -230,7 +230,7 @@ INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFra
**注意**:
- 应用中设置的 client parameter 为进程级别的,即如果要更新 client 的参数,需要重启应用。这是因为 client parameter 是全局参数,仅在应用程序的第一次设置生效。
-- 以下示例代码基于 taos-jdbcdriver-2.0.36。
+- 以下示例代码基于 taos-jdbcdriver-3.0.0。
```java
public Connection getConn() throws Exception{
@@ -367,7 +367,7 @@ TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据
**注意**:
- JDBC REST 连接目前不支持参数绑定
-- 以下示例代码基于 taos-jdbcdriver-2.0.36
+- 以下示例代码基于 taos-jdbcdriver-3.0.0
- binary 类型数据需要调用 setString 方法,nchar 类型数据需要调用 setNString 方法
- setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽
@@ -635,7 +635,7 @@ TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协
**注意**:
- JDBC REST 连接目前不支持无模式写入
-- 以下示例代码基于 taos-jdbcdriver-2.0.36
+- 以下示例代码基于 taos-jdbcdriver-3.0.0
```java
public class SchemalessInsertTest {
@@ -666,7 +666,7 @@ public class SchemalessInsertTest {
}
```
-### 订阅
+### 数据订阅
TDengine Java 连接器支持订阅功能,应用 API 如下:
@@ -712,14 +712,19 @@ while(true) {
}
```
-`poll` 每次调用获取一个消息。请按需选择合理的调用 `poll` 的频率(如例子中的 `Duration.ofMillis(100)`),否则会给服务端造成不必要的压力。
+`poll` 每次调用获取一个消息。
#### 关闭订阅
```java
+// 取消订阅
+consumer.unsubscribe();
+// 关闭消费
consumer.close()
```
+详情请参考:[数据订阅](../../../develop/tmq)
+
### 使用示例如下:
```java
@@ -734,7 +739,7 @@ public abstract class ConsumerLoop {
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
config.setProperty("group.id", "group1");
- config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
+ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@@ -754,8 +759,9 @@ public abstract class ConsumerLoop {
process(record);
}
}
+ consumer.unsubscribe();
} finally {
- consumer.close();
+ consumer.close();
shutdownLatch.countDown();
}
}
@@ -765,11 +771,11 @@ public abstract class ConsumerLoop {
shutdownLatch.await();
}
- static class ResultDeserializer extends ReferenceDeserializer {
+ public static class ResultDeserializer extends ReferenceDeserializer {
}
- static class ResultBean {
+ public static class ResultBean {
private Timestamp ts;
private int speed;
@@ -875,6 +881,7 @@ public static void main(String[] args) throws Exception {
| taos-jdbcdriver 版本 | 主要变化 |
| :------------------: | :----------------------------: |
+| 3.0.0 | 支持 TDengine 3.0 |
| 2.0.39 - 2.0.40 | 增加 REST 连接/请求 超时设置 |
| 2.0.38 | JDBC REST 连接增加批量拉取功能 |
| 2.0.37 | 增加对 json tag 支持 |
diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md
index c03990ede2..743fc2d32f 100644
--- a/docs/zh/14-reference/11-docker/index.md
+++ b/docs/zh/14-reference/11-docker/index.md
@@ -25,10 +25,11 @@ curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
$ docker exec -it tdengine taos
taos> show databases;
- name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
-====================================================================================================================================================================================================================================================================================
- log | 2022-01-17 13:57:22.270 | 10 | 1 | 1 | 1 | 10 | 30 | 1 | 3 | 100 | 4096 | 1 | 3000 | 2 | 0 | us | 0 | ready |
-Query OK, 1 row(s) in set (0.002843s)
+ name |
+=================================
+ information_schema |
+ performance_schema |
+Query OK, 2 rows in database (0.033802s)
```
因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 taos shell 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 taos shell 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。
@@ -45,10 +46,11 @@ docker run -d --name tdengine --network host tdengine/tdengine
$ taos
taos> show dnodes;
- id | end_point | vnodes | cores | status | role | create_time | offline reason |
-======================================================================================================================================
- 1 | myhost:6030 | 1 | 8 | ready | any | 2022-01-17 22:10:32.619 | |
-Query OK, 1 row(s) in set (0.003233s)
+ id | endpoint | vnodes | support_vnodes | status | create_time | note |
+=================================================================================================================================================
+ 1 | vm98:6030 | 0 | 32 | ready | 2022-08-19 14:50:05.337 | |
+Query OK, 1 rows in database (0.010654s)
+
```
## 以指定的 hostname 和 port 启动 TDengine
@@ -59,12 +61,13 @@ Query OK, 1 row(s) in set (0.003233s)
docker run -d \
--name tdengine \
-e TAOS_FQDN=tdengine \
- -p 6030-6049:6030-6049 \
- -p 6030-6049:6030-6049/udp \
+ -p 6030:6030 \
+ -p 6041-6049:6041-6049 \
+ -p 6041-6049:6041-6049/udp \
tdengine/tdengine
```
-上面的命令在容器中启动一个 TDengine 服务,其所监听的 hostname 为 tdengine ,并将容器的 6030 到 6049 端口段映射到主机的 6030 到 6049 端口段 (tcp 和 udp 都需要映射)。如果主机上该端口段已经被占用,可以修改上述命令指定一个主机上空闲的端口段。如果 `rpcForceTcp` 被设置为 `1` ,可以只映射 tcp 协议。
+上面的命令在容器中启动一个 TDengine 服务,其所监听的 hostname 为 tdengine ,并将容器的 6030 端口映射到主机的 6030 端口(TCP,只能映射主机 6030 端口),6041-6049 端口段映射到主机 6041-6049 端口段(tcp 和 udp 都需要映射,如果主机上该端口段已经被占用,可以修改上述命令指定一个主机上空闲的端口段)。
接下来,要确保 "tdengine" 这个 hostname 在 `/etc/hosts` 中可解析。
@@ -103,9 +106,9 @@ taos -h tdengine -P 6030
3. 在同一网络上的另一容器中启动 TDengine 客户端
```shell
- docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine tdengine/tdengine taos
+ docker run --rm -it --network td-net -e TAOS_FIRST_EP=tdengine --entrypoint=taos tdengine/tdengine
# or
- #docker run --rm -it --network td-net -e tdengine/tdengine taos -h tdengine
+ #docker run --rm -it --network td-net --entrypoint=taos tdengine/tdengine -h tdengine
```
## 在容器中启动客户端应用
@@ -115,7 +118,7 @@ taos -h tdengine -P 6030
```docker
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
-ENV TDENGINE_VERSION=2.4.0.0
+ENV TDENGINE_VERSION=3.0.0.0
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
@@ -129,6 +132,14 @@ RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_
以下是一个 go 应用程序的示例:
+* 创建 go mod 项目:
+
+```bash
+go mod init app
+```
+
+* 创建 main.go:
+
```go
/*
* In this test program, we'll create a database and insert 4 records then select out.
@@ -212,12 +223,18 @@ func checkErr(err error, prompt string) {
}
```
-如下是完整版本的 dockerfile
+* 更新 go mod
-```docker
-FROM golang:1.17.6-buster as builder
-ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+```bash
+go mod tidy
+```
+
+如下是完整版本的 dockerfile:
+
+```dockerfile
+FROM golang:1.19.0-buster as builder
+ENV TDENGINE_VERSION=3.0.0.0
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -232,8 +249,8 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
-ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+ENV TDENGINE_VERSION=3.0.0.0
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -248,113 +265,112 @@ CMD ["app"]
目前我们已经有了 `main.go`, `go.mod`, `go.sum`, `app.dockerfile`, 现在可以构建出这个应用程序并在 `td-net` 网络上启动它
```shell
-$ docker build -t app -f app.dockerfile
-$ docker run --rm --network td-net app -h tdengine -p 6030
+$ docker build -t app -f app.dockerfile .
+$ docker run --rm --network td-net app app -h tdengine -p 6030
============= args parse result: =============
hostName: tdengine
serverPort: 6030
usr: root
password: taosdata
================================================
-2022-01-17 15:56:55.48 +0000 UTC 0
-2022-01-17 15:56:56.48 +0000 UTC 1
-2022-01-17 15:56:57.48 +0000 UTC 2
-2022-01-17 15:56:58.48 +0000 UTC 3
-2022-01-17 15:58:01.842 +0000 UTC 0
-2022-01-17 15:58:02.842 +0000 UTC 1
-2022-01-17 15:58:03.842 +0000 UTC 2
-2022-01-17 15:58:04.842 +0000 UTC 3
-2022-01-18 01:43:48.029 +0000 UTC 0
-2022-01-18 01:43:49.029 +0000 UTC 1
-2022-01-18 01:43:50.029 +0000 UTC 2
-2022-01-18 01:43:51.029 +0000 UTC 3
+2022-08-19 07:43:51.68 +0000 UTC 0
+2022-08-19 07:43:52.68 +0000 UTC 1
+2022-08-19 07:43:53.68 +0000 UTC 2
+2022-08-19 07:43:54.68 +0000 UTC 3
```
## 用 docker-compose 启动 TDengine 集群
-1. 如下 docker-compose 文件启动一个 2 副本、2 管理节点、2 数据节点以及 1 个 arbitrator 的 TDengine 集群。
+1. 如下 docker-compose 文件启动一个 三节点 TDengine 集群。
- ```docker
- version: "3"
- services:
- arbitrator:
- image: tdengine/tdengine:$VERSION
- command: tarbitrator
- td-1:
- image: tdengine/tdengine:$VERSION
- environment:
- TAOS_FQDN: "td-1"
- TAOS_FIRST_EP: "td-1"
- TAOS_NUM_OF_MNODES: "2"
- TAOS_REPLICA: "2"
- TAOS_ARBITRATOR: arbitrator:6042
- volumes:
- - taosdata-td1:/var/lib/taos/
- - taoslog-td1:/var/log/taos/
- td-2:
- image: tdengine/tdengine:$VERSION
- environment:
- TAOS_FQDN: "td-2"
- TAOS_FIRST_EP: "td-1"
- TAOS_NUM_OF_MNODES: "2"
- TAOS_REPLICA: "2"
- TAOS_ARBITRATOR: arbitrator:6042
- volumes:
- - taosdata-td2:/var/lib/taos/
- - taoslog-td2:/var/log/taos/
- volumes:
- taosdata-td1:
- taoslog-td1:
- taosdata-td2:
- taoslog-td2:
- ```
+```yml
+version: "3"
+services:
+ td-1:
+ image: tdengine/tdengine:$VERSION
+ environment:
+ TAOS_FQDN: "td-1"
+ TAOS_FIRST_EP: "td-1"
+ volumes:
+ - taosdata-td1:/var/lib/taos/
+ - taoslog-td1:/var/log/taos/
+ td-2:
+ image: tdengine/tdengine:$VERSION
+ environment:
+ TAOS_FQDN: "td-2"
+ TAOS_FIRST_EP: "td-1"
+ volumes:
+ - taosdata-td2:/var/lib/taos/
+ - taoslog-td2:/var/log/taos/
+ td-3:
+ image: tdengine/tdengine:$VERSION
+ environment:
+ TAOS_FQDN: "td-3"
+ TAOS_FIRST_EP: "td-1"
+ volumes:
+ - taosdata-td3:/var/lib/taos/
+ - taoslog-td3:/var/log/taos/
+volumes:
+ taosdata-td1:
+ taoslog-td1:
+ taosdata-td2:
+ taoslog-td2:
+ taosdata-td3:
+ taoslog-td3:
+```
:::note
-- `VERSION` 环境变量被用来设置 tdengine image tag
-- 在新创建的实例上必须设置 `TAOS_FIRST_EP` 以使其能够加入 TDengine 集群;如果有高可用需求,则需要同时使用 `TAOS_SECOND_EP`
-- `TAOS_REPLICA` 用来设置缺省的数据库副本数量,其取值范围为[1,3]
- 在双副本环境下,推荐使用 arbitrator, 用 TAOS_ARBITRATOR 来设置
- :::
+* `VERSION` 环境变量被用来设置 tdengine image tag
+* 在新创建的实例上必须设置 `TAOS_FIRST_EP` 以使其能够加入 TDengine 集群;如果有高可用需求,则需要同时使用 `TAOS_SECOND_EP`
+:::
2. 启动集群
- ```shell
- $ VERSION=2.4.0.0 docker-compose up -d
- Creating network "test_default" with the default driver
- Creating volume "test_taosdata-td1" with default driver
- Creating volume "test_taoslog-td1" with default driver
- Creating volume "test_taosdata-td2" with default driver
- Creating volume "test_taoslog-td2" with default driver
- Creating test_td-1_1 ... done
- Creating test_arbitrator_1 ... done
- Creating test_td-2_1 ... done
- ```
+```shell
+$ VERSION=3.0.0.0 docker-compose up -d
+Creating network "test-docker_default" with the default driver
+Creating volume "test-docker_taosdata-td1" with default driver
+Creating volume "test-docker_taoslog-td1" with default driver
+Creating volume "test-docker_taosdata-td2" with default driver
+Creating volume "test-docker_taoslog-td2" with default driver
+Creating volume "test-docker_taosdata-td3" with default driver
+Creating volume "test-docker_taoslog-td3" with default driver
+
+Creating test-docker_td-3_1 ... done
+Creating test-docker_td-1_1 ... done
+Creating test-docker_td-2_1 ... done
+```
3. 查看节点状态
- ```shell
- $ docker-compose ps
- Name Command State Ports
- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- test_arbitrator_1 /usr/bin/entrypoint.sh tar ... Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
- test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
- test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
- ```
+```shell
+ docker-compose ps
+ Name Command State Ports
+
+-------------------------------------------------------------------
+test-docker_td-1_1 /tini -- /usr/bin/entrypoi ... Up
+test-docker_td-2_1 /tini -- /usr/bin/entrypoi ... Up
+test-docker_td-3_1 /tini -- /usr/bin/entrypoi ... Up
+```
4. 用 taos shell 查看 dnodes
- ```shell
- $ docker-compose exec td-1 taos -s "show dnodes"
+```shell
- taos> show dnodes
- id | end_point | vnodes | cores | status | role | create_time | offline reason |
- ======================================================================================================================================
- 1 | td-1:6030 | 1 | 8 | ready | any | 2022-01-18 02:47:42.871 | |
- 2 | td-2:6030 | 0 | 8 | ready | any | 2022-01-18 02:47:43.518 | |
- 0 | arbitrator:6042 | 0 | 0 | ready | arb | 2022-01-18 02:47:43.633 | - |
- Query OK, 3 row(s) in set (0.000811s)
- ```
+$ docker-compose exec td-1 taos -s "show dnodes"
+
+taos> show dnodes
+
+ id | endpoint | vnodes | support_vnodes | status | create_time | note |
+=================================================================================================================================================
+
+ 1 | td-1:6030 | 0 | 32 | ready | 2022-08-19 07:57:29.971 | |
+ 2 | td-2:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.415 | |
+ 3 | td-3:6030 | 0 | 32 | ready | 2022-08-19 07:57:31.417 | |
+Query OK, 3 rows in database (0.021262s)
+
+```
## taosAdapter
@@ -362,93 +378,80 @@ password: taosdata
2. 同时为了部署灵活起见,可以在独立的容器中启动 taosAdapter
- ```docker
- services:
- # ...
- adapter:
- image: tdengine/tdengine:$VERSION
- command: taosadapter
- ```
+```docker
+services:
+ # ...
+ adapter:
+ image: tdengine/tdengine:$VERSION
+ command: taosadapter
+```
- 如果要部署多个 taosAdapter 来提高吞吐量并提供高可用性,推荐配置方式为使用 nginx 等反向代理来提供统一的访问入口。具体配置方法请参考 nginx 的官方文档。如下是示例:
+如果要部署多个 taosAdapter 来提高吞吐量并提供高可用性,推荐配置方式为使用 nginx 等反向代理来提供统一的访问入口。具体配置方法请参考 nginx 的官方文档。如下是示例:
- ```docker
- version: "3"
+```yml
+version: "3"
- networks:
- inter:
- api:
+networks:
+ inter:
- services:
- arbitrator:
- image: tdengine/tdengine:$VERSION
- command: tarbitrator
- networks:
- - inter
- td-1:
- image: tdengine/tdengine:$VERSION
- networks:
- - inter
- environment:
- TAOS_FQDN: "td-1"
- TAOS_FIRST_EP: "td-1"
- TAOS_NUM_OF_MNODES: "2"
- TAOS_REPLICA: "2"
- TAOS_ARBITRATOR: arbitrator:6042
- volumes:
- - taosdata-td1:/var/lib/taos/
- - taoslog-td1:/var/log/taos/
- td-2:
- image: tdengine/tdengine:$VERSION
- networks:
- - inter
- environment:
- TAOS_FQDN: "td-2"
- TAOS_FIRST_EP: "td-1"
- TAOS_NUM_OF_MNODES: "2"
- TAOS_REPLICA: "2"
- TAOS_ARBITRATOR: arbitrator:6042
- volumes:
- - taosdata-td2:/var/lib/taos/
- - taoslog-td2:/var/log/taos/
- adapter:
- image: tdengine/tdengine:$VERSION
- command: taosadapter
- networks:
- - inter
- environment:
- TAOS_FIRST_EP: "td-1"
- TAOS_SECOND_EP: "td-2"
- deploy:
- replicas: 4
- nginx:
- image: nginx
- depends_on:
- - adapter
- networks:
- - inter
- - api
- ports:
- - 6041:6041
- - 6044:6044/udp
- command: [
- "sh",
- "-c",
- "while true;
- do curl -s http://adapter:6041/-/ping >/dev/null && break;
- done;
- printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
- > /etc/nginx/conf.d/rest.conf;
- printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
- >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
- nginx -g 'daemon off;'",
- ]
- volumes:
- taosdata-td1:
- taoslog-td1:
- taosdata-td2:
- taoslog-td2:
- ```
+services:
+ td-1:
+ image: tdengine/tdengine:$VERSION
+ networks:
+ - inter
+ environment:
+ TAOS_FQDN: "td-1"
+ TAOS_FIRST_EP: "td-1"
+ volumes:
+ - taosdata-td1:/var/lib/taos/
+ - taoslog-td1:/var/log/taos/
+ td-2:
+ image: tdengine/tdengine:$VERSION
+ networks:
+ - inter
+ environment:
+ TAOS_FQDN: "td-2"
+ TAOS_FIRST_EP: "td-1"
+ volumes:
+ - taosdata-td2:/var/lib/taos/
+ - taoslog-td2:/var/log/taos/
+ adapter:
+ image: tdengine/tdengine:$VERSION
+ entrypoint: "taosadapter"
+ networks:
+ - inter
+ environment:
+ TAOS_FIRST_EP: "td-1"
+ TAOS_SECOND_EP: "td-2"
+ deploy:
+ replicas: 4
+ nginx:
+ image: nginx
+ depends_on:
+ - adapter
+ networks:
+ - inter
+ ports:
+ - 6041:6041
+ - 6044:6044/udp
+ command: [
+ "sh",
+ "-c",
+ "while true;
+ do curl -s http://adapter:6041/-/ping >/dev/null && break;
+ done;
+ printf 'server{listen 6041;location /{proxy_pass http://adapter:6041;}}'
+ > /etc/nginx/conf.d/rest.conf;
+ printf 'stream{server{listen 6044 udp;proxy_pass adapter:6044;}}'
+ >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf;
+ nginx -g 'daemon off;'",
+ ]
+volumes:
+ taosdata-td1:
+ taoslog-td1:
+ taosdata-td2:
+ taoslog-td2:
+```
## 使用 docker swarm 部署
@@ -457,50 +460,46 @@ password: taosdata
docker-compose 文件可以参考上节。下面是使用 docker swarm 启动 TDengine 的命令:
```shell
-$ VERSION=2.4.0 docker stack deploy -c docker-compose.yml taos
+$ VERSION=3.0.0.0 docker stack deploy -c docker-compose.yml taos
Creating network taos_inter
-Creating network taos_api
-Creating service taos_arbitrator
+Creating service taos_nginx
Creating service taos_td-1
Creating service taos_td-2
Creating service taos_adapter
-Creating service taos_nginx
```
查看和管理
```shell
$ docker stack ps taos
-ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
-79ni8temw59n taos_nginx.1 nginx:latest TM1701 Running Running about a minute ago
-3e94u72msiyg taos_adapter.1 tdengine/tdengine:2.4.0 TM1702 Running Running 56 seconds ago
-100amjkwzsc6 taos_td-2.1 tdengine/tdengine:2.4.0 TM1703 Running Running about a minute ago
-pkjehr2vvaaa taos_td-1.1 tdengine/tdengine:2.4.0 TM1704 Running Running 2 minutes ago
-tpzvgpsr1qkt taos_arbitrator.1 tdengine/tdengine:2.4.0 TM1705 Running Running 2 minutes ago
-rvss3g5yg6fa taos_adapter.2 tdengine/tdengine:2.4.0 TM1706 Running Running 56 seconds ago
-i2augxamfllf taos_adapter.3 tdengine/tdengine:2.4.0 TM1707 Running Running 56 seconds ago
-lmjyhzccpvpg taos_adapter.4 tdengine/tdengine:2.4.0 TM1708 Running Running 56 seconds ago
+ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
+7m3sbf532bqp taos_adapter.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
+pj403n6ofmmh taos_adapter.2 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
+rxqfwsyk5q1h taos_adapter.3 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
+qj40lpxr40oc taos_adapter.4 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
+oe3455ulxpze taos_nginx.1 nginx:latest vm98 Running Running about a minute ago
+o0tsg70nrrc6 taos_td-1.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
+q5m1oxs589cp taos_td-2.1 tdengine/tdengine:3.0.0.0 vm98 Running Running about a minute ago
$ docker service ls
-ID NAME MODE REPLICAS IMAGE PORTS
-561t4lu6nfw6 taos_adapter replicated 4/4 tdengine/tdengine:2.4.0
-3hk5ct3q90sm taos_arbitrator replicated 1/1 tdengine/tdengine:2.4.0
-d8qr52envqzu taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
-2isssfvjk747 taos_td-1 replicated 1/1 tdengine/tdengine:2.4.0
-9pzw7u02ichv taos_td-2 replicated 1/1 tdengine/tdengine:2.4.0
+ID NAME MODE REPLICAS IMAGE PORTS
+ozuklorgl8bs taos_adapter replicated 4/4 tdengine/tdengine:3.0.0.0
+crmhdjw6vxw0 taos_nginx replicated 1/1 nginx:latest *:6041->6041/tcp, *:6044->6044/udp
+o86ngy7csv5n taos_td-1 replicated 1/1 tdengine/tdengine:3.0.0.0
+rma040ny4tb0 taos_td-2 replicated 1/1 tdengine/tdengine:3.0.0.0
```
-从上面的输出可以看到有两个 dnode, 和两个 taosAdapter,以及一个 nginx 反向代理服务。
+从上面的输出可以看到有两个 dnode, 和四个 taosAdapter,以及一个 nginx 反向代理服务。
接下来,我们可以减少 taosAdapter 服务的数量
```shell
$ docker service scale taos_adapter=1
taos_adapter scaled to 1
-overall progress: 1 out of 1 tasks
-1/1: running [==================================================>]
+overall progress: 1 out of 1 tasks
+1/1: running [==================================================>]
verify: Service converged
$ docker service ls -f name=taos_adapter
-ID NAME MODE REPLICAS IMAGE PORTS
-561t4lu6nfw6 taos_adapter replicated 1/1 tdengine/tdengine:2.4.0
+ID NAME MODE REPLICAS IMAGE PORTS
+ozuklorgl8bs taos_adapter replicated 1/1 tdengine/tdengine:3.0.0.0
```
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 845693a98e..d2efc5baf3 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -647,3 +647,173 @@ charset 的有效值是 UTF-8。
| 含义 | 是否启动 udf 服务 |
| 取值范围 | 0: 不启动;1:启动 |
| 缺省值 | 1 |
+
+## 2.X 与 3.0 配置参数对比
+| # | **参数** | **适用于 2.X 版本** | **适用于 3.0 版本** |
+| --- | :-----------------: | --------------- | --------------- |
+| 1 | firstEp | 是 | 是 |
+| 2 | secondEp | 是 | 是 |
+| 3 | fqdn | 是 | 是 |
+| 4 | serverPort | 是 | 是 |
+| 5 | maxShellConns | 是 | 是 |
+| 6 | monitor | 是 | 是 |
+| 7 | monitorFqdn | 否 | 是 |
+| 8 | monitorPort | 否 | 是 |
+| 9 | monitorInterval | 是 | 是 |
+| 10 | monitorMaxLogs | 否 | 是 |
+| 11 | monitorComp | 否 | 是 |
+| 12 | telemetryReporting | 是 | 是 |
+| 13 | telemetryInterval | 否 | 是 |
+| 14 | telemetryServer | 否 | 是 |
+| 15 | telemetryPort | 否 | 是 |
+| 16 | queryPolicy | 否 | 是 |
+| 17 | querySmaOptimize | 否 | 是 |
+| 18 | queryBufferSize | 是 | 是 |
+| 19 | maxNumOfDistinctRes | 是 | 是 |
+| 20 | minSlidingTime | 是 | 是 |
+| 21 | minIntervalTime | 是 | 是 |
+| 22 | countAlwaysReturnValue | 是 | 是 |
+| 23 | dataDir | 是 | 是 |
+| 24 | minimalDataDirGB | 是 | 是 |
+| 25 | supportVnodes | 否 | 是 |
+| 26 | tempDir | 是 | 是 |
+| 27 | minimalTmpDirGB | 是 | 是 |
+| 28 | compressMsgSize | 是 | 是 |
+| 29 | compressColData | 是 | 是 |
+| 30 | smlChildTableName | 是 | 是 |
+| 31 | smlTagName | 是 | 是 |
+| 32 | smlDataFormat | 否 | 是 |
+| 33 | statusInterval | 是 | 是 |
+| 34 | shellActivityTimer | 是 | 是 |
+| 35 | transPullupInterval | 否 | 是 |
+| 36 | mqRebalanceInterval | 否 | 是 |
+| 37 | ttlUnit | 否 | 是 |
+| 38 | ttlPushInterval | 否 | 是 |
+| 39 | numOfTaskQueueThreads | 否 | 是 |
+| 40 | numOfRpcThreads | 否 | 是 |
+| 41 | numOfCommitThreads | 是 | 是 |
+| 42 | numOfMnodeReadThreads | 否 | 是 |
+| 43 | numOfVnodeQueryThreads | 否 | 是 |
+| 44 | numOfVnodeStreamThreads | 否 | 是 |
+| 45 | numOfVnodeFetchThreads | 否 | 是 |
+| 46 | numOfVnodeWriteThreads | 否 | 是 |
+| 47 | numOfVnodeSyncThreads | 否 | 是 |
+| 48 | numOfQnodeQueryThreads | 否 | 是 |
+| 49 | numOfQnodeFetchThreads | 否 | 是 |
+| 50 | numOfSnodeSharedThreads | 否 | 是 |
+| 51 | numOfSnodeUniqueThreads | 否 | 是 |
+| 52 | rpcQueueMemoryAllowed | 否 | 是 |
+| 53 | logDir | 是 | 是 |
+| 54 | minimalLogDirGB | 是 | 是 |
+| 55 | numOfLogLines | 是 | 是 |
+| 56 | asyncLog | 是 | 是 |
+| 57 | logKeepDays | 是 | 是 |
+| 58 | debugFlag | 是 | 是 |
+| 59 | tmrDebugFlag | 是 | 是 |
+| 60 | uDebugFlag | 是 | 是 |
+| 61 | rpcDebugFlag | 是 | 是 |
+| 62 | jniDebugFlag | 是 | 是 |
+| 63 | qDebugFlag | 是 | 是 |
+| 64 | cDebugFlag | 是 | 是 |
+| 65 | dDebugFlag | 是 | 是 |
+| 66 | vDebugFlag | 是 | 是 |
+| 67 | mDebugFlag | 是 | 是 |
+| 68 | wDebugFlag | 是 | 是 |
+| 69 | sDebugFlag | 是 | 是 |
+| 70 | tsdbDebugFlag | 是 | 是 |
+| 71 | tqDebugFlag | 否 | 是 |
+| 72 | fsDebugFlag | 是 | 是 |
+| 73 | udfDebugFlag | 否 | 是 |
+| 74 | smaDebugFlag | 否 | 是 |
+| 75 | idxDebugFlag | 否 | 是 |
+| 76 | tdbDebugFlag | 否 | 是 |
+| 77 | metaDebugFlag | 否 | 是 |
+| 78 | timezone | 是 | 是 |
+| 79 | locale | 是 | 是 |
+| 80 | charset | 是 | 是 |
+| 81 | udf | 是 | 是 |
+| 82 | enableCoreFile | 是 | 是 |
+| 83 | arbitrator | 是 | 否 |
+| 84 | numOfThreadsPerCore | 是 | 否 |
+| 85 | numOfMnodes | 是 | 否 |
+| 86 | vnodeBak | 是 | 否 |
+| 87 | balance | 是 | 否 |
+| 88 | balanceInterval | 是 | 否 |
+| 89 | offlineThreshold | 是 | 否 |
+| 90 | role | 是 | 否 |
+| 91 | dnodeNopLoop | 是 | 否 |
+| 92 | keepTimeOffset | 是 | 否 |
+| 93 | rpcTimer | 是 | 否 |
+| 94 | rpcMaxTime | 是 | 否 |
+| 95 | rpcForceTcp | 是 | 否 |
+| 96 | tcpConnTimeout | 是 | 否 |
+| 97 | syncCheckInterval | 是 | 否 |
+| 98 | maxTmrCtrl | 是 | 否 |
+| 99 | monitorReplica | 是 | 否 |
+| 100 | smlTagNullName | 是 | 否 |
+| 101 | keepColumnName | 是 | 否 |
+| 102 | ratioOfQueryCores | 是 | 否 |
+| 103 | maxStreamCompDelay | 是 | 否 |
+| 104 | maxFirstStreamCompDelay | 是 | 否 |
+| 105 | retryStreamCompDelay | 是 | 否 |
+| 106 | streamCompDelayRatio | 是 | 否 |
+| 107 | maxVgroupsPerDb | 是 | 否 |
+| 108 | maxTablesPerVnode | 是 | 否 |
+| 109 | minTablesPerVnode | 是 | 否 |
+| 110 | tableIncStepPerVnode | 是 | 否 |
+| 111 | cache | 是 | 否 |
+| 112 | blocks | 是 | 否 |
+| 113 | days | 是 | 否 |
+| 114 | keep | 是 | 否 |
+| 115 | minRows | 是 | 否 |
+| 116 | maxRows | 是 | 否 |
+| 117 | quorum | 是 | 否 |
+| 118 | comp | 是 | 否 |
+| 119 | walLevel | 是 | 否 |
+| 120 | fsync | 是 | 否 |
+| 121 | replica | 是 | 否 |
+| 122 | partitions | 是 | 否 |
+| 123 | quorum | 是 | 否 |
+| 124 | update | 是 | 否 |
+| 125 | cachelast | 是 | 否 |
+| 126 | maxSQLLength | 是 | 否 |
+| 127 | maxWildCardsLength | 是 | 否 |
+| 128 | maxRegexStringLen | 是 | 否 |
+| 129 | maxNumOfOrderedRes | 是 | 否 |
+| 130 | maxConnections | 是 | 否 |
+| 131 | mnodeEqualVnodeNum | 是 | 否 |
+| 132 | http | 是 | 否 |
+| 133 | httpEnableRecordSql | 是 | 否 |
+| 134 | httpMaxThreads | 是 | 否 |
+| 135 | restfulRowLimit | 是 | 否 |
+| 136 | httpDbNameMandatory | 是 | 否 |
+| 137 | httpKeepAlive | 是 | 否 |
+| 138 | enableRecordSql | 是 | 否 |
+| 139 | maxBinaryDisplayWidth | 是 | 否 |
+| 140 | stream | 是 | 否 |
+| 141 | retrieveBlockingModel | 是 | 否 |
+| 142 | tsdbMetaCompactRatio | 是 | 否 |
+| 143 | defaultJSONStrType | 是 | 否 |
+| 144 | walFlushSize | 是 | 否 |
+| 145 | keepTimeOffset | 是 | 否 |
+| 146 | flowctrl | 是 | 否 |
+| 147 | slaveQuery | 是 | 否 |
+| 148 | adjustMaster | 是 | 否 |
+| 149 | topicBinaryLen | 是 | 否 |
+| 150 | telegrafUseFieldNum | 是 | 否 |
+| 151 | deadLockKillQuery | 是 | 否 |
+| 152 | clientMerge | 是 | 否 |
+| 153 | sdbDebugFlag | 是 | 否 |
+| 154 | odbcDebugFlag | 是 | 否 |
+| 155 | httpDebugFlag | 是 | 否 |
+| 156 | monDebugFlag | 是 | 否 |
+| 157 | cqDebugFlag | 是 | 否 |
+| 158 | shortcutFlag | 是 | 否 |
+| 159 | probeSeconds | 是 | 否 |
+| 160 | probeKillSeconds | 是 | 否 |
+| 161 | probeInterval | 是 | 否 |
+| 162 | lossyColumns | 是 | 否 |
+| 163 | fPrecision | 是 | 否 |
+| 164 | dPrecision | 是 | 否 |
+| 165 | maxRange | 是 | 否 |
+| 166 | range | 是 | 否 |
diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md
new file mode 100644
index 0000000000..f1165c9d0f
--- /dev/null
+++ b/docs/zh/14-reference/14-taosKeeper.md
@@ -0,0 +1,134 @@
+---
+sidebar_label: taosKeeper
+title: taosKeeper
+description: TDengine taosKeeper 使用说明
+---
+
+## 简介
+
+TaosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。
+
+## 安装
+
+
+taosKeeper 安装方式:
+
+
+
+
+- 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。
+
+## 运行
+
+### 配置和运行方式
+
+
+taosKeeper 需要在操作系统终端执行,该工具支持 [配置文件启动](#配置文件启动)。
+
+**在运行 taosKeeper 之前要确保 TDengine 集群与 taosAdapter 已经在正确运行。** 并且 TDengine 已经开启监控服务,具体请参考:[TDengine 监控配置](../config/#监控相关)。
+
+
+### 配置文件启动
+
+执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。
+
+```shell
+taoskeeper -c
+```
+
+**下面是配置文件的示例:**
+```toml
+# gin 框架是否启用 debug
+debug = false
+
+# 服务监听端口, 默认为 6043
+port = 6043
+
+# 日志级别,包含 panic、error、info、debug、trace等
+loglevel = "info"
+
+# 程序中使用协程池的大小
+gopoolsize = 50000
+
+# 查询 TDengine 监控数据轮询间隔
+RotationInterval = "15s"
+
+[tdengine]
+host = "127.0.0.1"
+port = 6041
+username = "root"
+password = "taosdata"
+
+# 需要被监控的 taosAdapter
+[taosAdapter]
+address = ["127.0.0.1:6041","192.168.1.95:6041"]
+
+[metrics]
+# 监控指标前缀
+prefix = "taos"
+
+# 集群数据的标识符
+cluster = "production"
+
+# 存放监控数据的数据库
+database = "log"
+
+# 指定需要监控的普通表
+tables = ["normal_table"]
+```
+
+### 获取监控指标
+
+taosKeeper 作为 TDengine 监控指标的导出工具,可以将 TDengine 产生的监控数据记录在指定数据库中,并提供导出接口。
+
+#### 查看监控结果集
+
+```shell
+$ taos
+# 如上示例,使用 log 库作为监控日志存储位置
+> use log;
+> select * from cluster_info limit 1;
+```
+
+结果示例:
+
+```shell
+ ts | first_ep | first_ep_dnode_id | version | master_uptime | monitor_interval | dbs_total | tbs_total | stbs_total | dnodes_total | dnodes_alive | mnodes_total | mnodes_alive | vgroups_total | vgroups_alive | vnodes_total | vnodes_alive | connections_total | protocol | cluster_id |
+===============================================================================================================================================================================================================================================================================================================================================================================
+ 2022-08-16 17:37:01.629 | hlb:6030 | 1 | 3.0.0.0 | 0.27250 | 15 | 2 | 27 | 38 | 1 | 1 | 1 | 1 | 4 | 4 | 4 | 4 | 14 | 1 | 5981392874047724755 |
+Query OK, 1 rows in database (0.036162s)
+```
+
+#### 导出监控指标
+
+```shell
+curl http://127.0.0.1:6043/metrics
+```
+
+部分结果集:
+
+```shell
+# HELP taos_cluster_info_connections_total
+# TYPE taos_cluster_info_connections_total counter
+taos_cluster_info_connections_total{cluster_id="5981392874047724755"} 16
+# HELP taos_cluster_info_dbs_total
+# TYPE taos_cluster_info_dbs_total counter
+taos_cluster_info_dbs_total{cluster_id="5981392874047724755"} 2
+# HELP taos_cluster_info_dnodes_alive
+# TYPE taos_cluster_info_dnodes_alive counter
+taos_cluster_info_dnodes_alive{cluster_id="5981392874047724755"} 1
+# HELP taos_cluster_info_dnodes_total
+# TYPE taos_cluster_info_dnodes_total counter
+taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
+# HELP taos_cluster_info_first_ep
+# TYPE taos_cluster_info_first_ep gauge
+taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
+```
\ No newline at end of file
diff --git a/docs/zh/14-reference/14-taosx.md b/docs/zh/14-reference/14-taosx.md
deleted file mode 100644
index ed3f8d488f..0000000000
--- a/docs/zh/14-reference/14-taosx.md
+++ /dev/null
@@ -1,4 +0,0 @@
----
-sidebar_label: taosX
-title: 使用 taosX 在集群间复制数据
----
\ No newline at end of file
diff --git a/docs/zh/17-operation/03-tolerance.md b/docs/zh/17-operation/03-tolerance.md
index 2cfd4b6484..1ce485b042 100644
--- a/docs/zh/17-operation/03-tolerance.md
+++ b/docs/zh/17-operation/03-tolerance.md
@@ -26,5 +26,3 @@ TDengine 集群中的时序数据的副本数是与数据库关联的,一个
TDengine 集群的节点数必须大于等于副本数,否则创建表时将报错。
当 TDengine 集群中的节点部署在不同的物理机上,并设置多个副本数时,就实现了系统的高可靠性,无需再使用其他软件或工具。TDengine 企业版还可以将副本部署在不同机房,从而实现异地容灾。
-
-另外一种灾备方式是通过 `taosX` 将一个 TDengine 集群的数据同步复制到物理上位于不同数据中心的另一个 TDengine 集群。其详细使用方法请参考 [taosX 参考手册](../../reference/taosX)
diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx
index 93090ffd38..becb1a70a9 100644
--- a/docs/zh/20-third-party/01-grafana.mdx
+++ b/docs/zh/20-third-party/01-grafana.mdx
@@ -193,7 +193,7 @@ docker run -d \
如上图所示,在 Query 中选中 `TDengine` 数据源,在下方查询框可输入相应 SQL 进行查询,具体说明如下:
-- INPUT SQL:输入要查询的语句(该 SQL 语句的结果集应为两列多行),例如:`select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)` ,其中,from、to 和 interval 为 TDengine 插件的内置变量,表示从 Grafana 插件面板获取的查询范围和时间间隔。除了内置变量外,`也支持可以使用自定义模板变量`。
+- INPUT SQL:输入要查询的语句(该 SQL 语句的结果集应为两列多行),例如:`select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)` ,其中,from、to 和 interval 为 TDengine 插件的内置变量,表示从 Grafana 插件面板获取的查询范围和时间间隔。除了内置变量外,`也支持可以使用自定义模板变量`。
- ALIAS BY:可设置当前查询别名。
- GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。
@@ -205,7 +205,11 @@ docker run -d \
### 导入 Dashboard
-在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。该 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。其他安装方式和相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。
+在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。
+
+
+
+其中适配 TDengine 2.* 的 Dashboard 已发布在 Grafana:[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。其他安装方式和相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。
使用 TDengine 作为数据源的其他面板,可以[在此搜索](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource)。以下是一份不完全列表:
diff --git a/docs/zh/20-third-party/import_dashboard.webp b/docs/zh/20-third-party/import_dashboard.webp
new file mode 100644
index 0000000000..164e3f4690
Binary files /dev/null and b/docs/zh/20-third-party/import_dashboard.webp differ
diff --git a/docs/zh/28-releases.md b/docs/zh/28-releases.md
index 5f30325829..311d69ac1b 100644
--- a/docs/zh/28-releases.md
+++ b/docs/zh/28-releases.md
@@ -3,7 +3,7 @@ sidebar_label: 发布历史
title: 发布历史
---
-import Release from "/components/Release";
+import Release from "/components/ReleaseV3";
diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c
index dd4fbc8d2d..2fcf4dd62c 100644
--- a/examples/c/stream_demo.c
+++ b/examples/c/stream_demo.c
@@ -98,10 +98,9 @@ int32_t create_stream() {
/*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
/*const char* sql = "select sum(k) from tu1 interval(10m)";*/
/*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
- pRes =
- taos_query(pConn,
- "create stream stream1 trigger max_delay 10s into outstb as select _wstart, sum(k) from st1 partition "
- "by tbname session(ts, 10s) ");
+ pRes = taos_query(pConn,
+ "create stream stream1 trigger max_delay 10s watermark 10s into outstb as select _wstart start, "
+ "count(k) from st1 partition by tbname interval(20s) ");
if (taos_errno(pRes) != 0) {
printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
return -1;
diff --git a/examples/c/tmq.c b/examples/c/tmq.c
index fc34915fe7..19adaad116 100644
--- a/examples/c/tmq.c
+++ b/examples/c/tmq.c
@@ -45,10 +45,9 @@ static int32_t msg_process(TAOS_RES* msg) {
int32_t numOfFields = taos_field_count(msg);
int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg);
- const char* tbName = tmq_get_table_name(msg);
rows++;
taos_print_row(buf, row, fields, numOfFields);
- printf("row content from %s: %s\n", (tbName != NULL ? tbName : "table null"), buf);
+ printf("row content: %s\n", buf);
}
return rows;
@@ -167,7 +166,7 @@ int32_t create_topic() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3 from tmqdb.stb where c1 > 1");
+ pRes = taos_query(pConn, "create topic topicname as select ts, c1, c2, c3, tbname from tmqdb.stb where c1 > 1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic topicname, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -199,9 +198,7 @@ tmq_t* build_consumer() {
if (TMQ_CONF_OK != code) return NULL;
code = tmq_conf_set(conf, "auto.offset.reset", "earliest");
if (TMQ_CONF_OK != code) return NULL;
- code = tmq_conf_set(conf, "experimental.snapshot.enable", "true");
- if (TMQ_CONF_OK != code) return NULL;
- code = tmq_conf_set(conf, "msg.with.table.name", "true");
+ code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
if (TMQ_CONF_OK != code) return NULL;
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
@@ -220,14 +217,7 @@ tmq_list_t* build_topic_list() {
return topicList;
}
-void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topicList))) {
- fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
- return;
- }
-
+void basic_consume_loop(tmq_t* tmq) {
int32_t totalRows = 0;
int32_t msgCnt = 0;
int32_t timeout = 5000;
@@ -237,8 +227,8 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topicList) {
msgCnt++;
totalRows += msg_process(tmqmsg);
taos_free_result(tmqmsg);
- /*} else {*/
- /*break;*/
+ } else {
+ break;
}
}
@@ -267,14 +257,12 @@ int main(int argc, char* argv[]) {
return -1;
}
- basic_consume_loop(tmq, topic_list);
-
- code = tmq_unsubscribe(tmq);
- if (code) {
- fprintf(stderr, "%% Failed to unsubscribe: %s\n", tmq_err2str(code));
- } else {
- fprintf(stderr, "%% unsubscribe\n");
+ if ((code = tmq_subscribe(tmq, topic_list))) {
+ fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
}
+ tmq_list_destroy(topic_list);
+
+ basic_consume_loop(tmq);
code = tmq_consumer_close(tmq);
if (code) {
diff --git a/examples/rust/.gitignore b/examples/rust/.gitignore
new file mode 100644
index 0000000000..96ef6c0b94
--- /dev/null
+++ b/examples/rust/.gitignore
@@ -0,0 +1,2 @@
+/target
+Cargo.lock
diff --git a/examples/rust/Cargo.toml b/examples/rust/Cargo.toml
new file mode 100644
index 0000000000..1ed73e2fde
--- /dev/null
+++ b/examples/rust/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "rust"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+taos = "*"
+
+[dev-dependencies]
+chrono = "0.4"
+itertools = "0.10.3"
+pretty_env_logger = "0.4.0"
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
+tokio = { version = "1", features = ["full"] }
+anyhow = "1"
diff --git a/examples/rust/examples/bind-tags.rs b/examples/rust/examples/bind-tags.rs
new file mode 100644
index 0000000000..a1f7286625
--- /dev/null
+++ b/examples/rust/examples/bind-tags.rs
@@ -0,0 +1,80 @@
+use anyhow::Result;
+use serde::Deserialize;
+use taos::*;
+
+#[tokio::main]
+async fn main() -> Result<()> {
+ let taos = TaosBuilder::from_dsn("taos://")?.build()?;
+ taos.exec_many([
+ "drop database if exists test",
+ "create database test keep 36500",
+ "use test",
+ "create table tb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint,
+ c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned,
+ c10 float, c11 double, c12 varchar(100), c13 nchar(100)) tags(t1 varchar(100))",
+ ])
+ .await?;
+ let mut stmt = Stmt::init(&taos)?;
+ stmt.prepare(
+ "insert into ? using tb1 tags(?) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
+ )?;
+ stmt.set_tbname("d0")?;
+ stmt.set_tags(&[Value::VarChar("涛思".to_string())])?;
+
+ let params = vec![
+ ColumnView::from_millis_timestamp(vec![164000000000]),
+ ColumnView::from_bools(vec![true]),
+ ColumnView::from_tiny_ints(vec![i8::MAX]),
+ ColumnView::from_small_ints(vec![i16::MAX]),
+ ColumnView::from_ints(vec![i32::MAX]),
+ ColumnView::from_big_ints(vec![i64::MAX]),
+ ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
+ ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
+ ColumnView::from_unsigned_ints(vec![u32::MAX]),
+ ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
+ ColumnView::from_floats(vec![f32::MAX]),
+ ColumnView::from_doubles(vec![f64::MAX]),
+ ColumnView::from_varchar(vec!["ABC"]),
+ ColumnView::from_nchar(vec!["涛思数据"]),
+ ];
+ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?;
+ assert_eq!(rows, 1);
+
+ #[derive(Debug, Deserialize)]
+ #[allow(dead_code)]
+ struct Row {
+ ts: String,
+ c1: bool,
+ c2: i8,
+ c3: i16,
+ c4: i32,
+ c5: i64,
+ c6: u8,
+ c7: u16,
+ c8: u32,
+ c9: u64,
+ c10: Option,
+ c11: f64,
+ c12: String,
+ c13: String,
+ t1: serde_json::Value,
+ }
+
+ let rows: Vec = taos
+ .query("select * from tb1")
+ .await?
+ .deserialize()
+ .try_collect()
+ .await?;
+ let row = &rows[0];
+ dbg!(&row);
+ assert_eq!(row.c5, i64::MAX);
+ assert_eq!(row.c8, u32::MAX);
+ assert_eq!(row.c9, u64::MAX);
+ assert_eq!(row.c10.unwrap(), f32::MAX);
+ // assert_eq!(row.c11, f64::MAX);
+ assert_eq!(row.c12, "ABC");
+ assert_eq!(row.c13, "涛思数据");
+
+ Ok(())
+}
diff --git a/examples/rust/examples/bind.rs b/examples/rust/examples/bind.rs
new file mode 100644
index 0000000000..194938a319
--- /dev/null
+++ b/examples/rust/examples/bind.rs
@@ -0,0 +1,74 @@
+use anyhow::Result;
+use serde::Deserialize;
+use taos::*;
+
+#[tokio::main]
+async fn main() -> Result<()> {
+ let taos = TaosBuilder::from_dsn("taos://")?.build()?;
+ taos.exec_many([
+ "drop database if exists test_bindable",
+ "create database test_bindable keep 36500",
+ "use test_bindable",
+ "create table tb1 (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint,
+ c6 tinyint unsigned, c7 smallint unsigned, c8 int unsigned, c9 bigint unsigned,
+ c10 float, c11 double, c12 varchar(100), c13 nchar(100))",
+ ])
+ .await?;
+ let mut stmt = Stmt::init(&taos)?;
+ stmt.prepare("insert into tb1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")?;
+ let params = vec![
+ ColumnView::from_millis_timestamp(vec![0]),
+ ColumnView::from_bools(vec![true]),
+ ColumnView::from_tiny_ints(vec![i8::MAX]),
+ ColumnView::from_small_ints(vec![i16::MAX]),
+ ColumnView::from_ints(vec![i32::MAX]),
+ ColumnView::from_big_ints(vec![i64::MAX]),
+ ColumnView::from_unsigned_tiny_ints(vec![u8::MAX]),
+ ColumnView::from_unsigned_small_ints(vec![u16::MAX]),
+ ColumnView::from_unsigned_ints(vec![u32::MAX]),
+ ColumnView::from_unsigned_big_ints(vec![u64::MAX]),
+ ColumnView::from_floats(vec![f32::MAX]),
+ ColumnView::from_doubles(vec![f64::MAX]),
+ ColumnView::from_varchar(vec!["ABC"]),
+ ColumnView::from_nchar(vec!["涛思数据"]),
+ ];
+ let rows = stmt.bind(¶ms)?.add_batch()?.execute()?;
+ assert_eq!(rows, 1);
+
+ #[derive(Debug, Deserialize)]
+ #[allow(dead_code)]
+ struct Row {
+ ts: String,
+ c1: bool,
+ c2: i8,
+ c3: i16,
+ c4: i32,
+ c5: i64,
+ c6: u8,
+ c7: u16,
+ c8: u32,
+ c9: u64,
+ c10: Option,
+ c11: f64,
+ c12: String,
+ c13: String,
+ }
+
+ let rows: Vec = taos
+ .query("select * from tb1")
+ .await?
+ .deserialize()
+ .try_collect()
+ .await?;
+ let row = &rows[0];
+ dbg!(&row);
+ assert_eq!(row.c5, i64::MAX);
+ assert_eq!(row.c8, u32::MAX);
+ assert_eq!(row.c9, u64::MAX);
+ assert_eq!(row.c10.unwrap(), f32::MAX);
+ // assert_eq!(row.c11, f64::MAX);
+ assert_eq!(row.c12, "ABC");
+ assert_eq!(row.c13, "涛思数据");
+
+ Ok(())
+}
diff --git a/examples/rust/examples/query.rs b/examples/rust/examples/query.rs
new file mode 100644
index 0000000000..016b291abc
--- /dev/null
+++ b/examples/rust/examples/query.rs
@@ -0,0 +1,106 @@
+use std::time::Duration;
+
+use chrono::{DateTime, Local};
+use taos::*;
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+ let dsn = "taos://";
+
+ let opts = PoolBuilder::new()
+ .max_size(5000) // max connections
+ .max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
+ .min_idle(Some(1000)) // minimal idle connections
+ .connection_timeout(Duration::from_secs(2));
+
+ let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
+
+ let taos = pool.get()?;
+
+ let db = "query";
+
+ // prepare database
+ taos.exec_many([
+ format!("DROP DATABASE IF EXISTS `{db}`"),
+ format!("CREATE DATABASE `{db}`"),
+ format!("USE `{db}`"),
+ ])
+ .await?;
+
+ let inserted = taos.exec_many([
+ // create super table
+ "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))",
+ // create child table
+ "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
+ // insert into child table
+ "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
+ // insert with NULL values
+ "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
+ // insert and automatically create table with tags if not exists
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
+ // insert many records in a single sql
+ "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
+ ]).await?;
+
+ assert_eq!(inserted, 6);
+ loop {
+ let count: usize = taos
+ .query_one("select count(*) from `meters`")
+ .await?
+ .unwrap_or_default();
+
+ if count >= 6 {
+ break;
+ } else {
+ println!("waiting for data");
+ }
+ }
+
+ let mut result = taos.query("select tbname, * from `meters`").await?;
+
+ for field in result.fields() {
+ println!("got field: {}", field.name());
+ }
+
+ // Query option 1, use rows stream.
+ let mut rows = result.rows();
+ let mut nrows = 0;
+ while let Some(row) = rows.try_next().await? {
+ for (col, (name, value)) in row.enumerate() {
+ println!(
+ "[{}] got value in col {} (named `{:>8}`): {}",
+ nrows, col, name, value
+ );
+ }
+ nrows += 1;
+ }
+
+ // Query options 2, use deserialization with serde.
+ #[derive(Debug, serde::Deserialize)]
+ #[allow(dead_code)]
+ struct Record {
+ tbname: String,
+ // deserialize timestamp to chrono::DateTime
+ ts: DateTime,
+ // float to f32
+ current: Option,
+ // int to i32
+ voltage: Option,
+ phase: Option,
+ groupid: i32,
+ // binary/varchar to String
+ location: String,
+ }
+
+ let records: Vec = taos
+ .query("select tbname, * from `meters`")
+ .await?
+ .deserialize()
+ .try_collect()
+ .await?;
+
+ dbg!(result.summary());
+ assert_eq!(records.len(), 6);
+ dbg!(records);
+ Ok(())
+}
diff --git a/examples/rust/examples/subscribe.rs b/examples/rust/examples/subscribe.rs
new file mode 100644
index 0000000000..9e2e890405
--- /dev/null
+++ b/examples/rust/examples/subscribe.rs
@@ -0,0 +1,103 @@
+use std::time::Duration;
+
+use chrono::{DateTime, Local};
+use taos::*;
+
+// Query options 2, use deserialization with serde.
+#[derive(Debug, serde::Deserialize)]
+#[allow(dead_code)]
+struct Record {
+ // deserialize timestamp to chrono::DateTime
+ ts: DateTime,
+ // float to f32
+ current: Option,
+ // int to i32
+ voltage: Option,
+ phase: Option,
+}
+
+async fn prepare(taos: Taos) -> anyhow::Result<()> {
+ let inserted = taos.exec_many([
+ // create child table
+ "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
+ // insert into child table
+ "INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
+ // insert with NULL values
+ "INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
+ // insert and automatically create table with tags if not exists
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
+ // insert many records in a single sql
+ "INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
+ ]).await?;
+ assert_eq!(inserted, 6);
+ Ok(())
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+ // std::env::set_var("RUST_LOG", "debug");
+ pretty_env_logger::init();
+ let dsn = "taos://localhost:6030";
+ let builder = TaosBuilder::from_dsn(dsn)?;
+
+ let taos = builder.build()?;
+ let db = "tmq";
+
+ // prepare database
+ taos.exec_many([
+ "DROP TOPIC IF EXISTS tmq_meters".to_string(),
+ format!("DROP DATABASE IF EXISTS `{db}`"),
+ format!("CREATE DATABASE `{db}`"),
+ format!("USE `{db}`"),
+ // create super table
+ "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))".to_string(),
+ // create topic for subscription
+ format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
+ ])
+ .await?;
+
+ let task = tokio::spawn(prepare(taos));
+
+ tokio::time::sleep(Duration::from_secs(1)).await;
+
+ // subscribe
+ let tmq = TmqBuilder::from_dsn("taos://localhost:6030/?group.id=test")?;
+
+ let mut consumer = tmq.build()?;
+ consumer.subscribe(["tmq_meters"]).await?;
+
+ {
+ let mut stream = consumer.stream();
+
+ while let Some((offset, message)) = stream.try_next().await? {
+ // get information from offset
+
+ // the topic
+ let topic = offset.topic();
+ // the vgroup id, like partition id in kafka.
+ let vgroup_id = offset.vgroup_id();
+ println!("* in vgroup id {vgroup_id} of topic {topic}\n");
+
+ if let Some(data) = message.into_data() {
+ while let Some(block) = data.fetch_raw_block().await? {
+ // one block for one table, get table name if needed
+ let name = block.table_name();
+ let records: Vec = block.deserialize().try_collect()?;
+ println!(
+ "** table: {}, got {} records: {:#?}\n",
+ name.unwrap(),
+ records.len(),
+ records
+ );
+ }
+ }
+ consumer.commit(offset).await?;
+ }
+ }
+
+ consumer.unsubscribe().await;
+
+ task.await??;
+
+ Ok(())
+}
diff --git a/examples/rust/src/main.rs b/examples/rust/src/main.rs
new file mode 100644
index 0000000000..e7a11a969c
--- /dev/null
+++ b/examples/rust/src/main.rs
@@ -0,0 +1,3 @@
+fn main() {
+ println!("Hello, world!");
+}
diff --git a/include/client/taos.h b/include/client/taos.h
index dd7266bd96..f260b84f4a 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -131,10 +131,10 @@ DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...);
DLL_EXPORT setConfRet taos_set_config(const char *config);
DLL_EXPORT int taos_init(void);
DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port);
-DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
-DLL_EXPORT void taos_close(TAOS *taos);
+DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
+DLL_EXPORT void taos_close(TAOS *taos);
-const char *taos_data_type(int type);
+const char *taos_data_type(int type);
DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length);
@@ -244,33 +244,37 @@ DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_comm
/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */
+DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
+DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
+DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
+
+/* ------------------------------ TAOSX -----------------------------------*/
+// note: following apis are unstable
enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
};
-typedef struct tmq_raw_data{
- void* raw;
+typedef struct tmq_raw_data {
+ void *raw;
uint32_t raw_len;
uint16_t raw_type;
} tmq_raw_data;
typedef enum tmq_res_t tmq_res_t;
-DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
-DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
-DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
-DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char* tbname);
-DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
-DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res); // Returning null means error. Returned result need to be freed by tmq_free_json_meta
-DLL_EXPORT void tmq_free_json_meta(char* jsonMeta);
-DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
-DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
-DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
-DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
+DLL_EXPORT const char *tmq_get_table_name(TAOS_RES *res);
+DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
+DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
+DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
+DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname);
+DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
+// Returning null means error. Returned result need to be freed by tmq_free_json_meta
+DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res);
+DLL_EXPORT void tmq_free_json_meta(char *jsonMeta);
-/* ------------------------------ TMQ END -------------------------------- */
+/* ---------------------------- TAOSX END -------------------------------- */
typedef enum {
TSDB_SRV_STATUS_UNAVAILABLE = 0,
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index e04d9d5e86..dbe020f7ec 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -60,6 +60,7 @@ enum {
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__GET_RES,
STREAM_INPUT__CHECKPOINT,
+ STREAM_INPUT__DESTROY,
};
typedef enum EStreamType {
diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h
index 3679b3773b..af7c88acde 100644
--- a/include/common/tdataformat.h
+++ b/include/common/tdataformat.h
@@ -38,22 +38,18 @@ typedef struct STagVal STagVal;
typedef struct STag STag;
// bitmap
-#define N1(n) ((1 << (n)) - 1)
-#define BIT1_SIZE(n) (((n)-1) / 8 + 1)
-#define BIT2_SIZE(n) (((n)-1) / 4 + 1)
-#define SET_BIT1(p, i, v) \
- do { \
- (p)[(i) / 8] &= N1((i) % 8); \
- (p)[(i) / 8] |= (((uint8_t)(v)) << (((i) % 8))); \
- } while (0)
+const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
+ {0b00000000, 0b00000100, 0b00001000, 2},
+ {0b00000000, 0b00010000, 0b00100000, 4},
+ {0b00000000, 0b01000000, 0b10000000, 6}};
-#define GET_BIT1(p, i) (((p)[(i) / 8] >> ((i) % 8)) & ((uint8_t)1))
-#define SET_BIT2(p, i, v) \
- do { \
- p[(i) / 4] &= N1((i) % 4 * 2); \
- (p)[(i) / 4] |= (((uint8_t)(v)) << (((i) % 4) * 2)); \
- } while (0)
-#define GET_BIT2(p, i) (((p)[(i) / 4] >> (((i) % 4) * 2)) & ((uint8_t)3))
+#define N1(n) ((((uint8_t)1) << (n)) - 1)
+#define BIT1_SIZE(n) ((((n)-1) >> 3) + 1)
+#define BIT2_SIZE(n) ((((n)-1) >> 2) + 1)
+#define SET_BIT1(p, i, v) ((p)[(i) >> 3] = (p)[(i) >> 3] & N1((i)&7) | (((uint8_t)(v)) << ((i)&7)))
+#define GET_BIT1(p, i) (((p)[(i) >> 3] >> ((i)&7)) & ((uint8_t)1))
+#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)])
+#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3))
// STSchema
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
@@ -171,7 +167,7 @@ struct SColVal {
#pragma pack(push, 1)
struct STagVal {
-// char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta
+ // char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta
union {
int16_t cid;
char *pKey;
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 9111728e1a..cd74ffd477 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -139,7 +139,6 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
bool tsc);
void taosCleanupCfg();
void taosCfgDynamicOptions(const char *option, const char *value);
-void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);
struct SConfig *taosGetCfg();
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index cc15d4ed6b..8f199c72f7 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -2555,10 +2555,14 @@ typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
int64_t ntbUid;
SArray* colIdList; // SArray
-} SCheckAlterInfo;
+} STqCheckInfo;
-int32_t tEncodeSCheckAlterInfo(SEncoder* pEncoder, const SCheckAlterInfo* pInfo);
-int32_t tDecodeSCheckAlterInfo(SDecoder* pDecoder, SCheckAlterInfo* pInfo);
+int32_t tEncodeSTqCheckInfo(SEncoder* pEncoder, const STqCheckInfo* pInfo);
+int32_t tDecodeSTqCheckInfo(SDecoder* pDecoder, STqCheckInfo* pInfo);
+
+typedef struct {
+ char topic[TSDB_TOPIC_FNAME_LEN];
+} STqDelCheckInfoReq;
typedef struct {
int32_t vgId;
@@ -2660,29 +2664,8 @@ typedef struct {
} SVgEpSet;
typedef struct {
- int64_t suid;
- int8_t level;
-} SRSmaFetchMsg;
-
-static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFetchMsg* pReq) {
- if (tStartEncode(pCoder) < 0) return -1;
-
- if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
- if (tEncodeI8(pCoder, pReq->level) < 0) return -1;
-
- tEndEncode(pCoder);
- return 0;
-}
-
-static FORCE_INLINE int32_t tDecodeSRSmaFetchMsg(SDecoder* pCoder, SRSmaFetchMsg* pReq) {
- if (tStartDecode(pCoder) < 0) return -1;
-
- if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
- if (tDecodeI8(pCoder, &pReq->level) < 0) return -1;
-
- tEndDecode(pCoder);
- return 0;
-}
+ int32_t padding;
+} SRSmaExecMsg;
typedef struct {
int8_t version; // for compatibility(default 0)
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index 6462c7afbf..e2bb3e2ae1 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -188,7 +188,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_CHANGE, "vnode-mq-vg-change", SMqRebVgReq, SMqRebVgRsp)
TD_DEF_MSG_TYPE(TDMT_VND_MQ_VG_DELETE, "vnode-mq-vg-delete", SMqVDeleteReq, SMqVDeleteRsp)
TD_DEF_MSG_TYPE(TDMT_VND_MQ_COMMIT_OFFSET, "vnode-commit-offset", STqOffset, STqOffset)
- TD_DEF_MSG_TYPE(TDMT_VND_CHECK_ALTER_INFO, "vnode-alter-check-info", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_VND_ADD_CHECK_INFO, "vnode-add-check-info", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_VND_DELETE_CHECK_INFO, "vnode-delete-check-info", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TOPIC, "vnode-create-topic", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TOPIC, "vnode-alter-topic", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_TOPIC, "vnode-drop-topic", NULL, NULL)
@@ -200,7 +201,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp)
- TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL)
+ TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_VND_EXEC_RSMA, "vnode-exec-rsma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp)
TD_DEF_MSG_TYPE(TDMT_VND_BATCH_DEL, "batch-delete", SBatchDeleteReq, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)
diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h
index bb75efa00a..5743d33608 100644
--- a/include/libs/nodes/nodes.h
+++ b/include/libs/nodes/nodes.h
@@ -105,7 +105,7 @@ typedef enum ENodeType {
QUERY_NODE_COLUMN_REF,
// Statement nodes are used in parser and planner module.
- QUERY_NODE_SET_OPERATOR,
+ QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIF_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
@@ -198,7 +198,7 @@ typedef enum ENodeType {
QUERY_NODE_QUERY,
// logic plan node
- QUERY_NODE_LOGIC_PLAN_SCAN,
+ QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
QUERY_NODE_LOGIC_PLAN_JOIN,
QUERY_NODE_LOGIC_PLAN_AGG,
QUERY_NODE_LOGIC_PLAN_PROJECT,
@@ -215,7 +215,7 @@ typedef enum ENodeType {
QUERY_NODE_LOGIC_PLAN,
// physical plan node
- QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN,
+ QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 088da73a1a..e1f86bae58 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -428,6 +428,9 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
char* nodesGetFillModeString(EFillMode mode);
int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);
+const char* operatorTypeStr(EOperatorType type);
+const char* logicConditionTypeStr(ELogicConditionType type);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index a3de9164a2..717278d51d 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -53,6 +53,8 @@ typedef struct SParseContext {
int8_t schemalessType;
const char* svrVer;
bool nodeOffline;
+ SArray* pTableMetaPos; // sql table pos => catalog data pos
+ SArray* pTableVgroupPos; // sql table pos => catalog data pos
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
@@ -84,8 +86,8 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
int32_t rowNum);
int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields);
int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields);
-int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName, TAOS_MULTI_BIND* bind,
- char* msgBuf, int32_t msgBufLen);
+int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName,
+ TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen);
void destroyBoundColumnInfo(void* pBoundInfo);
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
int32_t msgBufLen);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index f51c37ed47..384c6a289f 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -53,6 +53,7 @@ enum {
TASK_SCHED_STATUS__WAITING,
TASK_SCHED_STATUS__ACTIVE,
TASK_SCHED_STATUS__FAILED,
+ TASK_SCHED_STATUS__DROPPING,
};
enum {
@@ -127,6 +128,10 @@ typedef struct {
int8_t type;
} SStreamCheckpoint;
+typedef struct {
+ int8_t type;
+} SStreamTaskDestroy;
+
typedef struct {
int8_t type;
SSDataBlock* pBlock;
@@ -211,7 +216,6 @@ typedef struct {
void* vnode;
FTbSink* tbSinkFunc;
STSchema* pTSchema;
- SHashObj* pHash; // groupId to tbuid
} STaskSinkTb;
typedef void FSmaSink(void* vnode, int64_t smaId, const SArray* data);
@@ -275,12 +279,8 @@ typedef struct SStreamTask {
int32_t nodeId;
SEpSet epSet;
- // used for task source and sink,
- // while task agg should have processedVer for each child
int64_t recoverSnapVer;
int64_t startVer;
- int64_t checkpointVer;
- int64_t processedVer;
// children info
SArray* childEpInfo; // SArray
@@ -519,7 +519,7 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
void streamMetaClose(SStreamMeta* streamMeta);
int32_t streamMetaAddTask(SStreamMeta* pMeta, SStreamTask* pTask);
-int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLen);
+int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen);
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h
index 78543118da..c186430f3f 100644
--- a/include/libs/stream/tstreamUpdate.h
+++ b/include/libs/stream/tstreamUpdate.h
@@ -25,33 +25,34 @@ extern "C" {
#endif
typedef struct SUpdateInfo {
- SArray *pTsBuckets;
- uint64_t numBuckets;
- SArray *pTsSBFs;
- uint64_t numSBFs;
- int64_t interval;
- int64_t watermark;
- TSKEY minTS;
- SScalableBf* pCloseWinSBF;
- SHashObj* pMap;
- STimeWindow scanWindow;
- uint64_t scanGroupId;
- uint64_t maxVersion;
+ SArray *pTsBuckets;
+ uint64_t numBuckets;
+ SArray *pTsSBFs;
+ uint64_t numSBFs;
+ int64_t interval;
+ int64_t watermark;
+ TSKEY minTS;
+ SScalableBf *pCloseWinSBF;
+ SHashObj *pMap;
+ STimeWindow scanWindow;
+ uint64_t scanGroupId;
+ uint64_t maxVersion;
} SUpdateInfo;
-SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark);
+SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
-bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
-void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version);
-bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version);
-void updateInfoDestroy(SUpdateInfo *pInfo);
-void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
-void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
-int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo);
-int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
+bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
+bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid);
+void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
+bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
+void updateInfoDestroy(SUpdateInfo *pInfo);
+void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo);
+void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo);
+int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo);
+int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
#ifdef __cplusplus
}
#endif
-#endif /* ifndef _TSTREAMUPDATE_H_ */
\ No newline at end of file
+#endif /* ifndef _TSTREAMUPDATE_H_ */
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index aa563343f8..e6a4dd1d49 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -26,10 +26,15 @@ extern "C" {
extern bool gRaftDetailLog;
-#define SYNC_RESP_TTL_MS 10000000
-#define SYNC_SPEED_UP_HB_TIMER 400
-#define SYNC_SPEED_UP_AFTER_MS (1000 * 20)
-#define SYNC_SLOW_DOWN_RANGE 100
+#define SYNC_RESP_TTL_MS 10000000
+#define SYNC_SPEED_UP_HB_TIMER 400
+#define SYNC_SPEED_UP_AFTER_MS (1000 * 20)
+#define SYNC_SLOW_DOWN_RANGE 100
+#define SYNC_MAX_READ_RANGE 2
+#define SYNC_MAX_PROGRESS_WAIT_MS 4000
+#define SYNC_MAX_START_TIME_RANGE_MS (1000 * 20)
+#define SYNC_MAX_RECV_TIME_RANGE_MS 1200
+#define SYNC_ADD_QUORUM_COUNT 3
#define SYNC_MAX_BATCH_SIZE 1
#define SYNC_INDEX_BEGIN 0
@@ -210,9 +215,12 @@ void syncStop(int64_t rid);
int32_t syncSetStandby(int64_t rid);
ESyncState syncGetMyRole(int64_t rid);
bool syncIsReady(int64_t rid);
+bool syncIsReadyForRead(int64_t rid);
const char* syncGetMyRoleStr(int64_t rid);
bool syncRestoreFinish(int64_t rid);
SyncTerm syncGetMyTerm(int64_t rid);
+SyncIndex syncGetLastIndex(int64_t rid);
+SyncIndex syncGetCommitIndex(int64_t rid);
SyncGroupId syncGetVgId(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
diff --git a/include/libs/sync/syncTools.h b/include/libs/sync/syncTools.h
index cd2c2d4a4f..6c95c3c6d7 100644
--- a/include/libs/sync/syncTools.h
+++ b/include/libs/sync/syncTools.h
@@ -423,6 +423,7 @@ typedef struct SyncAppendEntriesReply {
SyncTerm privateTerm;
bool success;
SyncIndex matchIndex;
+ int64_t startTime;
} SyncAppendEntriesReply;
SyncAppendEntriesReply* syncAppendEntriesReplyBuild(int32_t vgId);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 3ca6978156..12d6127165 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -291,6 +291,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_STREAM_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F1)
#define TSDB_CODE_MND_INVALID_STREAM_OPTION TAOS_DEF_ERROR_CODE(0, 0x03F2)
#define TSDB_CODE_MND_STREAM_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03F3)
+#define TSDB_CODE_MND_STREAM_TASK_DROPPED TAOS_DEF_ERROR_CODE(0, 0x03F4)
// mnode-sma
#define TSDB_CODE_MND_SMA_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0480)
@@ -614,6 +615,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154)
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
+#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3157)
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
@@ -622,6 +624,7 @@ int32_t* taosGetErrno();
//tmq
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
+#define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002)
#ifdef __cplusplus
}
diff --git a/include/util/tdef.h b/include/util/tdef.h
index a3deb73fd4..6ce1571656 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -132,15 +132,14 @@ typedef enum EOperatorType {
OP_TYPE_DIV,
OP_TYPE_REM,
// unary arithmetic operator
- OP_TYPE_MINUS,
- OP_TYPE_ASSIGN,
+ OP_TYPE_MINUS = 20,
// bitwise operator
- OP_TYPE_BIT_AND,
+ OP_TYPE_BIT_AND = 30,
OP_TYPE_BIT_OR,
// binary comparison operator
- OP_TYPE_GREATER_THAN,
+ OP_TYPE_GREATER_THAN = 40,
OP_TYPE_GREATER_EQUAL,
OP_TYPE_LOWER_THAN,
OP_TYPE_LOWER_EQUAL,
@@ -153,7 +152,7 @@ typedef enum EOperatorType {
OP_TYPE_MATCH,
OP_TYPE_NMATCH,
// unary comparison operator
- OP_TYPE_IS_NULL,
+ OP_TYPE_IS_NULL = 100,
OP_TYPE_IS_NOT_NULL,
OP_TYPE_IS_TRUE,
OP_TYPE_IS_FALSE,
@@ -163,8 +162,11 @@ typedef enum EOperatorType {
OP_TYPE_IS_NOT_UNKNOWN,
// json operator
- OP_TYPE_JSON_GET_VALUE,
- OP_TYPE_JSON_CONTAINS
+ OP_TYPE_JSON_GET_VALUE = 150,
+ OP_TYPE_JSON_CONTAINS,
+
+ // internal operator
+ OP_TYPE_ASSIGN = 200
} EOperatorType;
#define OP_TYPE_CALC_MAX OP_TYPE_BIT_OR
diff --git a/include/util/tqueue.h b/include/util/tqueue.h
index 0f4f1db9ee..da409a90bb 100644
--- a/include/util/tqueue.h
+++ b/include/util/tqueue.h
@@ -76,6 +76,7 @@ void taosFreeQall(STaosQall *qall);
int32_t taosReadAllQitems(STaosQueue *queue, STaosQall *qall);
int32_t taosGetQitem(STaosQall *qall, void **ppItem);
void taosResetQitems(STaosQall *qall);
+int32_t taosQallItemSize(STaosQall *qall);
STaosQset *taosOpenQset();
void taosCloseQset(STaosQset *qset);
diff --git a/include/util/tref.h b/include/util/tref.h
index 7e08bb045b..c2cc54cb07 100644
--- a/include/util/tref.h
+++ b/include/util/tref.h
@@ -29,11 +29,11 @@ int32_t taosOpenRef(int32_t max, void (*fp)(void *));
// close the reference set, refId is the return value by taosOpenRef
// return 0 if success. On error, -1 is returned, and terrno is set appropriately
-int32_t taosCloseRef(int32_t refId);
+int32_t taosCloseRef(int32_t rsetId);
// add ref, p is the pointer to resource or pointer ID
// return Reference ID(rid) allocated. On error, -1 is returned, and terrno is set appropriately
-int64_t taosAddRef(int32_t refId, void *p);
+int64_t taosAddRef(int32_t rsetId, void *p);
// remove ref, rid is the reference ID returned by taosAddRef
// return 0 if success. On error, -1 is returned, and terrno is set appropriately
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 5676bf5c43..4953102842 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -29,6 +29,7 @@ else
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
+ ${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${cfg_link_dir}/* || :
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 6de475a4c0..3db9005f95 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -60,6 +60,7 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat
cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
+cp ${compile_dir}/build/bin/udfd ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin
if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index 7a34f7a222..637d2d425a 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -69,6 +69,7 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
+cp %{_compiledir}/build/bin/udfd %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin
if [ -f %{_compiledir}/build/bin/taosadapter ]; then
@@ -204,6 +205,7 @@ if [ $1 -eq 0 ];then
# Remove all links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
+ ${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${cfg_link_dir}/* || :
${csudo}rm -f ${inc_link_dir}/taos.h || :
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index eda2b052d1..39606ead30 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -18,6 +18,7 @@ script_dir=$(dirname $(readlink -f "$0"))
clientName="taos"
serverName="taosd"
+udfdName="udfd"
configFile="taos.cfg"
productName="TDengine"
emailName="taosdata.com"
@@ -192,6 +193,7 @@ function install_bin() {
# Remove links
${csudo}rm -f ${bin_link_dir}/${clientName} || :
${csudo}rm -f ${bin_link_dir}/${serverName} || :
+ ${csudo}rm -f ${bin_link_dir}/${udfdName} || :
${csudo}rm -f ${bin_link_dir}/${adapterName} || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/${demoName} || :
@@ -205,6 +207,7 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} || :
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || :
+ [ -x ${install_main_dir}/bin/${udfdName} ] && ${csudo}ln -s ${install_main_dir}/bin/${udfdName} ${bin_link_dir}/${udfdName} || :
[ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
@@ -742,7 +745,7 @@ function is_version_compatible() {
fi
exist_version=$(${installDir}/bin/${serverName} -V | head -1 | cut -d ' ' -f 3)
- vercomp $exist_version "2.0.16.0"
+ vercomp $exist_version "3.0.0.0"
case $? in
2)
prompt_force=1
diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat
index 0f9e836ae2..d4dde391c8 100644
--- a/packaging/tools/make_install.bat
+++ b/packaging/tools/make_install.bat
@@ -1,7 +1,57 @@
@echo off
goto %1
:needAdmin
+
+if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json (
+ echo The default data directory C:/TDengine/data contains old data of tdengine 2.x, please clear it before installing!
+)
+set source_dir=%2
+set source_dir=%source_dir:/=\\%
+set binary_dir=%3
+set binary_dir=%binary_dir:/=\\%
+set osType=%4
+set verNumber=%5
+set tagert_dir=C:\\TDengine
+
+if not exist %tagert_dir% (
+ mkdir %tagert_dir%
+)
+if not exist %tagert_dir%\\cfg (
+ mkdir %tagert_dir%\\cfg
+)
+if not exist %tagert_dir%\\include (
+ mkdir %tagert_dir%\\include
+)
+if not exist %tagert_dir%\\driver (
+ mkdir %tagert_dir%\\driver
+)
+if not exist C:\\TDengine\\cfg\\taos.cfg (
+ copy %source_dir%\\packaging\\cfg\\taos.cfg %tagert_dir%\\cfg\\taos.cfg > nul
+)
+
+if exist %binary_dir%\\test\\cfg\\taosadapter.toml (
+ if not exist %tagert_dir%\\cfg\\taosadapter.toml (
+ copy %binary_dir%\\test\\cfg\\taosadapter.toml %tagert_dir%\\cfg\\taosadapter.toml > nul
+ )
+)
+
+copy %source_dir%\\include\\client\\taos.h %tagert_dir%\\include > nul
+copy %source_dir%\\include\\util\\taoserror.h %tagert_dir%\\include > nul
+copy %source_dir%\\include\\libs\\function\\taosudf.h %tagert_dir%\\include > nul
+copy %binary_dir%\\build\\lib\\taos.lib %tagert_dir%\\driver > nul
+copy %binary_dir%\\build\\lib\\taos_static.lib %tagert_dir%\\driver > nul
+copy %binary_dir%\\build\\lib\\taos.dll %tagert_dir%\\driver > nul
+copy %binary_dir%\\build\\bin\\taos.exe %tagert_dir% > nul
+copy %binary_dir%\\build\\bin\\taosd.exe %tagert_dir% > nul
+copy %binary_dir%\\build\\bin\\udfd.exe %tagert_dir% > nul
+if exist %binary_dir%\\build\\bin\\taosBenchmark.exe (
+ copy %binary_dir%\\build\\bin\\taosBenchmark.exe %tagert_dir% > nul
+)
+if exist %binary_dir%\\build\\bin\\taosadapter.exe (
+ copy %binary_dir%\\build\\bin\\taosadapter.exe %tagert_dir% > nul
+)
+
mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close)&& echo To start/stop TDengine with administrator privileges: sc start/stop taosd &goto :eof
:hasAdmin
-cp -f C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32
+copy /y C:\\TDengine\\driver\\taos.dll C:\\Windows\\System32 > nul
sc query "taosd" >nul || sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index d8d4c5bf2a..6a95ace99e 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -664,7 +664,9 @@ function install_TDengine() {
## ==============================Main program starts from here============================
echo source directory: $1
echo binary directory: $2
-if [ "$osType" != "Darwin" ]; then
+if [ -x ${data_dir}/dnode/dnodeCfg.json ]; then
+ echo -e "\033[44;31;5mThe default data directory ${data_dir} contains old data of tdengine 2.x, please clear it before installing!\033[0m"
+elif [ "$osType" != "Darwin" ]; then
if [ -x ${bin_dir}/${clientName} ]; then
update_TDengine
else
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 6103ce170c..f5e3bf1882 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -85,6 +85,7 @@ else
${build_dir}/bin/${clientName} \
${taostools_bin_files} \
${build_dir}/bin/taosadapter \
+ ${build_dir}/bin/udfd \
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/startPre.sh \
@@ -318,7 +319,7 @@ if [ "$verMode" == "cluster" ]; then
fi
# Copy release note
-cp ${script_dir}/release_note ${install_dir}
+# cp ${script_dir}/release_note ${install_dir}
# exit 1
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index aa80cfb86c..fcc8a2a942 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -118,6 +118,7 @@ function install_bin() {
# Remove links
${csudo}rm -f ${bin_link_dir}/taos || :
${csudo}rm -f ${bin_link_dir}/taosd || :
+ ${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosadapter || :
${csudo}rm -f ${bin_link_dir}/taosBenchmark || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
@@ -130,6 +131,7 @@ function install_bin() {
#Make link
[ -x ${bin_dir}/taos ] && ${csudo}ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
[ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
+ [ -x ${bin_dir}/udfd ] && ${csudo}ln -s ${bin_dir}/udfd ${bin_link_dir}/udfd || :
[ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || :
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || :
[ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || :
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index acdb3b68b0..9c086fc83e 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -689,11 +689,11 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
TDMT_VND_CREATE_TABLE == pRequest->type) {
pRequest->body.resInfo.numOfRows = res.numOfRows;
if (TDMT_VND_SUBMIT == pRequest->type) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertRows, res.numOfRows);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, res.numOfRows);
}
-
+
schedulerFreeJob(&pRequest->body.queryJob, 0);
}
@@ -800,8 +800,8 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
break;
}
case TDMT_VND_SUBMIT: {
- atomic_add_fetch_64((int64_t *)&pAppInfo->summary.insertBytes, pRes->numOfBytes);
-
+ atomic_add_fetch_64((int64_t*)&pAppInfo->summary.insertBytes, pRes->numOfBytes);
+
code = handleSubmitExecRes(pRequest, pRes->res, pCatalog, &epset);
break;
}
@@ -832,9 +832,9 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
if (pResult) {
pRequest->body.resInfo.numOfRows = pResult->numOfRows;
if (TDMT_VND_SUBMIT == pRequest->type) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertRows, pResult->numOfRows);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows);
}
}
@@ -877,14 +877,14 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
if (pQuery->pRoot) {
pRequest->stmtType = pQuery->pRoot->type;
}
-
+
if (pQuery->pRoot && !pRequest->inRetry) {
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
if (QUERY_NODE_VNODE_MODIF_STMT == pQuery->pRoot->type) {
- atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertsReq, 1);
} else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) {
- atomic_add_fetch_64((int64_t *)&pActivity->numOfQueryReq, 1);
+ atomic_add_fetch_64((int64_t*)&pActivity->numOfQueryReq, 1);
}
}
@@ -1467,9 +1467,9 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4)
tscDebug("0x%" PRIx64 " fetch results, numOfRows:%d total Rows:%" PRId64 ", complete:%d, reqId:0x%" PRIx64,
pRequest->self, pResInfo->numOfRows, pResInfo->totalRows, pResInfo->completed, pRequest->requestId);
- STscObj *pTscObj = pRequest->pTscObj;
- SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- atomic_add_fetch_64((int64_t *)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
+ STscObj* pTscObj = pRequest->pTscObj;
+ SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
if (pResultInfo->numOfRows == 0) {
return NULL;
@@ -2006,7 +2006,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
bool inEscape = false;
int32_t code = 0;
- void *pIter = NULL;
+ void* pIter = NULL;
int32_t vIdx = 0;
int32_t vPos[2];
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 0ec724c6d0..0e95cd4d99 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -192,6 +192,7 @@ void taos_free_result(TAOS_RES *res) {
if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
pRsp->resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&pRsp->resInfo);
+ taosMemoryFree(pRsp);
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj *pRspObj = (SMqMetaRspObj *)res;
taosMemoryFree(pRspObj->metaRsp.metaRsp);
diff --git a/source/client/src/taosx.c b/source/client/src/taosx.c
new file mode 100644
index 0000000000..677567e38f
--- /dev/null
+++ b/source/client/src/taosx.c
@@ -0,0 +1,1628 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "cJSON.h"
+#include "clientInt.h"
+#include "clientLog.h"
+#include "parser.h"
+#include "tdatablock.h"
+#include "tdef.h"
+#include "tglobal.h"
+#include "tmsgtype.h"
+#include "tqueue.h"
+#include "tref.h"
+#include "ttimer.h"
+
+static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,
+ int8_t t) {
+ char* string = NULL;
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ return string;
+ }
+ cJSON* type = cJSON_CreateString("create");
+ cJSON_AddItemToObject(json, "type", type);
+
+ // char uid[32] = {0};
+ // sprintf(uid, "%"PRIi64, id);
+ // cJSON* id_ = cJSON_CreateString(uid);
+ // cJSON_AddItemToObject(json, "id", id_);
+ cJSON* tableName = cJSON_CreateString(name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ // cJSON* version = cJSON_CreateNumber(1);
+ // cJSON_AddItemToObject(json, "version", version);
+
+ cJSON* columns = cJSON_CreateArray();
+ for (int i = 0; i < schemaRow->nCols; i++) {
+ cJSON* column = cJSON_CreateObject();
+ SSchema* s = schemaRow->pSchema + i;
+ cJSON* cname = cJSON_CreateString(s->name);
+ cJSON_AddItemToObject(column, "name", cname);
+ cJSON* ctype = cJSON_CreateNumber(s->type);
+ cJSON_AddItemToObject(column, "type", ctype);
+ if (s->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = s->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(column, "length", cbytes);
+ } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(column, "length", cbytes);
+ }
+ cJSON_AddItemToArray(columns, column);
+ }
+ cJSON_AddItemToObject(json, "columns", columns);
+
+ cJSON* tags = cJSON_CreateArray();
+ for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
+ cJSON* tag = cJSON_CreateObject();
+ SSchema* s = schemaTag->pSchema + i;
+ cJSON* tname = cJSON_CreateString(s->name);
+ cJSON_AddItemToObject(tag, "name", tname);
+ cJSON* ttype = cJSON_CreateNumber(s->type);
+ cJSON_AddItemToObject(tag, "type", ttype);
+ if (s->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = s->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(tag, "length", cbytes);
+ } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(tag, "length", cbytes);
+ }
+ cJSON_AddItemToArray(tags, tag);
+ }
+ cJSON_AddItemToObject(json, "tags", tags);
+
+ string = cJSON_PrintUnformatted(json);
+ cJSON_Delete(json);
+ return string;
+}
+
+static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
+ SMAlterStbReq req = {0};
+ cJSON* json = NULL;
+ char* string = NULL;
+
+ if (tDeserializeSMAlterStbReq(alterData, alterDataLen, &req) != 0) {
+ goto end;
+ }
+
+ json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto end;
+ }
+ cJSON* type = cJSON_CreateString("alter");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ SName name = {0};
+ tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+ cJSON* tableName = cJSON_CreateString(name.tname);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+
+ cJSON* alterType = cJSON_CreateNumber(req.alterType);
+ cJSON_AddItemToObject(json, "alterType", alterType);
+ switch (req.alterType) {
+ case TSDB_ALTER_TABLE_ADD_TAG:
+ case TSDB_ALTER_TABLE_ADD_COLUMN: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(field->type);
+ cJSON_AddItemToObject(json, "colType", colType);
+
+ if (field->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = field->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_DROP_TAG:
+ case TSDB_ALTER_TABLE_DROP_COLUMN: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
+ TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
+ cJSON* colName = cJSON_CreateString(field->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(field->type);
+ cJSON_AddItemToObject(json, "colType", colType);
+ if (field->type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = field->bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
+ TAOS_FIELD* oldField = taosArrayGet(req.pFields, 0);
+ TAOS_FIELD* newField = taosArrayGet(req.pFields, 1);
+ cJSON* colName = cJSON_CreateString(oldField->name);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colNewName = cJSON_CreateString(newField->name);
+ cJSON_AddItemToObject(json, "colNewName", colNewName);
+ break;
+ }
+ default:
+ break;
+ }
+ string = cJSON_PrintUnformatted(json);
+
+end:
+ cJSON_Delete(json);
+ tFreeSMAltertbReq(&req);
+ return string;
+}
+
+static char* processCreateStb(SMqMetaRsp* metaRsp) {
+ SVCreateStbReq req = {0};
+ SDecoder coder;
+ char* string = NULL;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto _err;
+ }
+ string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE);
+ tDecoderClear(&coder);
+ return string;
+
+_err:
+ tDecoderClear(&coder);
+ return string;
+}
+
+static char* processAlterStb(SMqMetaRsp* metaRsp) {
+ SVCreateStbReq req = {0};
+ SDecoder coder;
+ char* string = NULL;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto _err;
+ }
+ string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen);
+ tDecoderClear(&coder);
+ return string;
+
+_err:
+ tDecoderClear(&coder);
+ return string;
+}
+
+static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) {
+ char* string = NULL;
+ SArray* pTagVals = NULL;
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ return string;
+ }
+ cJSON* type = cJSON_CreateString("create");
+ cJSON_AddItemToObject(json, "type", type);
+ // char cid[32] = {0};
+ // sprintf(cid, "%"PRIi64, id);
+ // cJSON* cid_ = cJSON_CreateString(cid);
+ // cJSON_AddItemToObject(json, "id", cid_);
+
+ cJSON* tableName = cJSON_CreateString(name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("child");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* using = cJSON_CreateString(sname);
+ cJSON_AddItemToObject(json, "using", using);
+ cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
+ cJSON_AddItemToObject(json, "tagNum", tagNumJson);
+ // cJSON* version = cJSON_CreateNumber(1);
+ // cJSON_AddItemToObject(json, "version", version);
+
+ cJSON* tags = cJSON_CreateArray();
+ int32_t code = tTagToValArray(pTag, &pTagVals);
+ if (code) {
+ goto end;
+ }
+
+ if (tTagIsJson(pTag)) {
+ STag* p = (STag*)pTag;
+ if (p->nTag == 0) {
+ goto end;
+ }
+ char* pJson = parseTagDatatoJson(pTag);
+ cJSON* tag = cJSON_CreateObject();
+ STagVal* pTagVal = taosArrayGet(pTagVals, 0);
+
+ char* ptname = taosArrayGet(tagName, 0);
+ cJSON* tname = cJSON_CreateString(ptname);
+ cJSON_AddItemToObject(tag, "name", tname);
+ // cJSON* cid_ = cJSON_CreateString("");
+ // cJSON_AddItemToObject(tag, "cid", cid_);
+ cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
+ cJSON_AddItemToObject(tag, "type", ttype);
+ cJSON* tvalue = cJSON_CreateString(pJson);
+ cJSON_AddItemToObject(tag, "value", tvalue);
+ cJSON_AddItemToArray(tags, tag);
+ taosMemoryFree(pJson);
+ goto end;
+ }
+
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
+
+ cJSON* tag = cJSON_CreateObject();
+
+ char* ptname = taosArrayGet(tagName, i);
+ cJSON* tname = cJSON_CreateString(ptname);
+ cJSON_AddItemToObject(tag, "name", tname);
+ // cJSON* cid = cJSON_CreateNumber(pTagVal->cid);
+ // cJSON_AddItemToObject(tag, "cid", cid);
+ cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
+ cJSON_AddItemToObject(tag, "type", ttype);
+
+ cJSON* tvalue = NULL;
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
+ if (!buf) goto end;
+ dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL);
+ tvalue = cJSON_CreateString(buf);
+ taosMemoryFree(buf);
+ } else {
+ double val = 0;
+ GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
+ tvalue = cJSON_CreateNumber(val);
+ }
+
+ cJSON_AddItemToObject(tag, "value", tvalue);
+ cJSON_AddItemToArray(tags, tag);
+ }
+
+end:
+ cJSON_AddItemToObject(json, "tags", tags);
+ string = cJSON_PrintUnformatted(json);
+ cJSON_Delete(json);
+ taosArrayDestroy(pTagVals);
+ return string;
+}
+
+static char* processCreateTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVCreateTbBatchReq req = {0};
+ SVCreateTbReq* pCreateReq;
+ char* string = NULL;
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVCreateTbBatchReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ // loop to create table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name,
+ pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum);
+ } else if (pCreateReq->type == TSDB_NORMAL_TABLE) {
+ string =
+ buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
+ }
+ }
+
+ tDecoderClear(&decoder);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static char* processAlterTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVAlterTbReq vAlterTbReq = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVAlterTbReq(&decoder, &vAlterTbReq) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("alter");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
+ cJSON_AddItemToObject(json, "alterType", alterType);
+
+ switch (vAlterTbReq.action) {
+ case TSDB_ALTER_TABLE_ADD_COLUMN: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
+ cJSON_AddItemToObject(json, "colType", colType);
+
+ if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_DROP_COLUMN: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
+ cJSON_AddItemToObject(json, "colType", colType);
+ if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) {
+ int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
+ int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
+ cJSON* cbytes = cJSON_CreateNumber(length);
+ cJSON_AddItemToObject(json, "colLength", cbytes);
+ }
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
+ cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
+ cJSON_AddItemToObject(json, "colName", colName);
+ cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName);
+ cJSON_AddItemToObject(json, "colNewName", colNewName);
+ break;
+ }
+ case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: {
+ cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName);
+ cJSON_AddItemToObject(json, "colName", tagName);
+
+ bool isNull = vAlterTbReq.isNull;
+ if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
+ STag* jsonTag = (STag*)vAlterTbReq.pTagVal;
+ if (jsonTag->nTag == 0) isNull = true;
+ }
+ if (!isNull) {
+ char* buf = NULL;
+
+ if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
+ ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true);
+ buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
+ } else {
+ buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
+ dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
+ }
+
+ cJSON* colValue = cJSON_CreateString(buf);
+ cJSON_AddItemToObject(json, "colValue", colValue);
+ taosMemoryFree(buf);
+ }
+
+ cJSON* isNullCJson = cJSON_CreateBool(isNull);
+ cJSON_AddItemToObject(json, "colValueNull", isNullCJson);
+ break;
+ }
+ default:
+ break;
+ }
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static char* processDropSTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVDropStbReq req = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVDropStbReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("drop");
+ cJSON_AddItemToObject(json, "type", type);
+ cJSON* tableName = cJSON_CreateString(req.name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
+ cJSON* tableType = cJSON_CreateString("super");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static char* processDropTable(SMqMetaRsp* metaRsp) {
+ SDecoder decoder = {0};
+ SVDropTbBatchReq req = {0};
+ char* string = NULL;
+
+ // decode
+ void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
+ int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
+ tDecoderInit(&decoder, data, len);
+ if (tDecodeSVDropTbBatchReq(&decoder, &req) < 0) {
+ goto _exit;
+ }
+
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ goto _exit;
+ }
+ cJSON* type = cJSON_CreateString("drop");
+ cJSON_AddItemToObject(json, "type", type);
+ // cJSON* uid = cJSON_CreateNumber(id);
+ // cJSON_AddItemToObject(json, "uid", uid);
+ // cJSON* tableType = cJSON_CreateString("normal");
+ // cJSON_AddItemToObject(json, "tableType", tableType);
+
+ cJSON* tableNameList = cJSON_CreateArray();
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ SVDropTbReq* pDropTbReq = req.pReqs + iReq;
+
+ cJSON* tableName = cJSON_CreateString(pDropTbReq->name);
+ cJSON_AddItemToArray(tableNameList, tableName);
+ }
+ cJSON_AddItemToObject(json, "tableNameList", tableNameList);
+
+ string = cJSON_PrintUnformatted(json);
+
+_exit:
+ tDecoderClear(&decoder);
+ return string;
+}
+
+static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
+ SVCreateStbReq req = {0};
+ SDecoder coder;
+ SMCreateStbReq pReq = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+ // build create stable
+ pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField));
+ for (int32_t i = 0; i < req.schemaRow.nCols; i++) {
+ SSchema* pSchema = req.schemaRow.pSchema + i;
+ SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
+ strcpy(field.name, pSchema->name);
+ taosArrayPush(pReq.pColumns, &field);
+ }
+ pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField));
+ for (int32_t i = 0; i < req.schemaTag.nCols; i++) {
+ SSchema* pSchema = req.schemaTag.pSchema + i;
+ SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
+ strcpy(field.name, pSchema->name);
+ taosArrayPush(pReq.pTags, &field);
+ }
+
+ pReq.colVer = req.schemaRow.version;
+ pReq.tagVer = req.schemaTag.version;
+ pReq.numOfColumns = req.schemaRow.nCols;
+ pReq.numOfTags = req.schemaTag.nCols;
+ pReq.commentLen = -1;
+ pReq.suid = req.suid;
+ pReq.source = TD_REQ_FROM_TAOX;
+ pReq.igExists = true;
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SName tableName;
+ tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
+
+ SCmdMsgInfo pCmdMsg = {0};
+ pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
+ pCmdMsg.msgType = TDMT_MND_CREATE_STB;
+ pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
+ pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
+ if (NULL == pCmdMsg.pMsg) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
+
+ SQuery pQuery = {0};
+ pQuery.execMode = QUERY_EXEC_MODE_RPC;
+ pQuery.pCmdMsg = &pCmdMsg;
+ pQuery.msgType = pQuery.pCmdMsg->msgType;
+ pQuery.stableQuery = true;
+
+ launchQueryImpl(pRequest, &pQuery, true, NULL);
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SCatalog* pCatalog = NULL;
+ catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ catalogRemoveTableMeta(pCatalog, &tableName);
+ }
+
+ code = pRequest->code;
+ taosMemoryFree(pCmdMsg.pMsg);
+
+end:
+ destroyRequest(pRequest);
+ tFreeSMCreateStbReq(&pReq);
+ tDecoderClear(&coder);
+ return code;
+}
+
+static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
+ SVDropStbReq req = {0};
+ SDecoder coder;
+ SMDropStbReq pReq = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVDropStbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // build drop stable
+ pReq.igNotExists = true;
+ pReq.source = TD_REQ_FROM_TAOX;
+ pReq.suid = req.suid;
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SName tableName = {0};
+ tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
+
+ SCmdMsgInfo pCmdMsg = {0};
+ pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
+ pCmdMsg.msgType = TDMT_MND_DROP_STB;
+ pCmdMsg.msgLen = tSerializeSMDropStbReq(NULL, 0, &pReq);
+ pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
+ if (NULL == pCmdMsg.pMsg) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ tSerializeSMDropStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
+
+ SQuery pQuery = {0};
+ pQuery.execMode = QUERY_EXEC_MODE_RPC;
+ pQuery.pCmdMsg = &pCmdMsg;
+ pQuery.msgType = pQuery.pCmdMsg->msgType;
+ pQuery.stableQuery = true;
+
+ launchQueryImpl(pRequest, &pQuery, true, NULL);
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SCatalog* pCatalog = NULL;
+ catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ catalogRemoveTableMeta(pCatalog, &tableName);
+ }
+
+ code = pRequest->code;
+ taosMemoryFree(pCmdMsg.pMsg);
+
+end:
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ return code;
+}
+
+typedef struct SVgroupCreateTableBatch {
+ SVCreateTbBatchReq req;
+ SVgroupInfo info;
+ char dbName[TSDB_DB_NAME_LEN];
+} SVgroupCreateTableBatch;
+
+static void destroyCreateTbReqBatch(void* data) {
+ SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data;
+ taosArrayDestroy(pTbBatch->req.pArray);
+}
+
+static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVCreateTbBatchReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SHashObj* pVgroupHashmap = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+
+ SVCreateTbReq* pCreateReq = NULL;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pVgroupHashmap) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+
+ pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
+ // loop to create table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+ taosArrayPush(pRequest->tableList, &pName);
+
+ SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
+ if (pTableBatch == NULL) {
+ SVgroupCreateTableBatch tBatch = {0};
+ tBatch.info = pInfo;
+ strcpy(tBatch.dbName, pRequest->pDb);
+
+ tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
+ taosArrayPush(tBatch.req.pArray, pCreateReq);
+
+ taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
+ } else { // add to the correct vgroup
+ taosArrayPush(pTableBatch->req.pArray, pCreateReq);
+ }
+ }
+
+ SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap);
+ if (NULL == pBufArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_CREATE_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ removeMeta(pTscObj, pRequest->tableList);
+ }
+
+ code = pRequest->code;
+
+end:
+ taosHashCleanup(pVgroupHashmap);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+typedef struct SVgroupDropTableBatch {
+ SVDropTbBatchReq req;
+ SVgroupInfo info;
+ char dbName[TSDB_DB_NAME_LEN];
+} SVgroupDropTableBatch;
+
+static void destroyDropTbReqBatch(void* data) {
+ SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data;
+ taosArrayDestroy(pTbBatch->req.pArray);
+}
+
+static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVDropTbBatchReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SHashObj* pVgroupHashmap = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+
+ SVDropTbReq* pDropReq = NULL;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ if (NULL == pVgroupHashmap) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+ pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
+ // loop to create table
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+ pDropReq->igNotExists = true;
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ taosArrayPush(pRequest->tableList, &pName);
+ SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
+ if (pTableBatch == NULL) {
+ SVgroupDropTableBatch tBatch = {0};
+ tBatch.info = pInfo;
+ tBatch.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
+ taosArrayPush(tBatch.req.pArray, pDropReq);
+
+ taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
+ } else { // add to the correct vgroup
+ taosArrayPush(pTableBatch->req.pArray, pDropReq);
+ }
+ }
+
+ SArray* pBufArray = serializeVgroupsDropTableBatch(pVgroupHashmap);
+ if (NULL == pBufArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_DROP_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ removeMeta(pTscObj, pRequest->tableList);
+ }
+ code = pRequest->code;
+
+end:
+ taosHashCleanup(pVgroupHashmap);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+// delete from db.tabl where .. -> delete from tabl where ..
+// delete from db .tabl where .. -> delete from tabl where ..
+// static void getTbName(char *sql){
+// char *ch = sql;
+//
+// bool inBackQuote = false;
+// int8_t dotIndex = 0;
+// while(*ch != '\0'){
+// if(!inBackQuote && *ch == '`'){
+// inBackQuote = true;
+// ch++;
+// continue;
+// }
+//
+// if(inBackQuote && *ch == '`'){
+// inBackQuote = false;
+// ch++;
+//
+// continue;
+// }
+//
+// if(!inBackQuote && *ch == '.'){
+// dotIndex ++;
+// if(dotIndex == 2){
+// memmove(sql, ch + 1, strlen(ch + 1) + 1);
+// break;
+// }
+// }
+// ch++;
+// }
+//}
+
+static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
+ SDeleteRes req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeDeleteRes(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // getTbName(req.tableFName);
+ char sql[256] = {0};
+ sprintf(sql, "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName, req.tsColName,
+ req.skey, req.tsColName, req.ekey);
+ printf("delete sql:%s\n", sql);
+
+ TAOS_RES* res = taos_query(taos, sql);
+ SRequestObj* pRequest = (SRequestObj*)res;
+ code = pRequest->code;
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ code = TSDB_CODE_SUCCESS;
+ }
+ taos_free_result(res);
+
+end:
+ tDecoderClear(&coder);
+ return code;
+}
+
+static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
+ SVAlterTbReq req = {0};
+ SDecoder coder = {0};
+ int32_t code = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = NULL;
+ SQuery* pQuery = NULL;
+ SArray* pArray = NULL;
+ SVgDataBlocks* pVgData = NULL;
+
+ code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+ // decode and process req
+ void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
+ int32_t len = metaLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+ if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
+ code = TSDB_CODE_INVALID_PARA;
+ goto end;
+ }
+
+ // do not deal TSDB_ALTER_TABLE_UPDATE_OPTIONS
+ if (req.action == TSDB_ALTER_TABLE_UPDATE_OPTIONS) {
+ goto end;
+ }
+
+ STscObj* pTscObj = pRequest->pTscObj;
+ SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
+ .requestId = pRequest->requestId,
+ .requestObjRefId = pRequest->self,
+ .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
+
+ SVgroupInfo pInfo = {0};
+ SName pName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, req.tbName, &pName);
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ pArray = taosArrayInit(1, sizeof(void*));
+ if (NULL == pArray) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == pVgData) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pVgData->vg = pInfo;
+ pVgData->pData = taosMemoryMalloc(metaLen);
+ if (NULL == pVgData->pData) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ memcpy(pVgData->pData, meta, metaLen);
+ ((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId);
+ pVgData->size = metaLen;
+ pVgData->numOfTables = 1;
+ taosArrayPush(pArray, &pVgData);
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->msgType = TDMT_VND_ALTER_TABLE;
+ pQuery->stableQuery = false;
+ pQuery->pRoot = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
+
+ code = rewriteToVnodeModifyOpStmt(pQuery, pArray);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+
+ pVgData = NULL;
+ pArray = NULL;
+ code = pRequest->code;
+ if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) {
+ code = TSDB_CODE_SUCCESS;
+ }
+
+ if (pRequest->code == TSDB_CODE_SUCCESS) {
+ SExecResult* pRes = &pRequest->body.resInfo.execRes;
+ if (pRes->res != NULL) {
+ code = handleAlterTbExecRes(pRes->res, pCatalog);
+ }
+ }
+end:
+ taosArrayDestroy(pArray);
+ if (pVgData) taosMemoryFreeClear(pVgData->pData);
+ taosMemoryFreeClear(pVgData);
+ destroyRequest(pRequest);
+ tDecoderClear(&coder);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+typedef struct {
+ SVgroupInfo vg;
+ void* data;
+} VgData;
+
+static void destroyVgHash(void* data) {
+ VgData* vgData = (VgData*)data;
+ taosMemoryFreeClear(vgData->data);
+}
+
+int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ STableMeta* pTableMeta = NULL;
+ SQuery* pQuery = NULL;
+
+ SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
+ if (!pRequest) {
+ uError("WriteRaw:createRequest error request is null");
+ code = terrno;
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ uError("WriteRaw:not use db");
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+
+ SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
+ strcpy(pName.dbname, pRequest->pDb);
+ strcpy(pName.tname, tbname);
+
+ struct SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: get gatlog error");
+ goto end;
+ }
+
+ SRequestConnInfo conn = {0};
+ conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
+ conn.requestId = pRequest->requestId;
+ conn.requestObjRefId = pRequest->self;
+ conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+
+ SVgroupInfo vgData = {0};
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname);
+ goto end;
+ }
+
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
+ goto end;
+ }
+ uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
+ uint64_t uid = pTableMeta->uid;
+ int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < numOfCols; i++) {
+ SSchema* schema = pTableMeta->schema + i;
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(numOfCols - 1);
+ int32_t schemaLen = 0;
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
+ int32_t totalLen = sizeof(SSubmitReq) + submitLen;
+ SSubmitReq* subReq = taosMemoryCalloc(1, totalLen);
+ SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
+ void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
+ STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
+
+ SRowBuilder rb = {0};
+ tdSRowInit(&rb, pTableMeta->sversion);
+ tdSRowSetTpInfo(&rb, numOfCols, fLen);
+ int32_t dataLen = 0;
+
+ char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
+ int32_t* colLength = (int32_t*)pStart;
+ pStart += sizeof(int32_t) * numOfCols;
+
+ SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
+
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
+ pCol[i].offset = (int32_t*)pStart;
+ pStart += rows * sizeof(int32_t);
+ } else {
+ pCol[i].nullbitmap = pStart;
+ pStart += BitmapLen(rows);
+ }
+
+ pCol[i].pData = pStart;
+ pStart += colLength[i];
+ }
+
+ for (int32_t j = 0; j < rows; j++) {
+ tdSRowResetBuf(&rb, rowData);
+ int32_t offset = 0;
+ for (int32_t k = 0; k < numOfCols; k++) {
+ const SSchema* pColumn = &pTableMeta->schema[k];
+
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ if (pCol[k].offset[j] != -1) {
+ char* data = pCol[k].pData + pCol[k].offset[j];
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ } else {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ }
+ } else {
+ if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
+ char* data = pCol[k].pData + pColumn->bytes * j;
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ } else {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ }
+ }
+
+ offset += TYPE_BYTES[pColumn->type];
+ }
+ tdSRowEnd(&rb);
+ int32_t rowLen = TD_ROW_LEN(rowData);
+ rowData = POINTER_SHIFT(rowData, rowLen);
+ dataLen += rowLen;
+ }
+
+ taosMemoryFree(pCol);
+
+ blk->uid = htobe64(uid);
+ blk->suid = htobe64(suid);
+ blk->sversion = htonl(pTableMeta->sversion);
+ blk->schemaLen = htonl(schemaLen);
+ blk->numOfRows = htonl(rows);
+ blk->dataLen = htonl(dataLen);
+ subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
+ subReq->numOfBlocks = 1;
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pQuery) {
+ uError("create SQuery error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->haveResultSet = false;
+ pQuery->msgType = TDMT_VND_SUBMIT;
+ pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
+ if (NULL == pQuery->pRoot) {
+ uError("create pQuery->pRoot error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
+ nodeStmt->payloadType = PAYLOAD_TYPE_KV;
+ nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
+
+ SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == dst) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ dst->vg = vgData;
+ dst->numOfTables = subReq->numOfBlocks;
+ dst->size = subReq->length;
+ dst->pData = (char*)subReq;
+ subReq->header.vgId = htonl(dst->vg.vgId);
+ subReq->version = htonl(1);
+ subReq->header.contLen = htonl(subReq->length);
+ subReq->length = htonl(subReq->length);
+ subReq->numOfBlocks = htonl(subReq->numOfBlocks);
+ subReq = NULL; // no need free
+ taosArrayPush(nodeStmt->pDataBlocks, &dst);
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ code = pRequest->code;
+
+end:
+ taosMemoryFreeClear(pTableMeta);
+ qDestroyQuery(pQuery);
+ return code;
+}
+
+static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SHashObj* pVgHash = NULL;
+ SQuery* pQuery = NULL;
+ SMqRspObj rspObj = {0};
+ SDecoder decoder = {0};
+
+ terrno = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
+ if (!pRequest) {
+ uError("WriteRaw:createRequest error request is null");
+ return terrno;
+ }
+
+ rspObj.resIter = -1;
+ rspObj.resType = RES_TYPE__TMQ;
+
+ tDecoderInit(&decoder, data, dataLen);
+ code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp);
+ if (code != 0) {
+ uError("WriteRaw:decode smqDataRsp error");
+ code = TSDB_CODE_INVALID_MSG;
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ uError("WriteRaw:not use db");
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+
+ pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
+ taosHashSetFreeFp(pVgHash, destroyVgHash);
+ struct SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: get gatlog error");
+ goto end;
+ }
+
+ SRequestConnInfo conn = {0};
+ conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
+ conn.requestId = pRequest->requestId;
+ conn.requestObjRefId = pRequest->self;
+ conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+
+ printf("raw data block num:%d\n", rspObj.rsp.blockNum);
+ while (++rspObj.resIter < rspObj.rsp.blockNum) {
+ SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
+ if (!rspObj.rsp.withSchema) {
+ uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
+ goto end;
+ }
+ SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
+ setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
+
+ code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: setQueryResultFromRsp error");
+ goto end;
+ }
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < pSW->nCols; i++) {
+ SSchema* schema = pSW->pSchema + i;
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t rows = rspObj.resInfo.numOfRows;
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
+ int32_t schemaLen = 0;
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
+ const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
+ if (!tbName) {
+ uError("WriteRaw: tbname is null");
+ code = TSDB_CODE_TMQ_INVALID_MSG;
+ goto end;
+ }
+
+ printf("raw data tbname:%s\n", tbName);
+ SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
+ strcpy(pName.dbname, pRequest->pDb);
+ strcpy(pName.tname, tbName);
+
+ VgData vgData = {0};
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
+ goto end;
+ }
+
+ SSubmitReq* subReq = NULL;
+ SSubmitBlk* blk = NULL;
+ void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
+ if (hData) {
+ vgData = *(VgData*)hData;
+
+ int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
+ void* tmp = taosMemoryRealloc(vgData.data, totalLen);
+ if (tmp == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ vgData.data = tmp;
+ ((VgData*)hData)->data = tmp;
+ subReq = (SSubmitReq*)(vgData.data);
+ blk = POINTER_SHIFT(vgData.data, subReq->length);
+ } else {
+ int32_t totalLen = sizeof(SSubmitReq) + submitLen;
+ void* tmp = taosMemoryCalloc(1, totalLen);
+ if (tmp == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ vgData.data = tmp;
+ taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
+ subReq = (SSubmitReq*)(vgData.data);
+ subReq->length = sizeof(SSubmitReq);
+ subReq->numOfBlocks = 0;
+
+ blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
+ }
+
+ STableMeta* pTableMeta = NULL;
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
+ goto end;
+ }
+ uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
+ uint64_t uid = pTableMeta->uid;
+ taosMemoryFreeClear(pTableMeta);
+
+ void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
+ STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
+
+ SRowBuilder rb = {0};
+ tdSRowInit(&rb, pSW->version);
+ tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
+ int32_t dataLen = 0;
+
+ for (int32_t j = 0; j < rows; j++) {
+ tdSRowResetBuf(&rb, rowData);
+
+ doSetOneRowPtr(&rspObj.resInfo);
+ rspObj.resInfo.current += 1;
+
+ int32_t offset = 0;
+ for (int32_t k = 0; k < pSW->nCols; k++) {
+ const SSchema* pColumn = &pSW->pSchema[k];
+ char* data = rspObj.resInfo.row[k];
+ if (!data) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ } else {
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ data -= VARSTR_HEADER_SIZE;
+ }
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
+ }
+ offset += TYPE_BYTES[pColumn->type];
+ }
+ tdSRowEnd(&rb);
+ int32_t rowLen = TD_ROW_LEN(rowData);
+ rowData = POINTER_SHIFT(rowData, rowLen);
+ dataLen += rowLen;
+ }
+
+ blk->uid = htobe64(uid);
+ blk->suid = htobe64(suid);
+ blk->sversion = htonl(pSW->version);
+ blk->schemaLen = htonl(schemaLen);
+ blk->numOfRows = htonl(rows);
+ blk->dataLen = htonl(dataLen);
+ subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
+ subReq->numOfBlocks++;
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pQuery) {
+ uError("create SQuery error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->haveResultSet = false;
+ pQuery->msgType = TDMT_VND_SUBMIT;
+ pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
+ if (NULL == pQuery->pRoot) {
+ uError("create pQuery->pRoot error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
+ nodeStmt->payloadType = PAYLOAD_TYPE_KV;
+
+ int32_t numOfVg = taosHashGetSize(pVgHash);
+ nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
+
+ VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
+ while (vData) {
+ SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == dst) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ dst->vg = vData->vg;
+ SSubmitReq* subReq = (SSubmitReq*)(vData->data);
+ dst->numOfTables = subReq->numOfBlocks;
+ dst->size = subReq->length;
+ dst->pData = (char*)subReq;
+ vData->data = NULL; // no need free
+ subReq->header.vgId = htonl(dst->vg.vgId);
+ subReq->version = htonl(1);
+ subReq->header.contLen = htonl(subReq->length);
+ subReq->length = htonl(subReq->length);
+ subReq->numOfBlocks = htonl(subReq->numOfBlocks);
+ taosArrayPush(nodeStmt->pDataBlocks, &dst);
+ vData = (VgData*)taosHashIterate(pVgHash, vData);
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ code = pRequest->code;
+
+end:
+ tDecoderClear(&decoder);
+ qDestroyQuery(pQuery);
+ destroyRequest(pRequest);
+ taosHashCleanup(pVgHash);
+ return code;
+}
+
+char* tmq_get_json_meta(TAOS_RES* res) {
+ if (!TD_RES_TMQ_META(res)) {
+ return NULL;
+ }
+
+ SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
+ if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
+ return processCreateStb(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) {
+ return processAlterStb(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) {
+ return processDropSTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) {
+ return processCreateTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) {
+ return processAlterTable(&pMetaRspObj->metaRsp);
+ } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) {
+ return processDropTable(&pMetaRspObj->metaRsp);
+ }
+ return NULL;
+}
+
+void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
+
+int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
+ if (!raw || !res) {
+ return TSDB_CODE_INVALID_PARA;
+ }
+ if (TD_RES_TMQ_META(res)) {
+ SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
+ raw->raw = pMetaRspObj->metaRsp.metaRsp;
+ raw->raw_len = pMetaRspObj->metaRsp.metaRspLen;
+ raw->raw_type = pMetaRspObj->metaRsp.resMsgType;
+ } else if (TD_RES_TMQ(res)) {
+ SMqRspObj* rspObj = ((SMqRspObj*)res);
+
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+
+ void* buf = taosMemoryCalloc(1, len);
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, buf, len);
+ tEncodeSMqDataRsp(&encoder, &rspObj->rsp);
+ tEncoderClear(&encoder);
+
+ raw->raw = buf;
+ raw->raw_len = len;
+ raw->raw_type = RES_TYPE__TMQ;
+ } else {
+ return TSDB_CODE_TMQ_INVALID_MSG;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void tmq_free_raw(tmq_raw_data raw) {
+ if (raw.raw_type == RES_TYPE__TMQ) {
+ taosMemoryFree(raw.raw);
+ }
+}
+
+int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
+ if (!taos) {
+ return TSDB_CODE_INVALID_PARA;
+ }
+
+ if (raw.raw_type == TDMT_VND_CREATE_STB) {
+ return taosCreateStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_ALTER_STB) {
+ return taosCreateStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DROP_STB) {
+ return taosDropStb(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_CREATE_TABLE) {
+ return taosCreateTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_ALTER_TABLE) {
+ return taosAlterTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DROP_TABLE) {
+ return taosDropTable(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == TDMT_VND_DELETE) {
+ return taosDeleteData(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == RES_TYPE__TMQ) {
+ return tmqWriteRaw(taos, raw.raw, raw.raw_len);
+ }
+ return TSDB_CODE_INVALID_PARA;
+}
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index ea7f03a416..7637ffbc80 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -28,8 +28,9 @@
int32_t tmqAskEp(tmq_t* tmq, bool async);
typedef struct {
- int8_t inited;
- tmr_h timer;
+ int8_t inited;
+ tmr_h timer;
+ int32_t rsetId;
} SMqMgmt;
static SMqMgmt tmqMgmt = {0};
@@ -55,8 +56,8 @@ struct tmq_conf_t {
int8_t autoCommit;
int8_t resetOffset;
int8_t withTbName;
- int8_t ssEnable;
- int32_t ssBatchSize;
+ int8_t snapEnable;
+ int32_t snapBatchSize;
bool hbBgEnable;
@@ -70,6 +71,7 @@ struct tmq_conf_t {
};
struct tmq_t {
+ int64_t refId;
// conf
char groupId[TSDB_CGROUP_LEN];
char clientId[256];
@@ -146,8 +148,8 @@ typedef struct {
typedef struct {
// subscribe info
- char* topicName;
- char db[TSDB_DB_FNAME_LEN];
+ char topicName[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
SArray* vgs; // SArray
@@ -166,29 +168,32 @@ typedef struct {
} SMqPollRspWrapper;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
tsem_t rspSem;
int32_t rspErr;
} SMqSubscribeCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
int32_t code;
int32_t async;
tsem_t rspSem;
} SMqAskEpCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
SMqClientVg* pVg;
SMqClientTopic* pTopic;
- int32_t epoch;
int32_t vgId;
tsem_t rspSem;
} SMqPollCbParam;
typedef struct {
- tmq_t* tmq;
+ int64_t refId;
+ int32_t epoch;
int8_t automatic;
int8_t async;
int32_t waitingRspNum;
@@ -282,16 +287,21 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
if (strcmp(key, "experimental.snapshot.enable") == 0) {
if (strcmp(value, "true") == 0) {
- conf->ssEnable = true;
+ conf->snapEnable = true;
return TMQ_CONF_OK;
} else if (strcmp(value, "false") == 0) {
- conf->ssEnable = false;
+ conf->snapEnable = false;
return TMQ_CONF_OK;
} else {
return TMQ_CONF_INVALID;
}
}
+ if (strcmp(key, "experimental.snapshot.batch.size") == 0) {
+ conf->snapBatchSize = atoi(value);
+ return TMQ_CONF_OK;
+ }
+
if (strcmp(key, "enable.heartbeat.background") == 0) {
if (strcmp(value, "true") == 0) {
conf->hbBgEnable = true;
@@ -305,11 +315,6 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
return TMQ_CONF_OK;
}
- if (strcmp(key, "experimental.snapshot.batch.size") == 0) {
- conf->ssBatchSize = atoi(value);
- return TMQ_CONF_OK;
- }
-
if (strcmp(key, "td.connect.ip") == 0) {
conf->ip = strdup(value);
return TMQ_CONF_OK;
@@ -369,6 +374,38 @@ static int32_t tmqMakeTopicVgKey(char* dst, const char* topicName, int32_t vg) {
return sprintf(dst, "%s:%d", topicName, vg);
}
+int32_t tmqCommitDone(SMqCommitCbParamSet* pParamSet) {
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParamSet->refId);
+ if (tmq == NULL) {
+ if (!pParamSet->async) {
+ tsem_destroy(&pParamSet->rspSem);
+ }
+ taosMemoryFree(pParamSet);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
+ // if no more waiting rsp
+ if (pParamSet->async) {
+ // call async cb func
+ if (pParamSet->automatic && tmq->commitCb) {
+ tmq->commitCb(tmq, pParamSet->rspErr, tmq->commitCbUserParam);
+ } else if (!pParamSet->automatic && pParamSet->userCb) {
+ // sem post
+ pParamSet->userCb(tmq, pParamSet->rspErr, pParamSet->userParam);
+ }
+ taosMemoryFree(pParamSet);
+ } else {
+ tsem_post(&pParamSet->rspSem);
+ }
+
+#if 0
+ taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
+ taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
+#endif
+ return 0;
+}
+
int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
SMqCommitCbParam* pParam = (SMqCommitCbParam*)param;
SMqCommitCbParamSet* pParamSet = (SMqCommitCbParamSet*)pParam->params;
@@ -381,6 +418,9 @@ int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
}
#endif
+ taosMemoryFree(pParam->pOffset);
+ if (pBuf->pData) taosMemoryFree(pBuf->pData);
+
/*tscDebug("receive offset commit cb of %s on vgId:%d, offset is %" PRId64, pParam->pOffset->subKey, pParam->->vgId,
* pOffset->version);*/
@@ -389,23 +429,7 @@ int32_t tmqCommitCb(void* param, SDataBuf* pBuf, int32_t code) {
ASSERT(waitingRspNum >= 0);
if (waitingRspNum == 0) {
- // if no more waiting rsp
- if (pParamSet->async) {
- // call async cb func
- if (pParamSet->automatic && pParamSet->tmq->commitCb) {
- pParamSet->tmq->commitCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->tmq->commitCbUserParam);
- } else if (!pParamSet->automatic && pParamSet->userCb) {
- // sem post
- pParamSet->userCb(pParamSet->tmq, pParamSet->rspErr, pParamSet->userParam);
- }
- } else {
- tsem_post(&pParamSet->rspSem);
- }
-
-#if 0
- taosArrayDestroyP(pParamSet->successfulOffsets, taosMemoryFree);
- taosArrayDestroyP(pParamSet->failedOffsets, taosMemoryFree);
-#endif
+ tmqCommitDone(pParamSet);
}
return 0;
}
@@ -499,7 +523,8 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
- pParamSet->tmq = tmq;
+ pParamSet->refId = tmq->refId;
+ pParamSet->epoch = tmq->epoch;
pParamSet->automatic = 0;
pParamSet->async = async;
pParamSet->userCb = userCb;
@@ -560,13 +585,19 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
- pParamSet->tmq = tmq;
+
+ pParamSet->refId = tmq->refId;
+ pParamSet->epoch = tmq->epoch;
+
pParamSet->automatic = automatic;
pParamSet->async = async;
pParamSet->userCb = userCb;
pParamSet->userParam = userParam;
tsem_init(&pParamSet->rspSem, 0, 0);
+ // init as 1 to prevent concurrency issue
+ pParamSet->waitingRspNum = 1;
+
for (int32_t i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
@@ -595,10 +626,17 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
return 0;
}
+ int32_t waitingRspNum = atomic_sub_fetch_32(&pParamSet->waitingRspNum, 1);
+ ASSERT(waitingRspNum >= 0);
+ if (waitingRspNum == 0) {
+ tmqCommitDone(pParamSet);
+ }
+
if (!async) {
tsem_wait(&pParamSet->rspSem);
code = pParamSet->rspErr;
tsem_destroy(&pParamSet->rspSem);
+ taosMemoryFree(pParamSet);
} else {
code = 0;
}
@@ -622,27 +660,39 @@ int32_t tmqCommitInner(tmq_t* tmq, const TAOS_RES* msg, int8_t automatic, int8_t
}
void tmqAssignAskEpTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__ASK_EP;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__ASK_EP;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
void tmqAssignDelayedCommitTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__COMMIT;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__COMMIT;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
void tmqAssignDelayedReportTask(void* param, void* tmrId) {
- tmq_t* tmq = (tmq_t*)param;
- int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
- *pTaskType = TMQ_DELAYED_TASK__REPORT;
- taosWriteQitem(tmq->delayedTask, pTaskType);
- tsem_post(&tmq->rspSem);
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq != NULL) {
+ int8_t* pTaskType = taosAllocateQitem(sizeof(int8_t), DEF_QITEM);
+ *pTaskType = TMQ_DELAYED_TASK__REPORT;
+ taosWriteQitem(tmq->delayedTask, pTaskType);
+ tsem_post(&tmq->rspSem);
+ }
+ taosMemoryFree(param);
}
int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
@@ -651,8 +701,11 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) {
}
void tmqSendHbReq(void* param, void* tmrId) {
- // TODO replace with ref
- tmq_t* tmq = (tmq_t*)param;
+ int64_t refId = *(int64_t*)param;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
+ if (tmq == NULL) {
+ return;
+ }
int64_t consumerId = tmq->consumerId;
int32_t epoch = tmq->epoch;
SMqHbReq* pReq = taosMemoryMalloc(sizeof(SMqHbReq));
@@ -682,7 +735,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &epSet, &transporterId, sendInfo);
OVER:
- taosTmrReset(tmqSendHbReq, 1000, tmq, tmqMgmt.timer, &tmq->hbLiveTimer);
+ taosTmrReset(tmqSendHbReq, 1000, param, tmqMgmt.timer, &tmq->hbLiveTimer);
}
int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
@@ -695,10 +748,18 @@ int32_t tmqHandleAllDelayedTask(tmq_t* tmq) {
if (*pTaskType == TMQ_DELAYED_TASK__ASK_EP) {
tmqAskEp(tmq, true);
- taosTmrReset(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer, &tmq->epTimer);
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = tmq->refId;
+
+ taosTmrReset(tmqAssignAskEpTask, 1000, pRefId, tmqMgmt.timer, &tmq->epTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) {
tmqCommitInner(tmq, NULL, 1, 1, tmq->commitCb, tmq->commitCbUserParam);
- taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer, &tmq->commitTimer);
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = tmq->refId;
+
+ taosTmrReset(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId, tmqMgmt.timer, &tmq->commitTimer);
} else if (*pTaskType == TMQ_DELAYED_TASK__REPORT) {
} else {
ASSERT(0);
@@ -733,7 +794,6 @@ void tmqClearUnhandleMsg(tmq_t* tmq) {
int32_t tmqSubscribeCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqSubscribeCbParam* pParam = (SMqSubscribeCbParam*)param;
pParam->rspErr = code;
- /*tmq_t* tmq = pParam->tmq;*/
tsem_post(&pParam->rspSem);
return 0;
}
@@ -756,40 +816,27 @@ int32_t tmq_unsubscribe(tmq_t* tmq) {
return rsp;
}
-#if 0
-tmq_t* tmq_consumer_new(void* conn, tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
- tmq_t* pTmq = taosMemoryCalloc(sizeof(tmq_t), 1);
- if (pTmq == NULL) {
- return NULL;
+void tmqFreeImpl(void* handle) {
+ tmq_t* tmq = (tmq_t*)handle;
+
+ // TODO stop timer
+ if (tmq->mqueue) taosCloseQueue(tmq->mqueue);
+ if (tmq->delayedTask) taosCloseQueue(tmq->delayedTask);
+ if (tmq->qall) taosFreeQall(tmq->qall);
+
+ tsem_destroy(&tmq->rspSem);
+
+ int32_t sz = taosArrayGetSize(tmq->clientTopics);
+ for (int32_t i = 0; i < sz; i++) {
+ SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+ if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema);
+ int32_t vgSz = taosArrayGetSize(pTopic->vgs);
+ taosArrayDestroy(pTopic->vgs);
}
- pTmq->pTscObj = (STscObj*)conn;
- pTmq->status = 0;
- pTmq->pollCnt = 0;
- pTmq->epoch = 0;
- pTmq->epStatus = 0;
- pTmq->epSkipCnt = 0;
- // set conf
- strcpy(pTmq->clientId, conf->clientId);
- strcpy(pTmq->groupId, conf->groupId);
- pTmq->autoCommit = conf->autoCommit;
- pTmq->commit_cb = conf->commit_cb;
- pTmq->resetOffsetCfg = conf->resetOffset;
-
- pTmq->consumerId = generateRequestId() & (((uint64_t)-1) >> 1);
- pTmq->clientTopics = taosArrayInit(0, sizeof(SMqClientTopic));
- if (pTmq->clientTopics == NULL) {
- taosMemoryFree(pTmq);
- return NULL;
- }
-
- pTmq->mqueue = taosOpenQueue();
- pTmq->qall = taosAllocateQall();
-
- tsem_init(&pTmq->rspSem, 0, 0);
-
- return pTmq;
+ taosArrayDestroy(tmq->clientTopics);
+ taos_close_internal(tmq->pTscObj);
+ taosMemoryFree(tmq);
}
-#endif
tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
// init timer
@@ -801,6 +848,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
+ tmqMgmt.rsetId = taosOpenRef(10000, tmqFreeImpl);
}
tmq_t* pTmq = taosMemoryCalloc(1, sizeof(tmq_t));
@@ -841,7 +889,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
strcpy(pTmq->clientId, conf->clientId);
strcpy(pTmq->groupId, conf->groupId);
pTmq->withTbName = conf->withTbName;
- pTmq->useSnapshot = conf->ssEnable;
+ pTmq->useSnapshot = conf->snapEnable;
pTmq->autoCommit = conf->autoCommit;
pTmq->autoCommitInterval = conf->autoCommitInterval;
pTmq->commitCb = conf->commitCb;
@@ -869,8 +917,17 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
goto FAIL;
}
+ pTmq->refId = taosAddRef(tmqMgmt.rsetId, pTmq);
+ if (pTmq->refId < 0) {
+ tmqFreeImpl(pTmq);
+ return NULL;
+ }
+
+ int64_t* pRefId = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId = pTmq->refId;
+
if (pTmq->hbBgEnable) {
- pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pTmq, tmqMgmt.timer);
+ pTmq->hbLiveTimer = taosTmrStart(tmqSendHbReq, 1000, pRefId, tmqMgmt.timer);
}
tscInfo("consumer %" PRId64 " is setup, consumer group %s", pTmq->consumerId, pTmq->groupId);
@@ -928,7 +985,8 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
SMqSubscribeCbParam param = {
.rspErr = 0,
- .tmq = tmq,
+ .refId = tmq->refId,
+ .epoch = tmq->epoch,
};
if (tsem_init(¶m.rspSem, 0, 0) != 0) goto FAIL;
@@ -970,12 +1028,16 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
// init ep timer
if (tmq->epTimer == NULL) {
- tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, 1000, tmq, tmqMgmt.timer);
+ int64_t* pRefId1 = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId1 = tmq->refId;
+ tmq->epTimer = taosTmrStart(tmqAssignAskEpTask, 1000, pRefId1, tmqMgmt.timer);
}
// init auto commit timer
if (tmq->autoCommit && tmq->commitTimer == NULL) {
- tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, tmq, tmqMgmt.timer);
+ int64_t* pRefId2 = taosMemoryMalloc(sizeof(int64_t));
+ *pRefId2 = tmq->refId;
+ tmq->commitTimer = taosTmrStart(tmqAssignDelayedCommitTask, tmq->autoCommitInterval, pRefId2, tmqMgmt.timer);
}
code = 0;
@@ -997,9 +1059,18 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqPollCbParam* pParam = (SMqPollCbParam*)param;
SMqClientVg* pVg = pParam->pVg;
SMqClientTopic* pTopic = pParam->pTopic;
- tmq_t* tmq = pParam->tmq;
- int32_t vgId = pParam->vgId;
- int32_t epoch = pParam->epoch;
+
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId);
+ if (tmq == NULL) {
+ tsem_destroy(&pParam->rspSem);
+ taosMemoryFree(pParam);
+ taosMemoryFree(pMsg->pData);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
+ int32_t epoch = pParam->epoch;
+ int32_t vgId = pParam->vgId;
taosMemoryFree(pParam);
if (code != 0) {
tscWarn("msg discard from vgId:%d, epoch %d, since %s", vgId, epoch, terrstr());
@@ -1124,7 +1195,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
SMqClientTopic topic = {0};
SMqSubTopicEp* pTopicEp = taosArrayGet(pRsp->topics, i);
topic.schema = pTopicEp->schema;
- topic.topicName = strdup(pTopicEp->topic);
+ tstrncpy(topic.topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN);
tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
tscDebug("consumer:%" PRId64 ", update topic: %s", tmq->consumerId, topic.topicName);
@@ -1153,7 +1224,16 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
}
taosArrayPush(newTopics, &topic);
}
- if (tmq->clientTopics) taosArrayDestroy(tmq->clientTopics);
+ if (tmq->clientTopics) {
+ int32_t sz = taosArrayGetSize(tmq->clientTopics);
+ for (int32_t i = 0; i < sz; i++) {
+ SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
+ if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema);
+ int32_t vgSz = taosArrayGetSize(pTopic->vgs);
+ taosArrayDestroy(pTopic->vgs);
+ }
+ taosArrayDestroy(tmq->clientTopics);
+ }
taosHashCleanup(pHash);
tmq->clientTopics = newTopics;
@@ -1168,8 +1248,20 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
int32_t tmqAskEpCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqAskEpCbParam* pParam = (SMqAskEpCbParam*)param;
- tmq_t* tmq = pParam->tmq;
int8_t async = pParam->async;
+ tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, pParam->refId);
+
+ if (tmq == NULL) {
+ if (!async) {
+ tsem_destroy(&pParam->rspSem);
+ } else {
+ taosMemoryFree(pParam);
+ }
+ taosMemoryFree(pMsg->pData);
+ terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
+ return -1;
+ }
+
pParam->code = code;
if (code != 0) {
tscError("consumer:%" PRId64 ", get topic endpoint error, not ready, wait:%d", tmq->consumerId, pParam->async);
@@ -1216,6 +1308,7 @@ END:
} else {
taosMemoryFree(pParam);
}
+ taosMemoryFree(pMsg->pData);
return code;
}
@@ -1248,7 +1341,8 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
/*atomic_store_8(&tmq->epStatus, 0);*/
return -1;
}
- pParam->tmq = tmq;
+ pParam->refId = tmq->refId;
+ pParam->epoch = tmq->epoch;
pParam->async = async;
tsem_init(&pParam->rspSem, 0, 0);
@@ -1288,31 +1382,6 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
return code;
}
-#if 0
-int32_t tmq_seek(tmq_t* tmq, const tmq_topic_vgroup_t* offset) {
- const SMqOffset* pOffset = &offset->offset;
- if (strcmp(pOffset->cgroup, tmq->groupId) != 0) {
- return TMQ_RESP_ERR__FAIL;
- }
- int32_t sz = taosArrayGetSize(tmq->clientTopics);
- for (int32_t i = 0; i < sz; i++) {
- SMqClientTopic* clientTopic = taosArrayGet(tmq->clientTopics, i);
- if (strcmp(clientTopic->topicName, pOffset->topicName) == 0) {
- int32_t vgSz = taosArrayGetSize(clientTopic->vgs);
- for (int32_t j = 0; j < vgSz; j++) {
- SMqClientVg* pVg = taosArrayGet(clientTopic->vgs, j);
- if (pVg->vgId == pOffset->vgId) {
- pVg->currentOffset = pOffset->offset;
- tmqClearUnhandleMsg(tmq);
- return TMQ_RESP_ERR__SUCCESS;
- }
- }
- }
- }
- return TMQ_RESP_ERR__FAIL;
-}
-#endif
-
SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq));
if (pReq == NULL) {
@@ -1406,11 +1475,12 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
tsem_post(&tmq->rspSem);
return -1;
}
- pParam->tmq = tmq;
+ pParam->refId = tmq->refId;
+ pParam->epoch = tmq->epoch;
+
pParam->pVg = pVg;
pParam->pTopic = pTopic;
pParam->vgId = pVg->vgId;
- pParam->epoch = tmq->epoch;
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
@@ -1550,7 +1620,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
}
#endif
- // in no topic status also need process delayed task
+ // in no topic status, delayed task also need to be processed
if (atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__INIT) {
return NULL;
}
@@ -1615,7 +1685,7 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
/*return rsp;*/
return 0;
}
- // TODO: free resources
+ taosRemoveRef(tmqMgmt.rsetId, tmq->refId);
return 0;
}
@@ -1691,1610 +1761,6 @@ const char* tmq_get_table_name(TAOS_RES* res) {
return NULL;
}
-static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id,
- int8_t t) {
- char* string = NULL;
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- return string;
- }
- cJSON* type = cJSON_CreateString("create");
- cJSON_AddItemToObject(json, "type", type);
-
- // char uid[32] = {0};
- // sprintf(uid, "%"PRIi64, id);
- // cJSON* id_ = cJSON_CreateString(uid);
- // cJSON_AddItemToObject(json, "id", id_);
- cJSON* tableName = cJSON_CreateString(name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
- cJSON_AddItemToObject(json, "tableType", tableType);
- // cJSON* version = cJSON_CreateNumber(1);
- // cJSON_AddItemToObject(json, "version", version);
-
- cJSON* columns = cJSON_CreateArray();
- for (int i = 0; i < schemaRow->nCols; i++) {
- cJSON* column = cJSON_CreateObject();
- SSchema* s = schemaRow->pSchema + i;
- cJSON* cname = cJSON_CreateString(s->name);
- cJSON_AddItemToObject(column, "name", cname);
- cJSON* ctype = cJSON_CreateNumber(s->type);
- cJSON_AddItemToObject(column, "type", ctype);
- if (s->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = s->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(column, "length", cbytes);
- } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(column, "length", cbytes);
- }
- cJSON_AddItemToArray(columns, column);
- }
- cJSON_AddItemToObject(json, "columns", columns);
-
- cJSON* tags = cJSON_CreateArray();
- for (int i = 0; schemaTag && i < schemaTag->nCols; i++) {
- cJSON* tag = cJSON_CreateObject();
- SSchema* s = schemaTag->pSchema + i;
- cJSON* tname = cJSON_CreateString(s->name);
- cJSON_AddItemToObject(tag, "name", tname);
- cJSON* ttype = cJSON_CreateNumber(s->type);
- cJSON_AddItemToObject(tag, "type", ttype);
- if (s->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = s->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(tag, "length", cbytes);
- } else if (s->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(tag, "length", cbytes);
- }
- cJSON_AddItemToArray(tags, tag);
- }
- cJSON_AddItemToObject(json, "tags", tags);
-
- string = cJSON_PrintUnformatted(json);
- cJSON_Delete(json);
- return string;
-}
-
-static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
- SMAlterStbReq req = {0};
- cJSON* json = NULL;
- char* string = NULL;
-
- if (tDeserializeSMAlterStbReq(alterData, alterDataLen, &req) != 0) {
- goto end;
- }
-
- json = cJSON_CreateObject();
- if (json == NULL) {
- goto end;
- }
- cJSON* type = cJSON_CreateString("alter");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- SName name = {0};
- tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
- cJSON* tableName = cJSON_CreateString(name.tname);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("super");
- cJSON_AddItemToObject(json, "tableType", tableType);
-
- cJSON* alterType = cJSON_CreateNumber(req.alterType);
- cJSON_AddItemToObject(json, "alterType", alterType);
- switch (req.alterType) {
- case TSDB_ALTER_TABLE_ADD_TAG:
- case TSDB_ALTER_TABLE_ADD_COLUMN: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(field->type);
- cJSON_AddItemToObject(json, "colType", colType);
-
- if (field->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = field->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_DROP_TAG:
- case TSDB_ALTER_TABLE_DROP_COLUMN: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
- TAOS_FIELD* field = taosArrayGet(req.pFields, 0);
- cJSON* colName = cJSON_CreateString(field->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(field->type);
- cJSON_AddItemToObject(json, "colType", colType);
- if (field->type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = field->bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (field->type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (field->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
- TAOS_FIELD* oldField = taosArrayGet(req.pFields, 0);
- TAOS_FIELD* newField = taosArrayGet(req.pFields, 1);
- cJSON* colName = cJSON_CreateString(oldField->name);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colNewName = cJSON_CreateString(newField->name);
- cJSON_AddItemToObject(json, "colNewName", colNewName);
- break;
- }
- default:
- break;
- }
- string = cJSON_PrintUnformatted(json);
-
-end:
- cJSON_Delete(json);
- tFreeSMAltertbReq(&req);
- return string;
-}
-
-static char* processCreateStb(SMqMetaRsp* metaRsp) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- char* string = NULL;
-
- // decode and process req
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
-
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- goto _err;
- }
- string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE);
- tDecoderClear(&coder);
- return string;
-
-_err:
- tDecoderClear(&coder);
- return string;
-}
-
-static char* processAlterStb(SMqMetaRsp* metaRsp) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- char* string = NULL;
-
- // decode and process req
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
-
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- goto _err;
- }
- string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen);
- tDecoderClear(&coder);
- return string;
-
-_err:
- tDecoderClear(&coder);
- return string;
-}
-
-static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) {
- char* string = NULL;
- SArray* pTagVals = NULL;
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- return string;
- }
- cJSON* type = cJSON_CreateString("create");
- cJSON_AddItemToObject(json, "type", type);
- // char cid[32] = {0};
- // sprintf(cid, "%"PRIi64, id);
- // cJSON* cid_ = cJSON_CreateString(cid);
- // cJSON_AddItemToObject(json, "id", cid_);
-
- cJSON* tableName = cJSON_CreateString(name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("child");
- cJSON_AddItemToObject(json, "tableType", tableType);
- cJSON* using = cJSON_CreateString(sname);
- cJSON_AddItemToObject(json, "using", using);
- cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
- cJSON_AddItemToObject(json, "tagNum", tagNumJson);
- // cJSON* version = cJSON_CreateNumber(1);
- // cJSON_AddItemToObject(json, "version", version);
-
- cJSON* tags = cJSON_CreateArray();
- int32_t code = tTagToValArray(pTag, &pTagVals);
- if (code) {
- goto end;
- }
-
- if (tTagIsJson(pTag)) {
- STag* p = (STag*)pTag;
- if (p->nTag == 0) {
- goto end;
- }
- char* pJson = parseTagDatatoJson(pTag);
- cJSON* tag = cJSON_CreateObject();
- STagVal* pTagVal = taosArrayGet(pTagVals, 0);
-
- char* ptname = taosArrayGet(tagName, 0);
- cJSON* tname = cJSON_CreateString(ptname);
- cJSON_AddItemToObject(tag, "name", tname);
- // cJSON* cid_ = cJSON_CreateString("");
- // cJSON_AddItemToObject(tag, "cid", cid_);
- cJSON* ttype = cJSON_CreateNumber(TSDB_DATA_TYPE_JSON);
- cJSON_AddItemToObject(tag, "type", ttype);
- cJSON* tvalue = cJSON_CreateString(pJson);
- cJSON_AddItemToObject(tag, "value", tvalue);
- cJSON_AddItemToArray(tags, tag);
- taosMemoryFree(pJson);
- goto end;
- }
-
- for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
- STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
-
- cJSON* tag = cJSON_CreateObject();
-
- char* ptname = taosArrayGet(tagName, i);
- cJSON* tname = cJSON_CreateString(ptname);
- cJSON_AddItemToObject(tag, "name", tname);
- // cJSON* cid = cJSON_CreateNumber(pTagVal->cid);
- // cJSON_AddItemToObject(tag, "cid", cid);
- cJSON* ttype = cJSON_CreateNumber(pTagVal->type);
- cJSON_AddItemToObject(tag, "type", ttype);
-
- cJSON* tvalue = NULL;
- if (IS_VAR_DATA_TYPE(pTagVal->type)) {
- char* buf = taosMemoryCalloc(pTagVal->nData + 3, 1);
- if (!buf) goto end;
- dataConverToStr(buf, pTagVal->type, pTagVal->pData, pTagVal->nData, NULL);
- tvalue = cJSON_CreateString(buf);
- taosMemoryFree(buf);
- } else {
- double val = 0;
- GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
- tvalue = cJSON_CreateNumber(val);
- }
-
- cJSON_AddItemToObject(tag, "value", tvalue);
- cJSON_AddItemToArray(tags, tag);
- }
-
-end:
- cJSON_AddItemToObject(json, "tags", tags);
- string = cJSON_PrintUnformatted(json);
- cJSON_Delete(json);
- taosArrayDestroy(pTagVals);
- return string;
-}
-
-static char* processCreateTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVCreateTbBatchReq req = {0};
- SVCreateTbReq* pCreateReq;
- char* string = NULL;
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVCreateTbBatchReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pCreateReq = req.pReqs + iReq;
- if (pCreateReq->type == TSDB_CHILD_TABLE) {
- string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name,
- pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum);
- } else if (pCreateReq->type == TSDB_NORMAL_TABLE) {
- string =
- buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
- }
- }
-
- tDecoderClear(&decoder);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processAlterTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVAlterTbReq vAlterTbReq = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVAlterTbReq(&decoder, &vAlterTbReq) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("alter");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal");
- cJSON_AddItemToObject(json, "tableType", tableType);
- cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
- cJSON_AddItemToObject(json, "alterType", alterType);
-
- switch (vAlterTbReq.action) {
- case TSDB_ALTER_TABLE_ADD_COLUMN: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
- cJSON_AddItemToObject(json, "colType", colType);
-
- if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) {
- int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (vAlterTbReq.type == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (vAlterTbReq.bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_DROP_COLUMN: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
- cJSON_AddItemToObject(json, "colType", colType);
- if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) {
- int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- } else if (vAlterTbReq.colModType == TSDB_DATA_TYPE_NCHAR) {
- int32_t length = (vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
- cJSON* cbytes = cJSON_CreateNumber(length);
- cJSON_AddItemToObject(json, "colLength", cbytes);
- }
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME: {
- cJSON* colName = cJSON_CreateString(vAlterTbReq.colName);
- cJSON_AddItemToObject(json, "colName", colName);
- cJSON* colNewName = cJSON_CreateString(vAlterTbReq.colNewName);
- cJSON_AddItemToObject(json, "colNewName", colNewName);
- break;
- }
- case TSDB_ALTER_TABLE_UPDATE_TAG_VAL: {
- cJSON* tagName = cJSON_CreateString(vAlterTbReq.tagName);
- cJSON_AddItemToObject(json, "colName", tagName);
-
- bool isNull = vAlterTbReq.isNull;
- if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
- STag* jsonTag = (STag*)vAlterTbReq.pTagVal;
- if (jsonTag->nTag == 0) isNull = true;
- }
- if (!isNull) {
- char* buf = NULL;
-
- if (vAlterTbReq.tagType == TSDB_DATA_TYPE_JSON) {
- ASSERT(tTagIsJson(vAlterTbReq.pTagVal) == true);
- buf = parseTagDatatoJson(vAlterTbReq.pTagVal);
- } else {
- buf = taosMemoryCalloc(vAlterTbReq.nTagVal + 1, 1);
- dataConverToStr(buf, vAlterTbReq.tagType, vAlterTbReq.pTagVal, vAlterTbReq.nTagVal, NULL);
- }
-
- cJSON* colValue = cJSON_CreateString(buf);
- cJSON_AddItemToObject(json, "colValue", colValue);
- taosMemoryFree(buf);
- }
-
- cJSON* isNullCJson = cJSON_CreateBool(isNull);
- cJSON_AddItemToObject(json, "colValueNull", isNullCJson);
- break;
- }
- default:
- break;
- }
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processDropSTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVDropStbReq req = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVDropStbReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("drop");
- cJSON_AddItemToObject(json, "type", type);
- cJSON* tableName = cJSON_CreateString(req.name);
- cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("super");
- cJSON_AddItemToObject(json, "tableType", tableType);
-
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static char* processDropTable(SMqMetaRsp* metaRsp) {
- SDecoder decoder = {0};
- SVDropTbBatchReq req = {0};
- char* string = NULL;
-
- // decode
- void* data = POINTER_SHIFT(metaRsp->metaRsp, sizeof(SMsgHead));
- int32_t len = metaRsp->metaRspLen - sizeof(SMsgHead);
- tDecoderInit(&decoder, data, len);
- if (tDecodeSVDropTbBatchReq(&decoder, &req) < 0) {
- goto _exit;
- }
-
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- goto _exit;
- }
- cJSON* type = cJSON_CreateString("drop");
- cJSON_AddItemToObject(json, "type", type);
- // cJSON* uid = cJSON_CreateNumber(id);
- // cJSON_AddItemToObject(json, "uid", uid);
- // cJSON* tableType = cJSON_CreateString("normal");
- // cJSON_AddItemToObject(json, "tableType", tableType);
-
- cJSON* tableNameList = cJSON_CreateArray();
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- SVDropTbReq* pDropTbReq = req.pReqs + iReq;
-
- cJSON* tableName = cJSON_CreateString(pDropTbReq->name);
- cJSON_AddItemToArray(tableNameList, tableName);
- }
- cJSON_AddItemToObject(json, "tableNameList", tableNameList);
-
- string = cJSON_PrintUnformatted(json);
-
-_exit:
- tDecoderClear(&decoder);
- return string;
-}
-
-static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
- SVCreateStbReq req = {0};
- SDecoder coder;
- SMCreateStbReq pReq = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
- // build create stable
- pReq.pColumns = taosArrayInit(req.schemaRow.nCols, sizeof(SField));
- for (int32_t i = 0; i < req.schemaRow.nCols; i++) {
- SSchema* pSchema = req.schemaRow.pSchema + i;
- SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
- strcpy(field.name, pSchema->name);
- taosArrayPush(pReq.pColumns, &field);
- }
- pReq.pTags = taosArrayInit(req.schemaTag.nCols, sizeof(SField));
- for (int32_t i = 0; i < req.schemaTag.nCols; i++) {
- SSchema* pSchema = req.schemaTag.pSchema + i;
- SField field = {.type = pSchema->type, .bytes = pSchema->bytes};
- strcpy(field.name, pSchema->name);
- taosArrayPush(pReq.pTags, &field);
- }
-
- pReq.colVer = req.schemaRow.version;
- pReq.tagVer = req.schemaTag.version;
- pReq.numOfColumns = req.schemaRow.nCols;
- pReq.numOfTags = req.schemaTag.nCols;
- pReq.commentLen = -1;
- pReq.suid = req.suid;
- pReq.source = TD_REQ_FROM_TAOX;
- pReq.igExists = true;
-
- STscObj* pTscObj = pRequest->pTscObj;
- SName tableName;
- tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
-
- SCmdMsgInfo pCmdMsg = {0};
- pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
- pCmdMsg.msgType = TDMT_MND_CREATE_STB;
- pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
- pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
- if (NULL == pCmdMsg.pMsg) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- tSerializeSMCreateStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
-
- SQuery pQuery = {0};
- pQuery.execMode = QUERY_EXEC_MODE_RPC;
- pQuery.pCmdMsg = &pCmdMsg;
- pQuery.msgType = pQuery.pCmdMsg->msgType;
- pQuery.stableQuery = true;
-
- launchQueryImpl(pRequest, &pQuery, true, NULL);
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SCatalog* pCatalog = NULL;
- catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- catalogRemoveTableMeta(pCatalog, &tableName);
- }
-
- code = pRequest->code;
- taosMemoryFree(pCmdMsg.pMsg);
-
-end:
- destroyRequest(pRequest);
- tFreeSMCreateStbReq(&pReq);
- tDecoderClear(&coder);
- return code;
-}
-
-static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
- SVDropStbReq req = {0};
- SDecoder coder;
- SMDropStbReq pReq = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVDropStbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // build drop stable
- pReq.igNotExists = true;
- pReq.source = TD_REQ_FROM_TAOX;
- pReq.suid = req.suid;
-
- STscObj* pTscObj = pRequest->pTscObj;
- SName tableName = {0};
- tNameExtractFullName(toName(pTscObj->acctId, pRequest->pDb, req.name, &tableName), pReq.name);
-
- SCmdMsgInfo pCmdMsg = {0};
- pCmdMsg.epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
- pCmdMsg.msgType = TDMT_MND_DROP_STB;
- pCmdMsg.msgLen = tSerializeSMDropStbReq(NULL, 0, &pReq);
- pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
- if (NULL == pCmdMsg.pMsg) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- tSerializeSMDropStbReq(pCmdMsg.pMsg, pCmdMsg.msgLen, &pReq);
-
- SQuery pQuery = {0};
- pQuery.execMode = QUERY_EXEC_MODE_RPC;
- pQuery.pCmdMsg = &pCmdMsg;
- pQuery.msgType = pQuery.pCmdMsg->msgType;
- pQuery.stableQuery = true;
-
- launchQueryImpl(pRequest, &pQuery, true, NULL);
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SCatalog* pCatalog = NULL;
- catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- catalogRemoveTableMeta(pCatalog, &tableName);
- }
-
- code = pRequest->code;
- taosMemoryFree(pCmdMsg.pMsg);
-
-end:
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- return code;
-}
-
-typedef struct SVgroupCreateTableBatch {
- SVCreateTbBatchReq req;
- SVgroupInfo info;
- char dbName[TSDB_DB_NAME_LEN];
-} SVgroupCreateTableBatch;
-
-static void destroyCreateTbReqBatch(void* data) {
- SVgroupCreateTableBatch* pTbBatch = (SVgroupCreateTableBatch*)data;
- taosArrayDestroy(pTbBatch->req.pArray);
-}
-
-static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVCreateTbBatchReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SHashObj* pVgroupHashmap = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
-
- SVCreateTbReq* pCreateReq = NULL;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
- if (NULL == pVgroupHashmap) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch);
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
-
- pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pCreateReq = req.pReqs + iReq;
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, pCreateReq->name, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
- taosArrayPush(pRequest->tableList, &pName);
-
- SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
- if (pTableBatch == NULL) {
- SVgroupCreateTableBatch tBatch = {0};
- tBatch.info = pInfo;
- strcpy(tBatch.dbName, pRequest->pDb);
-
- tBatch.req.pArray = taosArrayInit(4, sizeof(struct SVCreateTbReq));
- taosArrayPush(tBatch.req.pArray, pCreateReq);
-
- taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
- } else { // add to the correct vgroup
- taosArrayPush(pTableBatch->req.pArray, pCreateReq);
- }
- }
-
- SArray* pBufArray = serializeVgroupsCreateTableBatch(pVgroupHashmap);
- if (NULL == pBufArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_CREATE_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_CREATE_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- removeMeta(pTscObj, pRequest->tableList);
- }
-
- code = pRequest->code;
-
-end:
- taosHashCleanup(pVgroupHashmap);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-typedef struct SVgroupDropTableBatch {
- SVDropTbBatchReq req;
- SVgroupInfo info;
- char dbName[TSDB_DB_NAME_LEN];
-} SVgroupDropTableBatch;
-
-static void destroyDropTbReqBatch(void* data) {
- SVgroupDropTableBatch* pTbBatch = (SVgroupDropTableBatch*)data;
- taosArrayDestroy(pTbBatch->req.pArray);
-}
-
-static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVDropTbBatchReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SHashObj* pVgroupHashmap = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
-
- SVDropTbReq* pDropReq = NULL;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pVgroupHashmap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
- if (NULL == pVgroupHashmap) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch);
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
- pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName));
- // loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pDropReq = req.pReqs + iReq;
- pDropReq->igNotExists = true;
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, pDropReq->name, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- taosArrayPush(pRequest->tableList, &pName);
- SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
- if (pTableBatch == NULL) {
- SVgroupDropTableBatch tBatch = {0};
- tBatch.info = pInfo;
- tBatch.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
- taosArrayPush(tBatch.req.pArray, pDropReq);
-
- taosHashPut(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId), &tBatch, sizeof(tBatch));
- } else { // add to the correct vgroup
- taosArrayPush(pTableBatch->req.pArray, pDropReq);
- }
- }
-
- SArray* pBufArray = serializeVgroupsDropTableBatch(pVgroupHashmap);
- if (NULL == pBufArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_DROP_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_DROP_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- removeMeta(pTscObj, pRequest->tableList);
- }
- code = pRequest->code;
-
-end:
- taosHashCleanup(pVgroupHashmap);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-// delete from db.tabl where .. -> delete from tabl where ..
-// delete from db .tabl where .. -> delete from tabl where ..
-// static void getTbName(char *sql){
-// char *ch = sql;
-//
-// bool inBackQuote = false;
-// int8_t dotIndex = 0;
-// while(*ch != '\0'){
-// if(!inBackQuote && *ch == '`'){
-// inBackQuote = true;
-// ch++;
-// continue;
-// }
-//
-// if(inBackQuote && *ch == '`'){
-// inBackQuote = false;
-// ch++;
-//
-// continue;
-// }
-//
-// if(!inBackQuote && *ch == '.'){
-// dotIndex ++;
-// if(dotIndex == 2){
-// memmove(sql, ch + 1, strlen(ch + 1) + 1);
-// break;
-// }
-// }
-// ch++;
-// }
-//}
-
-static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) {
- SDeleteRes req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
-
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeDeleteRes(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // getTbName(req.tableFName);
- char sql[256] = {0};
- sprintf(sql, "delete from `%s` where `%s` >= %" PRId64 " and `%s` <= %" PRId64, req.tableFName, req.tsColName,
- req.skey, req.tsColName, req.ekey);
- printf("delete sql:%s\n", sql);
-
- TAOS_RES* res = taos_query(taos, sql);
- SRequestObj* pRequest = (SRequestObj*)res;
- code = pRequest->code;
- if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
- code = TSDB_CODE_SUCCESS;
- }
- taos_free_result(res);
-
-end:
- tDecoderClear(&coder);
- return code;
-}
-
-static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
- SVAlterTbReq req = {0};
- SDecoder coder = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = NULL;
- SQuery* pQuery = NULL;
- SArray* pArray = NULL;
- SVgDataBlocks* pVgData = NULL;
-
- code = buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest);
-
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- if (!pRequest->pDb) {
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
- // decode and process req
- void* data = POINTER_SHIFT(meta, sizeof(SMsgHead));
- int32_t len = metaLen - sizeof(SMsgHead);
- tDecoderInit(&coder, data, len);
- if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
- code = TSDB_CODE_INVALID_PARA;
- goto end;
- }
-
- // do not deal TSDB_ALTER_TABLE_UPDATE_OPTIONS
- if (req.action == TSDB_ALTER_TABLE_UPDATE_OPTIONS) {
- goto end;
- }
-
- STscObj* pTscObj = pRequest->pTscObj;
- SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter,
- .requestId = pRequest->requestId,
- .requestObjRefId = pRequest->self,
- .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)};
-
- SVgroupInfo pInfo = {0};
- SName pName = {0};
- toName(pTscObj->acctId, pRequest->pDb, req.tbName, &pName);
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &pInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- pArray = taosArrayInit(1, sizeof(void*));
- if (NULL == pArray) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
-
- pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == pVgData) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pVgData->vg = pInfo;
- pVgData->pData = taosMemoryMalloc(metaLen);
- if (NULL == pVgData->pData) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- memcpy(pVgData->pData, meta, metaLen);
- ((SMsgHead*)pVgData->pData)->vgId = htonl(pInfo.vgId);
- pVgData->size = metaLen;
- pVgData->numOfTables = 1;
- taosArrayPush(pArray, &pVgData);
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->msgType = TDMT_VND_ALTER_TABLE;
- pQuery->stableQuery = false;
- pQuery->pRoot = nodesMakeNode(QUERY_NODE_ALTER_TABLE_STMT);
-
- code = rewriteToVnodeModifyOpStmt(pQuery, pArray);
- if (code != TSDB_CODE_SUCCESS) {
- goto end;
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
-
- pVgData = NULL;
- pArray = NULL;
- code = pRequest->code;
- if (code == TSDB_CODE_VND_TABLE_NOT_EXIST) {
- code = TSDB_CODE_SUCCESS;
- }
-
- if (pRequest->code == TSDB_CODE_SUCCESS) {
- SExecResult* pRes = &pRequest->body.resInfo.execRes;
- if (pRes->res != NULL) {
- code = handleAlterTbExecRes(pRes->res, pCatalog);
- }
- }
-end:
- taosArrayDestroy(pArray);
- if (pVgData) taosMemoryFreeClear(pVgData->pData);
- taosMemoryFreeClear(pVgData);
- destroyRequest(pRequest);
- tDecoderClear(&coder);
- qDestroyQuery(pQuery);
- return code;
-}
-
-typedef struct {
- SVgroupInfo vg;
- void* data;
-} VgData;
-
-static void destroyVgHash(void* data) {
- VgData* vgData = (VgData*)data;
- taosMemoryFreeClear(vgData->data);
-}
-
-int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname) {
- int32_t code = TSDB_CODE_SUCCESS;
- STableMeta* pTableMeta = NULL;
- SQuery* pQuery = NULL;
-
- SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
- if (!pRequest) {
- uError("WriteRaw:createRequest error request is null");
- code = terrno;
- goto end;
- }
-
- if (!pRequest->pDb) {
- uError("WriteRaw:not use db");
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
-
- SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
- strcpy(pName.dbname, pRequest->pDb);
- strcpy(pName.tname, tbname);
-
- struct SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: get gatlog error");
- goto end;
- }
-
- SRequestConnInfo conn = {0};
- conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
- conn.requestId = pRequest->requestId;
- conn.requestObjRefId = pRequest->self;
- conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
-
- SVgroupInfo vgData = {0};
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vgData);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbname);
- goto end;
- }
-
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbname);
- goto end;
- }
- uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
- uint64_t uid = pTableMeta->uid;
- int32_t numOfCols = pTableMeta->tableInfo.numOfColumns;
-
- uint16_t fLen = 0;
- int32_t rowSize = 0;
- int16_t nVar = 0;
- for (int i = 0; i < numOfCols; i++) {
- SSchema* schema = pTableMeta->schema + i;
- fLen += TYPE_BYTES[schema->type];
- rowSize += schema->bytes;
- if (IS_VAR_DATA_TYPE(schema->type)) {
- nVar++;
- }
- }
-
- int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(numOfCols - 1);
- int32_t schemaLen = 0;
- int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
-
- int32_t totalLen = sizeof(SSubmitReq) + submitLen;
- SSubmitReq* subReq = taosMemoryCalloc(1, totalLen);
- SSubmitBlk* blk = POINTER_SHIFT(subReq, sizeof(SSubmitReq));
- void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
- STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
-
- SRowBuilder rb = {0};
- tdSRowInit(&rb, pTableMeta->sversion);
- tdSRowSetTpInfo(&rb, numOfCols, fLen);
- int32_t dataLen = 0;
-
- char* pStart = pData + getVersion1BlockMetaSize(pData, numOfCols);
- int32_t* colLength = (int32_t*)pStart;
- pStart += sizeof(int32_t) * numOfCols;
-
- SResultColumn* pCol = taosMemoryCalloc(numOfCols, sizeof(SResultColumn));
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- if (IS_VAR_DATA_TYPE(pTableMeta->schema[i].type)) {
- pCol[i].offset = (int32_t*)pStart;
- pStart += rows * sizeof(int32_t);
- } else {
- pCol[i].nullbitmap = pStart;
- pStart += BitmapLen(rows);
- }
-
- pCol[i].pData = pStart;
- pStart += colLength[i];
- }
-
- for (int32_t j = 0; j < rows; j++) {
- tdSRowResetBuf(&rb, rowData);
- int32_t offset = 0;
- for (int32_t k = 0; k < numOfCols; k++) {
- const SSchema* pColumn = &pTableMeta->schema[k];
-
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- if (pCol[k].offset[j] != -1) {
- char* data = pCol[k].pData + pCol[k].offset[j];
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- } else {
-
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- }
- } else {
- if (!colDataIsNull_f(pCol[k].nullbitmap, j)) {
- char* data = pCol[k].pData + pColumn->bytes * j;
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- } else {
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- }
- }
-
- offset += TYPE_BYTES[pColumn->type];
- }
- tdSRowEnd(&rb);
- int32_t rowLen = TD_ROW_LEN(rowData);
- rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
- }
-
- taosMemoryFree(pCol);
-
- blk->uid = htobe64(uid);
- blk->suid = htobe64(suid);
- blk->sversion = htonl(pTableMeta->sversion);
- blk->schemaLen = htonl(schemaLen);
- blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length = sizeof(SSubmitReq) + sizeof(SSubmitBlk) + schemaLen + dataLen;
- subReq->numOfBlocks = 1;
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- if (NULL == pQuery) {
- uError("create SQuery error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->haveResultSet = false;
- pQuery->msgType = TDMT_VND_SUBMIT;
- pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
- if (NULL == pQuery->pRoot) {
- uError("create pQuery->pRoot error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
- nodeStmt->payloadType = PAYLOAD_TYPE_KV;
- nodeStmt->pDataBlocks = taosArrayInit(1, POINTER_BYTES);
-
- SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == dst) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- dst->vg = vgData;
- dst->numOfTables = subReq->numOfBlocks;
- dst->size = subReq->length;
- dst->pData = (char*)subReq;
- subReq->header.vgId = htonl(dst->vg.vgId);
- subReq->version = htonl(1);
- subReq->header.contLen = htonl(subReq->length);
- subReq->length = htonl(subReq->length);
- subReq->numOfBlocks = htonl(subReq->numOfBlocks);
- subReq = NULL; // no need free
- taosArrayPush(nodeStmt->pDataBlocks, &dst);
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- code = pRequest->code;
-
-end:
- taosMemoryFreeClear(pTableMeta);
- qDestroyQuery(pQuery);
- return code;
-}
-
-static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
- int32_t code = TSDB_CODE_SUCCESS;
- SHashObj* pVgHash = NULL;
- SQuery* pQuery = NULL;
- SMqRspObj rspObj = {0};
- SDecoder decoder = {0};
-
- terrno = TSDB_CODE_SUCCESS;
- SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
- if (!pRequest) {
- uError("WriteRaw:createRequest error request is null");
- return terrno;
- }
-
- rspObj.resIter = -1;
- rspObj.resType = RES_TYPE__TMQ;
-
- tDecoderInit(&decoder, data, dataLen);
- code = tDecodeSMqDataRsp(&decoder, &rspObj.rsp);
- if (code != 0) {
- uError("WriteRaw:decode smqDataRsp error");
- code = TSDB_CODE_INVALID_MSG;
- goto end;
- }
-
- if (!pRequest->pDb) {
- uError("WriteRaw:not use db");
- code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
- goto end;
- }
-
- pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
- taosHashSetFreeFp(pVgHash, destroyVgHash);
- struct SCatalog* pCatalog = NULL;
- code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: get gatlog error");
- goto end;
- }
-
- SRequestConnInfo conn = {0};
- conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
- conn.requestId = pRequest->requestId;
- conn.requestObjRefId = pRequest->self;
- conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
-
- printf("raw data block num:%d\n", rspObj.rsp.blockNum);
- while (++rspObj.resIter < rspObj.rsp.blockNum) {
- SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
- if (!rspObj.rsp.withSchema) {
- uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
- goto end;
- }
- SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
- setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
-
- code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw: setQueryResultFromRsp error");
- goto end;
- }
-
- uint16_t fLen = 0;
- int32_t rowSize = 0;
- int16_t nVar = 0;
- for (int i = 0; i < pSW->nCols; i++) {
- SSchema* schema = pSW->pSchema + i;
- fLen += TYPE_BYTES[schema->type];
- rowSize += schema->bytes;
- if (IS_VAR_DATA_TYPE(schema->type)) {
- nVar++;
- }
- }
-
- int32_t rows = rspObj.resInfo.numOfRows;
- int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
- int32_t schemaLen = 0;
- int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
-
- const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
- if (!tbName) {
- uError("WriteRaw: tbname is null");
- code = TSDB_CODE_TMQ_INVALID_MSG;
- goto end;
- }
-
- printf("raw data tbname:%s\n", tbName);
- SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
- strcpy(pName.dbname, pRequest->pDb);
- strcpy(pName.tname, tbName);
-
- VgData vgData = {0};
- code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
- goto end;
- }
-
- SSubmitReq* subReq = NULL;
- SSubmitBlk* blk = NULL;
- void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
- if (hData) {
- vgData = *(VgData*)hData;
-
- int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
- void* tmp = taosMemoryRealloc(vgData.data, totalLen);
- if (tmp == NULL) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- vgData.data = tmp;
- ((VgData*)hData)->data = tmp;
- subReq = (SSubmitReq*)(vgData.data);
- blk = POINTER_SHIFT(vgData.data, subReq->length);
- } else {
- int32_t totalLen = sizeof(SSubmitReq) + submitLen;
- void* tmp = taosMemoryCalloc(1, totalLen);
- if (tmp == NULL) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- vgData.data = tmp;
- taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
- subReq = (SSubmitReq*)(vgData.data);
- subReq->length = sizeof(SSubmitReq);
- subReq->numOfBlocks = 0;
-
- blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
- }
-
- STableMeta* pTableMeta = NULL;
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
- goto end;
- }
- uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
- uint64_t uid = pTableMeta->uid;
- taosMemoryFreeClear(pTableMeta);
-
- void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
- STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
-
- SRowBuilder rb = {0};
- tdSRowInit(&rb, pSW->version);
- tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
- int32_t dataLen = 0;
-
- for (int32_t j = 0; j < rows; j++) {
- tdSRowResetBuf(&rb, rowData);
-
- doSetOneRowPtr(&rspObj.resInfo);
- rspObj.resInfo.current += 1;
-
- int32_t offset = 0;
- for (int32_t k = 0; k < pSW->nCols; k++) {
- const SSchema* pColumn = &pSW->pSchema[k];
- char* data = rspObj.resInfo.row[k];
- if (!data) {
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
- } else {
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- data -= VARSTR_HEADER_SIZE;
- }
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
- }
- offset += TYPE_BYTES[pColumn->type];
- }
- tdSRowEnd(&rb);
- int32_t rowLen = TD_ROW_LEN(rowData);
- rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
- }
-
- blk->uid = htobe64(uid);
- blk->suid = htobe64(suid);
- blk->sversion = htonl(pSW->version);
- blk->schemaLen = htonl(schemaLen);
- blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
- subReq->numOfBlocks++;
- }
-
- pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
- if (NULL == pQuery) {
- uError("create SQuery error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
- pQuery->haveResultSet = false;
- pQuery->msgType = TDMT_VND_SUBMIT;
- pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
- if (NULL == pQuery->pRoot) {
- uError("create pQuery->pRoot error");
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto end;
- }
- SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
- nodeStmt->payloadType = PAYLOAD_TYPE_KV;
-
- int32_t numOfVg = taosHashGetSize(pVgHash);
- nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
-
- VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
- while (vData) {
- SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
- if (NULL == dst) {
- code = TSDB_CODE_TSC_OUT_OF_MEMORY;
- goto end;
- }
- dst->vg = vData->vg;
- SSubmitReq* subReq = (SSubmitReq*)(vData->data);
- dst->numOfTables = subReq->numOfBlocks;
- dst->size = subReq->length;
- dst->pData = (char*)subReq;
- vData->data = NULL; // no need free
- subReq->header.vgId = htonl(dst->vg.vgId);
- subReq->version = htonl(1);
- subReq->header.contLen = htonl(subReq->length);
- subReq->length = htonl(subReq->length);
- subReq->numOfBlocks = htonl(subReq->numOfBlocks);
- taosArrayPush(nodeStmt->pDataBlocks, &dst);
- vData = (VgData*)taosHashIterate(pVgHash, vData);
- }
-
- launchQueryImpl(pRequest, pQuery, true, NULL);
- code = pRequest->code;
-
-end:
- tDecoderClear(&decoder);
- taos_free_result(&rspObj);
- qDestroyQuery(pQuery);
- destroyRequest(pRequest);
- taosHashCleanup(pVgHash);
- return code;
-}
-
-char* tmq_get_json_meta(TAOS_RES* res) {
- if (!TD_RES_TMQ_META(res)) {
- return NULL;
- }
-
- SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
- if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
- return processCreateStb(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_STB) {
- return processAlterStb(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_STB) {
- return processDropSTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_TABLE) {
- return processCreateTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_ALTER_TABLE) {
- return processAlterTable(&pMetaRspObj->metaRsp);
- } else if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DROP_TABLE) {
- return processDropTable(&pMetaRspObj->metaRsp);
- }
- return NULL;
-}
-
-void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); }
-
-int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
- if (!raw || !res) {
- return TSDB_CODE_INVALID_PARA;
- }
- if (TD_RES_TMQ_META(res)) {
- SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
- raw->raw = pMetaRspObj->metaRsp.metaRsp;
- raw->raw_len = pMetaRspObj->metaRsp.metaRspLen;
- raw->raw_type = pMetaRspObj->metaRsp.resMsgType;
- } else if (TD_RES_TMQ(res)) {
- SMqRspObj* rspObj = ((SMqRspObj*)res);
-
- int32_t len = 0;
- int32_t code = 0;
- tEncodeSize(tEncodeSMqDataRsp, &rspObj->rsp, len, code);
- if (code < 0) {
- return -1;
- }
-
- void* buf = taosMemoryCalloc(1, len);
- SEncoder encoder = {0};
- tEncoderInit(&encoder, buf, len);
- tEncodeSMqDataRsp(&encoder, &rspObj->rsp);
- tEncoderClear(&encoder);
-
- raw->raw = buf;
- raw->raw_len = len;
- raw->raw_type = RES_TYPE__TMQ;
- } else {
- return TSDB_CODE_TMQ_INVALID_MSG;
- }
- return TSDB_CODE_SUCCESS;
-}
-
-void tmq_free_raw(tmq_raw_data raw) {
- if (raw.raw_type == RES_TYPE__TMQ) {
- taosMemoryFree(raw.raw);
- }
-}
-
-int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
- if (!taos) {
- return TSDB_CODE_INVALID_PARA;
- }
-
- if (raw.raw_type == TDMT_VND_CREATE_STB) {
- return taosCreateStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_ALTER_STB) {
- return taosCreateStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DROP_STB) {
- return taosDropStb(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_CREATE_TABLE) {
- return taosCreateTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_ALTER_TABLE) {
- return taosAlterTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DROP_TABLE) {
- return taosDropTable(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == TDMT_VND_DELETE) {
- return taosDeleteData(taos, raw.raw, raw.raw_len);
- } else if (raw.raw_type == RES_TYPE__TMQ) {
- return tmqWriteRaw(taos, raw.raw, raw.raw_len);
- }
- return TSDB_CODE_INVALID_PARA;
-}
-
void tmq_commit_async(tmq_t* tmq, const TAOS_RES* msg, tmq_commit_cb* cb, void* param) {
//
tmqCommitInner(tmq, msg, 0, 1, cb, param);
diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp
index ec270889e2..4ea5443678 100644
--- a/source/client/test/clientTests.cpp
+++ b/source/client/test/clientTests.cpp
@@ -123,7 +123,7 @@ void createNewTable(TAOS* pConn, int32_t index) {
}
taos_free_result(pRes);
- for(int32_t i = 0; i < 100000; i += 20) {
+ for(int32_t i = 0; i < 3280; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@@ -679,30 +679,28 @@ TEST(testCase, projection_query_tables) {
TAOS_RES* pRes = taos_query(pConn, "use abc1");
taos_free_result(pRes);
- pRes = taos_query(pConn, "explain verbose true select _wstart,count(*),a from st1 partition by a interval(1s)");
- printResult(pRes);
-// pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
-// if (taos_errno(pRes) != 0) {
-// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
-// }
-// taos_free_result(pRes);
-//
-// pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
-// if (taos_errno(pRes) != 0) {
-// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
-// }
-// taos_free_result(pRes);
-//
-// pRes = taos_query(pConn, "create table tu using st1 tags(1)");
-// if (taos_errno(pRes) != 0) {
-// printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
-// }
-// taos_free_result(pRes);
-//
-// for(int32_t i = 0; i < 1; ++i) {
-// printf("create table :%d\n", i);
-// createNewTable(pConn, i);
-// }
+ pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table tu using st1 tags(1)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
+ }
+ taos_free_result(pRes);
+
+ for(int32_t i = 0; i < 2; ++i) {
+ printf("create table :%d\n", i);
+ createNewTable(pConn, i);
+ }
//
// pRes = taos_query(pConn, "select * from tu");
// if (taos_errno(pRes) != 0) {
diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt
index 1c11ee7085..9c6d941172 100644
--- a/source/common/CMakeLists.txt
+++ b/source/common/CMakeLists.txt
@@ -9,6 +9,11 @@ IF (TD_GRANT)
ADD_DEFINITIONS(-D_GRANT)
ENDIF ()
+IF (TD_STORAGE)
+ ADD_DEFINITIONS(-D_STORAGE)
+ TARGET_LINK_LIBRARIES(common PRIVATE storage)
+ENDIF ()
+
target_include_directories(
common
PUBLIC "${TD_SOURCE_DIR}/include/common"
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 6dddcc2f74..65041e1f12 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -88,7 +88,7 @@ static const SSysDbTableSchema userDBSchema[] = {
{.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
{.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "retention", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
{.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 8489627721..c65e966046 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1343,12 +1343,14 @@ SSDataBlock* createDataBlock() {
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
if (pBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
}
pBlock->pDataBlock = taosArrayInit(4, sizeof(SColumnInfoData));
if (pBlock->pDataBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pBlock);
+ return NULL;
}
return pBlock;
@@ -1423,6 +1425,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
}
void colDataDestroy(SColumnInfoData* pColData) {
+ if(!pColData) return;
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
taosMemoryFreeClear(pColData->varmeta.offset);
} else {
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 8823e63db4..adc5af1a17 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -75,7 +75,7 @@ int32_t tsMonitorMaxLogs = 100;
bool tsMonitorComp = false;
// telem
-bool tsEnableTelem = false;
+bool tsEnableTelem = true;
int32_t tsTelemInterval = 86400;
char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com";
uint16_t tsTelemPort = 80;
@@ -165,58 +165,26 @@ int32_t tsTtlUnit = 86400;
int32_t tsTtlPushInterval = 86400;
int32_t tsGrantHBInterval = 60;
-void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) {
- tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN);
- tsDiskCfg[index].level = level;
- tsDiskCfg[index].primary = primary;
- uTrace("dataDir:%s, level:%d primary:%d is configured", v1, level, primary);
-}
-
-static int32_t taosSetTfsCfg(SConfig *pCfg) {
+#ifndef _STORAGE
+int32_t taosSetTfsCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "dataDir");
memset(tsDataDir, 0, PATH_MAX);
int32_t size = taosArrayGetSize(pItem->array);
- if (size <= 0) {
- tsDiskCfgNum = 1;
- taosAddDataDir(0, pItem->str, 0, 1);
- tstrncpy(tsDataDir, pItem->str, PATH_MAX);
- if (taosMulMkDir(tsDataDir) != 0) {
- uError("failed to create dataDir:%s since %s", tsDataDir, terrstr());
- return -1;
- }
- } else {
- tsDiskCfgNum = size < TFS_MAX_DISKS ? size : TFS_MAX_DISKS;
- for (int32_t index = 0; index < tsDiskCfgNum; ++index) {
- SDiskCfg *pCfg = taosArrayGet(pItem->array, index);
- memcpy(&tsDiskCfg[index], pCfg, sizeof(SDiskCfg));
- if (pCfg->level == 0 && pCfg->primary == 1) {
- tstrncpy(tsDataDir, pCfg->dir, PATH_MAX);
- }
- if (taosMulMkDir(pCfg->dir) != 0) {
- uError("failed to create tfsDir:%s since %s", tsDataDir, terrstr());
- return -1;
- }
- }
+ tsDiskCfgNum = 1;
+ tstrncpy(tsDiskCfg[0].dir, pItem->str, TSDB_FILENAME_LEN);
+ tsDiskCfg[0].level = 0;
+ tsDiskCfg[0].primary = 1;
+ tstrncpy(tsDataDir, pItem->str, PATH_MAX);
+ if (taosMulMkDir(tsDataDir) != 0) {
+ uError("failed to create dataDir:%s", tsDataDir);
+ return -1;
}
-
- if (tsDataDir[0] == 0) {
- if (pItem->str != NULL) {
- taosAddDataDir(tsDiskCfgNum, pItem->str, 0, 1);
- tstrncpy(tsDataDir, pItem->str, PATH_MAX);
- if (taosMulMkDir(tsDataDir) != 0) {
- uError("failed to create tfsDir:%s since %s", tsDataDir, terrstr());
- return -1;
- }
- tsDiskCfgNum++;
- } else {
- uError("datadir not set");
- return -1;
- }
- }
-
return 0;
}
+#else
+int32_t taosSetTfsCfg(SConfig *pCfg);
+#endif
struct SConfig *taosGetCfg() {
return tsCfg;
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 7dd3ce34c3..533d924546 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -4262,7 +4262,6 @@ int32_t tDeserializeSServerStatusRsp(void *buf, int32_t bufLen, SServerStatusRsp
tDecoderClear(&decoder);
return 0;
}
-
int32_t tEncodeSMqOffset(SEncoder *encoder, const SMqOffset *pOffset) {
if (tEncodeI32(encoder, pOffset->vgId) < 0) return -1;
if (tEncodeI64(encoder, pOffset->offset) < 0) return -1;
@@ -4300,7 +4299,6 @@ int32_t tDecodeSMqCMCommitOffsetReq(SDecoder *decoder, SMqCMCommitOffsetReq *pRe
tEndDecode(decoder);
return 0;
}
-
int32_t tSerializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@@ -5590,7 +5588,6 @@ int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
return 0;
}
-#if 1
int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
if (pVal->type == TMQ_OFFSET__RESET_NONE) {
snprintf(buf, maxLen, "offset(reset to none)");
@@ -5609,7 +5606,6 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
}
return 0;
}
-#endif
bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
if (pLeft->type == pRight->type) {
@@ -5643,7 +5639,7 @@ int32_t tDecodeSTqOffset(SDecoder *pDecoder, STqOffset *pOffset) {
return 0;
}
-int32_t tEncodeSCheckAlterInfo(SEncoder *pEncoder, const SCheckAlterInfo *pInfo) {
+int32_t tEncodeSTqCheckInfo(SEncoder *pEncoder, const STqCheckInfo *pInfo) {
if (tEncodeCStr(pEncoder, pInfo->topic) < 0) return -1;
if (tEncodeI64(pEncoder, pInfo->ntbUid) < 0) return -1;
int32_t sz = taosArrayGetSize(pInfo->colIdList);
@@ -5655,7 +5651,7 @@ int32_t tEncodeSCheckAlterInfo(SEncoder *pEncoder, const SCheckAlterInfo *pInfo)
return pEncoder->pos;
}
-int32_t tDecodeSCheckAlterInfo(SDecoder *pDecoder, SCheckAlterInfo *pInfo) {
+int32_t tDecodeSTqCheckInfo(SDecoder *pDecoder, STqCheckInfo *pInfo) {
if (tDecodeCStrTo(pDecoder, pInfo->topic) < 0) return -1;
if (tDecodeI64(pDecoder, &pInfo->ntbUid) < 0) return -1;
int32_t sz;
diff --git a/source/common/src/ttypes.c b/source/common/src/ttypes.c
index 156b66ae86..fee89e2f37 100644
--- a/source/common/src/ttypes.c
+++ b/source/common/src/ttypes.c
@@ -392,10 +392,10 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
getStatics_i64},
{TSDB_DATA_TYPE_FLOAT, 5, FLOAT_BYTES, "FLOAT", 0, 0, tsCompressFloat, tsDecompressFloat, getStatics_f},
{TSDB_DATA_TYPE_DOUBLE, 6, DOUBLE_BYTES, "DOUBLE", 0, 0, tsCompressDouble, tsDecompressDouble, getStatics_d},
- {TSDB_DATA_TYPE_VARCHAR, 6, 0, "VARCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_bin},
+ {TSDB_DATA_TYPE_VARCHAR, 6, 1, "VARCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_bin},
{TSDB_DATA_TYPE_TIMESTAMP, 9, LONG_BYTES, "TIMESTAMP", INT64_MIN, INT64_MAX, tsCompressTimestamp,
tsDecompressTimestamp, getStatics_i64},
- {TSDB_DATA_TYPE_NCHAR, 5, 8, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr},
+ {TSDB_DATA_TYPE_NCHAR, 5, 1, "NCHAR", 0, 0, tsCompressString, tsDecompressString, getStatics_nchr},
{TSDB_DATA_TYPE_UTINYINT, 16, CHAR_BYTES, "TINYINT UNSIGNED", 0, UINT8_MAX, tsCompressTinyint, tsDecompressTinyint,
getStatics_u8},
{TSDB_DATA_TYPE_USMALLINT, 17, SHORT_BYTES, "SMALLINT UNSIGNED", 0, UINT16_MAX, tsCompressSmallint,
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
index 647af20fcf..ec761e6441 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
@@ -225,7 +225,8 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_ADD_CHECK_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE_CHECK_INFO_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, mmPutMsgToFetchQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DEPLOY_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DROP_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index 7c6807ab87..e610b41a04 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -338,6 +338,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_QUERY, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_CONTINUE, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_EXEC_RSMA, vmPutMsgToQueryQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_FETCH, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
@@ -361,7 +362,8 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_CHANGE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_VG_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_MQ_COMMIT_OFFSET, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_CHECK_ALTER_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_ADD_CHECK_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE_CHECK_INFO, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CONSUME, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DELETE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_BATCH_DEL, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 455da6a40e..8cff7fe48e 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -636,6 +636,7 @@ typedef struct {
int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj);
int32_t tDecodeSStreamObj(SDecoder* pDecoder, SStreamObj* pObj);
+void tFreeStreamObj(SStreamObj* pObj);
typedef struct {
char streamName[TSDB_STREAM_FNAME_LEN];
diff --git a/source/dnode/mnode/impl/inc/mndStb.h b/source/dnode/mnode/impl/inc/mndStb.h
index 44a7fdadde..010199a89f 100644
--- a/source/dnode/mnode/impl/inc/mndStb.h
+++ b/source/dnode/mnode/impl/inc/mndStb.h
@@ -34,6 +34,7 @@ int32_t mndCheckCreateStbReq(SMCreateStbReq *pCreate);
SDbObj *mndAcquireDbByStb(SMnode *pMnode, const char *stbName);
int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreate, SDbObj *pDb);
int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb);
+void mndFreeStb(SStbObj *pStb);
void mndExtractDbNameFromStbFullName(const char *stbFullName, char *dst);
void mndExtractTbNameFromStbFullName(const char *stbFullName, char *dst, int32_t dstSize);
diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c
index 08ce161409..e6f1a40993 100644
--- a/source/dnode/mnode/impl/src/mndDef.c
+++ b/source/dnode/mnode/impl/src/mndDef.c
@@ -116,6 +116,25 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj) {
return 0;
}
+void tFreeStreamObj(SStreamObj *pStream) {
+ taosMemoryFree(pStream->sql);
+ taosMemoryFree(pStream->ast);
+ taosMemoryFree(pStream->physicalPlan);
+ if (pStream->outputSchema.nCols) taosMemoryFree(pStream->outputSchema.pSchema);
+
+ int32_t sz = taosArrayGetSize(pStream->tasks);
+ for (int32_t i = 0; i < sz; i++) {
+ SArray *pLevel = taosArrayGetP(pStream->tasks, i);
+ int32_t taskSz = taosArrayGetSize(pLevel);
+ for (int32_t j = 0; j < taskSz; j++) {
+ SStreamTask *pTask = taosArrayGetP(pLevel, j);
+ tFreeSStreamTask(pTask);
+ }
+ taosArrayDestroy(pLevel);
+ }
+ taosArrayDestroy(pStream->tasks);
+}
+
SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp));
if (pVgEpNew == NULL) return NULL;
diff --git a/source/dnode/mnode/impl/src/mndOffset.c b/source/dnode/mnode/impl/src/mndOffset.c
index 9f6108004d..037a46345f 100644
--- a/source/dnode/mnode/impl/src/mndOffset.c
+++ b/source/dnode/mnode/impl/src/mndOffset.c
@@ -15,10 +15,10 @@
#define _DEFAULT_SOURCE
#include "mndOffset.h"
-#include "mndPrivilege.h"
#include "mndDb.h"
#include "mndDnode.h"
#include "mndMnode.h"
+#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndStb.h"
#include "mndTopic.h"
@@ -305,7 +305,7 @@ int32_t mndDropOffsetByDB(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
sdbRelease(pSdb, pOffset);
}
- return code;
+ return code;
}
int32_t mndDropOffsetByTopic(SMnode *pMnode, STrans *pTrans, const char *topic) {
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index a24b7ef459..3bfd7eb596 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -424,6 +424,8 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
}
mndAddTaskToTaskSet(taskSourceLevel, pTask);
+ pTask->triggerParam = 0;
+
// source
pTask->taskLevel = TASK_LEVEL__SOURCE;
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 006d9e749c..2fb934aaad 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -489,7 +489,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
smaObj.uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN);
ASSERT(smaObj.uid != 0);
char resultTbName[TSDB_TABLE_FNAME_LEN + 16] = {0};
- snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "%s_td_tsma_rst_tb",pCreate->name);
+ snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "%s_td_tsma_rst_tb", pCreate->name);
memcpy(smaObj.dstTbName, resultTbName, TSDB_TABLE_FNAME_LEN);
smaObj.dstTbUid = mndGenerateUid(smaObj.dstTbName, TSDB_TABLE_FNAME_LEN);
smaObj.stbUid = pStb->uid;
@@ -530,7 +530,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
streamObj.sourceDbUid = pDb->uid;
streamObj.targetDbUid = pDb->uid;
streamObj.version = 1;
- streamObj.sql = pCreate->sql;
+ streamObj.sql = strdup(pCreate->sql);
streamObj.smaId = smaObj.uid;
streamObj.watermark = pCreate->watermark;
streamObj.trigger = STREAM_TRIGGER_WINDOW_CLOSE;
@@ -585,6 +585,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
return -1;
}
if (pAst != NULL) nodesDestroyNode(pAst);
+ nodesDestroyNode((SNode *)pPlan);
int32_t code = -1;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
@@ -609,6 +610,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
code = 0;
_OVER:
+ tFreeStreamObj(&streamObj);
mndDestroySmaObj(&smaObj);
mndTransDrop(pTrans);
return code;
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 3e747b66c8..ebec3d5ea6 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -266,6 +266,15 @@ _OVER:
return pRow;
}
+void mndFreeStb(SStbObj *pStb) {
+ taosArrayDestroy(pStb->pFuncs);
+ taosMemoryFreeClear(pStb->pColumns);
+ taosMemoryFreeClear(pStb->pTags);
+ taosMemoryFreeClear(pStb->comment);
+ taosMemoryFreeClear(pStb->pAst1);
+ taosMemoryFreeClear(pStb->pAst2);
+}
+
static int32_t mndStbActionInsert(SSdb *pSdb, SStbObj *pStb) {
mTrace("stb:%s, perform insert action, row:%p", pStb->name, pStb);
return 0;
@@ -273,12 +282,7 @@ static int32_t mndStbActionInsert(SSdb *pSdb, SStbObj *pStb) {
static int32_t mndStbActionDelete(SSdb *pSdb, SStbObj *pStb) {
mTrace("stb:%s, perform delete action, row:%p", pStb->name, pStb);
- taosArrayDestroy(pStb->pFuncs);
- taosMemoryFreeClear(pStb->pColumns);
- taosMemoryFreeClear(pStb->pTags);
- taosMemoryFreeClear(pStb->comment);
- taosMemoryFreeClear(pStb->pAst1);
- taosMemoryFreeClear(pStb->pAst2);
+ mndFreeStb(pStb);
return 0;
}
@@ -438,6 +442,8 @@ static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pSt
if (req.rollup) {
req.rsmaParam.maxdelay[0] = pStb->maxdelay[0];
req.rsmaParam.maxdelay[1] = pStb->maxdelay[1];
+ req.rsmaParam.watermark[0] = pStb->watermark[0];
+ req.rsmaParam.watermark[1] = pStb->watermark[1];
if (pStb->ast1Len > 0) {
if (mndConvertRsmaTask(&req.rsmaParam.qmsg[0], &req.rsmaParam.qmsgLen[0], pStb->pAst1, pStb->uid,
STREAM_TRIGGER_WINDOW_CLOSE, req.rsmaParam.watermark[0]) < 0) {
@@ -1145,7 +1151,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p
return 0;
}
-int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t suid, col_id_t colId) {
+static int32_t mndCheckAlterColForTopic(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
while (1) {
@@ -1154,7 +1160,7 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
if (pIter == NULL) break;
mDebug("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s",
- pTopic->name, stbname, suid, colId, pTopic->subType, pTopic->sql);
+ pTopic->name, stbFullName, suid, colId, pTopic->subType, pTopic->sql);
if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
sdbRelease(pSdb, pTopic);
continue;
@@ -1192,20 +1198,66 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
}
+ return 0;
+}
+static int32_t mndCheckAlterColForStream(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
+ while (1) {
+ SStreamObj *pStream = NULL;
+ pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
+ if (pIter == NULL) break;
+
+ SNode *pAst = NULL;
+ if (nodesStringToNode(pStream->ast, &pAst) != 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ SNodeList *pNodeList = NULL;
+ nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
+ SNode *pNode = NULL;
+ FOREACH(pNode, pNodeList) {
+ SColumnNode *pCol = (SColumnNode *)pNode;
+
+ if (pCol->tableId != suid) {
+ mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
+ goto NEXT;
+ }
+ if (pCol->colId > 0 && pCol->colId == colId) {
+ sdbRelease(pSdb, pStream);
+ nodesDestroyNode(pAst);
+ terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
+ mError("stream:%s, check colId:%d conflicted", pStream->name, pCol->colId);
+ return -1;
+ }
+ mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
+ }
+
+ NEXT:
+ sdbRelease(pSdb, pStream);
+ nodesDestroyNode(pAst);
+ }
+ return 0;
+}
+
+static int32_t mndCheckAlterColForTSma(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
while (1) {
SSmaObj *pSma = NULL;
pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma);
if (pIter == NULL) break;
- mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name, stbname,
- suid, colId, pSma->sql);
+ mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name,
+ stbFullName, suid, colId, pSma->sql);
SNode *pAst = NULL;
if (nodesStringToNode(pSma->ast, &pAst) != 0) {
terrno = TSDB_CODE_SDB_INVALID_DATA_CONTENT;
mError("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d failed since parse AST err",
- pSma->name, stbname, suid, colId);
+ pSma->name, stbFullName, suid, colId);
return -1;
}
@@ -1218,7 +1270,7 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
if ((pCol->tableId != suid) && (pSma->stbUid != suid)) {
mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
- goto NEXT2;
+ goto NEXT;
}
if ((pCol->colId) > 0 && (pCol->colId == colId)) {
sdbRelease(pSdb, pSma);
@@ -1230,11 +1282,24 @@ int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t
mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
}
- NEXT2:
+ NEXT:
sdbRelease(pSdb, pSma);
nodesDestroyNode(pAst);
}
+ return 0;
+}
+int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId) {
+ if (mndCheckAlterColForTopic(pMnode, stbFullName, suid, colId) < 0) {
+ return -1;
+ }
+ if (mndCheckAlterColForStream(pMnode, stbFullName, suid, colId) < 0) {
+ return -1;
+ }
+
+ if (mndCheckAlterColForTSma(pMnode, stbFullName, suid, colId) < 0) {
+ return -1;
+ }
return 0;
}
@@ -1930,6 +1995,98 @@ _OVER:
return code;
}
+static int32_t mndCheckDropStbForTopic(SMnode *pMnode, const char *stbFullName, int64_t suid) {
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
+ while (1) {
+ SMqTopicObj *pTopic = NULL;
+ pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic);
+ if (pIter == NULL) break;
+
+ if (pTopic->subType == TOPIC_SUB_TYPE__TABLE) {
+ if (pTopic->stbUid == suid) {
+ sdbRelease(pSdb, pTopic);
+ return -1;
+ }
+ }
+
+ if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
+ sdbRelease(pSdb, pTopic);
+ continue;
+ }
+
+ SNode *pAst = NULL;
+ if (nodesStringToNode(pTopic->ast, &pAst) != 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ SNodeList *pNodeList = NULL;
+ nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
+ SNode *pNode = NULL;
+ FOREACH(pNode, pNodeList) {
+ SColumnNode *pCol = (SColumnNode *)pNode;
+
+ if (pCol->tableId == suid) {
+ sdbRelease(pSdb, pTopic);
+ nodesDestroyNode(pAst);
+ return -1;
+ } else {
+ goto NEXT;
+ }
+ }
+ NEXT:
+ sdbRelease(pSdb, pTopic);
+ nodesDestroyNode(pAst);
+ }
+ return 0;
+}
+
+static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName, int64_t suid) {
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
+ while (1) {
+ SStreamObj *pStream = NULL;
+ pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
+ if (pIter == NULL) break;
+
+ if (pStream->smaId != 0) {
+ sdbRelease(pSdb, pStream);
+ continue;
+ }
+
+ if (pStream->targetStbUid == suid) {
+ sdbRelease(pSdb, pStream);
+ return -1;
+ }
+
+ SNode *pAst = NULL;
+ if (nodesStringToNode(pStream->ast, &pAst) != 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ SNodeList *pNodeList = NULL;
+ nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList);
+ SNode *pNode = NULL;
+ FOREACH(pNode, pNodeList) {
+ SColumnNode *pCol = (SColumnNode *)pNode;
+
+ if (pCol->tableId == suid) {
+ sdbRelease(pSdb, pStream);
+ nodesDestroyNode(pAst);
+ return -1;
+ } else {
+ goto NEXT;
+ }
+ }
+ NEXT:
+ sdbRelease(pSdb, pStream);
+ nodesDestroyNode(pAst);
+ }
+ return 0;
+}
+
static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
@@ -1971,6 +2128,16 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
goto _OVER;
}
+ if (mndCheckDropStbForTopic(pMnode, dropReq.name, pStb->uid) < 0) {
+ terrno = TSDB_CODE_MND_TOPIC_MUST_BE_DELETED;
+ goto _OVER;
+ }
+
+ if (mndCheckDropStbForStream(pMnode, dropReq.name, pStb->uid) < 0) {
+ terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
+ goto _OVER;
+ }
+
code = mndDropStb(pMnode, pReq, pDb, pStb);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 8c453e0c88..6dc8e2072b 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -167,6 +167,9 @@ static int32_t mndStreamActionInsert(SSdb *pSdb, SStreamObj *pStream) {
static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream) {
mTrace("stream:%s, perform delete action", pStream->name);
+ taosWLockLatch(&pStream->lock);
+ tFreeStreamObj(pStream);
+ taosWUnLockLatch(&pStream->lock);
return 0;
}
@@ -493,10 +496,17 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
stbObj.uid = pStream->targetStbUid;
- if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER;
+ if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) {
+ mndFreeStb(&stbObj);
+ goto _OVER;
+ }
+
+ tFreeSMCreateStbReq(&createReq);
+ mndFreeStb(&stbObj);
return 0;
_OVER:
+ tFreeSMCreateStbReq(&createReq);
mndReleaseStb(pMnode, pStb);
mndReleaseDb(pMnode, pDb);
return -1;
@@ -715,6 +725,7 @@ _OVER:
mndReleaseDb(pMnode, pDb);
tFreeSCMCreateStreamReq(&createStreamReq);
+ tFreeStreamObj(&streamObj);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 3f310ee9c0..10e520d9ec 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -356,31 +356,44 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
taosArrayPush(pConsumerEp->vgs, &pRebVg->pVgEp);
pRebVg->newConsumerId = pConsumerEp->consumerId;
taosArrayPush(pOutput->rebVgs, pRebVg);
- mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId,
+ mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 " (second scan) (not enough)", pRebVg->pVgEp->vgId,
pConsumerEp->consumerId);
}
}
+ ASSERT(pIter == NULL);
// 7. handle unassigned vg
if (taosHashGetSize(pOutput->pSub->consumerHash) != 0) {
// if has consumer, assign all left vg
while (1) {
+ SMqConsumerEp *pConsumerEp = NULL;
pRemovedIter = taosHashIterate(pHash, pRemovedIter);
- if (pRemovedIter == NULL) break;
- pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter);
- ASSERT(pIter);
+ if (pRemovedIter == NULL) {
+ if (pIter != NULL) {
+ taosHashCancelIterate(pOutput->pSub->consumerHash, pIter);
+ pIter = NULL;
+ }
+ break;
+ }
+ while (1) {
+ pIter = taosHashIterate(pOutput->pSub->consumerHash, pIter);
+ ASSERT(pIter);
+ pConsumerEp = (SMqConsumerEp *)pIter;
+ ASSERT(pConsumerEp->consumerId > 0);
+ if (taosArrayGetSize(pConsumerEp->vgs) == minVgCnt) {
+ break;
+ }
+ }
pRebVg = (SMqRebOutputVg *)pRemovedIter;
- SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
- ASSERT(pConsumerEp->consumerId > 0);
taosArrayPush(pConsumerEp->vgs, &pRebVg->pVgEp);
pRebVg->newConsumerId = pConsumerEp->consumerId;
if (pRebVg->newConsumerId == pRebVg->oldConsumerId) {
- mInfo("mq rebalance: skip vg %d for same consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId,
+ mInfo("mq rebalance: skip vg %d for same consumer:%" PRId64 " (second scan)", pRebVg->pVgEp->vgId,
pConsumerEp->consumerId);
continue;
}
taosArrayPush(pOutput->rebVgs, pRebVg);
- mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 ",(second scan)", pRebVg->pVgEp->vgId,
+ mInfo("mq rebalance: add vgId:%d to consumer:%" PRId64 " (second scan) (unassigned)", pRebVg->pVgEp->vgId,
pConsumerEp->consumerId);
}
} else {
@@ -571,7 +584,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, topic);
/*ASSERT(pTopic);*/
if (pTopic == NULL) {
- mError("rebalance %s failed since topic %s was dropped, abort", pRebInfo->key, topic);
+ mError("mq rebalance %s failed since topic %s not exist, abort", pRebInfo->key, topic);
continue;
}
taosRLockLatch(&pTopic->lock);
@@ -601,7 +614,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
// TODO replace assert with error check
if (mndPersistRebResult(pMnode, pMsg, &rebOutput) < 0) {
- mError("persist rebalance output error, possibly vnode splitted or dropped");
+ mError("mq rebalance persist rebalance output error, possibly vnode splitted or dropped");
}
taosArrayDestroy(pRebInfo->lostConsumers);
taosArrayDestroy(pRebInfo->newConsumers);
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index 820bb4b636..ff208eae60 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -57,7 +57,8 @@ int32_t mndInitTopic(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_TOPIC, mndProcessCreateTopicReq);
mndSetMsgHandle(pMnode, TDMT_MND_DROP_TOPIC, mndProcessDropTopicReq);
mndSetMsgHandle(pMnode, TDMT_VND_DROP_TOPIC_RSP, mndTransProcessRsp);
- mndSetMsgHandle(pMnode, TDMT_VND_CHECK_ALTER_INFO_RSP, mndTransProcessRsp);
+ mndSetMsgHandle(pMnode, TDMT_VND_ADD_CHECK_INFO_RSP, mndTransProcessRsp);
+ mndSetMsgHandle(pMnode, TDMT_VND_DELETE_CHECK_INFO_RSP, mndTransProcessRsp);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndRetrieveTopic);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_TOPICS, mndCancelGetNextTopic);
@@ -450,7 +451,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
if (topicObj.ntbUid != 0) {
- SCheckAlterInfo info;
+ STqCheckInfo info;
memcpy(info.topic, topicObj.name, TSDB_TOPIC_FNAME_LEN);
info.ntbUid = topicObj.ntbUid;
info.colIdList = topicObj.ntbColIds;
@@ -470,7 +471,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
// encoder check alter info
int32_t len;
int32_t code;
- tEncodeSize(tEncodeSCheckAlterInfo, &info, len, code);
+ tEncodeSize(tEncodeSTqCheckInfo, &info, len, code);
if (code < 0) {
sdbRelease(pSdb, pVgroup);
mndTransDrop(pTrans);
@@ -481,7 +482,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
SEncoder encoder;
tEncoderInit(&encoder, abuf, len);
- if (tEncodeSCheckAlterInfo(&encoder, &info) < 0) {
+ if (tEncodeSTqCheckInfo(&encoder, &info) < 0) {
sdbRelease(pSdb, pVgroup);
mndTransDrop(pTrans);
return -1;
@@ -493,7 +494,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
action.pCont = buf;
action.contLen = sizeof(SMsgHead) + len;
- action.msgType = TDMT_VND_CHECK_ALTER_INFO;
+ action.msgType = TDMT_VND_ADD_CHECK_INFO;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(buf);
sdbRelease(pSdb, pVgroup);
@@ -659,12 +660,14 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
mDebug("trans:%d, used to drop topic:%s", pTrans->id, pTopic->name);
+#if 0
if (mndDropOffsetByTopic(pMnode, pTrans, dropReq.name) < 0) {
ASSERT(0);
mndTransDrop(pTrans);
mndReleaseTopic(pMnode, pTopic);
return -1;
}
+#endif
// TODO check if rebalancing
if (mndDropSubByTopic(pMnode, pTrans, dropReq.name) < 0) {
@@ -675,6 +678,37 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
return -1;
}
+ if (pTopic->ntbUid != 0) {
+ // broadcast to all vnode
+ void *pIter = NULL;
+ SVgObj *pVgroup = NULL;
+ while (1) {
+ pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
+ if (pIter == NULL) break;
+ if (!mndVgroupInDb(pVgroup, pTopic->dbUid)) {
+ sdbRelease(pSdb, pVgroup);
+ continue;
+ }
+
+ void *buf = taosMemoryCalloc(1, sizeof(SMsgHead) + TSDB_TOPIC_FNAME_LEN);
+ void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
+ ((SMsgHead *)buf)->vgId = htonl(pVgroup->vgId);
+ memcpy(abuf, pTopic->name, TSDB_TOPIC_FNAME_LEN);
+
+ STransAction action = {0};
+ action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
+ action.pCont = buf;
+ action.contLen = sizeof(SMsgHead) + TSDB_TOPIC_FNAME_LEN;
+ action.msgType = TDMT_VND_DELETE_CHECK_INFO;
+ if (mndTransAppendRedoAction(pTrans, &action) != 0) {
+ taosMemoryFree(buf);
+ sdbRelease(pSdb, pVgroup);
+ mndTransDrop(pTrans);
+ return -1;
+ }
+ }
+ }
+
int32_t code = mndDropTopic(pMnode, pTrans, pReq, pTopic);
mndReleaseTopic(pMnode, pTopic);
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index 0567ec4e14..09eed7fb32 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -509,6 +509,7 @@ int32_t mndAllocSmaVgroup(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup) {
pVgroup->replica = 1;
if (mndGetAvailableDnode(pMnode, pDb, pVgroup, pArray) != 0) return -1;
+ taosArrayDestroy(pArray);
mInfo("db:%s, sma vgId:%d is alloced", pDb->name, pVgroup->vgId);
return 0;
@@ -1862,4 +1863,4 @@ _OVER:
#endif
}
-bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; }
\ No newline at end of file
+bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; }
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index b218d982e9..a3e17f5377 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -24,6 +24,7 @@ target_sources(
"src/meta/metaCommit.c"
"src/meta/metaEntry.c"
"src/meta/metaSnapshot.c"
+ "src/meta/metaCache.c"
# sma
"src/sma/smaEnv.c"
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 18a7583f4c..ec27ba8ce6 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -63,6 +63,7 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId);
int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen);
int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list);
int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list);
+int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray* list);
void *vnodeGetIdx(SVnode *pVnode);
void *vnodeGetIvtIdx(SVnode *pVnode);
@@ -91,9 +92,11 @@ typedef struct SMetaEntry SMetaEntry;
void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
void metaReaderClear(SMetaReader *pReader);
int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
+int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags);
int32_t metaReadNext(SMetaReader *pReader);
-const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *tagVal);
+const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
+bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
typedef struct SMetaFltParam {
tb_uid_t suid;
@@ -128,19 +131,19 @@ typedef struct STsdbReader STsdbReader;
#define LASTROW_RETRIEVE_TYPE_ALL 0x1
#define LASTROW_RETRIEVE_TYPE_SINGLE 0x2
-int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid);
-int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader,
- const char *idstr);
-void tsdbReaderClose(STsdbReader *pReader);
-bool tsdbNextDataBlock(STsdbReader *pReader);
-void tsdbRetrieveDataBlockInfo(STsdbReader *pReader, SDataBlockInfo *pDataBlockInfo);
-int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SColumnDataAgg ***pBlockStatis, bool *allHave);
-SArray *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
-int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
-int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
-int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle);
-void *tsdbGetIdx(SMeta *pMeta);
-void *tsdbGetIvtIdx(SMeta *pMeta);
+int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid);
+int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader,
+ const char *idstr);
+void tsdbReaderClose(STsdbReader *pReader);
+bool tsdbNextDataBlock(STsdbReader *pReader);
+void tsdbRetrieveDataBlockInfo(STsdbReader *pReader, SDataBlockInfo *pDataBlockInfo);
+int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SColumnDataAgg ***pBlockStatis, bool *allHave);
+SArray *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
+int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
+int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTableBlockInfo);
+int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle);
+void *tsdbGetIdx(SMeta *pMeta);
+void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h
index a72546fe86..adfbb91920 100644
--- a/source/dnode/vnode/src/inc/meta.h
+++ b/source/dnode/vnode/src/inc/meta.h
@@ -23,8 +23,9 @@
extern "C" {
#endif
-typedef struct SMetaIdx SMetaIdx;
-typedef struct SMetaDB SMetaDB;
+typedef struct SMetaIdx SMetaIdx;
+typedef struct SMetaDB SMetaDB;
+typedef struct SMetaCache SMetaCache;
// metaDebug ==================
// clang-format off
@@ -60,6 +61,12 @@ static FORCE_INLINE tb_uid_t metaGenerateUid(SMeta* pMeta) { return tGenIdPI64()
// metaTable ==================
int metaHandleEntry(SMeta* pMeta, const SMetaEntry* pME);
+// metaCache ==================
+int32_t metaCacheOpen(SMeta* pMeta);
+void metaCacheClose(SMeta* pMeta);
+int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo);
+int32_t metaCacheDrop(SMeta* pMeta, int64_t uid);
+
struct SMeta {
TdThreadRwlock lock;
@@ -84,6 +91,8 @@ struct SMeta {
TTB* pStreamDb;
SMetaIdx* pIdx;
+
+ SMetaCache* pCache;
};
typedef struct {
@@ -92,6 +101,12 @@ typedef struct {
} STbDbKey;
#pragma pack(push, 1)
+typedef struct {
+ tb_uid_t suid;
+ int64_t version;
+ int32_t skmVer;
+} SUidIdxVal;
+
typedef struct {
tb_uid_t uid;
int32_t sver;
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
index 944d7759b2..ca77042bb2 100644
--- a/source/dnode/vnode/src/inc/sma.h
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -32,7 +32,8 @@ extern "C" {
#define smaTrace(...) do { if (smaDebugFlag & DEBUG_TRACE) { taosPrintLog("SMA ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
-#define RSMA_TASK_INFO_HASH_SLOT 8
+#define RSMA_TASK_INFO_HASH_SLOT (8)
+#define RSMA_EXECUTOR_MAX (1)
typedef struct SSmaEnv SSmaEnv;
typedef struct SSmaStat SSmaStat;
@@ -57,9 +58,10 @@ typedef struct {
void *tmrHandle; // shared by all fetch tasks
} SSmaMgmt;
-#define SMA_ENV_LOCK(env) (&(env)->lock)
-#define SMA_ENV_TYPE(env) ((env)->type)
-#define SMA_ENV_STAT(env) ((env)->pStat)
+#define SMA_ENV_LOCK(env) (&(env)->lock)
+#define SMA_ENV_TYPE(env) ((env)->type)
+#define SMA_ENV_STAT(env) ((env)->pStat)
+#define SMA_RSMA_STAT(sma) ((SRSmaStat *)SMA_ENV_STAT((SSmaEnv *)(sma)->pRSmaEnv))
struct STSmaStat {
int8_t state; // ETsdbSmaStat
@@ -86,15 +88,17 @@ struct SQTaskFWriter {
};
struct SRSmaStat {
- SSma *pSma;
- int64_t commitAppliedVer; // vnode applied version for async commit
- int64_t refId; // shared by fetch tasks
- SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
- int8_t triggerStat; // shared by fetch tasks
- int8_t commitStat; // 0 not in committing, 1 in committing
- SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
- SHashObj *rsmaInfoHash; // key: stbUid, value: SRSmaInfo;
- SHashObj *iRsmaInfoHash; // key: stbUid, value: SRSmaInfo; immutable rsmaInfoHash
+ SSma *pSma;
+ int64_t commitAppliedVer; // vnode applied version for async commit
+ int64_t refId; // shared by fetch tasks
+ volatile int64_t nBufItems; // number of items in queue buffer
+ SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
+ volatile int8_t nExecutor; // [1, max(half of query threads, 4)]
+ int8_t triggerStat; // shared by fetch tasks
+ int8_t commitStat; // 0 not in committing, 1 in committing
+ SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
+ SHashObj *infoHash; // key: suid, value: SRSmaInfo
+ tsem_t notEmpty; // has items in queue buffer
};
struct SSmaStat {
@@ -105,34 +109,42 @@ struct SSmaStat {
T_REF_DECLARE()
};
-#define SMA_TSMA_STAT(s) (&(s)->tsmaStat)
-#define SMA_RSMA_STAT(s) (&(s)->rsmaStat)
-#define RSMA_INFO_HASH(r) ((r)->rsmaInfoHash)
-#define RSMA_IMU_INFO_HASH(r) ((r)->iRsmaInfoHash)
-#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
-#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
-#define RSMA_REF_ID(r) ((r)->refId)
-#define RSMA_FS_LOCK(r) (&(r)->lock)
+#define SMA_STAT_TSMA(s) (&(s)->tsmaStat)
+#define SMA_STAT_RSMA(s) (&(s)->rsmaStat)
+#define RSMA_INFO_HASH(r) ((r)->infoHash)
+#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
+#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
+#define RSMA_REF_ID(r) ((r)->refId)
+#define RSMA_FS_LOCK(r) (&(r)->lock)
struct SRSmaInfoItem {
- int8_t level;
- int8_t triggerStat;
- int32_t maxDelay;
- tmr_h tmrId;
+ int8_t level : 4;
+ int8_t fetchLevel : 4;
+ int8_t triggerStat;
+ uint16_t nSkipped;
+ int32_t maxDelay; // ms
+ tmr_h tmrId;
};
struct SRSmaInfo {
STSchema *pTSchema;
int64_t suid;
- int64_t refId; // refId of SRSmaStat
+ int64_t refId; // refId of SRSmaStat
+ int64_t lastRecv; // ms
+ int8_t assigned; // 0 idle, 1 assgined for exec
int8_t delFlag;
+ int16_t padding;
T_REF_DECLARE()
SRSmaInfoItem items[TSDB_RETENTION_L2];
void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t
- void *iTaskInfo[TSDB_RETENTION_L2]; // immutable
+ STaosQueue *queue; // buffer queue of SubmitReq
+ STaosQall *qall; // buffer qall of SubmitReq
+ void *iTaskInfo[TSDB_RETENTION_L2]; // immutable qTaskInfo_t
+ STaosQueue *iQueue; // immutable buffer queue of SubmitReq
+ STaosQall *iQall; // immutable buffer qall of SubmitReq
};
-#define RSMA_INFO_HEAD_LEN 32
+#define RSMA_INFO_HEAD_LEN offsetof(SRSmaInfo, items)
#define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1)
#define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1)
#define RSMA_INFO_QTASK(r, i) ((r)->taskInfo[i])
@@ -161,6 +173,12 @@ enum {
RSMA_RESTORE_SYNC = 2,
};
+typedef enum {
+ RSMA_EXEC_OVERFLOW = 1, // triggered by queue buf overflow
+ RSMA_EXEC_TIMEOUT = 2, // triggered by timer
+ RSMA_EXEC_COMMIT = 3, // triggered by commit
+} ERsmaExecType;
+
void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
@@ -228,12 +246,13 @@ static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) {
void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName);
void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName);
-int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc);
+int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
+int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type);
int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
int32_t tdProcessRSmaRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer);
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index a1dba41c94..cb5ec7aabe 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -117,16 +117,15 @@ typedef struct {
struct STQ {
SVnode* pVnode;
char* path;
- SHashObj* pushMgr; // consumerId -> STqHandle*
- SHashObj* handles; // subKey -> STqHandle
- SHashObj* pAlterInfo; // topic -> SAlterCheckInfo
+ SHashObj* pPushMgr; // consumerId -> STqHandle*
+ SHashObj* pHandle; // subKey -> STqHandle
+ SHashObj* pCheckInfo; // topic -> SAlterCheckInfo
STqOffsetStore* pOffsetStore;
- TDB* pMetaStore;
+ TDB* pMetaDB;
TTB* pExecStore;
-
- TTB* pAlterInfoStore;
+ TTB* pCheckStore;
SStreamMeta* pStreamMeta;
};
@@ -155,6 +154,9 @@ int32_t tqMetaClose(STQ* pTq);
int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle);
int32_t tqMetaDeleteHandle(STQ* pTq, const char* key);
int32_t tqMetaRestoreHandle(STQ* pTq);
+int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen);
+int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key);
+int32_t tqMetaRestoreCheckInfo(STQ* pTq);
typedef struct {
int32_t size;
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index f1e980c026..d1f5cfb122 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -45,7 +45,7 @@ typedef struct SBlockIdx SBlockIdx;
typedef struct SBlock SBlock;
typedef struct SBlockL SBlockL;
typedef struct SColData SColData;
-typedef struct SBlockDataHdr SBlockDataHdr;
+typedef struct SDiskDataHdr SDiskDataHdr;
typedef struct SBlockData SBlockData;
typedef struct SDelFile SDelFile;
typedef struct SHeadFile SHeadFile;
@@ -61,7 +61,11 @@ typedef struct SRowIter SRowIter;
typedef struct STsdbFS STsdbFS;
typedef struct SRowMerger SRowMerger;
typedef struct STsdbReadSnap STsdbReadSnap;
+typedef struct SBlockInfo SBlockInfo;
+typedef struct SSmaInfo SSmaInfo;
+typedef struct SBlockCol SBlockCol;
+#define TSDB_FILE_DLMT ((uint32_t)0xF00AFA0F)
#define TSDB_MAX_SUBBLOCKS 8
#define TSDB_FHDR_SIZE 512
@@ -113,10 +117,14 @@ int32_t tPutBlock(uint8_t *p, void *ph);
int32_t tGetBlock(uint8_t *p, void *ph);
int32_t tBlockCmprFn(const void *p1, const void *p2);
bool tBlockHasSma(SBlock *pBlock);
+// SBlockL
+int32_t tPutBlockL(uint8_t *p, void *ph);
+int32_t tGetBlockL(uint8_t *p, void *ph);
// SBlockIdx
int32_t tPutBlockIdx(uint8_t *p, void *ph);
int32_t tGetBlockIdx(uint8_t *p, void *ph);
int32_t tCmprBlockIdx(void const *lhs, void const *rhs);
+int32_t tCmprBlockL(void const *lhs, void const *rhs);
// SColdata
void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn);
void tColDataReset(SColData *pColData);
@@ -131,20 +139,25 @@ int32_t tGetColData(uint8_t *p, SColData *pColData);
#define tBlockDataLastRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1)
#define tBlockDataFirstKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataFirstRow(PBLOCKDATA))
#define tBlockDataLastKey(PBLOCKDATA) TSDBROW_KEY(&tBlockDataLastRow(PBLOCKDATA))
-int32_t tBlockDataInit(SBlockData *pBlockData);
+
+int32_t tBlockDataCreate(SBlockData *pBlockData);
+void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear);
+int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema);
+int32_t tBlockDataInitEx(SBlockData *pBlockData, SBlockData *pBlockDataFrom);
void tBlockDataReset(SBlockData *pBlockData);
-int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema);
-int32_t tBlockDataCorrectSchema(SBlockData *pBlockData, SBlockData *pBlockDataFrom);
-void tBlockDataClearData(SBlockData *pBlockData);
-void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear);
-int32_t tBlockDataAddColData(SBlockData *pBlockData, int32_t iColData, SColData **ppColData);
-int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema);
-int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData);
-int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest);
+int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid);
+void tBlockDataClear(SBlockData *pBlockData);
SColData *tBlockDataGetColDataByIdx(SBlockData *pBlockData, int32_t idx);
void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColData);
-int32_t tPutBlockData(uint8_t *p, SBlockData *pBlockData);
-int32_t tGetBlockData(uint8_t *p, SBlockData *pBlockData);
+int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest);
+int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData);
+int32_t tBlockDataAddColData(SBlockData *pBlockData, int32_t iColData, SColData **ppColData);
+int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut, int32_t *szOut, uint8_t *aBuf[],
+ int32_t aBufN[]);
+int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]);
+// SDiskDataHdr
+int32_t tPutDiskDataHdr(uint8_t *p, void *ph);
+int32_t tGetDiskDataHdr(uint8_t *p, void *ph);
// SDelIdx
int32_t tPutDelIdx(uint8_t *p, void *ph);
int32_t tGetDelIdx(uint8_t *p, void *ph);
@@ -168,13 +181,25 @@ void tsdbFidKeyRange(int32_t fid, int32_t minutes, int8_t precision, TSKEY *m
int32_t tsdbFidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int64_t now);
int32_t tsdbBuildDeleteSkyline(SArray *aDelData, int32_t sidx, int32_t eidx, SArray *aSkyline);
void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg);
+int32_t tPutColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg);
+int32_t tGetColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg);
+int32_t tsdbCmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t nOut,
+ int32_t *szOut, uint8_t **ppBuf);
+int32_t tsdbDecmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t szOut,
+ uint8_t **ppBuf);
+int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol, uint8_t **ppOut, int32_t nOut,
+ uint8_t **ppBuf);
+int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData,
+ uint8_t **ppBuf);
+int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck);
// tsdbMemTable ==============================================================================================
// SMemTable
-int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
-void tsdbMemTableDestroy(SMemTable *pMemTable);
-void tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData);
-void tsdbRefMemTable(SMemTable *pMemTable);
-void tsdbUnrefMemTable(SMemTable *pMemTable);
+int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
+void tsdbMemTableDestroy(SMemTable *pMemTable);
+STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid);
+void tsdbRefMemTable(SMemTable *pMemTable);
+void tsdbUnrefMemTable(SMemTable *pMemTable);
+SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable);
// STbDataIter
int32_t tsdbTbDataIterCreate(STbData *pTbData, TSDBKEY *pFrom, int8_t backward, STbDataIter **ppIter);
void *tsdbTbDataIterDestroy(STbDataIter *pIter);
@@ -223,33 +248,33 @@ int32_t tsdbFSUpsertDelFile(STsdbFS *pFS, SDelFile *pDelFile);
int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet);
int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync);
int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter);
-int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx, uint8_t **ppBuf);
-int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *pMapData, uint8_t **ppBuf, SBlockIdx *pBlockIdx);
-int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2,
- SBlockIdx *pBlockIdx, SBlock *pBlock, int8_t cmprAlg);
+int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx);
+int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *pMapData, SBlockIdx *pBlockIdx);
+int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL);
+int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
+ int8_t cmprAlg, int8_t toLast);
int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo);
// SDataFReader
int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet);
int32_t tsdbDataFReaderClose(SDataFReader **ppReader);
-int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppBuf);
-int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *pMapData, uint8_t **ppBuf);
-int32_t tsdbReadColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int16_t *aColId, int32_t nCol,
- SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2);
-int32_t tsdbReadBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, SBlockData *pBlockData,
- uint8_t **ppBuf1, uint8_t **ppBuf2);
-int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg, uint8_t **ppBuf);
+int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx);
+int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *pMapData);
+int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL);
+int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg);
+int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData);
+int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData);
// SDelFWriter
int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb);
int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync);
-int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, uint8_t **ppBuf, SDelIdx *pDelIdx);
-int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx, uint8_t **ppBuf);
+int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx);
+int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx);
int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter);
// SDelFReader
-int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb, uint8_t **ppBuf);
+int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb);
int32_t tsdbDelFReaderClose(SDelFReader **ppReader);
-int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData, uint8_t **ppBuf);
-int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf);
+int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData);
+int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx);
// tsdbRead.c ==============================================================================================
int32_t tsdbTakeReadSnap(STsdb *pTsdb, STsdbReadSnap **ppSnap);
void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap);
@@ -260,7 +285,7 @@ void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap);
// tsdbCache
int32_t tsdbOpenCache(STsdb *pTsdb);
-void tsdbCloseCache(SLRUCache *pCache);
+void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, STSRow *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **h);
@@ -277,13 +302,6 @@ size_t tsdbCacheGetCapacity(SVnode *pVnode);
int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema);
// structs =======================
-typedef struct {
- int minFid;
- int midFid;
- int maxFid;
- TSKEY minKey;
-} SRtn;
-
struct STsdbFS {
SDelFile *pDelFile;
SArray *aDFileSet; // SArray
@@ -298,6 +316,7 @@ struct STsdb {
SMemTable *imem;
STsdbFS fs;
SLRUCache *lruCache;
+ TdThreadMutex lruMutex;
};
struct TSDBKEY {
@@ -311,30 +330,23 @@ struct SMemSkipListNode {
SMemSkipListNode *forwards[0];
};
typedef struct SMemSkipList {
- uint32_t seed;
int64_t size;
+ uint32_t seed;
int8_t maxLevel;
int8_t level;
SMemSkipListNode *pHead;
SMemSkipListNode *pTail;
} SMemSkipList;
-struct SDelDataInfo {
- tb_uid_t suid;
- tb_uid_t uid;
-};
-
struct STbData {
tb_uid_t suid;
tb_uid_t uid;
TSKEY minKey;
TSKEY maxKey;
- int64_t minVersion;
- int64_t maxVersion;
- int32_t maxSkmVer;
SDelData *pHead;
SDelData *pTail;
SMemSkipList sl;
+ STbData *next;
};
struct SMemTable {
@@ -344,11 +356,13 @@ struct SMemTable {
volatile int32_t nRef;
TSKEY minKey;
TSKEY maxKey;
- int64_t minVersion;
- int64_t maxVersion;
int64_t nRow;
int64_t nDel;
- SArray *aTbData; // SArray
+ struct {
+ int32_t nTbData;
+ int32_t nBucket;
+ STbData **aBucket;
+ };
};
struct TSDBROW {
@@ -379,63 +393,51 @@ struct SMapData {
uint8_t *pData;
};
-typedef struct {
+struct SBlockCol {
int16_t cid;
int8_t type;
int8_t smaOn;
- int8_t flag; // HAS_NONE|HAS_NULL|HAS_VALUE
- int32_t offset;
- int32_t szBitmap; // bitmap size
- int32_t szOffset; // size of offset, only for variant-length data type
- int32_t szValue; // compressed column value size
+ int8_t flag; // HAS_NONE|HAS_NULL|HAS_VALUE
int32_t szOrigin; // original column value size (only save for variant data type)
-} SBlockCol;
+ int32_t szBitmap; // bitmap size, 0 only for flag == HAS_VAL
+ int32_t szOffset; // offset size, 0 only for non-variant-length type
+ int32_t szValue; // value size, 0 when flag == (HAS_NULL | HAS_NONE)
+ int32_t offset;
+};
-typedef struct {
- int32_t nRow;
- int8_t cmprAlg;
- int64_t offset; // block data offset
- int32_t szBlockCol; // SBlockCol size
- int32_t szVersion; // VERSION size
- int32_t szTSKEY; // TSKEY size
- int32_t szBlock; // total block size
- int64_t sOffset; // sma offset
- int32_t nSma; // sma size
-} SSubBlock;
+struct SBlockInfo {
+ int64_t offset; // block data offset
+ int32_t szBlock;
+ int32_t szKey;
+};
+
+struct SSmaInfo {
+ int64_t offset;
+ int32_t size;
+};
struct SBlock {
- TSDBKEY minKey;
- TSDBKEY maxKey;
- int64_t minVersion;
- int64_t maxVersion;
- int32_t nRow;
- int8_t last;
- int8_t hasDup;
- int8_t nSubBlock;
- SSubBlock aSubBlock[TSDB_MAX_SUBBLOCKS];
+ TSDBKEY minKey;
+ TSDBKEY maxKey;
+ int64_t minVer;
+ int64_t maxVer;
+ int32_t nRow;
+ int8_t hasDup;
+ int8_t nSubBlock;
+ SBlockInfo aSubBlock[TSDB_MAX_SUBBLOCKS];
+ SSmaInfo smaInfo;
};
struct SBlockL {
- struct {
- int64_t uid;
- int64_t version;
- TSKEY ts;
- } minKey;
- struct {
- int64_t uid;
- int64_t version;
- TSKEY ts;
- } maxKey;
- int64_t minVer;
- int64_t maxVer;
- int32_t nRow;
- int8_t cmprAlg;
- int64_t offset;
- int32_t szBlock;
- int32_t szBlockCol;
- int32_t szUid;
- int32_t szVer;
- int32_t szTSKEY;
+ int64_t suid;
+ int64_t minUid;
+ int64_t maxUid;
+ TSKEY minKey;
+ TSKEY maxKey;
+ int64_t minVer;
+ int64_t maxVer;
+ int32_t nRow;
+ SBlockInfo bInfo;
};
struct SColData {
@@ -450,10 +452,17 @@ struct SColData {
uint8_t *pData;
};
+// (SBlockData){.suid = 0, .uid = 0}: block data not initialized
+// (SBlockData){.suid = suid, .uid = uid}: block data for ONE child table int .data file
+// (SBlockData){.suid = suid, .uid = 0}: block data for N child tables int .last file
+// (SBlockData){.suid = 0, .uid = uid}: block data for 1 normal table int .last/.data file
struct SBlockData {
- int32_t nRow;
- int64_t *aVersion;
- TSKEY *aTSKEY;
+ int64_t suid; // 0 means normal table block data, otherwise child table block data
+ int64_t uid; // 0 means block data in .last file, otherwise in .data file
+ int32_t nRow; // number of rows
+ int64_t *aUid; // uids of each row, only exist in block data in .last file (uid == 0)
+ int64_t *aVersion; // versions of each row
+ TSKEY *aTSKEY; // timestamp of each row
SArray *aIdx; // SArray
SArray *aColData; // SArray
};
@@ -491,13 +500,18 @@ struct SDelIdx {
int64_t size;
};
-#pragma pack(push, 1)
-struct SBlockDataHdr {
+struct SDiskDataHdr {
uint32_t delimiter;
+ uint32_t fmtVer;
int64_t suid;
int64_t uid;
+ int32_t szUid;
+ int32_t szVer;
+ int32_t szKey;
+ int32_t szBlkCol;
+ int32_t nRow;
+ int8_t cmprAlg;
};
-#pragma pack(pop)
struct SDelFile {
volatile int32_t nRef;
@@ -527,6 +541,7 @@ struct SLastFile {
int64_t commitID;
int64_t size;
+ int64_t offset;
};
struct SSmaFile {
@@ -561,6 +576,8 @@ struct SDelFWriter {
STsdb *pTsdb;
SDelFile fDel;
TdFilePtr pWriteH;
+
+ uint8_t *aBuf[1];
};
struct SDataFWriter {
@@ -576,6 +593,8 @@ struct SDataFWriter {
SDataFile fData;
SLastFile fLast;
SSmaFile fSma;
+
+ uint8_t *aBuf[4];
};
struct STsdbReadSnap {
diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h
index dd1facb462..898e79928b 100644
--- a/source/dnode/vnode/src/inc/vnd.h
+++ b/source/dnode/vnode/src/inc/vnd.h
@@ -65,6 +65,7 @@ struct SVBufPool {
SVBufPool* next;
SVnode* pVnode;
volatile int32_t nRef;
+ TdThreadSpinlock lock;
int64_t size;
uint8_t* ptr;
SVBufPoolNode* pTail;
@@ -80,7 +81,7 @@ int32_t vnodeQueryOpen(SVnode* pVnode);
void vnodeQueryClose(SVnode* pVnode);
int32_t vnodeGetTableMeta(SVnode* pVnode, SRpcMsg* pMsg, bool direct);
int vnodeGetTableCfg(SVnode* pVnode, SRpcMsg* pMsg, bool direct);
-int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg);
+int32_t vnodeGetBatchMeta(SVnode* pVnode, SRpcMsg* pMsg);
// vnodeCommit.c
int32_t vnodeBegin(SVnode* pVnode);
@@ -98,6 +99,8 @@ void vnodeSyncStart(SVnode* pVnode);
void vnodeSyncClose(SVnode* pVnode);
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg);
bool vnodeIsLeader(SVnode* pVnode);
+bool vnodeIsReadyForRead(SVnode* pVnode);
+bool vnodeIsRoleLeader(SVnode* pVnode);
#ifdef __cplusplus
}
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 02c4129d6f..39c5f3873e 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -130,6 +130,14 @@ int metaTtlSmaller(SMeta* pMeta, uint64_t time, SArray* uidList);
int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg);
int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);
+typedef struct SMetaInfo {
+ int64_t uid;
+ int64_t suid;
+ int64_t version;
+ int32_t skmVer;
+} SMetaInfo;
+int32_t metaGetInfo(SMeta* pMeta, int64_t uid, SMetaInfo* pInfo);
+
// tsdb
int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg);
int tsdbClose(STsdb** pTsdb);
@@ -144,6 +152,7 @@ int32_t tsdbDeleteTableData(STsdb* pTsdb, int64_t version, tb_uid_t suid, tb
STsdbReader tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId,
void* pMemRef);
int32_t tsdbSetKeepCfg(STsdb* pTsdb, STsdbCfg* pCfg);
+int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list);
// tq
int tqInit();
@@ -154,13 +163,16 @@ int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId);
-int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen);
-int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen);
-int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen);
-int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver);
+// tq-mq
+int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
+int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
+int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
+int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
+int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
-int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen);
-int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen);
+// tq-stream
+int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
+int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* data, int64_t ver);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
@@ -169,15 +181,15 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
-int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list);
-SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
- const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq);
+SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
+ const char* stbFullName, SBatchDeleteReq* pDeleteReq);
// sma
int32_t smaInit();
void smaCleanUp();
int32_t smaOpen(SVnode* pVnode);
+int32_t smaPreClose(SSma* pSma);
int32_t smaClose(SSma* pSma);
int32_t smaBegin(SSma* pSma);
int32_t smaSyncPreCommit(SSma* pSma);
@@ -187,7 +199,7 @@ int32_t smaAsyncPreCommit(SSma* pSma);
int32_t smaAsyncCommit(SSma* pSma);
int32_t smaAsyncPostCommit(SSma* pSma);
int32_t smaDoRetention(SSma* pSma, int64_t now);
-int32_t smaProcessFetch(SSma* pSma, void* pMsg);
+int32_t smaProcessExec(SSma* pSma, void* pMsg);
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
@@ -311,6 +323,7 @@ struct SVnode {
TdThreadMutex lock;
bool blocked;
bool restored;
+ bool inClose;
tsem_t syncSem;
SQHandle* pQuery;
};
@@ -357,6 +370,7 @@ struct SSma {
void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);
enum {
+ SNAP_DATA_CFG = 0,
SNAP_DATA_META = 1,
SNAP_DATA_TSDB = 2,
SNAP_DATA_DEL = 3,
diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c
new file mode 100644
index 0000000000..b8cc9f0df2
--- /dev/null
+++ b/source/dnode/vnode/src/meta/metaCache.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+#include "meta.h"
+
+#define META_CACHE_BASE_BUCKET 1024
+
+// (uid , suid) : child table
+// (uid, 0) : normal table
+// (suid, suid) : super table
+typedef struct SMetaCacheEntry SMetaCacheEntry;
+struct SMetaCacheEntry {
+ SMetaCacheEntry* next;
+ SMetaInfo info;
+};
+
+struct SMetaCache {
+ int32_t nEntry;
+ int32_t nBucket;
+ SMetaCacheEntry** aBucket;
+};
+
+int32_t metaCacheOpen(SMeta* pMeta) {
+ int32_t code = 0;
+ SMetaCache* pCache = NULL;
+
+ pCache = (SMetaCache*)taosMemoryMalloc(sizeof(SMetaCache));
+ if (pCache == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ pCache->nEntry = 0;
+ pCache->nBucket = META_CACHE_BASE_BUCKET;
+ pCache->aBucket = (SMetaCacheEntry**)taosMemoryCalloc(pCache->nBucket, sizeof(SMetaCacheEntry*));
+ if (pCache->aBucket == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pCache);
+ goto _err;
+ }
+
+ pMeta->pCache = pCache;
+
+_exit:
+ return code;
+
+_err:
+ metaError("vgId:%d meta open cache failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
+ return code;
+}
+
+void metaCacheClose(SMeta* pMeta) {
+ if (pMeta->pCache) {
+ for (int32_t iBucket = 0; iBucket < pMeta->pCache->nBucket; iBucket++) {
+ SMetaCacheEntry* pEntry = pMeta->pCache->aBucket[iBucket];
+ while (pEntry) {
+ SMetaCacheEntry* tEntry = pEntry->next;
+ taosMemoryFree(pEntry);
+ pEntry = tEntry;
+ }
+ }
+ taosMemoryFree(pMeta->pCache->aBucket);
+ taosMemoryFree(pMeta->pCache);
+ pMeta->pCache = NULL;
+ }
+}
+
+static int32_t metaRehashCache(SMetaCache* pCache, int8_t expand) {
+ int32_t code = 0;
+ int32_t nBucket;
+
+ if (expand) {
+ nBucket = pCache->nBucket * 2;
+ } else {
+ nBucket = pCache->nBucket / 2;
+ }
+
+ SMetaCacheEntry** aBucket = (SMetaCacheEntry**)taosMemoryCalloc(nBucket, sizeof(SMetaCacheEntry*));
+ if (aBucket == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ // rehash
+ for (int32_t iBucket = 0; iBucket < pCache->nBucket; iBucket++) {
+ SMetaCacheEntry* pEntry = pCache->aBucket[iBucket];
+
+ while (pEntry) {
+ SMetaCacheEntry* pTEntry = pEntry->next;
+
+ pEntry->next = aBucket[TABS(pEntry->info.uid) % nBucket];
+ aBucket[TABS(pEntry->info.uid) % nBucket] = pEntry;
+
+ pEntry = pTEntry;
+ }
+ }
+
+ // final set
+ taosMemoryFree(pCache->aBucket);
+ pCache->nBucket = nBucket;
+ pCache->aBucket = aBucket;
+
+_exit:
+ return code;
+}
+
+int32_t metaCacheUpsert(SMeta* pMeta, SMetaInfo* pInfo) {
+ int32_t code = 0;
+
+ // ASSERT(metaIsWLocked(pMeta));
+
+ // search
+ SMetaCache* pCache = pMeta->pCache;
+ int32_t iBucket = TABS(pInfo->uid) % pCache->nBucket;
+ SMetaCacheEntry** ppEntry = &pCache->aBucket[iBucket];
+ while (*ppEntry && (*ppEntry)->info.uid != pInfo->uid) {
+ ppEntry = &(*ppEntry)->next;
+ }
+
+ if (*ppEntry) { // update
+ ASSERT(pInfo->suid == (*ppEntry)->info.suid);
+ if (pInfo->version > (*ppEntry)->info.version) {
+ (*ppEntry)->info.version = pInfo->version;
+ (*ppEntry)->info.skmVer = pInfo->skmVer;
+ }
+ } else { // insert
+ if (pCache->nEntry >= pCache->nBucket) {
+ code = metaRehashCache(pCache, 1);
+ if (code) goto _exit;
+
+ iBucket = TABS(pInfo->uid) % pCache->nBucket;
+ }
+
+ SMetaCacheEntry* pEntryNew = (SMetaCacheEntry*)taosMemoryMalloc(sizeof(*pEntryNew));
+ if (pEntryNew == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ pEntryNew->info = *pInfo;
+ pEntryNew->next = pCache->aBucket[iBucket];
+ pCache->aBucket[iBucket] = pEntryNew;
+ pCache->nEntry++;
+ }
+
+_exit:
+ return code;
+}
+
+int32_t metaCacheDrop(SMeta* pMeta, int64_t uid) {
+ int32_t code = 0;
+
+ SMetaCache* pCache = pMeta->pCache;
+ int32_t iBucket = TABS(uid) % pCache->nBucket;
+ SMetaCacheEntry** ppEntry = &pCache->aBucket[iBucket];
+ while (*ppEntry && (*ppEntry)->info.uid != uid) {
+ ppEntry = &(*ppEntry)->next;
+ }
+
+ SMetaCacheEntry* pEntry = *ppEntry;
+ if (pEntry) {
+ *ppEntry = pEntry->next;
+ taosMemoryFree(pEntry);
+ pCache->nEntry--;
+ if (pCache->nEntry < pCache->nBucket / 4 && pCache->nBucket > META_CACHE_BASE_BUCKET) {
+ code = metaRehashCache(pCache, 0);
+ if (code) goto _exit;
+ }
+ } else {
+ code = TSDB_CODE_NOT_FOUND;
+ }
+
+_exit:
+ return code;
+}
+
+int32_t metaCacheGet(SMeta* pMeta, int64_t uid, SMetaInfo* pInfo) {
+ int32_t code = 0;
+
+ SMetaCache* pCache = pMeta->pCache;
+ int32_t iBucket = TABS(uid) % pCache->nBucket;
+ SMetaCacheEntry* pEntry = pCache->aBucket[iBucket];
+
+ while (pEntry && pEntry->info.uid != uid) {
+ pEntry = pEntry->next;
+ }
+
+ if (pEntry) {
+ *pInfo = pEntry->info;
+ } else {
+ code = TSDB_CODE_NOT_FOUND;
+ }
+
+ return code;
+}
diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c
index 941d2c6d72..f8ecd17cb7 100644
--- a/source/dnode/vnode/src/meta/metaOpen.c
+++ b/source/dnode/vnode/src/meta/metaOpen.c
@@ -73,7 +73,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
}
// open pUidIdx
- ret = tdbTbOpen("uid.idx", sizeof(tb_uid_t), sizeof(int64_t), uidIdxKeyCmpr, pMeta->pEnv, &pMeta->pUidIdx);
+ ret = tdbTbOpen("uid.idx", sizeof(tb_uid_t), sizeof(SUidIdxVal), uidIdxKeyCmpr, pMeta->pEnv, &pMeta->pUidIdx);
if (ret < 0) {
metaError("vgId:%d, failed to open meta uid idx since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
@@ -87,7 +87,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
}
// open pCtbIdx
- ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), 0, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx);
+ ret = tdbTbOpen("ctb.idx", sizeof(SCtbIdxKey), -1, ctbIdxKeyCmpr, pMeta->pEnv, &pMeta->pCtbIdx);
if (ret < 0) {
metaError("vgId:%d, failed to open meta child table index since %s", TD_VID(pVnode), tstrerror(terrno));
goto _err;
@@ -143,6 +143,13 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) {
goto _err;
}
+ int32_t code = metaCacheOpen(pMeta);
+ if (code) {
+ terrno = code;
+ metaError("vgId:%d, failed to open meta cache since %s", TD_VID(pVnode), tstrerror(terrno));
+ goto _err;
+ }
+
metaDebug("vgId:%d, meta is opened", TD_VID(pVnode));
*ppMeta = pMeta;
@@ -169,6 +176,7 @@ _err:
int metaClose(SMeta *pMeta) {
if (pMeta) {
+ if (pMeta->pCache) metaCacheClose(pMeta);
if (pMeta->pIdx) metaCloseIdx(pMeta);
if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb);
if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx);
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index eed0ae5e14..805bc24d8c 100644
--- a/source/dnode/vnode/src/meta/metaQuery.c
+++ b/source/dnode/vnode/src/meta/metaQuery.c
@@ -53,6 +53,89 @@ _err:
return -1;
}
+// int metaGetTableEntryByUidTest(void* meta, SArray *uidList) {
+//
+// SArray* readerList = taosArrayInit(taosArrayGetSize(uidList), sizeof(SMetaReader));
+// SArray* uidVersion = taosArrayInit(taosArrayGetSize(uidList), sizeof(STbDbKey));
+// SMeta *pMeta = meta;
+// int64_t version;
+// SHashObj *uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+//
+// int64_t stt1 = taosGetTimestampUs();
+// for(int i = 0; i < taosArrayGetSize(uidList); i++) {
+// void* ppVal = NULL;
+// int vlen = 0;
+// uint64_t * uid = taosArrayGet(uidList, i);
+// // query uid.idx
+// if (tdbTbGet(pMeta->pUidIdx, uid, sizeof(*uid), &ppVal, &vlen) < 0) {
+// continue;
+// }
+// version = *(int64_t *)ppVal;
+//
+// STbDbKey tbDbKey = {.version = version, .uid = *uid};
+// taosArrayPush(uidVersion, &tbDbKey);
+// taosHashPut(uHash, uid, sizeof(int64_t), ppVal, sizeof(int64_t));
+// }
+// int64_t stt2 = taosGetTimestampUs();
+// qDebug("metaGetTableEntryByUidTest1 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt2-stt1);
+//
+// TBC *pCur = NULL;
+// tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
+// tdbTbcMoveToFirst(pCur);
+// void *pKey = NULL;
+// int kLen = 0;
+//
+// while(1){
+// SMetaReader pReader = {0};
+// int32_t ret = tdbTbcNext(pCur, &pKey, &kLen, &pReader.pBuf, &pReader.szBuf);
+// if (ret < 0) break;
+// STbDbKey *tmp = (STbDbKey*)pKey;
+// int64_t *ver = (int64_t*)taosHashGet(uHash, &tmp->uid, sizeof(int64_t));
+// if(ver == NULL || *ver != tmp->version) continue;
+// taosArrayPush(readerList, &pReader);
+// }
+// tdbTbcClose(pCur);
+//
+// taosArrayClear(readerList);
+// int64_t stt3 = taosGetTimestampUs();
+// qDebug("metaGetTableEntryByUidTest2 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt3-stt2);
+// for(int i = 0; i < taosArrayGetSize(uidVersion); i++) {
+// SMetaReader pReader = {0};
+//
+// STbDbKey *tbDbKey = taosArrayGet(uidVersion, i);
+// // query table.db
+// if (tdbTbGet(pMeta->pTbDb, tbDbKey, sizeof(STbDbKey), &pReader.pBuf, &pReader.szBuf) < 0) {
+// continue;
+// }
+// taosArrayPush(readerList, &pReader);
+// }
+// int64_t stt4 = taosGetTimestampUs();
+// qDebug("metaGetTableEntryByUidTest3 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt4-stt3);
+//
+// for(int i = 0; i < taosArrayGetSize(readerList); i++){
+// SMetaReader* pReader = taosArrayGet(readerList, i);
+// metaReaderInit(pReader, meta, 0);
+// // decode the entry
+// tDecoderInit(&pReader->coder, pReader->pBuf, pReader->szBuf);
+//
+// if (metaDecodeEntry(&pReader->coder, &pReader->me) < 0) {
+// }
+// metaReaderClear(pReader);
+// }
+// int64_t stt5 = taosGetTimestampUs();
+// qDebug("metaGetTableEntryByUidTest4 rows:%d, cost:%ld us", taosArrayGetSize(readerList), stt5-stt4);
+// return 0;
+// }
+
+bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid) {
+ // query uid.idx
+ if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) {
+ return false;
+ }
+
+ return true;
+}
+
int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
SMeta *pMeta = pReader->pMeta;
int64_t version;
@@ -63,7 +146,7 @@ int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
return -1;
}
- version = *(int64_t *)pReader->pBuf;
+ version = ((SUidIdxVal *)pReader->pBuf)[0].version;
return metaGetTableEntryByVersion(pReader, version, uid);
}
@@ -160,7 +243,7 @@ int metaTbCursorNext(SMTbCursor *pTbCur) {
tDecoderClear(&pTbCur->mr.coder);
- metaGetTableEntryByVersion(&pTbCur->mr, *(int64_t *)pTbCur->pVal, *(tb_uid_t *)pTbCur->pKey);
+ metaGetTableEntryByVersion(&pTbCur->mr, ((SUidIdxVal *)pTbCur->pVal)[0].version, *(tb_uid_t *)pTbCur->pKey);
if (pTbCur->mr.me.type == TSDB_SUPER_TABLE) {
continue;
}
@@ -185,7 +268,7 @@ _query:
goto _err;
}
- version = *(int64_t *)pData;
+ version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.uid = uid, .version = version}, sizeof(STbDbKey), &pData, &nData);
SMetaEntry me = {0};
@@ -429,18 +512,65 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver) {
}
int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sver, STSchema **ppTSchema) {
- int32_t code = 0;
- STSchema *pTSchema = NULL;
- SSkmDbKey skmDbKey = {.uid = suid ? suid : uid, .sver = sver};
+ int32_t code = 0;
+
void *pData = NULL;
int nData = 0;
+ SSkmDbKey skmDbKey;
+ if (sver <= 0) {
+ SMetaInfo info;
+ if (metaGetInfo(pMeta, suid ? suid : uid, &info) == 0) {
+ sver = info.skmVer;
+ } else {
+ TBC *pSkmDbC = NULL;
+ int c;
- // query
+ skmDbKey.uid = suid ? suid : uid;
+ skmDbKey.sver = INT32_MAX;
+
+ tdbTbcOpen(pMeta->pSkmDb, &pSkmDbC, NULL);
+ metaRLock(pMeta);
+
+ if (tdbTbcMoveTo(pSkmDbC, &skmDbKey, sizeof(skmDbKey), &c) < 0) {
+ metaULock(pMeta);
+ tdbTbcClose(pSkmDbC);
+ code = TSDB_CODE_NOT_FOUND;
+ goto _exit;
+ }
+
+ ASSERT(c);
+
+ if (c < 0) {
+ tdbTbcMoveToPrev(pSkmDbC);
+ }
+
+ const void *pKey = NULL;
+ int32_t nKey = 0;
+ tdbTbcGet(pSkmDbC, &pKey, &nKey, NULL, NULL);
+
+ if (((SSkmDbKey *)pKey)->uid != skmDbKey.uid) {
+ metaULock(pMeta);
+ tdbTbcClose(pSkmDbC);
+ code = TSDB_CODE_NOT_FOUND;
+ goto _exit;
+ }
+
+ sver = ((SSkmDbKey *)pKey)->sver;
+
+ metaULock(pMeta);
+ tdbTbcClose(pSkmDbC);
+ }
+ }
+
+ ASSERT(sver > 0);
+
+ skmDbKey.uid = suid ? suid : uid;
+ skmDbKey.sver = sver;
metaRLock(pMeta);
- if (tdbTbGet(pMeta->pSkmDb, &skmDbKey, sizeof(skmDbKey), &pData, &nData) < 0) {
- code = TSDB_CODE_NOT_FOUND;
+ if (tdbTbGet(pMeta->pSkmDb, &skmDbKey, sizeof(SSkmDbKey), &pData, &nData) < 0) {
metaULock(pMeta);
- goto _err;
+ code = TSDB_CODE_NOT_FOUND;
+ goto _exit;
}
metaULock(pMeta);
@@ -462,15 +592,13 @@ int32_t metaGetTbTSchemaEx(SMeta *pMeta, tb_uid_t suid, tb_uid_t uid, int32_t sv
SSchema *pSchema = pSchemaWrapper->pSchema + i;
tdAddColToSchema(&sb, pSchema->type, pSchema->flags, pSchema->colId, pSchema->bytes);
}
- pTSchema = tdGetSchemaFromBuilder(&sb);
+ STSchema *pTSchema = tdGetSchemaFromBuilder(&sb);
tdDestroyTSchemaBuilder(&sb);
*ppTSchema = pTSchema;
taosMemoryFree(pSchemaWrapper->pSchema);
- return code;
-_err:
- *ppTSchema = NULL;
+_exit:
return code;
}
@@ -749,9 +877,8 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) {
#endif
-const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) {
- ASSERT(pEntry->type == TSDB_CHILD_TABLE);
- STag *tag = (STag *)pEntry->ctbEntry.pTags;
+const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
+ STag *tag = (STag *)pTag;
if (type == TSDB_DATA_TYPE_JSON) {
return tag;
}
@@ -853,6 +980,9 @@ int32_t metaFilterTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
break;
}
}
+ if (p->suid != pKey->suid) {
+ break;
+ }
first = false;
if (p != NULL) {
int32_t cmp = (*param->filterFunc)(p->data, pKey->data, pKey->type);
@@ -888,3 +1018,75 @@ END:
return ret;
}
+
+int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHashObj *tags) {
+ SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid);
+
+ SHashObj *uHash = NULL;
+ size_t len = taosArrayGetSize(uidList); // len > 0 means there already have uids
+ if (len > 0) {
+ uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ for (int i = 0; i < len; i++) {
+ int64_t *uid = taosArrayGet(uidList, i);
+ taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i));
+ }
+ }
+ while (1) {
+ tb_uid_t id = metaCtbCursorNext(pCur);
+ if (id == 0) {
+ break;
+ }
+
+ if (len > 0 && taosHashGet(uHash, &id, sizeof(int64_t)) == NULL) {
+ continue;
+ } else if (len == 0) {
+ taosArrayPush(uidList, &id);
+ }
+
+ taosHashPut(tags, &id, sizeof(int64_t), pCur->pVal, pCur->vLen);
+ }
+
+ taosHashCleanup(uHash);
+ metaCloseCtbCursor(pCur);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t metaCacheGet(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo);
+
+int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo) {
+ int32_t code = 0;
+ void *pData = NULL;
+ int nData = 0;
+
+ metaRLock(pMeta);
+
+ // search cache
+ if (metaCacheGet(pMeta, uid, pInfo) == 0) {
+ metaULock(pMeta);
+ goto _exit;
+ }
+
+ // search TDB
+ if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) {
+ // not found
+ metaULock(pMeta);
+ code = TSDB_CODE_NOT_FOUND;
+ goto _exit;
+ }
+
+ metaULock(pMeta);
+
+ pInfo->uid = uid;
+ pInfo->suid = ((SUidIdxVal *)pData)->suid;
+ pInfo->version = ((SUidIdxVal *)pData)->version;
+ pInfo->skmVer = ((SUidIdxVal *)pData)->skmVer;
+
+ // upsert the cache
+ metaWLock(pMeta);
+ metaCacheUpsert(pMeta, pInfo);
+ metaULock(pMeta);
+
+_exit:
+ tdbFree(pData);
+ return code;
+}
diff --git a/source/dnode/vnode/src/meta/metaSma.c b/source/dnode/vnode/src/meta/metaSma.c
index 1e5b699fce..3ada7d1814 100644
--- a/source/dnode/vnode/src/meta/metaSma.c
+++ b/source/dnode/vnode/src/meta/metaSma.c
@@ -28,9 +28,9 @@ int32_t metaCreateTSma(SMeta *pMeta, int64_t version, SSmaCfg *pCfg) {
int vLen = 0;
const void *pKey = NULL;
const void *pVal = NULL;
- void * pBuf = NULL;
+ void *pBuf = NULL;
int32_t szBuf = 0;
- void * p = NULL;
+ void *p = NULL;
SMetaReader mr = {0};
// validate req
@@ -83,8 +83,8 @@ int32_t metaDropTSma(SMeta *pMeta, int64_t indexUid) {
static int metaSaveSmaToDB(SMeta *pMeta, const SMetaEntry *pME) {
STbDbKey tbDbKey;
- void * pKey = NULL;
- void * pVal = NULL;
+ void *pKey = NULL;
+ void *pVal = NULL;
int kLen = 0;
int vLen = 0;
SEncoder coder = {0};
@@ -130,7 +130,8 @@ _err:
}
static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) {
- return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &pME->version, sizeof(int64_t), &pMeta->txn);
+ SUidIdxVal uidIdxVal = {.suid = pME->smaEntry.tsma->indexUid, .version = pME->version, .skmVer = 0};
+ return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &uidIdxVal, sizeof(uidIdxVal), &pMeta->txn);
}
static int metaUpdateNameIdx(SMeta *pMeta, const SMetaEntry *pME) {
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 3e7fd9df2b..aa107ab253 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -27,6 +27,23 @@ static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME);
static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry);
static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type);
+static void metaGetEntryInfo(const SMetaEntry *pEntry, SMetaInfo *pInfo) {
+ pInfo->uid = pEntry->uid;
+ pInfo->version = pEntry->version;
+ if (pEntry->type == TSDB_SUPER_TABLE) {
+ pInfo->suid = pEntry->uid;
+ pInfo->skmVer = pEntry->stbEntry.schemaRow.version;
+ } else if (pEntry->type == TSDB_CHILD_TABLE) {
+ pInfo->suid = pEntry->ctbEntry.suid;
+ pInfo->skmVer = 0;
+ } else if (pEntry->type == TSDB_NORMAL_TABLE) {
+ pInfo->suid = 0;
+ pInfo->skmVer = pEntry->ntbEntry.schemaRow.version;
+ } else {
+ ASSERT(0);
+ }
+}
+
static int metaUpdateMetaRsp(tb_uid_t uid, char *tbName, SSchemaWrapper *pSchema, STableMetaRsp *pMetaRsp) {
pMetaRsp->pSchemas = taosMemoryMalloc(pSchema->nCols * sizeof(SSchema));
if (NULL == pMetaRsp->pSchemas) {
@@ -171,22 +188,22 @@ int metaCreateSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
void *pBuf = NULL;
int32_t szBuf = 0;
void *p = NULL;
- SMetaReader mr = {0};
// validate req
- metaReaderInit(&mr, pMeta, 0);
- if (metaGetTableEntryByName(&mr, pReq->name) == 0) {
-// TODO: just for pass case
-#if 0
- terrno = TSDB_CODE_TDB_STB_ALREADY_EXIST;
- metaReaderClear(&mr);
- return -1;
-#else
- metaReaderClear(&mr);
- return 0;
-#endif
+ void *pData = NULL;
+ int nData = 0;
+ if (tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData) == 0) {
+ tb_uid_t uid = *(tb_uid_t *)pData;
+ tdbFree(pData);
+ SMetaInfo info;
+ metaGetInfo(pMeta, uid, &info);
+ if (info.uid == info.suid) {
+ return 0;
+ } else {
+ terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST;
+ return -1;
+ }
}
- metaReaderClear(&mr);
// set structs
me.version = version;
@@ -265,8 +282,8 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq, SArray *tb
// drop super table
_drop_super_table:
tdbTbGet(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pData, &nData);
- tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = *(int64_t *)pData, .uid = pReq->suid}, sizeof(STbDbKey),
- &pMeta->txn);
+ tdbTbDelete(pMeta->pTbDb, &(STbDbKey){.version = ((SUidIdxVal *)pData)[0].version, .uid = pReq->suid},
+ sizeof(STbDbKey), &pMeta->txn);
tdbTbDelete(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pMeta->txn);
tdbTbDelete(pMeta->pUidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn);
tdbTbDelete(pMeta->pSuidIdx, &pReq->suid, sizeof(tb_uid_t), &pMeta->txn);
@@ -298,18 +315,18 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
tdbTbcClose(pUidIdxc);
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
- // ASSERT(0);
return -1;
}
ret = tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
if (ret < 0) {
+ tdbTbcClose(pUidIdxc);
+
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
- // ASSERT(0);
return -1;
}
- oversion = *(int64_t *)pData;
+ oversion = ((SUidIdxVal *)pData)[0].version;
tdbTbcOpen(pMeta->pTbDb, &pTbDbc, &pMeta->txn);
ret = tdbTbcMoveTo(pTbDbc, &((STbDbKey){.uid = pReq->suid, .version = oversion}), sizeof(STbDbKey), &c);
@@ -336,15 +353,11 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
metaSaveToSkmDb(pMeta, &nStbEntry);
}
- // if (oStbEntry.stbEntry.schemaTag.sver != pReq->schemaTag.sver) {
- // // change tag schema
- // }
-
// update table.db
metaSaveToTbDb(pMeta, &nStbEntry);
// update uid index
- tdbTbcUpsert(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &version, sizeof(version), 0);
+ metaUpdateUidIdx(pMeta, &nStbEntry);
if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf);
metaULock(pMeta);
@@ -503,7 +516,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
SDecoder dc = {0};
rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData);
- int64_t version = *(int64_t *)pData;
+ int64_t version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData);
@@ -517,7 +530,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
int tLen = 0;
if (tdbTbGet(pMeta->pUidIdx, &e.ctbEntry.suid, sizeof(tb_uid_t), &tData, &tLen) == 0) {
- version = *(int64_t *)tData;
+ version = ((SUidIdxVal *)tData)[0].version;
STbDbKey tbDbKey = {.uid = e.ctbEntry.suid, .version = version};
if (tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &tData, &tLen) == 0) {
SDecoder tdc = {0};
@@ -556,6 +569,8 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
--pMeta->pVnode->config.vndStats.numOfSTables;
}
+ metaCacheDrop(pMeta, uid);
+
tDecoderClear(&dc);
tdbFree(pData);
@@ -594,7 +609,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
ASSERT(c == 0);
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
- oversion = *(int64_t *)pData;
+ oversion = ((SUidIdxVal *)pData)[0].version;
// search table.db
TBC *pTbDbc = NULL;
@@ -708,7 +723,7 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
// save to table db
metaSaveToTbDb(pMeta, &entry);
- tdbTbcUpsert(pUidIdxc, &entry.uid, sizeof(tb_uid_t), &version, sizeof(version), 0);
+ metaUpdateUidIdx(pMeta, &entry);
metaSaveToSkmDb(pMeta, &entry);
@@ -764,7 +779,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
ASSERT(c == 0);
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
- oversion = *(int64_t *)pData;
+ oversion = ((SUidIdxVal *)pData)[0].version;
// search table.db
TBC *pTbDbc = NULL;
@@ -784,8 +799,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
/* get stbEntry*/
tdbTbGet(pMeta->pUidIdx, &ctbEntry.ctbEntry.suid, sizeof(tb_uid_t), &pVal, &nVal);
- tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = *(int64_t *)pVal}), sizeof(STbDbKey),
- (void **)&stbEntry.pBuf, &nVal);
+ tdbTbGet(pMeta->pTbDb, &((STbDbKey){.uid = ctbEntry.ctbEntry.suid, .version = ((SUidIdxVal *)pVal)[0].version}),
+ sizeof(STbDbKey), (void **)&stbEntry.pBuf, &nVal);
tdbFree(pVal);
tDecoderInit(&dc2, stbEntry.pBuf, nVal);
metaDecodeEntry(&dc2, &stbEntry);
@@ -859,12 +874,16 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
metaSaveToTbDb(pMeta, &ctbEntry);
// save to uid.idx
- tdbTbUpsert(pMeta->pUidIdx, &ctbEntry.uid, sizeof(tb_uid_t), &version, sizeof(version), &pMeta->txn);
+ metaUpdateUidIdx(pMeta, &ctbEntry);
if (iCol == 0) {
metaUpdateTagIdx(pMeta, &ctbEntry);
}
+ SCtbIdxKey ctbIdxKey = {.suid = ctbEntry.ctbEntry.suid, .uid = uid};
+ tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags,
+ ((STag *)(ctbEntry.ctbEntry.pTags))->len, &pMeta->txn);
+
tDecoderClear(&dc1);
tDecoderClear(&dc2);
if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
@@ -914,7 +933,7 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
ASSERT(c == 0);
tdbTbcGet(pUidIdxc, NULL, NULL, &pData, &nData);
- oversion = *(int64_t *)pData;
+ oversion = ((SUidIdxVal *)pData)[0].version;
// search table.db
TBC *pTbDbc = NULL;
@@ -959,7 +978,7 @@ static int metaUpdateTableOptions(SMeta *pMeta, int64_t version, SVAlterTbReq *p
// save to table db
metaSaveToTbDb(pMeta, &entry);
- tdbTbcUpsert(pUidIdxc, &entry.uid, sizeof(tb_uid_t), &version, sizeof(version), 0);
+ metaUpdateUidIdx(pMeta, &entry);
metaULock(pMeta);
tdbTbcClose(pTbDbc);
@@ -1042,7 +1061,14 @@ _err:
}
static int metaUpdateUidIdx(SMeta *pMeta, const SMetaEntry *pME) {
- return tdbTbInsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &pME->version, sizeof(int64_t), &pMeta->txn);
+ // upsert cache
+ SMetaInfo info;
+ metaGetEntryInfo(pME, &info);
+ metaCacheUpsert(pMeta, &info);
+
+ SUidIdxVal uidIdxVal = {.suid = info.suid, .version = info.version, .skmVer = info.skmVer};
+
+ return tdbTbUpsert(pMeta->pUidIdx, &pME->uid, sizeof(tb_uid_t), &uidIdxVal, sizeof(uidIdxVal), &pMeta->txn);
}
static int metaUpdateSuidIdx(SMeta *pMeta, const SMetaEntry *pME) {
@@ -1062,7 +1088,9 @@ static int metaUpdateTtlIdx(SMeta *pMeta, const SMetaEntry *pME) {
static int metaUpdateCtbIdx(SMeta *pMeta, const SMetaEntry *pME) {
SCtbIdxKey ctbIdxKey = {.suid = pME->ctbEntry.suid, .uid = pME->uid};
- return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), NULL, 0, &pMeta->txn);
+
+ return tdbTbInsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), pME->ctbEntry.pTags,
+ ((STag *)(pME->ctbEntry.pTags))->len, &pMeta->txn);
}
int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void *pTagData, int32_t nTagData, int8_t type, tb_uid_t uid,
@@ -1118,7 +1146,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
return -1;
}
tbDbKey.uid = pCtbEntry->ctbEntry.suid;
- tbDbKey.version = *(int64_t *)pData;
+ tbDbKey.version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &tbDbKey, sizeof(tbDbKey), &pData, &nData);
tDecoderInit(&dc, pData, nData);
diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c
index 373cfdfb47..ca5367f397 100644
--- a/source/dnode/vnode/src/sma/smaCommit.c
+++ b/source/dnode/vnode/src/sma/smaCommit.c
@@ -83,8 +83,7 @@ int32_t smaBegin(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
- SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
+ SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv);
int8_t rsmaTriggerStat =
atomic_val_compare_exchange_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED, TASK_TRIGGER_STAT_ACTIVE);
@@ -110,7 +109,7 @@ int32_t smaBegin(SSma *pSma) {
/**
* @brief pre-commit for rollup sma(sync commit).
* 1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED.
- * 2) wait all triggered fetch tasks finished
+ * 2) wait for all triggered fetch tasks to finish
* 3) perform persist task for qTaskInfo
*
* @param pSma
@@ -123,19 +122,19 @@ static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma) {
}
SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
+ SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
// step 1: set rsma stat paused
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
- // step 2: wait all triggered fetch tasks finished
+ // step 2: wait for all triggered fetch tasks to finish
int32_t nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
- smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma));
+ smaDebug("vgId:%d, rsma fetch tasks are all finished", SMA_VID(pSma));
break;
} else {
- smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma));
+ smaDebug("vgId:%d, rsma fetch tasks are not all finished yet", SMA_VID(pSma));
}
++nLoops;
if (nLoops > 1000) {
@@ -289,8 +288,7 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
- SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma);
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(SMA_ENV_STAT(pSmaEnv));
+ SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
// cleanup outdated qtaskinfo files
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
@@ -299,10 +297,9 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
}
/**
- * @brief Rsma async commit implementation
+ * @brief Rsma async commit implementation(only do some necessary light weighted task)
* 1) set rsma stat TASK_TRIGGER_STAT_PAUSED
* 2) Wait all running fetch task finish to fetch and put submitMsg into level 2/3 wQueue(blocking level 1 write)
- * 3)
*
* @param pSma
* @return int32_t
@@ -314,20 +311,22 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
+ SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
+ pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
+ ASSERT(pRSmaStat->commitAppliedVer > 0);
- // step 2: wait all triggered fetch tasks finished
+ // step 2: wait for all triggered fetch tasks to finish
int32_t nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
- smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma));
+ smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma));
break;
} else {
- smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma));
+ smaDebug("vgId:%d, rsma commit, fetch tasks are not all finished yet", SMA_VID(pSma));
}
++nLoops;
if (nLoops > 1000) {
@@ -336,29 +335,51 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
}
- // step 3: swap rsmaInfoHash and iRsmaInfoHash
- // lock
- taosWLockLatch(SMA_ENV_LOCK(pEnv));
-
- ASSERT(RSMA_INFO_HASH(pRSmaStat));
- ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat));
-
- RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat);
- RSMA_INFO_HASH(pRSmaStat) =
- taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
-
- if (!RSMA_INFO_HASH(pRSmaStat)) {
- // unlock
- taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
- smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr());
+ /**
+ * @brief step 3: consume the SubmitReq in buffer
+ * 1) This is high cost task and should not put in asyncPreCommit originally.
+ * 2) But, if put in asyncCommit, would trigger taskInfo cloning frequently.
+ */
+ if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_COMMIT) < 0) {
return TSDB_CODE_FAILED;
}
- // unlock
- taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+ smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId());
+ nLoops = 0;
+ while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
+ ++nLoops;
+ if (nLoops > 1000) {
+ sched_yield();
+ nLoops = 0;
+ }
+ }
+ smaInfo("vgId:%d, rsma commit, all items are consumed, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
+ if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) {
+ return TSDB_CODE_FAILED;
+ }
+ smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
- // step 4: others
- pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
+#if 0 // consuming task of qTaskInfo clone
+ // step 4: swap queue/qall and iQueue/iQall
+ // lock
+ // taosWLockLatch(SMA_ENV_LOCK(pEnv));
+
+ ASSERT(RSMA_INFO_HASH(pRSmaStat));
+
+ void *pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), NULL);
+
+ while (pIter) {
+ SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
+ TSWAP(pInfo->iQall, pInfo->qall);
+ TSWAP(pInfo->iQueue, pInfo->queue);
+ TSWAP(pInfo->iTaskInfo[0], pInfo->taskInfo[0]);
+ TSWAP(pInfo->iTaskInfo[1], pInfo->taskInfo[1]);
+ pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter);
+ }
+
+ // unlock
+ // taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+#endif
return TSDB_CODE_SUCCESS;
}
@@ -374,18 +395,20 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) {
if (!pSmaEnv) {
return TSDB_CODE_SUCCESS;
}
+#if 0
+ SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv);
- SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
-
- // perform persist task for qTaskInfo
- tdRSmaPersistExecImpl(pRSmaStat, RSMA_IMU_INFO_HASH(pRSmaStat));
+ // perform persist task for qTaskInfo operator
+ if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) {
+ return TSDB_CODE_FAILED;
+ }
+#endif
return TSDB_CODE_SUCCESS;
}
/**
- * @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsmaInfoHash not empty.
+ * @brief Migrate rsmaInfo from iRsmaInfo to rsmaInfo if rsma infoHash not empty.
*
* @param pSma
* @return int32_t
@@ -396,68 +419,66 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
- SSmaStat *pStat = SMA_ENV_STAT(pEnv);
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
+ SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
+ SArray *rsmaDeleted = NULL;
- // step 1: merge rsmaInfoHash and iRsmaInfoHash
+ // step 1: merge qTaskInfo and iQTaskInfo
// lock
- taosWLockLatch(SMA_ENV_LOCK(pEnv));
-#if 0
- if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) {
- // just switch the hash pointer if rsmaInfoHash is empty
- if (taosHashGetSize(RSMA_IMU_INFO_HASH(pRSmaStat)) > 0) {
- SHashObj *infoHash = RSMA_INFO_HASH(pRSmaStat);
- RSMA_INFO_HASH(pRSmaStat) = RSMA_IMU_INFO_HASH(pRSmaStat);
- RSMA_IMU_INFO_HASH(pRSmaStat) = infoHash;
- }
- } else {
-#endif
-#if 1
- void *pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL);
- while (pIter) {
- tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
+ // taosWLockLatch(SMA_ENV_LOCK(pEnv));
- if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) {
- SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
- if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
- int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
- if (refVal == 0) {
- tdFreeRSmaInfo(pSma, pRSmaInfo, true);
- smaDebug(
- "vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for "
- "table:%" PRIi64,
- SMA_VID(pSma), *pSuid);
- } else {
- smaDebug(
- "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
- "table:%" PRIi64,
- SMA_VID(pSma), refVal, *pSuid);
+ void *pIter = NULL;
+ while ((pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter))) {
+ tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
+ SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
+ if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
+ int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
+ if (refVal == 0) {
+ if (!rsmaDeleted) {
+ if ((rsmaDeleted = taosArrayInit(1, sizeof(tb_uid_t)))) {
+ taosArrayPush(rsmaDeleted, pSuid);
+ }
}
-
- pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
- continue;
+ } else {
+ smaDebug(
+ "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
+ "table:%" PRIi64,
+ SMA_VID(pSma), refVal, *pSuid);
+ }
+
+ continue;
+ }
+#if 0
+ if (pRSmaInfo->taskInfo[0]) {
+ if (pRSmaInfo->iTaskInfo[0]) {
+ SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pRSmaInfo->iTaskInfo[0];
+ tdFreeRSmaInfo(pSma, pRSmaInfo, false);
+ pRSmaInfo->iTaskInfo[0] = NULL;
}
- taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
- smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma),
- *pSuid);
} else {
- // free the resources
- SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
- tdFreeRSmaInfo(pSma, pRSmaInfo, false);
- smaDebug("vgId:%d, rsma async post commit, free rsma info since already COW for table:%" PRIi64, SMA_VID(pSma),
- *pSuid);
+ TSWAP(pRSmaInfo->taskInfo[0], pRSmaInfo->iTaskInfo[0]);
}
- pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter);
- }
+ taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
+ smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), *pSuid);
#endif
- // }
+ }
- taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat));
- RSMA_IMU_INFO_HASH(pRSmaStat) = NULL;
+ for (int32_t i = 0; i < taosArrayGetSize(rsmaDeleted); ++i) {
+ tb_uid_t *pSuid = taosArrayGet(rsmaDeleted, i);
+ void *pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t));
+ if ((pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
+ tdFreeRSmaInfo(pSma, pRSmaInfo, true);
+ smaDebug(
+ "vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for "
+ "table:%" PRIi64,
+ SMA_VID(pSma), *pSuid);
+ }
+ taosHashRemove(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t));
+ }
+ taosArrayDestroy(rsmaDeleted);
// unlock
- taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+ // taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
// step 2: cleanup outdated qtaskinfo files
tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c
index ccb6ad3a72..e3b83f9955 100644
--- a/source/dnode/vnode/src/sma/smaEnv.c
+++ b/source/dnode/vnode/src/sma/smaEnv.c
@@ -171,7 +171,7 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
if (!pRSmaInfo) return 0;
-
+
int ref = T_REF_INC(pRSmaInfo);
smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
return 0;
@@ -209,6 +209,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
SRSmaStat *pRSmaStat = (SRSmaStat *)(*pSmaStat);
pRSmaStat->pSma = (SSma *)pSma;
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_INIT);
+ tsem_init(&pRSmaStat->notEmpty, 0, 0);
// init smaMgmt
smaInit();
@@ -228,7 +229,6 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
RSMA_INFO_HASH(pRSmaStat) = taosHashInit(
RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (!RSMA_INFO_HASH(pRSmaStat)) {
- taosMemoryFreeClear(*pSmaStat);
return TSDB_CODE_FAILED;
}
} else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
@@ -262,10 +262,9 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
smaDebug("vgId:%d, destroy rsma stat %p", SMA_VID(pSma), pRSmaStat);
// step 1: set rsma trigger stat cancelled
atomic_store_8(RSMA_TRIGGER_STAT(pStat), TASK_TRIGGER_STAT_CANCELLED);
+ tsem_destroy(&(pStat->notEmpty));
// step 2: destroy the rsma info and associated fetch tasks
- // TODO: use taosHashSetFreeFp when taosHashSetFreeFp is ready.
-#if 1
if (taosHashGetSize(RSMA_INFO_HASH(pStat)) > 0) {
void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL);
while (infoHash) {
@@ -274,17 +273,16 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash);
}
}
-#endif
taosHashCleanup(RSMA_INFO_HASH(pStat));
- // step 3: wait all triggered fetch tasks finished
+ // step 3: wait for all triggered fetch tasks to finish
int32_t nLoops = 0;
while (1) {
if (T_REF_VAL_GET((SSmaStat *)pStat) == 0) {
- smaDebug("vgId:%d, rsma fetch tasks all finished", SMA_VID(pSma));
+ smaDebug("vgId:%d, rsma fetch tasks are all finished", SMA_VID(pSma));
break;
} else {
- smaDebug("vgId:%d, rsma fetch tasks not all finished yet", SMA_VID(pSma));
+ smaDebug("vgId:%d, rsma fetch tasks are not all finished yet", SMA_VID(pSma));
}
++nLoops;
if (nLoops > 1000) {
@@ -293,7 +291,7 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
}
}
- // step 4: free pStat
+ // step 5: free pStat
taosMemoryFreeClear(pStat);
}
}
@@ -318,9 +316,9 @@ void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) {
int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
if (pSmaStat) {
if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
- tdDestroyTSmaStat(SMA_TSMA_STAT(pSmaStat));
+ tdDestroyTSmaStat(SMA_STAT_TSMA(pSmaStat));
} else if (smaType == TSDB_SMA_TYPE_ROLLUP) {
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSmaStat);
+ SRSmaStat *pRSmaStat = &pSmaStat->rsmaStat;
int32_t vid = SMA_VID(pRSmaStat->pSma);
int64_t refId = RSMA_REF_ID(pRSmaStat);
if (taosRemoveRef(smaMgmt.rsetId, RSMA_REF_ID(pRSmaStat)) < 0) {
diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c
index 235fb1f941..e2710b26e3 100644
--- a/source/dnode/vnode/src/sma/smaOpen.c
+++ b/source/dnode/vnode/src/sma/smaOpen.c
@@ -146,6 +146,20 @@ int32_t smaClose(SSma *pSma) {
return 0;
}
+int32_t smaPreClose(SSma *pSma) {
+ if (pSma && VND_IS_RSMA(pSma->pVnode)) {
+ SSmaEnv *pEnv = NULL;
+ SRSmaStat *pStat = NULL;
+ if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv))) {
+ return 0;
+ }
+ for (int32_t i = 0; i < RSMA_EXECUTOR_MAX; ++i) {
+ tsem_post(&(pStat->notEmpty));
+ }
+ }
+ return 0;
+}
+
/**
* @brief rsma env restore
*
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index b7a2efd489..448b8ab508 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -15,8 +15,12 @@
#include "sma.h"
-#define RSMA_QTASKINFO_BUFSIZE 32768
-#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid
+#define RSMA_QTASKINFO_BUFSIZE (32768) // size
+#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid
+#define RSMA_QTASKEXEC_SMOOTH_SIZE (100) // cnt
+#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt
+#define RSMA_FETCH_DELAY_MAX (900000) // ms
+#define RSMA_FETCH_ACTIVE_MAX (1800) // ms
SSmaMgmt smaMgmt = {
.inited = 0,
@@ -27,19 +31,21 @@ SSmaMgmt smaMgmt = {
#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
+typedef struct SRSmaExecQItem SRSmaExecQItem;
static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo,
int8_t idx);
-static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, tb_uid_t suid,
- int8_t level);
+static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int32_t inputType, SRSmaInfo *pInfo,
+ ERsmaExecType type, int8_t level);
static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid);
static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
-static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
- int64_t suid, int8_t blkType);
+static void tdFreeRSmaSubmitItems(SArray *pItems);
+static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr);
+static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
+ int64_t suid);
static void tdRSmaFetchTrigger(void *param, void *tmrId);
-static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level);
static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile);
static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish);
static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter);
@@ -76,6 +82,11 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};
+struct SRSmaExecQItem {
+ void *pRSmaInfo;
+ void *qall;
+};
+
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@@ -139,6 +150,18 @@ void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) {
if (isDeepFree) {
taosMemoryFreeClear(pInfo->pTSchema);
}
+
+ if (isDeepFree) {
+ if (pInfo->queue) taosCloseQueue(pInfo->queue);
+ if (pInfo->qall) taosFreeQall(pInfo->qall);
+ if (pInfo->iQueue) taosCloseQueue(pInfo->iQueue);
+ if (pInfo->iQall) taosFreeQall(pInfo->iQall);
+ pInfo->queue = NULL;
+ pInfo->qall = NULL;
+ pInfo->iQueue = NULL;
+ pInfo->iQall = NULL;
+ }
+
taosMemoryFree(pInfo);
}
@@ -179,7 +202,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pRSmaInfo->taskInfo[i]) {
- if ((qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true) < 0)) {
+ if (((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true)) < 0)) {
tdReleaseRSmaInfo(pSma, pRSmaInfo);
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " level %d since %s", SMA_VID(pSma), *suid, i,
terrstr());
@@ -351,6 +374,19 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
goto _err;
}
pRSmaInfo->pTSchema = pTSchema;
+ if (!(pRSmaInfo->queue = taosOpenQueue())) {
+ goto _err;
+ }
+
+ if (!(pRSmaInfo->qall = taosAllocateQall())) {
+ goto _err;
+ }
+ if (!(pRSmaInfo->iQueue = taosOpenQueue())) {
+ goto _err;
+ }
+ if (!(pRSmaInfo->iQall = taosAllocateQall())) {
+ goto _err;
+ }
pRSmaInfo->suid = suid;
pRSmaInfo->refId = RSMA_REF_ID(pStat);
T_REF_INIT_VAL(pRSmaInfo, 1);
@@ -419,8 +455,7 @@ int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) {
return TSDB_CODE_SUCCESS;
}
- SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv);
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
+ SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pSmaEnv);
SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, pReq->suid);
@@ -528,6 +563,14 @@ void *tdUidStoreFree(STbUidStore *pStore) {
return NULL;
}
+/**
+ * @brief The SubmitReq for rsma L2/L3 is inserted by tsdbInsertData method directly while not by WriteQ, as the queue
+ * would be freed when close Vnode, thus lock should be used if with race condition.
+ * @param pTsdb
+ * @param version
+ * @param pReq
+ * @return int32_t
+ */
static int32_t tdProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) {
if (!pReq) {
terrno = TSDB_CODE_INVALID_PTR;
@@ -535,7 +578,7 @@ static int32_t tdProcessSubmitReq(STsdb *pTsdb, int64_t version, void *pReq) {
}
SSubmitReq *pSubmitReq = (SSubmitReq *)pReq;
-
+ // TODO: spin lock for race conditiond
if (tsdbInsertData(pTsdb, version, pSubmitReq, NULL) < 0) {
return TSDB_CODE_FAILED;
}
@@ -569,17 +612,6 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) {
return 0;
}
-static void tdDestroySDataBlockArray(SArray *pArray) {
- // TODO
-#if 0
- for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
- SSDataBlock *pDataBlock = taosArrayGet(pArray, i);
- blockDestroyInner(pDataBlock);
- }
-#endif
- taosArrayDestroy(pArray);
-}
-
/**
* @brief retention of rsma1/rsma2
*
@@ -604,8 +636,8 @@ _end:
return code;
}
-static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
- int64_t suid, int8_t blkType) {
+static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
+ int64_t suid) {
SArray *pResList = taosArrayInit(1, POINTER_BYTES);
if (pResList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -615,7 +647,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
while (1) {
uint64_t ts;
int32_t code = qExecTaskOpt(taskInfo, pResList, &ts);
- if (code < 0) {
+ if (code < 0) {
if (code == TSDB_CODE_QRY_IN_EXEC) {
break;
} else {
@@ -637,8 +669,7 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
} else {
smaDebug("vgId:%d, rsma %" PRIi8 " data fetched", SMA_VID(pSma), pItem->level);
}
-
-#if 1
+#if 0
char flag[10] = {0};
snprintf(flag, 10, "level %" PRIi8, pItem->level);
blockDebugShowDataBlocks(pResList, flag);
@@ -662,10 +693,9 @@ static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSm
goto _err;
}
taosMemoryFreeClear(pReq);
-
+
smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%" PRIi64,
SMA_VID(pSma), suid, pItem->level, output->info.version);
-
}
}
@@ -677,34 +707,115 @@ _err:
return TSDB_CODE_FAILED;
}
-static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, tb_uid_t suid,
- int8_t level) {
+/**
+ * @brief Copy msg to rsmaQueueBuffer for batch process
+ *
+ * @param pSma
+ * @param pMsg
+ * @param inputType
+ * @param pInfo
+ * @param suid
+ * @return int32_t
+ */
+static int32_t tdExecuteRSmaImplAsync(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo,
+ tb_uid_t suid) {
+ const SSubmitReq *pReq = (const SSubmitReq *)pMsg;
+
+ void *qItem = taosAllocateQitem(pReq->header.contLen, DEF_QITEM);
+ if (!qItem) {
+ return TSDB_CODE_FAILED;
+ }
+
+ memcpy(qItem, pMsg, pReq->header.contLen);
+
+ taosWriteQitem(pInfo->queue, qItem);
+
+ pInfo->lastRecv = taosGetTimestampMs();
+
+ SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
+
+ tsem_post(&(pRSmaStat->notEmpty));
+
+ int64_t nItems = atomic_fetch_add_64(&pRSmaStat->nBufItems, 1);
+
+ // smoothing consume
+ int32_t n = nItems / RSMA_QTASKEXEC_SMOOTH_SIZE;
+ if (n > 1) {
+ if (n > 10) {
+ n = 10;
+ }
+ taosMsleep(n << 3);
+ if (n > 5) {
+ smaWarn("vgId:%d, pInfo->queue itemSize:%d, memSize:%" PRIi64 ", sleep %d ms", SMA_VID(pSma),
+ taosQueueItemSize(pInfo->queue), taosQueueMemorySize(pInfo->queue), n << 3);
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tdRsmaPrintSubmitReq(SSma *pSma, SSubmitReq *pReq) {
+ SSubmitMsgIter msgIter = {0};
+ SSubmitBlkIter blkIter = {0};
+ STSRow *row = NULL;
+ if (tInitSubmitMsgIter(pReq, &msgIter) < 0) return -1;
+ while (true) {
+ SSubmitBlk *pBlock = NULL;
+ if (tGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
+ if (pBlock == NULL) break;
+ tInitSubmitBlkIter(&msgIter, pBlock, &blkIter);
+ while ((row = tGetSubmitBlkNext(&blkIter)) != NULL) {
+ smaDebug("vgId:%d, numOfRows:%d, suid:%" PRIi64 ", uid:%" PRIi64 ", version:%" PRIi64 ", ts:%" PRIi64,
+ SMA_VID(pSma), msgIter.numOfRows, msgIter.suid, msgIter.uid, pReq->version, row->ts);
+ }
+ }
+ return 0;
+}
+
+/**
+ * @brief sync mode
+ *
+ * @param pSma
+ * @param pMsg
+ * @param msgSize
+ * @param inputType
+ * @param pInfo
+ * @param type
+ * @param level
+ * @return int32_t
+ */
+static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int32_t inputType, SRSmaInfo *pInfo,
+ ERsmaExecType type, int8_t level) {
int32_t idx = level - 1;
- if (!pInfo || !RSMA_INFO_QTASK(pInfo, idx)) {
- smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
+
+ void *qTaskInfo = (type == RSMA_EXEC_COMMIT) ? RSMA_INFO_IQTASK(pInfo, idx) : RSMA_INFO_QTASK(pInfo, idx);
+ if (!qTaskInfo) {
+ smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level,
+ pInfo->suid);
return TSDB_CODE_SUCCESS;
}
if (!pInfo->pTSchema) {
- smaWarn("vgId:%d, no schema to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
+ smaWarn("vgId:%d, no schema to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, pInfo->suid);
return TSDB_CODE_FAILED;
}
smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level,
- RSMA_INFO_QTASK(pInfo, idx), suid);
+ RSMA_INFO_QTASK(pInfo, idx), pInfo->suid);
- if (qSetMultiStreamInput(RSMA_INFO_QTASK(pInfo, idx), pMsg, 1, inputType) < 0) { // INPUT__DATA_SUBMIT
+#if 0
+ for (int32_t i = 0; i < msgSize; ++i) {
+ SSubmitReq *pReq = *(SSubmitReq **)((char *)pMsg + i * sizeof(void *));
+ smaDebug("vgId:%d, [%d][%d] version %" PRIi64, SMA_VID(pSma), msgSize, i, pReq->version);
+ tdRsmaPrintSubmitReq(pSma, pReq);
+ }
+#endif
+ if (qSetMultiStreamInput(qTaskInfo, pMsg, msgSize, inputType) < 0) {
smaError("vgId:%d, rsma %" PRIi8 " qSetStreamInput failed since %s", SMA_VID(pSma), level, tstrerror(terrno));
return TSDB_CODE_FAILED;
}
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx);
- tdRSmaFetchAndSubmitResult(pSma, RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid,
- STREAM_INPUT__DATA_SUBMIT);
- atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
-
- if (smaMgmt.tmrHandle) {
- taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
- }
+ tdRSmaExecAndSubmitResult(pSma, qTaskInfo, pItem, pInfo->pTSchema, pInfo->suid);
return TSDB_CODE_SUCCESS;
}
@@ -732,58 +843,27 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
return NULL;
}
- taosRLockLatch(SMA_ENV_LOCK(pEnv));
+ // taosRLockLatch(SMA_ENV_LOCK(pEnv));
pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
- taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
- tdRefRSmaInfo(pSma, pRSmaInfo);
- taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
- return pRSmaInfo;
- }
- taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
-
- if (RSMA_COMMIT_STAT(pStat) == 0) { // return NULL if not in committing stat
- return NULL;
- }
-
- // clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat
- SRSmaInfo *pCowRSmaInfo = NULL;
- // lock
- taosWLockLatch(SMA_ENV_LOCK(pEnv));
- if (!(pCowRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)))) { // 2-phase lock
- void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
- if (iRSmaInfo) {
- SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo;
- if (pIRSmaInfo && !RSMA_INFO_IS_DEL(pIRSmaInfo)) {
- if (tdCloneRSmaInfo(pSma, &pCowRSmaInfo, pIRSmaInfo) < 0) {
- // unlock
- taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
- smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
- return NULL;
- }
- smaDebug("vgId:%d, clone rsma info succeed for suid:%" PRIu64, SMA_VID(pSma), suid);
- if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) {
- // unlock
- taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
- smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr());
- return NULL;
- }
+ if (!pRSmaInfo->taskInfo[0]) {
+ if (tdCloneRSmaInfo(pSma, pRSmaInfo) < 0) {
+ // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ return NULL;
}
}
- } else {
- pCowRSmaInfo = *(SRSmaInfo **)pCowRSmaInfo;
- ASSERT(!pCowRSmaInfo);
+ tdRefRSmaInfo(pSma, pRSmaInfo);
+ // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ ASSERT(pRSmaInfo->suid == suid);
+ return pRSmaInfo;
}
+ // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
- if (pCowRSmaInfo) {
- tdRefRSmaInfo(pSma, pCowRSmaInfo);
- }
- // unlock
- taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
- return pCowRSmaInfo;
+ return NULL;
}
static FORCE_INLINE void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
@@ -792,29 +872,85 @@ static FORCE_INLINE void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
}
}
-static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
+/**
+ * @brief async mode
+ *
+ * @param pSma
+ * @param pMsg
+ * @param inputType
+ * @param suid
+ * @return int32_t
+ */
+static int32_t tdExecuteRSmaAsync(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) {
SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, suid);
if (!pRSmaInfo) {
- smaError("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
+ smaDebug("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid);
return TSDB_CODE_SUCCESS;
}
if (inputType == STREAM_INPUT__DATA_SUBMIT) {
- tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo, suid, TSDB_RETENTION_L1);
- tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo, suid, TSDB_RETENTION_L2);
+ if (tdExecuteRSmaImplAsync(pSma, pMsg, inputType, pRSmaInfo, suid) < 0) {
+ tdReleaseRSmaInfo(pSma, pRSmaInfo);
+ return TSDB_CODE_FAILED;
+ }
+ if (smaMgmt.tmrHandle) {
+ SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pRSmaInfo, 0);
+ if (pItem->level > 0) {
+ atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
+ }
+ pItem = RSMA_INFO_ITEM(pRSmaInfo, 1);
+ if (pItem->level > 0) {
+ atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE);
+ }
+ }
+ } else {
+ ASSERT(0);
}
tdReleaseRSmaInfo(pSma, pRSmaInfo);
return TSDB_CODE_SUCCESS;
}
+static int32_t tdRSmaExecCheck(SSma *pSma) {
+ SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
+
+ if (atomic_load_8(&pRSmaStat->nExecutor) >= TMIN(RSMA_EXECUTOR_MAX, tsNumOfVnodeQueryThreads / 2)) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SRSmaExecMsg fetchMsg;
+ int32_t contLen = sizeof(SMsgHead);
+ void *pBuf = rpcMallocCont(0 + contLen);
+
+ ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
+ ((SMsgHead *)pBuf)->contLen = sizeof(SMsgHead);
+
+ SRpcMsg rpcMsg = {
+ .code = 0,
+ .msgType = TDMT_VND_EXEC_RSMA,
+ .pCont = pBuf,
+ .contLen = contLen,
+ };
+
+ if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) {
+ smaError("vgId:%d, failed to put rsma exec msg into query-queue since %s", SMA_VID(pSma), terrstr());
+ goto _err;
+ }
+
+ smaDebug("vgId:%d, success to put rsma fetch msg into query-queue", SMA_VID(pSma));
+
+ return TSDB_CODE_SUCCESS;
+_err:
+ return TSDB_CODE_FAILED;
+}
+
int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
if (!pEnv) {
// only applicable when rsma env exists
return TSDB_CODE_SUCCESS;
}
-
+ STbUidStore uidStore = {0};
SRetention *pRetention = SMA_RETENTION(pSma);
if (!RETENTION_VALID(pRetention + 1)) {
// return directly if retention level 1 is invalid
@@ -822,49 +958,75 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
}
if (inputType == STREAM_INPUT__DATA_SUBMIT) {
- STbUidStore uidStore = {0};
- tdFetchSubmitReqSuids(pMsg, &uidStore);
+ if (tdFetchSubmitReqSuids(pMsg, &uidStore) < 0) {
+ goto _err;
+ }
if (uidStore.suid != 0) {
- tdExecuteRSma(pSma, pMsg, inputType, uidStore.suid);
-
- void *pIter = taosHashIterate(uidStore.uidHash, NULL);
- while (pIter) {
- tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
- tdExecuteRSma(pSma, pMsg, inputType, *pTbSuid);
- pIter = taosHashIterate(uidStore.uidHash, pIter);
+ if (tdExecuteRSmaAsync(pSma, pMsg, inputType, uidStore.suid) < 0) {
+ goto _err;
}
- tdUidStoreDestory(&uidStore);
+ void *pIter = NULL;
+ while ((pIter = taosHashIterate(uidStore.uidHash, pIter))) {
+ tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
+ if (tdExecuteRSmaAsync(pSma, pMsg, inputType, *pTbSuid) < 0) {
+ goto _err;
+ }
+ }
+
+ if (tdRSmaExecCheck(pSma) < 0) {
+ goto _err;
+ }
}
}
+ tdUidStoreDestory(&uidStore);
return TSDB_CODE_SUCCESS;
+_err:
+ tdUidStoreDestory(&uidStore);
+ smaError("vgId:%d, failed to process rsma submit since: %s", SMA_VID(pSma), terrstr());
+ return TSDB_CODE_FAILED;
}
+/**
+ * @brief retrieve rsma meta and init
+ *
+ * @param pSma
+ * @param nTables number of tables of rsma
+ * @return int32_t
+ */
static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
- SVnode *pVnode = pSma->pVnode;
+ SVnode *pVnode = pSma->pVnode;
+ SArray *suidList = NULL;
+ STbUidStore uidStore = {0};
+ SMetaReader mr = {0};
- SArray *suidList = taosArrayInit(1, sizeof(tb_uid_t));
- if (tsdbGetStbIdList(SMA_META(pSma), 0, suidList) < 0) {
- taosArrayDestroy(suidList);
+ if (!(suidList = taosArrayInit(1, sizeof(tb_uid_t)))) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ if (vnodeGetStbIdList(pSma->pVnode, 0, suidList) < 0) {
smaError("vgId:%d, failed to restore rsma env since get stb id list error: %s", TD_VID(pVnode), terrstr());
- return TSDB_CODE_FAILED;
+ goto _err;
}
int64_t arrSize = taosArrayGetSize(suidList);
- if (nTables) {
- *nTables = arrSize;
- }
-
if (arrSize == 0) {
+ if (nTables) {
+ *nTables = 0;
+ }
taosArrayDestroy(suidList);
smaDebug("vgId:%d, no need to restore rsma env since empty stb id list", TD_VID(pVnode));
return TSDB_CODE_SUCCESS;
}
- SMetaReader mr = {0};
+ int64_t nRsmaTables = 0;
metaReaderInit(&mr, SMA_META(pSma), 0);
+ if (!(uidStore.tbUids = taosArrayInit(1024, sizeof(tb_uid_t)))) {
+ goto _err;
+ }
for (int64_t i = 0; i < arrSize; ++i) {
tb_uid_t suid = *(tb_uid_t *)taosArrayGet(suidList, i);
smaDebug("vgId:%d, rsma restore, suid is %" PRIi64, TD_VID(pVnode), suid);
@@ -877,6 +1039,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
ASSERT(mr.me.type == TSDB_SUPER_TABLE);
ASSERT(mr.me.uid == suid);
if (TABLE_IS_ROLLUP(mr.me.flags)) {
+ ++nRsmaTables;
SRSmaParam *param = &mr.me.stbEntry.rsmaParam;
for (int i = 0; i < TSDB_RETENTION_L2; ++i) {
smaDebug("vgId:%d, rsma restore, table:%" PRIi64 " level:%d, maxdelay:%" PRIi64 " watermark:%" PRIi64
@@ -887,17 +1050,40 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
smaError("vgId:%d, rsma restore env failed for %" PRIi64 " since %s", TD_VID(pVnode), suid, terrstr());
goto _err;
}
+
+ // reload all ctbUids for suid
+ uidStore.suid = suid;
+ if (vnodeGetCtbIdList(pVnode, suid, uidStore.tbUids) < 0) {
+ smaError("vgId:%d, rsma restore, get ctb idlist failed for %" PRIi64 " since %s", TD_VID(pVnode), suid,
+ terrstr());
+ goto _err;
+ }
+
+ if (tdUpdateTbUidList(pVnode->pSma, &uidStore) < 0) {
+ smaError("vgId:%d, rsma restore, update tb uid list failed for %" PRIi64 " since %s", TD_VID(pVnode), suid,
+ terrstr());
+ goto _err;
+ }
+
+ taosArrayClear(uidStore.tbUids);
+
smaDebug("vgId:%d, rsma restore env success for %" PRIi64, TD_VID(pVnode), suid);
}
}
metaReaderClear(&mr);
taosArrayDestroy(suidList);
+ tdUidStoreDestory(&uidStore);
+
+ if (nTables) {
+ *nTables = nRsmaTables;
+ }
return TSDB_CODE_SUCCESS;
_err:
metaReaderClear(&mr);
taosArrayDestroy(suidList);
+ tdUidStoreDestory(&uidStore);
return TSDB_CODE_FAILED;
}
@@ -1230,6 +1416,9 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
}
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+#if 0
+ qTaskInfo_t taskInfo = RSMA_INFO_IQTASK(pRSmaInfo, i);
+#endif
qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pRSmaInfo, i);
if (!taskInfo) {
smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d qTaskInfo is NULL", vid, pRSmaInfo->suid, i + 1);
@@ -1367,7 +1556,16 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
// async process
- tdRSmaFetchSend(pSma, pRSmaInfo, pItem->level);
+ pItem->fetchLevel = pItem->level;
+#if 0
+ SRSmaInfo *qInfo = tdAcquireRSmaInfoBySuid(pSma, pRSmaInfo->suid);
+ SRSmaInfoItem *qItem = RSMA_INFO_ITEM(qInfo, pItem->level - 1);
+ ASSERT(qItem->level == pItem->level);
+ ASSERT(qItem->fetchLevel == pItem->fetchLevel);
+#endif
+ tsem_post(&(pStat->notEmpty));
+ smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level,
+ pRSmaInfo->suid);
} break;
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused",
@@ -1382,127 +1580,268 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
} break;
default: {
- smaWarn("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is unknown",
- SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
+ smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is unknown",
+ SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
} break;
}
_end:
- // taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
}
+static void tdFreeRSmaSubmitItems(SArray *pItems) {
+ for (int32_t i = 0; i < taosArrayGetSize(pItems); ++i) {
+ taosFreeQitem(*(void **)taosArrayGet(pItems, i));
+ }
+}
+
/**
- * @brief put rsma fetch msg to fetch queue
+ * @brief fetch rsma result(consider the efficiency and functionality)
*
* @param pSma
* @param pInfo
- * @param level
+ * @param pSubmitArr
* @return int32_t
*/
-int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) {
- SRSmaFetchMsg fetchMsg = {.suid = pInfo->suid, .level = level};
- int32_t ret = 0;
- int32_t contLen = 0;
- SEncoder encoder = {0};
- tEncodeSize(tEncodeSRSmaFetchMsg, &fetchMsg, contLen, ret);
- if (ret < 0) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- tEncoderClear(&encoder);
- goto _err;
+static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr) {
+ SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
+ for (int8_t i = 1; i <= TSDB_RETENTION_L2; ++i) {
+ SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, i - 1);
+ if (pItem->fetchLevel) {
+ pItem->fetchLevel = 0;
+ qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, i - 1);
+ if (!taskInfo) {
+ continue;
+ }
+
+ int64_t curMs = taosGetTimestampMs();
+ if ((pItem->nSkipped * pItem->maxDelay) > RSMA_FETCH_DELAY_MAX) {
+ smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch executed",
+ SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay);
+ } else if (((curMs - pInfo->lastRecv) < RSMA_FETCH_ACTIVE_MAX)) {
+ ++pItem->nSkipped;
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch skipped ",
+ SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
+ continue;
+ } else {
+ smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch executed ",
+ SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
+ }
+
+ pItem->nSkipped = 0;
+
+ if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
+ goto _err;
+ }
+ if (tdRSmaExecAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid) < 0) {
+ tdCleanupStreamInputDataBlock(taskInfo);
+ goto _err;
+ }
+
+ tdCleanupStreamInputDataBlock(taskInfo);
+ smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch finished",
+ SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay);
+ } else {
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8
+ " maxDelay:%d, fetch not executed as fetch level is %" PRIi8,
+ SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay, pItem->fetchLevel);
+ }
}
- void *pBuf = rpcMallocCont(contLen + sizeof(SMsgHead));
- tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), contLen);
- if (tEncodeSRSmaFetchMsg(&encoder, &fetchMsg) < 0) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- tEncoderClear(&encoder);
- }
- tEncoderClear(&encoder);
-
- ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
- ((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead);
-
- SRpcMsg rpcMsg = {
- .code = 0,
- .msgType = TDMT_VND_FETCH_RSMA,
- .pCont = pBuf,
- .contLen = contLen,
- };
-
- if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) {
- smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s",
- SMA_VID(pSma), pInfo->suid, level, terrstr());
- goto _err;
- }
-
- smaDebug("vgId:%d, success to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma),
- pInfo->suid, level);
-
+_end:
+ tdReleaseRSmaInfo(pSma, pInfo);
return TSDB_CODE_SUCCESS;
_err:
+ tdReleaseRSmaInfo(pSma, pInfo);
+ return TSDB_CODE_FAILED;
+}
+
+static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SArray *pSubmitArr, ERsmaExecType type) {
+ taosArrayClear(pSubmitArr);
+ while (1) {
+ void *msg = NULL;
+ taosGetQitem(qall, (void **)&msg);
+ if (msg) {
+ if (taosArrayPush(pSubmitArr, &msg) < 0) {
+ tdFreeRSmaSubmitItems(pSubmitArr);
+ goto _err;
+ }
+ } else {
+ break;
+ }
+ }
+
+ int32_t size = taosArrayGetSize(pSubmitArr);
+ if (size > 0) {
+ for (int32_t i = 1; i <= TSDB_RETENTION_L2; ++i) {
+ if (tdExecuteRSmaImpl(pSma, pSubmitArr->pData, size, STREAM_INPUT__MERGED_SUBMIT, pInfo, type, i) < 0) {
+ tdFreeRSmaSubmitItems(pSubmitArr);
+ goto _err;
+ }
+ }
+ tdFreeRSmaSubmitItems(pSubmitArr);
+ }
+ return TSDB_CODE_SUCCESS;
+_err:
+ while (1) {
+ void *msg = NULL;
+ taosGetQitem(qall, (void **)&msg);
+ if (msg) {
+ taosFreeQitem(msg);
+ } else {
+ break;
+ }
+ }
return TSDB_CODE_FAILED;
}
/**
- * @brief fetch rsma data of level 2/3 and submit
+ * @brief
+ *
+ * @param pSma
+ * @param type
+ * @return int32_t
+ */
+int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
+ SVnode *pVnode = pSma->pVnode;
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
+ SHashObj *infoHash = NULL;
+ SArray *pSubmitArr = NULL;
+ bool isFetchAll = false;
+
+ if (!pRSmaStat || !(infoHash = RSMA_INFO_HASH(pRSmaStat))) {
+ terrno = TSDB_CODE_RSMA_INVALID_STAT;
+ goto _err;
+ }
+
+ if (!(pSubmitArr =
+ taosArrayInit(TMIN(RSMA_SUBMIT_BATCH_SIZE, atomic_load_64(&pRSmaStat->nBufItems)), POINTER_BYTES))) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ bool isBusy = false;
+ while (true) {
+ isBusy = false;
+ // step 1: rsma exec - consume data in buffer queue for all suids
+ if (type == RSMA_EXEC_OVERFLOW || type == RSMA_EXEC_COMMIT) {
+ void *pIter = taosHashIterate(infoHash, NULL); // infoHash has r/w lock
+ while (pIter) {
+ SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
+ int64_t itemSize = 0;
+ if ((itemSize = taosQueueItemSize(pInfo->queue)) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel ||
+ RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
+ smaDebug("vgId:%d, queueItemSize is %" PRIi64 " execType:%" PRIi8, SMA_VID(pSma), itemSize, type);
+ if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) {
+ taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock
+ int32_t qallItemSize = taosQallItemSize(pInfo->qall);
+ if (qallItemSize > 0) {
+ tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type);
+ }
+
+ if (type == RSMA_EXEC_OVERFLOW) {
+ tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
+ }
+
+ if (qallItemSize > 0) {
+ // subtract the item size after the task finished, commit should wait for all items be consumed
+ atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
+ isBusy = true;
+ }
+ ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
+ }
+ }
+ pIter = taosHashIterate(infoHash, pIter);
+ }
+ if (type == RSMA_EXEC_COMMIT) {
+ break;
+ }
+ }
+#if 0
+ else if (type == RSMA_EXEC_COMMIT) {
+ while (pIter) {
+ SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
+ if (taosQueueItemSize(pInfo->iQueue)) {
+ if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) {
+ taosReadAllQitems(pInfo->iQueue, pInfo->iQall); // queue has mutex lock
+ int32_t qallItemSize = taosQallItemSize(pInfo->iQall);
+ if (qallItemSize > 0) {
+ atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
+ nIdle = 0;
+
+ // batch exec
+ tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type);
+ }
+
+ // tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
+ ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
+ }
+ }
+ ASSERT(taosQueueItemSize(pInfo->iQueue) == 0);
+ pIter = taosHashIterate(infoHash, pIter);
+ }
+ break;
+ }
+#endif
+ else {
+ ASSERT(0);
+ }
+
+ if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) {
+ if (pVnode->inClose) {
+ break;
+ }
+ tsem_wait(&pRSmaStat->notEmpty);
+ if (pVnode->inClose && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
+ smaInfo("vgId:%d, exec task end, inClose:%d, nBufItems:%" PRIi64, SMA_VID(pSma), pVnode->inClose,
+ atomic_load_64(&pRSmaStat->nBufItems));
+ break;
+ }
+ }
+ } // end of while(true)
+
+_end:
+ taosArrayDestroy(pSubmitArr);
+ return TSDB_CODE_SUCCESS;
+_err:
+ taosArrayDestroy(pSubmitArr);
+ return TSDB_CODE_FAILED;
+}
+
+/**
+ * @brief exec rsma level 1data, fetch result of level 2/3 and submit
*
* @param pSma
* @param pMsg
* @return int32_t
*/
-int32_t smaProcessFetch(SSma *pSma, void *pMsg) {
- SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
- SRSmaFetchMsg req = {0};
- SDecoder decoder = {0};
- void *pBuf = NULL;
- SRSmaInfo *pInfo = NULL;
- SRSmaInfoItem *pItem = NULL;
+int32_t smaProcessExec(SSma *pSma, void *pMsg) {
+ SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
+ SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP;
goto _err;
}
+ smaDebug("vgId:%d, begin to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
- pBuf = POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead));
+ int8_t nOld = atomic_fetch_add_8(&pRSmaStat->nExecutor, 1);
- tDecoderInit(&decoder, pBuf, pRpcMsg->contLen);
- if (tDecodeSRSmaFetchMsg(&decoder, &req) < 0) {
- terrno = TSDB_CODE_INVALID_MSG;
- goto _err;
- }
-
- pInfo = tdAcquireRSmaInfoBySuid(pSma, req.suid);
- if (!pInfo) {
- if (terrno == TSDB_CODE_SUCCESS) {
- terrno = TSDB_CODE_RSMA_EMPTY_INFO;
+ if (nOld < TMIN(RSMA_EXECUTOR_MAX, tsNumOfVnodeQueryThreads / 2)) {
+ if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_OVERFLOW) < 0) {
+ goto _err;
}
- smaWarn("vgId:%d, failed to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8 " since %s", SMA_VID(pSma),
- req.suid, req.level, terrstr());
- goto _err;
+ } else {
+ atomic_fetch_sub_8(&pRSmaStat->nExecutor, 1);
}
- pItem = RSMA_INFO_ITEM(pInfo, req.level - 1);
-
- SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
- qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, req.level - 1);
- if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
- goto _err;
- }
- if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid, STREAM_INPUT__DATA_BLOCK) < 0) {
- goto _err;
- }
-
- tdCleanupStreamInputDataBlock(taskInfo);
-
- tdReleaseRSmaInfo(pSma, pInfo);
- tDecoderClear(&decoder);
- smaDebug("vgId:%d, success to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), req.suid,
- req.level);
+ smaDebug("vgId:%d, success to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
return TSDB_CODE_SUCCESS;
_err:
- tdReleaseRSmaInfo(pSma, pInfo);
- tDecoderClear(&decoder);
- smaError("vgId:%d, failed to process rsma fetch msg since %s", SMA_VID(pSma), terrstr());
+ atomic_fetch_sub_8(&pRSmaStat->nExecutor, 1);
+ smaError("vgId:%d, failed to process rsma exec msg by TID:%p since %s", SMA_VID(pSma), (void *)taosGetSelfPthreadId(),
+ terrstr());
return TSDB_CODE_FAILED;
}
diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c
index fbcd2af751..335c15a539 100644
--- a/source/dnode/vnode/src/sma/smaSnapshot.c
+++ b/source/dnode/vnode/src/sma/smaSnapshot.c
@@ -139,7 +139,6 @@ static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppBuf)
smaInfo("vgId:%d, vnode snapshot rsma read qtaskinfo, size:%" PRIi64, SMA_VID(pSma), size);
-
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppBuf);
pHdr->type = SNAP_DATA_QTASK;
pHdr->size = size;
@@ -279,7 +278,8 @@ int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWrit
TdFilePtr qTaskF = taosCreateFile(qTaskInfoFullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (!qTaskF) {
code = TAOS_SYSTEM_ERROR(errno);
- smaError("vgId:%d, rsma snapshot writer open %s failed since %s", TD_VID(pSma->pVnode), qTaskInfoFullName, tstrerror(code));
+ smaError("vgId:%d, rsma snapshot writer open %s failed since %s", TD_VID(pSma->pVnode), qTaskInfoFullName,
+ tstrerror(code));
goto _err;
}
qWriter->pWriteH = qTaskF;
@@ -309,7 +309,7 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
if (rollback) {
// TODO: rsma1/rsma2
// qtaskinfo
- if(pWriter->pQTaskFWriter) {
+ if (pWriter->pQTaskFWriter) {
taosRemoveFile(pWriter->pQTaskFWriter->fname);
}
} else {
diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
index f46d9dc29c..1687cd46a0 100644
--- a/source/dnode/vnode/src/sma/smaTimeRange.c
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -175,7 +175,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
}
tdRefSmaStat(pSma, pStat);
- pTsmaStat = SMA_TSMA_STAT(pStat);
+ pTsmaStat = SMA_STAT_TSMA(pStat);
if (!pTsmaStat->pTSma) {
STSma *pTSma = metaGetSmaInfoByIndex(SMA_META(pSma), indexUid);
@@ -201,9 +201,8 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
}
SBatchDeleteReq deleteReq;
- SSubmitReq *pSubmitReq =
- tdBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true, pTsmaStat->pTSma->dstTbUid,
- pTsmaStat->pTSma->dstTbName, pTsmaStat->pTSma->dstVgId, &deleteReq);
+ SSubmitReq *pSubmitReq = tqBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true,
+ pTsmaStat->pTSma->dstTbUid, pTsmaStat->pTSma->dstTbName, &deleteReq);
if (!pSubmitReq) {
smaError("vgId:%d, failed to gen submit blk while tsma insert for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
diff --git a/source/dnode/vnode/src/sma/smaUtil.c b/source/dnode/vnode/src/sma/smaUtil.c
index d9f38ffd09..d771797963 100644
--- a/source/dnode/vnode/src/sma/smaUtil.c
+++ b/source/dnode/vnode/src/sma/smaUtil.c
@@ -350,49 +350,48 @@ _err:
}
/**
- * @brief pTSchema is shared
+ * @brief Clone qTaskInfo of SRSmaInfo
*
* @param pSma
- * @param pDest
- * @param pSrc
+ * @param pInfo
* @return int32_t
*/
-int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc) {
- SVnode *pVnode = pSma->pVnode;
+int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
SRSmaParam *param = NULL;
- if (!pSrc) {
- *pDest = NULL;
+ if (!pInfo) {
return TSDB_CODE_SUCCESS;
}
SMetaReader mr = {0};
metaReaderInit(&mr, SMA_META(pSma), 0);
- smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid);
- if (metaGetTableEntryByUid(&mr, pSrc->suid) < 0) {
- smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid,
+ smaDebug("vgId:%d, rsma clone qTaskInfo for suid:%" PRIi64, SMA_VID(pSma), pInfo->suid);
+ if (metaGetTableEntryByUid(&mr, pInfo->suid) < 0) {
+ smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid,
terrstr());
goto _err;
}
ASSERT(mr.me.type == TSDB_SUPER_TABLE);
- ASSERT(mr.me.uid == pSrc->suid);
+ ASSERT(mr.me.uid == pInfo->suid);
if (TABLE_IS_ROLLUP(mr.me.flags)) {
param = &mr.me.stbEntry.rsmaParam;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
- if (tdCloneQTaskInfo(pSma, pSrc->iTaskInfo[i], pSrc->taskInfo[i], param, pSrc->suid, i) < 0) {
+ if (!pInfo->iTaskInfo[i]) {
+ continue;
+ }
+ if (tdCloneQTaskInfo(pSma, pInfo->taskInfo[i], pInfo->iTaskInfo[i], param, pInfo->suid, i) < 0) {
goto _err;
}
}
- smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid);
+ smaDebug("vgId:%d, rsma clone env success for %" PRIi64, SMA_VID(pSma), pInfo->suid);
+ } else {
+ terrno = TSDB_CODE_RSMA_INVALID_SCHEMA;
+ goto _err;
}
metaReaderClear(&mr);
-
- *pDest = pSrc; // pointer copy
-
return TSDB_CODE_SUCCESS;
_err:
- *pDest = NULL;
metaReaderClear(&mr);
- smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid, terrstr());
+ smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid, terrstr());
return TSDB_CODE_FAILED;
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 112543e340..c6bc8e6e59 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -60,11 +60,11 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
pTq->path = strdup(path);
pTq->pVnode = pVnode;
- pTq->handles = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
+ pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
- pTq->pushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
+ pTq->pPushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
- pTq->pAlterInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
+ pTq->pCheckInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
if (tqMetaOpen(pTq) < 0) {
ASSERT(0);
@@ -85,9 +85,9 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
void tqClose(STQ* pTq) {
if (pTq) {
tqOffsetClose(pTq->pOffsetStore);
- taosHashCleanup(pTq->handles);
- taosHashCleanup(pTq->pushMgr);
- taosHashCleanup(pTq->pAlterInfo);
+ taosHashCleanup(pTq->pHandle);
+ taosHashCleanup(pTq->pPushMgr);
+ taosHashCleanup(pTq->pCheckInfo);
taosMemoryFree(pTq->path);
tqMetaClose(pTq);
streamMetaClose(pTq->pStreamMeta);
@@ -183,7 +183,12 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
return 0;
}
-int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ver) {
+static FORCE_INLINE bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) {
+ return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG &&
+ pLeft->val.version <= pRight->val.version;
+}
+
+int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
STqOffset offset = {0};
SDecoder decoder;
tDecoderInit(&decoder, msg, msgLen);
@@ -199,19 +204,24 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve
} else if (offset.val.type == TMQ_OFFSET__LOG) {
tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:log) version:%" PRId64, offset.subKey,
TD_VID(pTq->pVnode), offset.val.version);
+ if (offset.val.version + 1 == version) {
+ offset.val.version += 1;
+ }
} else {
ASSERT(0);
}
- /*STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey);*/
- /*if (pOffset != NULL) {*/
- /*if (pOffset->val.type == TMQ_OFFSET__LOG && pOffset->val.version < offset.val.version) {*/
+ STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, offset.subKey);
+ if (pOffset != NULL && tqOffsetLessOrEqual(&offset, pOffset)) {
+ return 0;
+ }
+
if (tqOffsetWrite(pTq->pOffsetStore, &offset) < 0) {
ASSERT(0);
return -1;
}
if (offset.val.type == TMQ_OFFSET__LOG) {
- STqHandle* pHandle = taosHashGet(pTq->handles, offset.subKey, strlen(offset.subKey));
+ STqHandle* pHandle = taosHashGet(pTq->pHandle, offset.subKey, strlen(offset.subKey));
if (pHandle) {
if (walRefVer(pHandle->pRef, offset.val.version) < 0) {
ASSERT(0);
@@ -220,6 +230,8 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve
}
}
+ // rsp
+
/*}*/
/*}*/
@@ -229,15 +241,15 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen, int64_t ve
int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) {
void* pIter = NULL;
while (1) {
- pIter = taosHashIterate(pTq->pAlterInfo, pIter);
+ pIter = taosHashIterate(pTq->pCheckInfo, pIter);
if (pIter == NULL) break;
- SCheckAlterInfo* pCheck = (SCheckAlterInfo*)pIter;
+ STqCheckInfo* pCheck = (STqCheckInfo*)pIter;
if (pCheck->ntbUid == tbUid) {
int32_t sz = taosArrayGetSize(pCheck->colIdList);
for (int32_t i = 0; i < sz; i++) {
int16_t forbidColId = *(int16_t*)taosArrayGet(pCheck->colIdList, i);
if (forbidColId == colId) {
- taosHashCancelIterate(pTq->pAlterInfo, pIter);
+ taosHashCancelIterate(pTq->pCheckInfo, pIter);
return -1;
}
}
@@ -289,7 +301,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SWalCkHead* pCkHead = NULL;
// 1.find handle
- STqHandle* pHandle = taosHashGet(pTq->handles, pReq->subKey, strlen(pReq->subKey));
+ STqHandle* pHandle = taosHashGet(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
/*ASSERT(pHandle);*/
if (pHandle == NULL) {
tqError("tmq poll: no consumer handle for consumer:%" PRId64 ", in vgId:%d, subkey %s", consumerId,
@@ -478,10 +490,10 @@ OVER:
return code;
}
-int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
+int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg;
- int32_t code = taosHashRemove(pTq->handles, pReq->subKey, strlen(pReq->subKey));
+ int32_t code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
ASSERT(code == 0);
tqOffsetDelete(pTq->pOffsetStore, pReq->subKey);
@@ -492,27 +504,43 @@ int32_t tqProcessVgDeleteReq(STQ* pTq, char* msg, int32_t msgLen) {
return 0;
}
-int32_t tqProcessCheckAlterInfoReq(STQ* pTq, char* msg, int32_t msgLen) {
- SCheckAlterInfo info = {0};
- SDecoder decoder;
+int32_t tqProcessAddCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
+ STqCheckInfo info = {0};
+ SDecoder decoder;
tDecoderInit(&decoder, msg, msgLen);
- if (tDecodeSCheckAlterInfo(&decoder, &info) < 0) {
+ if (tDecodeSTqCheckInfo(&decoder, &info) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
tDecoderClear(&decoder);
- if (taosHashPut(pTq->pAlterInfo, info.topic, strlen(info.topic), &info, sizeof(SCheckAlterInfo)) < 0) {
+ if (taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo)) < 0) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+ if (tqMetaSaveCheckInfo(pTq, info.topic, msg, msgLen) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
return 0;
}
-int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
+int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
+ if (taosHashRemove(pTq->pCheckInfo, msg, strlen(msg)) < 0) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+ if (tqMetaDeleteCheckInfo(pTq, msg) < 0) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+ return 0;
+}
+
+int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SMqRebVgReq req = {0};
tDecodeSMqRebVgReq(msg, &req);
// todo lock
- STqHandle* pHandle = taosHashGet(pTq->handles, req.subKey, strlen(req.subKey));
+ STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
if (pHandle == NULL) {
if (req.oldConsumerId != -1) {
tqError("vgId:%d, build new consumer handle %s for consumer %d, but old consumerId is %ld", req.vgId, req.subKey,
@@ -579,7 +607,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
taosArrayDestroy(tbUidList);
}
- taosHashPut(pTq->handles, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
+ taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
// TODO
@@ -600,8 +628,6 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
}
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
- int32_t code = 0;
-
if (pTask->taskLevel == TASK_LEVEL__AGG) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
}
@@ -612,8 +638,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
pTask->outputQueue = streamQueueOpen();
if (pTask->inputQueue == NULL || pTask->outputQueue == NULL) {
- code = -1;
- goto FAIL;
+ return -1;
}
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
@@ -658,44 +683,14 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
streamSetupTrigger(pTask);
- tqInfo("deploy stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId,
+ tqInfo("expand stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId,
pTask->selfChildId);
-
-FAIL:
- if (pTask->inputQueue) streamQueueClose(pTask->inputQueue);
- if (pTask->outputQueue) streamQueueClose(pTask->outputQueue);
- // TODO free executor
- return code;
+ return 0;
}
-int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) {
+int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
//
- return streamMetaAddSerializedTask(pTq->pStreamMeta, msg, msgLen);
-#if 0
- SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
- if (pTask == NULL) {
- return -1;
- }
- SDecoder decoder;
- tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
- if (tDecodeSStreamTask(&decoder, pTask) < 0) {
- ASSERT(0);
- goto FAIL;
- }
- tDecoderClear(&decoder);
-
- if (tqExpandTask(pTq, pTask) < 0) {
- goto FAIL;
- }
-
- taosHashPut(pTq->pStreamTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*));
-
- return 0;
-
-FAIL:
- if (pTask) taosMemoryFree(pTask);
- return -1;
-#endif
+ return streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen);
}
int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
@@ -817,7 +812,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
}
}
-int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) {
+int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
return streamMetaRemoveTask(pTq->pStreamMeta, pReq->taskId);
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index 5709ad7c85..405bc669bd 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -43,6 +43,185 @@ int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
return 0;
}
+int32_t tqMetaOpen(STQ* pTq) {
+ if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaDB) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pExecStore) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ if (tdbTbOpen("tq.check.db", -1, -1, NULL, pTq->pMetaDB, &pTq->pCheckStore) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ if (tqMetaRestoreHandle(pTq) < 0) {
+ return -1;
+ }
+
+ if (tqMetaRestoreCheckInfo(pTq) < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t tqMetaClose(STQ* pTq) {
+ if (pTq->pExecStore) {
+ tdbTbClose(pTq->pExecStore);
+ }
+ if (pTq->pCheckStore) {
+ tdbTbClose(pTq->pCheckStore);
+ }
+ tdbClose(pTq->pMetaDB);
+ return 0;
+}
+
+int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_t vLen) {
+ TXN txn;
+ if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
+ return -1;
+ }
+
+ if (tdbBegin(pTq->pMetaDB, &txn) < 0) {
+ return -1;
+ }
+
+ if (tdbTbUpsert(pTq->pExecStore, key, strlen(key), value, vLen, &txn) < 0) {
+ return -1;
+ }
+
+ if (tdbCommit(pTq->pMetaDB, &txn) < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t tqMetaDeleteCheckInfo(STQ* pTq, const char* key) {
+ TXN txn;
+
+ if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbBegin(pTq->pMetaDB, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbTbDelete(pTq->pCheckStore, key, (int)strlen(key), &txn) < 0) {
+ /*ASSERT(0);*/
+ }
+
+ if (tdbCommit(pTq->pMetaDB, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ return 0;
+}
+
+int32_t tqMetaRestoreCheckInfo(STQ* pTq) {
+ TBC* pCur = NULL;
+ if (tdbTbcOpen(pTq->pCheckStore, &pCur, NULL) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ void* pKey = NULL;
+ int kLen = 0;
+ void* pVal = NULL;
+ int vLen = 0;
+ SDecoder decoder;
+
+ tdbTbcMoveToFirst(pCur);
+
+ while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) {
+ STqCheckInfo info;
+ tDecoderInit(&decoder, (uint8_t*)pVal, vLen);
+ if (tDecodeSTqCheckInfo(&decoder, &info) < 0) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+ tDecoderClear(&decoder);
+ if (taosHashPut(pTq->pCheckInfo, info.topic, strlen(info.topic), &info, sizeof(STqCheckInfo)) < 0) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+ }
+ tdbTbcClose(pCur);
+ return 0;
+}
+
+int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
+ int32_t code;
+ int32_t vlen;
+ tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);
+ ASSERT(code == 0);
+
+ tqDebug("tq save %s(%d) consumer %" PRId64 " vgId:%d", pHandle->subKey, strlen(pHandle->subKey), pHandle->consumerId,
+ TD_VID(pTq->pVnode));
+
+ void* buf = taosMemoryCalloc(1, vlen);
+ if (buf == NULL) {
+ ASSERT(0);
+ }
+
+ SEncoder encoder;
+ tEncoderInit(&encoder, buf, vlen);
+
+ if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
+ ASSERT(0);
+ }
+
+ TXN txn;
+
+ if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbBegin(pTq->pMetaDB, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbCommit(pTq->pMetaDB, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ tEncoderClear(&encoder);
+ taosMemoryFree(buf);
+ return 0;
+}
+
+int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
+ TXN txn;
+
+ if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbBegin(pTq->pMetaDB, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ if (tdbTbDelete(pTq->pExecStore, key, (int)strlen(key), &txn) < 0) {
+ /*ASSERT(0);*/
+ }
+
+ if (tdbCommit(pTq->pMetaDB, &txn) < 0) {
+ ASSERT(0);
+ }
+
+ return 0;
+}
+
int32_t tqMetaRestoreHandle(STQ* pTq) {
TBC* pCur = NULL;
if (tdbTbcOpen(pTq->pExecStore, &pCur, NULL) < 0) {
@@ -93,101 +272,10 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, TD_VID(pTq->pVnode));
- taosHashPut(pTq->handles, pKey, kLen, &handle, sizeof(STqHandle));
+ taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
}
tdbTbcClose(pCur);
return 0;
}
-int32_t tqMetaOpen(STQ* pTq) {
- if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaStore) < 0) {
- ASSERT(0);
- return -1;
- }
-
- if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaStore, &pTq->pExecStore) < 0) {
- ASSERT(0);
- return -1;
- }
-
- if (tqMetaRestoreHandle(pTq) < 0) {
- return -1;
- }
-
- return 0;
-}
-
-int32_t tqMetaClose(STQ* pTq) {
- if (pTq->pExecStore) {
- tdbTbClose(pTq->pExecStore);
- }
- tdbClose(pTq->pMetaStore);
- return 0;
-}
-
-int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle) {
- int32_t code;
- int32_t vlen;
- tEncodeSize(tEncodeSTqHandle, pHandle, vlen, code);
- ASSERT(code == 0);
-
- tqDebug("tq save %s(%d) consumer %" PRId64 " vgId:%d", pHandle->subKey, strlen(pHandle->subKey), pHandle->consumerId,
- TD_VID(pTq->pVnode));
-
- void* buf = taosMemoryCalloc(1, vlen);
- if (buf == NULL) {
- ASSERT(0);
- }
-
- SEncoder encoder;
- tEncoderInit(&encoder, buf, vlen);
-
- if (tEncodeSTqHandle(&encoder, pHandle) < 0) {
- ASSERT(0);
- }
-
- TXN txn;
-
- if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
- ASSERT(0);
- }
-
- if (tdbBegin(pTq->pMetaStore, &txn) < 0) {
- ASSERT(0);
- }
-
- if (tdbTbUpsert(pTq->pExecStore, key, (int)strlen(key), buf, vlen, &txn) < 0) {
- ASSERT(0);
- }
-
- if (tdbCommit(pTq->pMetaStore, &txn) < 0) {
- ASSERT(0);
- }
-
- tEncoderClear(&encoder);
- taosMemoryFree(buf);
- return 0;
-}
-
-int32_t tqMetaDeleteHandle(STQ* pTq, const char* key) {
- TXN txn;
-
- if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
- ASSERT(0);
- }
-
- if (tdbBegin(pTq->pMetaStore, &txn) < 0) {
- ASSERT(0);
- }
-
- if (tdbTbDelete(pTq->pExecStore, key, (int)strlen(key), &txn) < 0) {
- /*ASSERT(0);*/
- }
-
- if (tdbCommit(pTq->pMetaStore, &txn) < 0) {
- ASSERT(0);
- }
-
- return 0;
-}
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index ae3fef9b4b..ed7fa80c47 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -14,6 +14,7 @@
*/
#include "tq.h"
+#include "vnd.h"
#if 0
void tqTmrRspFunc(void* param, void* tmrId) {
@@ -212,9 +213,7 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
#endif
int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
- walApplyVer(pTq->pVnode->pWal, ver);
-
- if (msgType == TDMT_VND_SUBMIT) {
+ if (vnodeIsRoleLeader(pTq->pVnode) && msgType == TDMT_VND_SUBMIT) {
if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;
void* data = taosMemoryMalloc(msgLen);
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index 5d7814a045..e6a331f20e 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -341,7 +341,7 @@ FAIL:
return -1;
}
-void tqReaderSetColIdList(STqReader* pReadHandle, SArray* pColIdList) { pReadHandle->pColIdList = pColIdList; }
+void tqReaderSetColIdList(STqReader* pReader, SArray* pColIdList) { pReader->pColIdList = pColIdList; }
int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
if (pReader->tbIdHash) {
@@ -394,7 +394,7 @@ int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) {
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
void* pIter = NULL;
while (1) {
- pIter = taosHashIterate(pTq->handles, pIter);
+ pIter = taosHashIterate(pTq->pHandle, pIter);
if (pIter == NULL) break;
STqHandle* pExec = (STqHandle*)pIter;
if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index 42fb5c329d..522bf46aa1 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -17,7 +17,7 @@
#include "tmsg.h"
#include "tq.h"
-int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock,
+int32_t tqBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBlock* pDataBlock,
SBatchDeleteReq* deleteReq) {
ASSERT(pDataBlock->info.type == STREAM_DELETE_RESULT);
int32_t totRow = pDataBlock->info.rows;
@@ -25,8 +25,7 @@ int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl
SColumnInfoData* pGidCol = taosArrayGet(pDataBlock->pDataBlock, GROUPID_COLUMN_INDEX);
for (int32_t row = 0; row < totRow; row++) {
int64_t ts = *(int64_t*)colDataGetData(pTsCol, row);
- /*int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);*/
- int64_t groupId = 0;
+ int64_t groupId = *(int64_t*)colDataGetData(pGidCol, row);
char* name = buildCtbNameByGroupId(stbFullName, groupId);
tqDebug("stream delete msg: groupId :%ld, name: %s", groupId, name);
SMetaReader mr = {0};
@@ -49,8 +48,8 @@ int32_t tdBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl
return 0;
}
-SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb,
- int64_t suid, const char* stbFullName, int32_t vgId, SBatchDeleteReq* pDeleteReq) {
+SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb,
+ int64_t suid, const char* stbFullName, SBatchDeleteReq* pDeleteReq) {
SSubmitReq* ret = NULL;
SArray* schemaReqs = NULL;
SArray* schemaReqSz = NULL;
@@ -69,9 +68,10 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
int32_t padding1 = 0;
- void* padding2 = taosMemoryMalloc(1);
+ void* padding2 = NULL;
taosArrayPush(schemaReqSz, &padding1);
taosArrayPush(schemaReqs, &padding2);
+ continue;
}
STagVal tagVal = {
@@ -139,8 +139,7 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
continue;
}
int32_t rows = pDataBlock->info.rows;
- // TODO min
- int32_t rowSize = pDataBlock->info.rowSize;
+ /*int32_t rowSize = pDataBlock->info.rowSize;*/
int32_t maxLen = TD_ROW_MAX_BYTES_FROM_SCHEMA(pTSchema);
int32_t schemaLen = 0;
@@ -151,9 +150,8 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
}
// assign data
- // TODO
ret = rpcMallocCont(cap);
- ret->header.vgId = vgId;
+ ret->header.vgId = pVnode->config.vgId;
ret->length = sizeof(SSubmitReq);
ret->numOfBlocks = htonl(sz);
@@ -162,13 +160,12 @@ SSubmitReq* tdBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
pDeleteReq->suid = suid;
- tdBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
+ tqBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
continue;
}
blkHead->numOfRows = htonl(pDataBlock->info.rows);
blkHead->sversion = htonl(pTSchema->version);
- // TODO
blkHead->suid = htobe64(suid);
// uid is assigned by vnode
blkHead->uid = 0;
@@ -234,34 +231,35 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
ASSERT(pTask->tbSink.pTSchema);
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
- SSubmitReq* pReq = tdBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
- pTask->tbSink.stbFullName, pVnode->config.vgId, &deleteReq);
+ SSubmitReq* submitReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
+ pTask->tbSink.stbFullName, &deleteReq);
tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);
- int32_t code;
- int32_t len;
- tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
- if (code < 0) {
- //
- ASSERT(0);
- }
- SEncoder encoder;
- void* buf = rpcMallocCont(len + sizeof(SMsgHead));
- void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
- tEncoderInit(&encoder, abuf, len);
- tEncodeSBatchDeleteReq(&encoder, &deleteReq);
- tEncoderClear(&encoder);
-
- ((SMsgHead*)buf)->vgId = pVnode->config.vgId;
-
if (taosArrayGetSize(deleteReq.deleteReqs) != 0) {
+ int32_t code;
+ int32_t len;
+ tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
+ if (code < 0) {
+ //
+ ASSERT(0);
+ }
+ SEncoder encoder;
+ void* serializedDeleteReq = rpcMallocCont(len + sizeof(SMsgHead));
+ void* abuf = POINTER_SHIFT(serializedDeleteReq, sizeof(SMsgHead));
+ tEncoderInit(&encoder, abuf, len);
+ tEncodeSBatchDeleteReq(&encoder, &deleteReq);
+ tEncoderClear(&encoder);
+
+ ((SMsgHead*)serializedDeleteReq)->vgId = pVnode->config.vgId;
+
SRpcMsg msg = {
.msgType = TDMT_VND_BATCH_DEL,
- .pCont = buf,
+ .pCont = serializedDeleteReq,
.contLen = len + sizeof(SMsgHead),
};
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
+ rpcFreeCont(serializedDeleteReq);
tqDebug("failed to put into write-queue since %s", terrstr());
}
}
@@ -271,11 +269,12 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
// build write msg
SRpcMsg msg = {
.msgType = TDMT_VND_SUBMIT,
- .pCont = pReq,
- .contLen = ntohl(pReq->length),
+ .pCont = submitReq,
+ .contLen = ntohl(submitReq->length),
};
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
+ rpcFreeCont(submitReq);
tqDebug("failed to put into write-queue since %s", terrstr());
}
}
diff --git a/source/dnode/vnode/src/tq/tqSnapshot.c b/source/dnode/vnode/src/tq/tqSnapshot.c
index b4a7ce7737..c52e0e2c09 100644
--- a/source/dnode/vnode/src/tq/tqSnapshot.c
+++ b/source/dnode/vnode/src/tq/tqSnapshot.c
@@ -165,9 +165,9 @@ int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback) {
STQ* pTq = pWriter->pTq;
if (rollback) {
- ASSERT(0);
+ tdbAbort(pWriter->pTq->pMetaDB, &pWriter->txn);
} else {
- code = tdbCommit(pWriter->pTq->pMetaStore, &pWriter->txn);
+ code = tdbCommit(pWriter->pTq->pMetaDB, &pWriter->txn);
if (code) goto _err;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index f03b02af27..ed25783e9f 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -33,16 +33,21 @@ int32_t tsdbOpenCache(STsdb *pTsdb) {
taosLRUCacheSetStrictCapacity(pCache, true);
+ taosThreadMutexInit(&pTsdb->lruMutex, NULL);
+
_err:
pTsdb->lruCache = pCache;
return code;
}
-void tsdbCloseCache(SLRUCache *pCache) {
+void tsdbCloseCache(STsdb *pTsdb) {
+ SLRUCache *pCache = pTsdb->lruCache;
if (pCache) {
taosLRUCacheEraseUnrefEntries(pCache);
taosLRUCacheCleanup(pCache);
+
+ taosThreadMutexDestroy(&pTsdb->lruMutex);
}
}
@@ -261,14 +266,14 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
}
for (++iCol; iCol < nCol; ++iCol) {
- SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
- if (keyTs >= tTsVal->ts) {
- SColVal *tColVal = &tTsVal->colVal;
+ SLastCol *tTsVal1 = (SLastCol *)taosArrayGet(pLast, iCol);
+ if (keyTs >= tTsVal1->ts) {
+ SColVal *tColVal = &tTsVal1->colVal;
SColVal colVal = {0};
tTSRowGetVal(row, pTSchema, iCol, &colVal);
if (colVal.isNone || colVal.isNull) {
- if (keyTs == tTsVal->ts && !tColVal->isNone && !tColVal->isNull) {
+ if (keyTs == tTsVal1->ts && !tColVal->isNone && !tColVal->isNull) {
invalidate = true;
break;
@@ -279,6 +284,7 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
}
}
+ _invalidate:
taosMemoryFreeClear(pTSchema);
taosLRUCacheRelease(pCache, h, invalidate);
@@ -317,7 +323,7 @@ static int32_t getTableDelDataFromDelIdx(SDelFReader *pDelReader, SDelIdx *pDelI
int32_t code = 0;
if (pDelIdx) {
- code = tsdbReadDelData(pDelReader, pDelIdx, aDelData, NULL);
+ code = tsdbReadDelData(pDelReader, pDelIdx, aDelData);
}
return code;
@@ -388,8 +394,7 @@ static int32_t getTableDelIdx(SDelFReader *pDelFReader, tb_uid_t suid, tb_uid_t
SDelIdx idx = {.suid = suid, .uid = uid};
// tMapDataReset(&delIdxMap);
- // code = tsdbReadDelIdx(pDelFReader, &delIdxMap, NULL);
- code = tsdbReadDelIdx(pDelFReader, pDelIdxArray, NULL);
+ code = tsdbReadDelIdx(pDelFReader, pDelIdxArray);
if (code) goto _err;
// code = tMapDataSearch(&delIdxMap, &idx, tGetDelIdx, tCmprDelIdx, pDelIdx);
@@ -405,6 +410,178 @@ _err:
return code;
}
+typedef enum {
+ SFSLASTNEXTROW_FS,
+ SFSLASTNEXTROW_FILESET,
+ SFSLASTNEXTROW_BLOCKDATA,
+ SFSLASTNEXTROW_BLOCKROW
+} SFSLASTNEXTROWSTATES;
+
+typedef struct {
+ SFSLASTNEXTROWSTATES state; // [input]
+ STsdb *pTsdb; // [input]
+ SBlockIdx *pBlockIdxExp; // [input]
+ STSchema *pTSchema; // [input]
+ int32_t nFileSet;
+ int32_t iFileSet;
+ SArray *aDFileSet;
+ SDataFReader *pDataFReader;
+ SArray *aBlockL;
+ SBlockL *pBlockL;
+ SBlockData *pBlockDataL;
+ SBlockData blockDataL;
+ int32_t nRow;
+ int32_t iRow;
+ TSDBROW row;
+ /*
+ SArray *aBlockIdx;
+ SBlockIdx *pBlockIdx;
+ SMapData blockMap;
+ int32_t nBlock;
+ int32_t iBlock;
+ SBlock block;
+ */
+} SFSLastNextRowIter;
+
+static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
+ SFSLastNextRowIter *state = (SFSLastNextRowIter *)iter;
+ int32_t code = 0;
+
+ switch (state->state) {
+ case SFSLASTNEXTROW_FS:
+ // state->aDFileSet = state->pTsdb->pFS->cState->aDFileSet;
+ state->nFileSet = taosArrayGetSize(state->aDFileSet);
+ state->iFileSet = state->nFileSet;
+
+ state->pBlockDataL = NULL;
+
+ case SFSLASTNEXTROW_FILESET: {
+ SDFileSet *pFileSet = NULL;
+ _next_fileset:
+ if (--state->iFileSet >= 0) {
+ pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet);
+ } else {
+ if (state->pBlockDataL) {
+ tBlockDataDestroy(state->pBlockDataL, 1);
+ state->pBlockDataL = NULL;
+ }
+
+ *ppRow = NULL;
+ return code;
+ }
+
+ code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet);
+ if (code) goto _err;
+
+ if (!state->aBlockL) {
+ state->aBlockL = taosArrayInit(0, sizeof(SBlockIdx));
+ } else {
+ taosArrayClear(state->aBlockL);
+ }
+
+ code = tsdbReadBlockL(state->pDataFReader, state->aBlockL);
+ if (code) goto _err;
+
+ // SBlockL *pBlockL = (SBlockL *)taosArrayGet(state->aBlockL, state->iBlockL);
+
+ state->pBlockL = taosArraySearch(state->aBlockL, state->pBlockIdxExp, tCmprBlockL, TD_EQ);
+ if (!state->pBlockL) {
+ goto _next_fileset;
+ }
+
+ int64_t suid = state->pBlockL->suid;
+ int64_t uid = state->pBlockL->maxUid;
+
+ if (!state->pBlockDataL) {
+ state->pBlockDataL = &state->blockDataL;
+ }
+ code = tBlockDataInit(state->pBlockDataL, suid, suid ? 0 : uid, state->pTSchema);
+ if (code) goto _err;
+ }
+ case SFSLASTNEXTROW_BLOCKDATA:
+ code = tsdbReadLastBlock(state->pDataFReader, state->pBlockL, state->pBlockDataL);
+ if (code) goto _err;
+
+ state->nRow = state->blockDataL.nRow;
+ state->iRow = state->nRow - 1;
+
+ if (!state->pBlockDataL->uid) {
+ while (state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) {
+ --state->iRow;
+ }
+ }
+
+ state->state = SFSLASTNEXTROW_BLOCKROW;
+ case SFSLASTNEXTROW_BLOCKROW:
+ if (state->pBlockDataL->uid) {
+ if (state->iRow >= 0) {
+ state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow);
+ *ppRow = &state->row;
+
+ if (--state->iRow < 0) {
+ state->state = SFSLASTNEXTROW_FILESET;
+ }
+ }
+ } else {
+ if (state->iRow >= 0 && state->pBlockIdxExp->uid == state->pBlockDataL->aUid[state->iRow]) {
+ state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow);
+ *ppRow = &state->row;
+
+ if (--state->iRow < 0 || state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) {
+ state->state = SFSLASTNEXTROW_FILESET;
+ }
+ }
+ }
+
+ return code;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+_err:
+ if (state->pDataFReader) {
+ tsdbDataFReaderClose(&state->pDataFReader);
+ state->pDataFReader = NULL;
+ }
+ if (state->aBlockL) {
+ taosArrayDestroy(state->aBlockL);
+ state->aBlockL = NULL;
+ }
+ if (state->pBlockDataL) {
+ tBlockDataDestroy(state->pBlockDataL, 1);
+ state->pBlockDataL = NULL;
+ }
+
+ *ppRow = NULL;
+
+ return code;
+}
+
+int32_t clearNextRowFromFSLast(void *iter) {
+ SFSLastNextRowIter *state = (SFSLastNextRowIter *)iter;
+ int32_t code = 0;
+
+ if (!state) {
+ return code;
+ }
+
+ if (state->pDataFReader) {
+ tsdbDataFReaderClose(&state->pDataFReader);
+ state->pDataFReader = NULL;
+ }
+ if (state->aBlockL) {
+ taosArrayDestroy(state->aBlockL);
+ state->aBlockL = NULL;
+ }
+ if (state->pBlockDataL) {
+ tBlockDataDestroy(state->pBlockDataL, 1);
+ state->pBlockDataL = NULL;
+ }
+
+ return code;
+}
+
typedef enum SFSNEXTROWSTATES {
SFSNEXTROW_FS,
SFSNEXTROW_FILESET,
@@ -451,9 +628,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
if (--state->iFileSet >= 0) {
pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet);
} else {
- // tBlockDataClear(&state->blockData, 1);
+ // tBlockDataDestroy(&state->blockData, 1);
if (state->pBlockData) {
- tBlockDataClear(state->pBlockData, 1);
+ tBlockDataDestroy(state->pBlockData, 1);
state->pBlockData = NULL;
}
@@ -465,13 +642,12 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
if (code) goto _err;
// tMapDataReset(&state->blockIdxMap);
- // code = tsdbReadBlockIdx(state->pDataFReader, &state->blockIdxMap, NULL);
if (!state->aBlockIdx) {
state->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
} else {
taosArrayClear(state->aBlockIdx);
}
- code = tsdbReadBlockIdx(state->pDataFReader, state->aBlockIdx, NULL);
+ code = tsdbReadBlockIdx(state->pDataFReader, state->aBlockIdx);
if (code) goto _err;
/* if (state->pBlockIdx) { */
@@ -487,8 +663,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
tMapDataReset(&state->blockMap);
- code = tsdbReadBlock(state->pDataFReader, state->pBlockIdx, &state->blockMap, NULL);
- /* code = tsdbReadBlock(state->pDataFReader, &state->blockIdx, &state->blockMap, NULL); */
+ code = tsdbReadBlock(state->pDataFReader, state->pBlockIdx, &state->blockMap);
if (code) goto _err;
state->nBlock = state->blockMap.nItem;
@@ -497,7 +672,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
if (!state->pBlockData) {
state->pBlockData = &state->blockData;
- tBlockDataInit(&state->blockData);
+ tBlockDataCreate(&state->blockData);
}
}
case SFSNEXTROW_BLOCKDATA:
@@ -510,7 +685,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetBlock);
/* code = tsdbReadBlockData(state->pDataFReader, &state->blockIdx, &block, &state->blockData, NULL, NULL); */
- code = tsdbReadBlockData(state->pDataFReader, state->pBlockIdx, &block, state->pBlockData, NULL, NULL);
+ code = tsdbReadDataBlock(state->pDataFReader, &block, state->pBlockData);
if (code) goto _err;
state->nRow = state->blockData.nRow;
@@ -555,8 +730,8 @@ _err:
state->aBlockIdx = NULL;
}
if (state->pBlockData) {
- // tBlockDataClear(&state->blockData, 1);
- tBlockDataClear(state->pBlockData, 1);
+ // tBlockDataDestroy(&state->blockData, 1);
+ tBlockDataDestroy(state->pBlockData, 1);
state->pBlockData = NULL;
}
@@ -582,8 +757,8 @@ int32_t clearNextRowFromFS(void *iter) {
state->aBlockIdx = NULL;
}
if (state->pBlockData) {
- // tBlockDataClear(&state->blockData, 1);
- tBlockDataClear(state->pBlockData, 1);
+ // tBlockDataDestroy(&state->blockData, 1);
+ tBlockDataDestroy(state->pBlockData, 1);
state->pBlockData = NULL;
}
@@ -725,18 +900,19 @@ typedef struct {
SArray *pSkyline;
int64_t iSkyline;
- SBlockIdx idx;
- SMemNextRowIter memState;
- SMemNextRowIter imemState;
- SFSNextRowIter fsState;
- TSDBROW memRow, imemRow, fsRow;
+ SBlockIdx idx;
+ SMemNextRowIter memState;
+ SMemNextRowIter imemState;
+ SFSLastNextRowIter fsLastState;
+ SFSNextRowIter fsState;
+ TSDBROW memRow, imemRow, fsLastRow, fsRow;
- TsdbNextRowState input[3];
+ TsdbNextRowState input[4];
STsdbReadSnap *pReadSnap;
STsdb *pTsdb;
} CacheNextRowIter;
-static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb) {
+static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTsdb, STSchema *pTSchema) {
int code = 0;
tb_uid_t suid = getTableSuidByUid(uid, pTsdb);
@@ -745,12 +921,12 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
STbData *pMem = NULL;
if (pIter->pReadSnap->pMem) {
- tsdbGetTbDataFromMemTable(pIter->pReadSnap->pMem, suid, uid, &pMem);
+ pMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pMem, suid, uid);
}
STbData *pIMem = NULL;
if (pIter->pReadSnap->pIMem) {
- tsdbGetTbDataFromMemTable(pIter->pReadSnap->pIMem, suid, uid, &pIMem);
+ pIMem = tsdbGetTbDataFromMemTable(pIter->pReadSnap->pIMem, suid, uid);
}
pIter->pTsdb = pTsdb;
@@ -763,7 +939,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
if (pDelFile) {
SDelFReader *pDelFReader;
- code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb, NULL);
+ code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb);
if (code) goto _err;
code = getTableDelIdx(pDelFReader, suid, uid, &delIdx);
@@ -782,6 +958,12 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->idx = (SBlockIdx){.suid = suid, .uid = uid};
+ pIter->fsLastState.state = (SFSLASTNEXTROWSTATES) SFSNEXTROW_FS;
+ pIter->fsLastState.pTsdb = pTsdb;
+ pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
+ pIter->fsLastState.pBlockIdxExp = &pIter->idx;
+ pIter->fsLastState.pTSchema = pTSchema;
+
pIter->fsState.state = SFSNEXTROW_FS;
pIter->fsState.pTsdb = pTsdb;
pIter->fsState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
@@ -789,7 +971,9 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->input[0] = (TsdbNextRowState){&pIter->memRow, true, false, &pIter->memState, getNextRowFromMem, NULL};
pIter->input[1] = (TsdbNextRowState){&pIter->imemRow, true, false, &pIter->imemState, getNextRowFromMem, NULL};
- pIter->input[2] =
+ pIter->input[2] = (TsdbNextRowState){&pIter->fsLastRow, false, true, &pIter->fsLastState, getNextRowFromFSLast,
+ clearNextRowFromFSLast};
+ pIter->input[3] =
(TsdbNextRowState){&pIter->fsRow, false, true, &pIter->fsState, getNextRowFromFS, clearNextRowFromFS};
if (pMem) {
@@ -814,7 +998,7 @@ _err:
static int32_t nextRowIterClose(CacheNextRowIter *pIter) {
int code = 0;
- for (int i = 0; i < 3; ++i) {
+ for (int i = 0; i < 4; ++i) {
if (pIter->input[i].nextRowClearFn) {
pIter->input[i].nextRowClearFn(pIter->input[i].iter);
}
@@ -826,7 +1010,6 @@ static int32_t nextRowIterClose(CacheNextRowIter *pIter) {
tsdbUntakeReadSnap(pIter->pTsdb, pIter->pReadSnap);
- return code;
_err:
return code;
}
@@ -835,7 +1018,7 @@ _err:
static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) {
int code = 0;
- for (int i = 0; i < 3; ++i) {
+ for (int i = 0; i < 4; ++i) {
if (pIter->input[i].next && !pIter->input[i].stop) {
code = pIter->input[i].nextRowFn(pIter->input[i].iter, &pIter->input[i].pRow);
if (code) goto _err;
@@ -847,18 +1030,18 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) {
}
}
- if (pIter->input[0].stop && pIter->input[1].stop && pIter->input[2].stop) {
+ if (pIter->input[0].stop && pIter->input[1].stop && pIter->input[2].stop && pIter->input[3].stop) {
*ppRow = NULL;
return code;
}
- // select maxpoint(s) from mem, imem, fs
- TSDBROW *max[3] = {0};
- int iMax[3] = {-1, -1, -1};
+ // select maxpoint(s) from mem, imem, fs and last
+ TSDBROW *max[4] = {0};
+ int iMax[4] = {-1, -1, -1, -1};
int nMax = 0;
TSKEY maxKey = TSKEY_MIN;
- for (int i = 0; i < 3; ++i) {
+ for (int i = 0; i < 4; ++i) {
if (!pIter->input[i].stop && pIter->input[i].pRow != NULL) {
TSDBKEY key = TSDBROW_KEY(pIter->input[i].pRow);
@@ -876,13 +1059,13 @@ static int32_t nextRowIterGet(CacheNextRowIter *pIter, TSDBROW **ppRow) {
}
// delete detection
- TSDBROW *merge[3] = {0};
- int iMerge[3] = {-1, -1, -1};
+ TSDBROW *merge[4] = {0};
+ int iMerge[4] = {-1, -1, -1, -1};
int nMerge = 0;
for (int i = 0; i < nMax; ++i) {
- TSDBKEY maxKey = TSDBROW_KEY(max[i]);
+ TSDBKEY maxKey1 = TSDBROW_KEY(max[i]);
- bool deleted = tsdbKeyDeleted(&maxKey, pIter->pSkyline, &pIter->iSkyline);
+ bool deleted = tsdbKeyDeleted(&maxKey1, pIter->pSkyline, &pIter->iSkyline);
if (!deleted) {
iMerge[nMerge] = iMax[i];
merge[nMerge++] = max[i];
@@ -918,7 +1101,7 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppRo
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
- nextRowIterOpen(&iter, uid, pTsdb);
+ nextRowIterOpen(&iter, uid, pTsdb, pTSchema);
do {
TSDBROW *pRow = NULL;
@@ -1015,7 +1198,7 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
TSKEY lastRowTs = TSKEY_MAX;
CacheNextRowIter iter = {0};
- nextRowIterOpen(&iter, uid, pTsdb);
+ nextRowIterOpen(&iter, uid, pTsdb, pTSchema);
do {
TSDBROW *pRow = NULL;
@@ -1100,29 +1283,40 @@ int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUH
// getTableCacheKeyS(uid, "lr", key, &keyLen);
getTableCacheKey(uid, 0, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
- if (h) {
- } else {
- STSRow *pRow = NULL;
- bool dup = false; // which is always false for now
- code = mergeLastRow(uid, pTsdb, &dup, &pRow);
- // if table's empty or error, return code of -1
- if (code < 0 || pRow == NULL) {
- if (!dup && pRow) {
- taosMemoryFree(pRow);
- }
-
- *handle = NULL;
- return 0;
- }
-
- _taos_lru_deleter_t deleter = deleteTableCacheLastrow;
- LRUStatus status =
- taosLRUCacheInsert(pCache, key, keyLen, pRow, TD_ROW_LEN(pRow), deleter, NULL, TAOS_LRU_PRIORITY_LOW);
- if (status != TAOS_LRU_STATUS_OK) {
- code = -1;
- }
+ if (!h) {
+ taosThreadMutexLock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
+ if (!h) {
+ STSRow *pRow = NULL;
+ bool dup = false; // which is always false for now
+ code = mergeLastRow(uid, pTsdb, &dup, &pRow);
+ // if table's empty or error, return code of -1
+ if (code < 0 || pRow == NULL) {
+ if (!dup && pRow) {
+ taosMemoryFree(pRow);
+ }
+
+ taosThreadMutexUnlock(&pTsdb->lruMutex);
+
+ *handle = NULL;
+
+ return 0;
+ }
+
+ _taos_lru_deleter_t deleter = deleteTableCacheLastrow;
+ LRUStatus status =
+ taosLRUCacheInsert(pCache, key, keyLen, pRow, TD_ROW_LEN(pRow), deleter, NULL, TAOS_LRU_PRIORITY_LOW);
+ if (status != TAOS_LRU_STATUS_OK) {
+ code = -1;
+ }
+
+ taosThreadMutexUnlock(&pTsdb->lruMutex);
+
+ h = taosLRUCacheLookup(pCache, key, keyLen);
+ } else {
+ taosThreadMutexUnlock(&pTsdb->lruMutex);
+ }
}
*handle = h;
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c
index 6e25166203..020f3b0bc6 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c
@@ -20,6 +20,12 @@ typedef struct {
STSchema *pTSchema;
} SSkmInfo;
+typedef struct {
+ int64_t suid;
+ int64_t uid;
+ TSDBROW row;
+} SRowInfo;
+
typedef struct {
STsdb *pTsdb;
/* commit data */
@@ -29,6 +35,7 @@ typedef struct {
int32_t minRow;
int32_t maxRow;
int8_t cmprAlg;
+ SArray *aTbDataP;
STsdbFS fs;
// --------------
TSKEY nextKey; // reset by each table commit
@@ -38,15 +45,27 @@ typedef struct {
// commit file data
struct {
SDataFReader *pReader;
- SArray *aBlockIdx; // SArray
- SMapData mBlock; // SMapData, read from reader
- SBlockData bData;
+ // data
+ SArray *aBlockIdx; // SArray
+ int32_t iBlockIdx;
+ SBlockIdx *pBlockIdx;
+ SMapData mBlock; // SMapData
+ SBlockData bData;
+ // last
+ SArray *aBlockL; // SArray
+ int32_t iBlockL;
+ SBlockData bDatal;
+ int32_t iRow;
+ SRowInfo *pRowInfo;
+ SRowInfo rowInfo;
} dReader;
struct {
SDataFWriter *pWriter;
SArray *aBlockIdx; // SArray
+ SArray *aBlockL; // SArray
SMapData mBlock; // SMapData
SBlockData bData;
+ SBlockData bDatal;
} dWriter;
SSkmInfo skmTable;
SSkmInfo skmRow;
@@ -162,10 +181,10 @@ static int32_t tsdbCommitDelStart(SCommitter *pCommitter) {
SDelFile *pDelFileR = pCommitter->fs.pDelFile;
if (pDelFileR) {
- code = tsdbDelFReaderOpen(&pCommitter->pDelFReader, pDelFileR, pTsdb, NULL);
+ code = tsdbDelFReaderOpen(&pCommitter->pDelFReader, pDelFileR, pTsdb);
if (code) goto _err;
- code = tsdbReadDelIdx(pCommitter->pDelFReader, pCommitter->aDelIdx, NULL);
+ code = tsdbReadDelIdx(pCommitter->pDelFReader, pCommitter->aDelIdx);
if (code) goto _err;
}
@@ -202,7 +221,7 @@ static int32_t tsdbCommitTableDel(SCommitter *pCommitter, STbData *pTbData, SDel
suid = pDelIdx->suid;
uid = pDelIdx->uid;
- code = tsdbReadDelData(pCommitter->pDelFReader, pDelIdx, pCommitter->aDelData, NULL);
+ code = tsdbReadDelData(pCommitter->pDelFReader, pDelIdx, pCommitter->aDelData);
if (code) goto _err;
} else {
taosArrayClear(pCommitter->aDelData);
@@ -222,7 +241,7 @@ static int32_t tsdbCommitTableDel(SCommitter *pCommitter, STbData *pTbData, SDel
}
// write
- code = tsdbWriteDelData(pCommitter->pDelFWriter, pCommitter->aDelData, NULL, &delIdx);
+ code = tsdbWriteDelData(pCommitter->pDelFWriter, pCommitter->aDelData, &delIdx);
if (code) goto _err;
// put delIdx
@@ -243,7 +262,7 @@ static int32_t tsdbCommitDelEnd(SCommitter *pCommitter) {
int32_t code = 0;
STsdb *pTsdb = pCommitter->pTsdb;
- code = tsdbWriteDelIdx(pCommitter->pDelFWriter, pCommitter->aDelIdxN, NULL);
+ code = tsdbWriteDelIdx(pCommitter->pDelFWriter, pCommitter->aDelIdxN);
if (code) goto _err;
code = tsdbUpdateDelFileHdr(pCommitter->pDelFWriter);
@@ -271,87 +290,19 @@ _err:
return code;
}
-static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
- int32_t code = 0;
- STsdb *pTsdb = pCommitter->pTsdb;
- SDFileSet *pRSet = NULL;
-
- // memory
- pCommitter->nextKey = TSKEY_MAX;
-
- // old
- taosArrayClear(pCommitter->dReader.aBlockIdx);
- tMapDataReset(&pCommitter->dReader.mBlock);
- tBlockDataReset(&pCommitter->dReader.bData);
- pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &(SDFileSet){.fid = pCommitter->commitFid},
- tDFileSetCmprFn, TD_EQ);
- if (pRSet) {
- code = tsdbDataFReaderOpen(&pCommitter->dReader.pReader, pTsdb, pRSet);
- if (code) goto _err;
-
- code = tsdbReadBlockIdx(pCommitter->dReader.pReader, pCommitter->dReader.aBlockIdx, NULL);
- if (code) goto _err;
- }
-
- // new
- SHeadFile fHead;
- SDataFile fData;
- SLastFile fLast;
- SSmaFile fSma;
- SDFileSet wSet = {.pHeadF = &fHead, .pDataF = &fData, .pLastF = &fLast, .pSmaF = &fSma};
-
- taosArrayClear(pCommitter->dWriter.aBlockIdx);
- tMapDataReset(&pCommitter->dWriter.mBlock);
- tBlockDataReset(&pCommitter->dWriter.bData);
- if (pRSet) {
- wSet.diskId = pRSet->diskId;
- wSet.fid = pCommitter->commitFid;
- fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0};
- fData = *pRSet->pDataF;
- fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0};
- fSma = *pRSet->pSmaF;
- } else {
- SDiskID did = {0};
-
- tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
-
- tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
-
- wSet.diskId = did;
- wSet.fid = pCommitter->commitFid;
- fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0};
- fData = (SDataFile){.commitID = pCommitter->commitID, .size = 0};
- fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0};
- fSma = (SSmaFile){.commitID = pCommitter->commitID, .size = 0};
- }
- code = tsdbDataFWriterOpen(&pCommitter->dWriter.pWriter, pTsdb, &wSet);
- if (code) goto _err;
-
-_exit:
- return code;
-
-_err:
- tsdbError("vgId:%d, commit file data start failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitterUpdateTableSchema(SCommitter *pCommitter, int64_t suid, int64_t uid, int32_t sver) {
+static int32_t tsdbCommitterUpdateTableSchema(SCommitter *pCommitter, int64_t suid, int64_t uid) {
int32_t code = 0;
- if (pCommitter->skmTable.pTSchema) {
- if (pCommitter->skmTable.suid == suid) {
- if (suid == 0) {
- if (pCommitter->skmTable.uid == uid && sver == pCommitter->skmTable.pTSchema->version) goto _exit;
- } else {
- if (sver == pCommitter->skmTable.pTSchema->version) goto _exit;
- }
- }
+ if (suid) {
+ if (pCommitter->skmTable.suid == suid) goto _exit;
+ } else {
+ if (pCommitter->skmTable.uid == uid) goto _exit;
}
pCommitter->skmTable.suid = suid;
pCommitter->skmTable.uid = uid;
tTSchemaDestroy(pCommitter->skmTable.pTSchema);
- code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, sver, &pCommitter->skmTable.pTSchema);
+ code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, -1, &pCommitter->skmTable.pTSchema);
if (code) goto _exit;
_exit:
@@ -383,152 +334,355 @@ _exit:
return code;
}
-static int32_t tsdbCommitBlockData(SCommitter *pCommitter, SBlockData *pBlockData, SBlock *pBlock, SBlockIdx *pBlockIdx,
- int8_t toDataOnly) {
+static int32_t tsdbCommitterNextLastRow(SCommitter *pCommitter) {
int32_t code = 0;
- if (pBlock->nSubBlock == 0) {
- if (!toDataOnly && pBlockData->nRow < pCommitter->minRow) {
- pBlock->last = 1;
+ ASSERT(pCommitter->dReader.pReader);
+ ASSERT(pCommitter->dReader.pRowInfo);
+
+ SBlockData *pBlockDatal = &pCommitter->dReader.bDatal;
+ pCommitter->dReader.iRow++;
+ if (pCommitter->dReader.iRow < pBlockDatal->nRow) {
+ if (pBlockDatal->uid) {
+ pCommitter->dReader.pRowInfo->uid = pBlockDatal->uid;
} else {
- pBlock->last = 0;
+ pCommitter->dReader.pRowInfo->uid = pBlockDatal->aUid[pCommitter->dReader.iRow];
+ }
+ pCommitter->dReader.pRowInfo->row = tsdbRowFromBlockData(pBlockDatal, pCommitter->dReader.iRow);
+ } else {
+ pCommitter->dReader.iBlockL++;
+ if (pCommitter->dReader.iBlockL < taosArrayGetSize(pCommitter->dReader.aBlockL)) {
+ SBlockL *pBlockL = (SBlockL *)taosArrayGet(pCommitter->dReader.aBlockL, pCommitter->dReader.iBlockL);
+ int64_t suid = pBlockL->suid;
+ int64_t uid = pBlockL->maxUid;
+
+ code = tsdbCommitterUpdateTableSchema(pCommitter, suid, uid);
+ if (code) goto _exit;
+
+ code = tBlockDataInit(pBlockDatal, suid, suid ? 0 : uid, pCommitter->skmTable.pTSchema);
+ if (code) goto _exit;
+
+ code = tsdbReadLastBlock(pCommitter->dReader.pReader, pBlockL, pBlockDatal);
+ if (code) goto _exit;
+
+ pCommitter->dReader.iRow = 0;
+ pCommitter->dReader.pRowInfo->suid = pBlockDatal->suid;
+ if (pBlockDatal->uid) {
+ pCommitter->dReader.pRowInfo->uid = pBlockDatal->uid;
+ } else {
+ pCommitter->dReader.pRowInfo->uid = pBlockDatal->aUid[0];
+ }
+ pCommitter->dReader.pRowInfo->row = tsdbRowFromBlockData(pBlockDatal, pCommitter->dReader.iRow);
+ } else {
+ pCommitter->dReader.pRowInfo = NULL;
}
}
- code =
- tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, NULL, NULL, pBlockIdx, pBlock, pCommitter->cmprAlg);
+_exit:
+ return code;
+}
+
+static int32_t tsdbCommitterNextTableData(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ ASSERT(pCommitter->dReader.pBlockIdx);
+
+ pCommitter->dReader.iBlockIdx++;
+ if (pCommitter->dReader.iBlockIdx < taosArrayGetSize(pCommitter->dReader.aBlockIdx)) {
+ pCommitter->dReader.pBlockIdx =
+ (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx);
+
+ code = tsdbReadBlock(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
+ if (code) goto _exit;
+
+ ASSERT(pCommitter->dReader.mBlock.nItem > 0);
+ } else {
+ pCommitter->dReader.pBlockIdx = NULL;
+ }
+
+_exit:
+ return code;
+}
+
+static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
+ int32_t code = 0;
+ STsdb *pTsdb = pCommitter->pTsdb;
+ SDFileSet *pRSet = NULL;
+
+ // memory
+ pCommitter->commitFid = tsdbKeyFid(pCommitter->nextKey, pCommitter->minutes, pCommitter->precision);
+ tsdbFidKeyRange(pCommitter->commitFid, pCommitter->minutes, pCommitter->precision, &pCommitter->minKey,
+ &pCommitter->maxKey);
+ pCommitter->nextKey = TSKEY_MAX;
+
+ // Reader
+ pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &(SDFileSet){.fid = pCommitter->commitFid},
+ tDFileSetCmprFn, TD_EQ);
+ if (pRSet) {
+ code = tsdbDataFReaderOpen(&pCommitter->dReader.pReader, pTsdb, pRSet);
+ if (code) goto _err;
+
+ // data
+ code = tsdbReadBlockIdx(pCommitter->dReader.pReader, pCommitter->dReader.aBlockIdx);
+ if (code) goto _err;
+
+ pCommitter->dReader.iBlockIdx = 0;
+ if (pCommitter->dReader.iBlockIdx < taosArrayGetSize(pCommitter->dReader.aBlockIdx)) {
+ pCommitter->dReader.pBlockIdx =
+ (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx);
+
+ code = tsdbReadBlock(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
+ if (code) goto _err;
+ } else {
+ pCommitter->dReader.pBlockIdx = NULL;
+ }
+ tBlockDataReset(&pCommitter->dReader.bData);
+
+ // last
+ code = tsdbReadBlockL(pCommitter->dReader.pReader, pCommitter->dReader.aBlockL);
+ if (code) goto _err;
+
+ pCommitter->dReader.iBlockL = -1;
+ pCommitter->dReader.iRow = -1;
+ pCommitter->dReader.pRowInfo = &pCommitter->dReader.rowInfo;
+ tBlockDataReset(&pCommitter->dReader.bDatal);
+ code = tsdbCommitterNextLastRow(pCommitter);
+ if (code) goto _err;
+ } else {
+ pCommitter->dReader.pBlockIdx = NULL;
+ pCommitter->dReader.pRowInfo = NULL;
+ }
+
+ // Writer
+ SHeadFile fHead;
+ SDataFile fData;
+ SLastFile fLast;
+ SSmaFile fSma;
+ SDFileSet wSet = {.pHeadF = &fHead, .pDataF = &fData, .pLastF = &fLast, .pSmaF = &fSma};
+ if (pRSet) {
+ wSet.diskId = pRSet->diskId;
+ wSet.fid = pCommitter->commitFid;
+ fHead = (SHeadFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
+ fData = *pRSet->pDataF;
+ fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
+ fSma = *pRSet->pSmaF;
+ } else {
+ SDiskID did = {0};
+
+ tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
+
+ tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
+
+ wSet.diskId = did;
+ wSet.fid = pCommitter->commitFid;
+ fHead = (SHeadFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
+ fData = (SDataFile){.commitID = pCommitter->commitID, .size = 0};
+ fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
+ fSma = (SSmaFile){.commitID = pCommitter->commitID, .size = 0};
+ }
+ code = tsdbDataFWriterOpen(&pCommitter->dWriter.pWriter, pTsdb, &wSet);
if (code) goto _err;
- code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock);
+ taosArrayClear(pCommitter->dWriter.aBlockIdx);
+ taosArrayClear(pCommitter->dWriter.aBlockL);
+ tMapDataReset(&pCommitter->dWriter.mBlock);
+ tBlockDataReset(&pCommitter->dWriter.bData);
+ tBlockDataReset(&pCommitter->dWriter.bDatal);
+
+_exit:
+ return code;
+
+_err:
+ tsdbError("vgId:%d, commit file data start failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbCommitDataBlock(SCommitter *pCommitter, SBlock *pBlock) {
+ int32_t code = 0;
+ SBlockData *pBlockData = &pCommitter->dWriter.bData;
+ SBlock block;
+
+ ASSERT(pBlockData->nRow > 0);
+
+ if (pBlock) {
+ block = *pBlock; // as a subblock
+ } else {
+ tBlockReset(&block); // as a new block
+ }
+
+ // info
+ block.nRow += pBlockData->nRow;
+ for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
+ TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]};
+
+ if (iRow == 0) {
+ if (tsdbKeyCmprFn(&block.minKey, &key) > 0) {
+ block.minKey = key;
+ }
+ } else {
+ if (pBlockData->aTSKEY[iRow] == pBlockData->aTSKEY[iRow - 1]) {
+ block.hasDup = 1;
+ }
+ }
+
+ if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&block.maxKey, &key) < 0) {
+ block.maxKey = key;
+ }
+
+ block.minVer = TMIN(block.minVer, key.version);
+ block.maxVer = TMAX(block.maxVer, key.version);
+ }
+
+ // write
+ block.nSubBlock++;
+ code = tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, &block.aSubBlock[block.nSubBlock - 1],
+ ((block.nSubBlock == 1) && !block.hasDup) ? &block.smaInfo : NULL, pCommitter->cmprAlg, 0);
if (code) goto _err;
+ // put SBlock
+ code = tMapDataPutItem(&pCommitter->dWriter.mBlock, &block, tPutBlock);
+ if (code) goto _err;
+
+ // clear
+ tBlockDataClear(pBlockData);
+
return code;
_err:
+ tsdbError("vgId:%d tsdb commit data block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbMergeTableData(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlockMerge, TSDBKEY toKey,
- int8_t toDataOnly) {
+static int32_t tsdbCommitLastBlock(SCommitter *pCommitter) {
int32_t code = 0;
- SBlockIdx *pBlockIdx = &(SBlockIdx){.suid = pIter->pTbData->suid, .uid = pIter->pTbData->uid};
- SBlockData *pBlockDataMerge = &pCommitter->dReader.bData;
- SBlockData *pBlockData = &pCommitter->dWriter.bData;
- SBlock block;
- SBlock *pBlock = █
- TSDBROW *pRow1;
- TSDBROW row2;
- TSDBROW *pRow2 = &row2;
+ SBlockL blockL;
+ SBlockData *pBlockData = &pCommitter->dWriter.bDatal;
- // read SBlockData
- code = tsdbReadBlockData(pCommitter->dReader.pReader, pBlockIdx, pBlockMerge, pBlockDataMerge, NULL, NULL);
+ ASSERT(pBlockData->nRow > 0);
+
+ // info
+ blockL.suid = pBlockData->suid;
+ blockL.nRow = pBlockData->nRow;
+ blockL.minKey = TSKEY_MAX;
+ blockL.maxKey = TSKEY_MIN;
+ blockL.minVer = VERSION_MAX;
+ blockL.maxVer = VERSION_MIN;
+ for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
+ blockL.minKey = TMIN(blockL.minKey, pBlockData->aTSKEY[iRow]);
+ blockL.maxKey = TMAX(blockL.maxKey, pBlockData->aTSKEY[iRow]);
+ blockL.minVer = TMIN(blockL.minVer, pBlockData->aVersion[iRow]);
+ blockL.maxVer = TMAX(blockL.maxVer, pBlockData->aVersion[iRow]);
+ }
+ blockL.minUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[0];
+ blockL.maxUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[pBlockData->nRow - 1];
+
+ // write
+ code = tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, &blockL.bInfo, NULL, pCommitter->cmprAlg, 1);
if (code) goto _err;
- code = tBlockDataSetSchema(pBlockData, pCommitter->skmTable.pTSchema);
+ // push SBlockL
+ if (taosArrayPush(pCommitter->dWriter.aBlockL, &blockL) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ // clear
+ tBlockDataClear(pBlockData);
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb commit last block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbMergeCommitData(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) {
+ int32_t code = 0;
+ STbData *pTbData = pIter->pTbData;
+ SBlockData *pBlockDataR = &pCommitter->dReader.bData;
+ SBlockData *pBlockDataW = &pCommitter->dWriter.bData;
+
+ code = tsdbReadDataBlock(pCommitter->dReader.pReader, pBlock, pBlockDataR);
if (code) goto _err;
- // loop to merge
- pRow1 = tsdbTbDataIterGet(pIter);
- *pRow2 = tsdbRowFromBlockData(pBlockDataMerge, 0);
- ASSERT(pRow1 && tsdbKeyCmprFn(&TSDBROW_KEY(pRow1), &toKey) < 0);
- ASSERT(tsdbKeyCmprFn(&TSDBROW_KEY(pRow2), &toKey) < 0);
- code = tsdbCommitterUpdateRowSchema(pCommitter, pBlockIdx->suid, pBlockIdx->uid, TSDBROW_SVERSION(pRow1));
- if (code) goto _err;
+ tBlockDataClear(pBlockDataW);
+ int32_t iRow = 0;
+ TSDBROW row;
+ TSDBROW *pRow1 = tsdbTbDataIterGet(pIter);
+ TSDBROW *pRow2 = &row;
+ *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
+ while (pRow1 && pRow2) {
+ int32_t c = tsdbRowCmprFn(pRow1, pRow2);
- tBlockReset(pBlock);
- tBlockDataClearData(pBlockData);
- while (true) {
- if (pRow1 == NULL && pRow2 == NULL) {
- if (pBlockData->nRow == 0) {
- break;
+ if (c < 0) {
+ code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow1));
+ if (code) goto _err;
+
+ code = tBlockDataAppendRow(pBlockDataW, pRow1, pCommitter->skmRow.pTSchema, pTbData->uid);
+ if (code) goto _err;
+
+ // next
+ tsdbTbDataIterNext(pIter);
+ pRow1 = tsdbTbDataIterGet(pIter);
+ } else if (c > 0) {
+ code = tBlockDataAppendRow(pBlockDataW, pRow2, NULL, pTbData->uid);
+ if (code) goto _err;
+
+ iRow++;
+ if (iRow < pBlockDataR->nRow) {
+ *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
} else {
- goto _write_block;
+ pRow2 = NULL;
}
- }
-
- if (pRow1 && pRow2) {
- int32_t c = tsdbRowCmprFn(pRow1, pRow2);
- if (c < 0) {
- goto _append_mem_row;
- } else if (c > 0) {
- goto _append_block_row;
- } else {
- ASSERT(0);
- }
- } else if (pRow1) {
- goto _append_mem_row;
} else {
- goto _append_block_row;
+ ASSERT(0);
}
- _append_mem_row:
- code = tBlockDataAppendRow(pBlockData, pRow1, pCommitter->skmRow.pTSchema);
+ // check
+ if (pBlockDataW->nRow >= pCommitter->maxRow * 4 / 5) {
+ code = tsdbCommitDataBlock(pCommitter, NULL);
+ if (code) goto _err;
+ }
+ }
+
+ while (pRow2) {
+ code = tBlockDataAppendRow(pBlockDataW, pRow2, NULL, pTbData->uid);
if (code) goto _err;
- tsdbTbDataIterNext(pIter);
- pRow1 = tsdbTbDataIterGet(pIter);
- if (pRow1) {
- if (tsdbKeyCmprFn(&TSDBROW_KEY(pRow1), &toKey) < 0) {
- code = tsdbCommitterUpdateRowSchema(pCommitter, pBlockIdx->suid, pBlockIdx->uid, TSDBROW_SVERSION(pRow1));
- if (code) goto _err;
- } else {
- pRow1 = NULL;
- }
- }
-
- if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- goto _write_block;
- } else {
- continue;
- }
-
- _append_block_row:
- code = tBlockDataAppendRow(pBlockData, pRow2, NULL);
- if (code) goto _err;
-
- if (pRow2->iRow + 1 < pBlockDataMerge->nRow) {
- *pRow2 = tsdbRowFromBlockData(pBlockDataMerge, pRow2->iRow + 1);
+ iRow++;
+ if (iRow < pBlockDataR->nRow) {
+ *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
} else {
pRow2 = NULL;
}
- if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- goto _write_block;
- } else {
- continue;
+ // check
+ if (pBlockDataW->nRow >= pCommitter->maxRow * 4 / 5) {
+ code = tsdbCommitDataBlock(pCommitter, NULL);
+ if (code) goto _err;
}
+ }
- _write_block:
- code = tsdbCommitBlockData(pCommitter, pBlockData, pBlock, pBlockIdx, toDataOnly);
+ // check
+ if (pBlockDataW->nRow > 0) {
+ code = tsdbCommitDataBlock(pCommitter, NULL);
if (code) goto _err;
-
- tBlockReset(pBlock);
- tBlockDataClearData(pBlockData);
}
return code;
_err:
- tsdbError("vgId:%d, tsdb merge block and mem failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, tsdb merge commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter, TSDBKEY toKey, int8_t toDataOnly) {
+static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter, TSDBKEY toKey) {
int32_t code = 0;
- TSDBROW *pRow;
- SBlock block;
- SBlock *pBlock = █
+ STbData *pTbData = pIter->pTbData;
SBlockData *pBlockData = &pCommitter->dWriter.bData;
- int64_t suid = pIter->pTbData->suid;
- int64_t uid = pIter->pTbData->uid;
- code = tBlockDataSetSchema(pBlockData, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
-
- tBlockReset(pBlock);
- tBlockDataClearData(pBlockData);
- pRow = tsdbTbDataIterGet(pIter);
- ASSERT(pRow && tsdbKeyCmprFn(&TSDBROW_KEY(pRow), &toKey) < 0);
+ tBlockDataClear(pBlockData);
+ TSDBROW *pRow = tsdbTbDataIterGet(pIter);
while (true) {
if (pRow == NULL) {
if (pBlockData->nRow > 0) {
@@ -539,33 +693,27 @@ static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter
}
// update schema
- code = tsdbCommitterUpdateRowSchema(pCommitter, suid, uid, TSDBROW_SVERSION(pRow));
+ code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
if (code) goto _err;
// append
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema);
+ code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
if (code) goto _err;
tsdbTbDataIterNext(pIter);
pRow = tsdbTbDataIterGet(pIter);
- // if (pRow && tsdbKeyCmprFn(&TSDBROW_KEY(pRow), &toKey) >= 0) pRow = NULL;
- // crash on CI, use the block following
if (pRow) {
- TSDBKEY tmpKey = TSDBROW_KEY(pRow);
- if (tsdbKeyCmprFn(&tmpKey, &toKey) >= 0) {
+ TSDBKEY rowKey = TSDBROW_KEY(pRow);
+ if (tsdbKeyCmprFn(&rowKey, &toKey) >= 0) {
pRow = NULL;
}
}
- if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) goto _write_block;
- continue;
-
- _write_block:
- code = tsdbCommitBlockData(pCommitter, pBlockData, pBlock, &(SBlockIdx){.suid = suid, .uid = uid}, toDataOnly);
- if (code) goto _err;
-
- tBlockReset(pBlock);
- tBlockDataClearData(pBlockData);
+ if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
+ _write_block:
+ code = tsdbCommitDataBlock(pCommitter, NULL);
+ if (code) goto _err;
+ }
}
return code;
@@ -575,65 +723,16 @@ _err:
return code;
}
-static int32_t tsdbCommitTableDiskData(SCommitter *pCommitter, SBlock *pBlock, SBlockIdx *pBlockIdx) {
- int32_t code = 0;
- SBlock block;
+static int32_t tsdbGetNumOfRowsLessThan(STbDataIter *pIter, TSDBKEY key) {
+ int32_t nRow = 0;
- if (pBlock->last) {
- code = tsdbReadBlockData(pCommitter->dReader.pReader, pBlockIdx, pBlock, &pCommitter->dReader.bData, NULL, NULL);
- if (code) goto _err;
-
- tBlockReset(&block);
- code = tsdbCommitBlockData(pCommitter, &pCommitter->dReader.bData, &block, pBlockIdx, 0);
- if (code) goto _err;
- } else {
- code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock);
- if (code) goto _err;
- }
-
- return code;
-
-_err:
- tsdbError("vgId:%d, tsdb commit table disk data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitTableDataEnd(SCommitter *pCommitter, int64_t suid, int64_t uid) {
- int32_t code = 0;
- SBlockIdx blockIdx = {.suid = suid, .uid = uid};
- SBlockIdx *pBlockIdx = &blockIdx;
-
- code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, NULL, pBlockIdx);
- if (code) goto _err;
-
- if (taosArrayPush(pCommitter->dWriter.aBlockIdx, pBlockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- return code;
-
-_err:
- tsdbError("vgId:%d, commit table data end failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbGetOvlpNRow(STbDataIter *pIter, SBlock *pBlock) {
- int32_t nRow = 0;
- TSDBROW *pRow;
- TSDBKEY key;
- int32_t c = 0;
STbDataIter iter = *pIter;
-
- iter.pRow = NULL;
while (true) {
- pRow = tsdbTbDataIterGet(&iter);
-
+ TSDBROW *pRow = tsdbTbDataIterGet(&iter);
if (pRow == NULL) break;
- key = TSDBROW_KEY(pRow);
- c = tBlockCmprFn(&(SBlock){.maxKey = key, .minKey = key}, pBlock);
- if (c == 0) {
+ int32_t c = tsdbKeyCmprFn(&TSDBROW_KEY(pRow), &key);
+ if (c < 0) {
nRow++;
tsdbTbDataIterNext(&iter);
} else if (c > 0) {
@@ -648,42 +747,33 @@ static int32_t tsdbGetOvlpNRow(STbDataIter *pIter, SBlock *pBlock) {
static int32_t tsdbMergeAsSubBlock(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) {
int32_t code = 0;
+ STbData *pTbData = pIter->pTbData;
SBlockData *pBlockData = &pCommitter->dWriter.bData;
- SBlockIdx *pBlockIdx = &(SBlockIdx){.suid = pIter->pTbData->suid, .uid = pIter->pTbData->uid};
- SBlock block;
- TSDBROW *pRow;
- code = tBlockDataSetSchema(pBlockData, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
-
- pRow = tsdbTbDataIterGet(pIter);
- code = tsdbCommitterUpdateRowSchema(pCommitter, pBlockIdx->suid, pBlockIdx->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
+ tBlockDataClear(pBlockData);
+ TSDBROW *pRow = tsdbTbDataIterGet(pIter);
while (true) {
if (pRow == NULL) break;
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema);
+
+ code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
+ if (code) goto _err;
+
+ code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
if (code) goto _err;
tsdbTbDataIterNext(pIter);
pRow = tsdbTbDataIterGet(pIter);
if (pRow) {
- TSDBKEY key = TSDBROW_KEY(pRow);
- int32_t c = tBlockCmprFn(&(SBlock){.minKey = key, .maxKey = key}, pBlock);
-
- if (c == 0) {
- code =
- tsdbCommitterUpdateRowSchema(pCommitter, pIter->pTbData->suid, pIter->pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
- } else if (c > 0) {
+ TSDBKEY rowKey = TSDBROW_KEY(pRow);
+ if (tsdbKeyCmprFn(&rowKey, &pBlock->maxKey) > 0) {
pRow = NULL;
- } else {
- ASSERT(0);
}
}
}
- block = *pBlock;
- code = tsdbCommitBlockData(pCommitter, pBlockData, &block, pBlockIdx, 0);
+ ASSERT(pBlockData->nRow > 0 && pBlock->nRow + pBlockData->nRow <= pCommitter->maxRow);
+
+ code = tsdbCommitDataBlock(pCommitter, pBlock);
if (code) goto _err;
return code;
@@ -693,176 +783,307 @@ _err:
return code;
}
-static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBlockIdx *pBlockIdx) {
- int32_t code = 0;
- STbDataIter iter = {0};
- STbDataIter *pIter = &iter;
- TSDBROW *pRow;
- int32_t iBlock;
- int32_t nBlock;
- int64_t suid;
- int64_t uid;
+static int32_t tsdbMergeCommitLast(SCommitter *pCommitter, STbDataIter *pIter) {
+ int32_t code = 0;
+ STbData *pTbData = pIter->pTbData;
+ int32_t nRow = tsdbGetNumOfRowsLessThan(pIter, (TSDBKEY){.ts = pCommitter->maxKey + 1, .version = VERSION_MIN});
- if (pTbData) {
- tsdbTbDataIterOpen(pTbData, &(TSDBKEY){.ts = pCommitter->minKey, .version = VERSION_MIN}, 0, pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL;
+ if (pCommitter->dReader.pRowInfo && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pRowInfo) == 0) {
+ if (pCommitter->dReader.pRowInfo->suid) { // super table
+ for (int32_t iRow = pCommitter->dReader.iRow; iRow < pCommitter->dReader.bDatal.nRow; iRow++) {
+ if (pTbData->uid != pCommitter->dReader.bDatal.aUid[iRow]) break;
+ nRow++;
+ }
+ } else { // normal table
+ ASSERT(pCommitter->dReader.iRow == 0);
+ nRow += pCommitter->dReader.bDatal.nRow;
+ }
+ }
- suid = pTbData->suid;
- uid = pTbData->uid;
- } else {
- pIter = NULL;
+ if (nRow == 0) goto _exit;
+
+ TSDBROW *pRow = tsdbTbDataIterGet(pIter);
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
pRow = NULL;
}
- if (pBlockIdx) {
- code = tsdbReadBlock(pCommitter->dReader.pReader, pBlockIdx, &pCommitter->dReader.mBlock, NULL);
- if (code) goto _err;
-
- nBlock = pCommitter->dReader.mBlock.nItem;
- ASSERT(nBlock > 0);
-
- suid = pBlockIdx->suid;
- uid = pBlockIdx->uid;
- } else {
- nBlock = 0;
+ SRowInfo *pRowInfo = pCommitter->dReader.pRowInfo;
+ if (pRowInfo && pRowInfo->uid != pTbData->uid) {
+ pRowInfo = NULL;
}
- if (pRow == NULL && nBlock == 0) goto _exit;
+ while (nRow) {
+ SBlockData *pBlockData;
+ int8_t toData;
- // start ===========
- tMapDataReset(&pCommitter->dWriter.mBlock);
+ if (nRow < pCommitter->minRow) { // to .last
+ toData = 0;
+ pBlockData = &pCommitter->dWriter.bDatal;
+
+ // commit and reset block data schema if need
+ // QUESTION: Is there a case that pBlockData->nRow == 0 but need to change schema ?
+ if (pBlockData->suid || pBlockData->uid) {
+ if (pBlockData->suid != pTbData->suid || pBlockData->suid == 0) {
+ if (pBlockData->nRow > 0) {
+ code = tsdbCommitLastBlock(pCommitter);
+ if (code) goto _err;
+ }
+
+ tBlockDataReset(pBlockData);
+ }
+ }
+
+ // set block data schema if need
+ if (pBlockData->suid == 0 && pBlockData->uid == 0) {
+ code =
+ tBlockDataInit(pBlockData, pTbData->suid, pTbData->suid ? 0 : pTbData->uid, pCommitter->skmTable.pTSchema);
+ if (code) goto _err;
+ }
+
+ if (pBlockData->nRow + nRow > pCommitter->maxRow) {
+ code = tsdbCommitLastBlock(pCommitter);
+ if (code) goto _err;
+ }
+ } else { // to .data
+ toData = 1;
+ pBlockData = &pCommitter->dWriter.bData;
+ ASSERT(pBlockData->nRow == 0);
+ }
+
+ while (pRow && pRowInfo) {
+ int32_t c = tsdbRowCmprFn(pRow, &pRowInfo->row);
+ if (c < 0) {
+ code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
+ if (code) goto _err;
+
+ code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
+ if (code) goto _err;
+
+ tsdbTbDataIterNext(pIter);
+ pRow = tsdbTbDataIterGet(pIter);
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pRow = NULL;
+ }
+ } else if (c > 0) {
+ code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pTbData->uid);
+ if (code) goto _err;
+
+ code = tsdbCommitterNextLastRow(pCommitter);
+ if (code) goto _err;
+
+ pRowInfo = pCommitter->dReader.pRowInfo;
+ if (pRowInfo && pRowInfo->uid != pTbData->uid) {
+ pRowInfo = NULL;
+ }
+ } else {
+ ASSERT(0);
+ }
+
+ nRow--;
+ if (toData) {
+ if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
+ code = tsdbCommitDataBlock(pCommitter, NULL);
+ if (code) goto _err;
+ goto _outer_break;
+ }
+ }
+ }
+
+ while (pRow) {
+ code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
+ if (code) goto _err;
+
+ code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
+ if (code) goto _err;
+
+ tsdbTbDataIterNext(pIter);
+ pRow = tsdbTbDataIterGet(pIter);
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pRow = NULL;
+ }
+
+ nRow--;
+ if (toData) {
+ if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
+ code = tsdbCommitDataBlock(pCommitter, NULL);
+ if (code) goto _err;
+ goto _outer_break;
+ }
+ }
+ }
+
+ while (pRowInfo) {
+ code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pTbData->uid);
+ if (code) goto _err;
+
+ code = tsdbCommitterNextLastRow(pCommitter);
+ if (code) goto _err;
+
+ pRowInfo = pCommitter->dReader.pRowInfo;
+ if (pRowInfo && pRowInfo->uid != pTbData->uid) {
+ pRowInfo = NULL;
+ }
+
+ nRow--;
+ if (toData) {
+ if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
+ code = tsdbCommitDataBlock(pCommitter, NULL);
+ if (code) goto _err;
+ goto _outer_break;
+ }
+ }
+ }
+
+ _outer_break:
+ ASSERT(nRow >= 0);
+ }
+
+_exit:
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb merge commit last failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData) {
+ int32_t code = 0;
+
+ ASSERT(pCommitter->dReader.pBlockIdx == NULL || tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, pTbData) >= 0);
+ ASSERT(pCommitter->dReader.pRowInfo == NULL || tTABLEIDCmprFn(pCommitter->dReader.pRowInfo, pTbData) >= 0);
+
+ // merge commit table data
+ STbDataIter iter = {0};
+ STbDataIter *pIter = &iter;
+ TSDBROW *pRow;
+
+ tsdbTbDataIterOpen(pTbData, &(TSDBKEY){.ts = pCommitter->minKey, .version = VERSION_MIN}, 0, pIter);
+ pRow = tsdbTbDataIterGet(pIter);
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pRow = NULL;
+ }
+
+ if (pRow == NULL) goto _exit;
+
+ int32_t iBlock = 0;
SBlock block;
SBlock *pBlock = █
-
- iBlock = 0;
- if (iBlock < nBlock) {
+ if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pBlockIdx) == 0) {
tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
} else {
pBlock = NULL;
}
- if (pRow) {
- code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid, pTbData->maxSkmVer);
- if (code) goto _err;
- }
+ code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid);
+ if (code) goto _err;
- // merge ===========
- while (true) {
- if (pRow == NULL && pBlock == NULL) break;
+ tMapDataReset(&pCommitter->dWriter.mBlock);
+ code = tBlockDataInit(&pCommitter->dReader.bData, pTbData->suid, pTbData->uid, pCommitter->skmTable.pTSchema);
+ if (code) goto _err;
+ code = tBlockDataInit(&pCommitter->dWriter.bData, pTbData->suid, pTbData->uid, pCommitter->skmTable.pTSchema);
+ if (code) goto _err;
- if (pRow && pBlock) {
- if (pBlock->last) {
- code = tsdbMergeTableData(pCommitter, pIter, pBlock,
- (TSDBKEY){.ts = pCommitter->maxKey + 1, .version = VERSION_MIN}, 0);
- if (code) goto _err;
-
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL;
- iBlock++;
- if (iBlock < nBlock) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
-
- ASSERT(pRow == NULL && pBlock == NULL);
- } else {
- int32_t c = tBlockCmprFn(&(SBlock){.maxKey = TSDBROW_KEY(pRow), .minKey = TSDBROW_KEY(pRow)}, pBlock);
- if (c > 0) {
- // only disk data
- code = tsdbCommitTableDiskData(pCommitter, pBlock, pBlockIdx);
- if (code) goto _err;
-
- iBlock++;
- if (iBlock < nBlock) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
- } else if (c < 0) {
- // only memory data
- code = tsdbCommitTableMemData(pCommitter, pIter, pBlock->minKey, 1);
- if (code) goto _err;
-
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL;
- } else {
- // merge memory and disk
- int32_t nOvlp = tsdbGetOvlpNRow(pIter, pBlock);
- ASSERT(nOvlp);
- if (pBlock->nRow + nOvlp <= pCommitter->maxRow && pBlock->nSubBlock < TSDB_MAX_SUBBLOCKS) {
- code = tsdbMergeAsSubBlock(pCommitter, pIter, pBlock);
- if (code) goto _err;
- } else {
- TSDBKEY toKey = {.ts = pCommitter->maxKey + 1, .version = VERSION_MIN};
- int8_t toDataOnly = 0;
-
- if (iBlock < nBlock - 1) {
- toDataOnly = 1;
-
- SBlock nextBlock = {0};
- tBlockReset(&nextBlock);
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock + 1, &nextBlock, tGetBlock);
- toKey = nextBlock.minKey;
- }
-
- code = tsdbMergeTableData(pCommitter, pIter, pBlock, toKey, toDataOnly);
- if (code) goto _err;
- }
-
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL;
- iBlock++;
- if (iBlock < nBlock) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
- }
- }
- } else if (pBlock) {
- code = tsdbCommitTableDiskData(pCommitter, pBlock, pBlockIdx);
+ // .data merge
+ while (pBlock && pRow) {
+ int32_t c = tBlockCmprFn(pBlock, &(SBlock){.minKey = TSDBROW_KEY(pRow), .maxKey = TSDBROW_KEY(pRow)});
+ if (c < 0) { // disk
+ code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock);
if (code) goto _err;
+ // next
iBlock++;
- if (iBlock < nBlock) {
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
} else {
pBlock = NULL;
}
- } else {
- code =
- tsdbCommitTableMemData(pCommitter, pIter, (TSDBKEY){.ts = pCommitter->maxKey + 1, .version = VERSION_MIN}, 0);
+ } else if (c > 0) { // memory
+ code = tsdbCommitTableMemData(pCommitter, pIter, pBlock->minKey);
if (code) goto _err;
+ // next
pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL;
- ASSERT(pRow == NULL);
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pRow = NULL;
+ }
+ } else { // merge
+ int32_t nOvlp = tsdbGetNumOfRowsLessThan(pIter, pBlock->maxKey);
+
+ ASSERT(nOvlp > 0);
+
+ if (pBlock->nRow + nOvlp <= pCommitter->maxRow && pBlock->nSubBlock < TSDB_MAX_SUBBLOCKS) {
+ code = tsdbMergeAsSubBlock(pCommitter, pIter, pBlock);
+ if (code) goto _err;
+ } else {
+ code = tsdbMergeCommitData(pCommitter, pIter, pBlock);
+ if (code) goto _err;
+ }
+
+ // next
+ pRow = tsdbTbDataIterGet(pIter);
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pRow = NULL;
+ }
+ iBlock++;
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
+ } else {
+ pBlock = NULL;
+ }
}
}
- // end =====================
- code = tsdbCommitTableDataEnd(pCommitter, suid, uid);
+ while (pBlock) {
+ code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock);
+ if (code) goto _err;
+
+ // next
+ iBlock++;
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
+ } else {
+ pBlock = NULL;
+ }
+ }
+
+ // .data append and .last merge
+ code = tsdbMergeCommitLast(pCommitter, pIter);
if (code) goto _err;
-_exit:
- if (pIter) {
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow) pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow));
+ // end
+ if (pCommitter->dWriter.mBlock.nItem > 0) {
+ SBlockIdx blockIdx = {.suid = pTbData->suid, .uid = pTbData->uid};
+ code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx);
+ if (code) goto _err;
+
+ if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
}
+
+_exit:
+ pRow = tsdbTbDataIterGet(pIter);
+ if (pRow) {
+ pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow));
+ }
+
return code;
_err:
- tsdbError("vgId:%d, tsdb commit table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb commit table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
return code;
}
static int32_t tsdbCommitFileDataEnd(SCommitter *pCommitter) {
int32_t code = 0;
- // write blockIdx
- code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx, NULL);
+ // write aBlockIdx
+ code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx);
+ if (code) goto _err;
+
+ // write aBlockL
+ code = tsdbWriteBlockL(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockL);
if (code) goto _err;
// update file header
@@ -890,6 +1111,98 @@ _err:
return code;
}
+static int32_t tsdbMoveCommitData(SCommitter *pCommitter, TABLEID toTable) {
+ int32_t code = 0;
+
+ // .data
+ while (true) {
+ if (pCommitter->dReader.pBlockIdx == NULL || tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, &toTable) >= 0) break;
+
+ SBlockIdx blockIdx = *pCommitter->dReader.pBlockIdx;
+ code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx);
+ if (code) goto _err;
+
+ if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
+ code = tsdbCommitterNextTableData(pCommitter);
+ if (code) goto _err;
+ }
+
+ // .last
+ while (true) {
+ if (pCommitter->dReader.pRowInfo == NULL || tTABLEIDCmprFn(pCommitter->dReader.pRowInfo, &toTable) >= 0) break;
+
+ SBlockData *pBlockDataR = &pCommitter->dReader.bDatal;
+ SBlockData *pBlockDataW = &pCommitter->dWriter.bDatal;
+ tb_uid_t suid = pCommitter->dReader.pRowInfo->suid;
+ tb_uid_t uid = pCommitter->dReader.pRowInfo->uid;
+
+ ASSERT((pBlockDataR->suid && !pBlockDataR->uid) || (!pBlockDataR->suid && pBlockDataR->uid));
+ ASSERT(pBlockDataR->nRow > 0);
+
+ // commit and reset block data schema if need
+ if (pBlockDataW->suid || pBlockDataW->uid) {
+ if (pBlockDataW->suid != suid || pBlockDataW->suid == 0) {
+ if (pBlockDataW->nRow > 0) {
+ code = tsdbCommitLastBlock(pCommitter);
+ if (code) goto _err;
+ }
+ tBlockDataReset(pBlockDataW);
+ }
+ }
+
+ // set block data schema if need
+ if (pBlockDataW->suid == 0 && pBlockDataW->uid == 0) {
+ code = tsdbCommitterUpdateTableSchema(pCommitter, suid, uid);
+ if (code) goto _err;
+
+ code = tBlockDataInit(pBlockDataW, suid, suid ? 0 : uid, pCommitter->skmTable.pTSchema);
+ if (code) goto _err;
+ }
+
+ // check if it can make sure that one table data in one block
+ int32_t nRow = 0;
+ if (pBlockDataR->suid) {
+ int32_t iRow = pCommitter->dReader.iRow;
+ while ((iRow < pBlockDataR->nRow) && (pBlockDataR->aUid[iRow] == uid)) {
+ nRow++;
+ iRow++;
+ }
+ } else {
+ ASSERT(pCommitter->dReader.iRow == 0);
+ nRow = pBlockDataR->nRow;
+ }
+
+ ASSERT(nRow > 0 && nRow < pCommitter->minRow);
+
+ if (pBlockDataW->nRow + nRow > pCommitter->maxRow) {
+ ASSERT(pBlockDataW->nRow > 0);
+
+ code = tsdbCommitLastBlock(pCommitter);
+ if (code) goto _err;
+ }
+
+ while (nRow > 0) {
+ code = tBlockDataAppendRow(pBlockDataW, &pCommitter->dReader.pRowInfo->row, NULL, uid);
+ if (code) goto _err;
+
+ code = tsdbCommitterNextLastRow(pCommitter);
+ if (code) goto _err;
+
+ nRow--;
+ }
+ }
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb move commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
static int32_t tsdbCommitFileData(SCommitter *pCommitter) {
int32_t code = 0;
STsdb *pTsdb = pCommitter->pTsdb;
@@ -900,59 +1213,30 @@ static int32_t tsdbCommitFileData(SCommitter *pCommitter) {
if (code) goto _err;
// commit file data impl
- int32_t iTbData = 0;
- int32_t nTbData = taosArrayGetSize(pMemTable->aTbData);
- int32_t iBlockIdx = 0;
- int32_t nBlockIdx = taosArrayGetSize(pCommitter->dReader.aBlockIdx);
- STbData *pTbData;
- SBlockIdx *pBlockIdx;
+ for (int32_t iTbData = 0; iTbData < taosArrayGetSize(pCommitter->aTbDataP); iTbData++) {
+ STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData);
- ASSERT(nTbData > 0);
+ // move commit until current (suid, uid)
+ code = tsdbMoveCommitData(pCommitter, *(TABLEID *)pTbData);
+ if (code) goto _err;
- pTbData = (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData);
- pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL;
- while (pTbData || pBlockIdx) {
- if (pTbData && pBlockIdx) {
- int32_t c = tTABLEIDCmprFn(pTbData, pBlockIdx);
+ // commit current table data
+ code = tsdbCommitTableData(pCommitter, pTbData);
+ if (code) goto _err;
- if (c == 0) {
- goto _commit_table_mem_and_disk;
- } else if (c < 0) {
- goto _commit_table_mem_data;
- } else {
- goto _commit_table_disk_data;
- }
- } else if (pBlockIdx) {
- goto _commit_table_disk_data;
- } else {
- goto _commit_table_mem_data;
+ // move next reader table data if need
+ if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pBlockIdx) == 0) {
+ code = tsdbCommitterNextTableData(pCommitter);
+ if (code) goto _err;
}
+ }
- _commit_table_mem_data:
- code = tsdbCommitTableData(pCommitter, pTbData, NULL);
+ code = tsdbMoveCommitData(pCommitter, (TABLEID){.suid = INT64_MAX, .uid = INT64_MAX});
+ if (code) goto _err;
+
+ if (pCommitter->dWriter.bDatal.nRow > 0) {
+ code = tsdbCommitLastBlock(pCommitter);
if (code) goto _err;
-
- iTbData++;
- pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL;
- continue;
-
- _commit_table_disk_data:
- code = tsdbCommitTableData(pCommitter, NULL, pBlockIdx);
- if (code) goto _err;
-
- iBlockIdx++;
- pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL;
- continue;
-
- _commit_table_mem_and_disk:
- code = tsdbCommitTableData(pCommitter, pTbData, pBlockIdx);
- if (code) goto _err;
-
- iBlockIdx++;
- pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL;
- iTbData++;
- pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL;
- continue;
}
// commit file data end
@@ -987,6 +1271,11 @@ static int32_t tsdbStartCommit(STsdb *pTsdb, SCommitter *pCommitter) {
pCommitter->minRow = pTsdb->pVnode->config.tsdbCfg.minRows;
pCommitter->maxRow = pTsdb->pVnode->config.tsdbCfg.maxRows;
pCommitter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression;
+ pCommitter->aTbDataP = tsdbMemTableGetTbDataArray(pTsdb->imem);
+ if (pCommitter->aTbDataP == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
code = tsdbFSCopy(pTsdb, &pCommitter->fs);
if (code) goto _err;
@@ -1001,22 +1290,42 @@ _err:
static int32_t tsdbCommitDataStart(SCommitter *pCommitter) {
int32_t code = 0;
+ // Reader
pCommitter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
if (pCommitter->dReader.aBlockIdx == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
+ code = tBlockDataCreate(&pCommitter->dReader.bData);
+ if (code) goto _exit;
+
+ pCommitter->dReader.aBlockL = taosArrayInit(0, sizeof(SBlockL));
+ if (pCommitter->dReader.aBlockL == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ code = tBlockDataCreate(&pCommitter->dReader.bDatal);
+ if (code) goto _exit;
+
+ // Writer
pCommitter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
if (pCommitter->dWriter.aBlockIdx == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- code = tBlockDataInit(&pCommitter->dReader.bData);
+ pCommitter->dWriter.aBlockL = taosArrayInit(0, sizeof(SBlockL));
+ if (pCommitter->dWriter.aBlockL == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ code = tBlockDataCreate(&pCommitter->dWriter.bData);
if (code) goto _exit;
- code = tBlockDataInit(&pCommitter->dWriter.bData);
+ code = tBlockDataCreate(&pCommitter->dWriter.bDatal);
if (code) goto _exit;
_exit:
@@ -1024,12 +1333,19 @@ _exit:
}
static void tsdbCommitDataEnd(SCommitter *pCommitter) {
+ // Reader
taosArrayDestroy(pCommitter->dReader.aBlockIdx);
tMapDataClear(&pCommitter->dReader.mBlock);
- tBlockDataClear(&pCommitter->dReader.bData, 1);
+ tBlockDataDestroy(&pCommitter->dReader.bData, 1);
+ taosArrayDestroy(pCommitter->dReader.aBlockL);
+ tBlockDataDestroy(&pCommitter->dReader.bDatal, 1);
+
+ // Writer
taosArrayDestroy(pCommitter->dWriter.aBlockIdx);
+ taosArrayDestroy(pCommitter->dWriter.aBlockL);
tMapDataClear(&pCommitter->dWriter.mBlock);
- tBlockDataClear(&pCommitter->dWriter.bData, 1);
+ tBlockDataDestroy(&pCommitter->dWriter.bData, 1);
+ tBlockDataDestroy(&pCommitter->dWriter.bDatal, 1);
tTSchemaDestroy(pCommitter->skmTable.pTSchema);
tTSchemaDestroy(pCommitter->skmRow.pTSchema);
}
@@ -1049,9 +1365,6 @@ static int32_t tsdbCommitData(SCommitter *pCommitter) {
// impl ====================
pCommitter->nextKey = pMemTable->minKey;
while (pCommitter->nextKey < TSKEY_MAX) {
- pCommitter->commitFid = tsdbKeyFid(pCommitter->nextKey, pCommitter->minutes, pCommitter->precision);
- tsdbFidKeyRange(pCommitter->commitFid, pCommitter->minutes, pCommitter->precision, &pCommitter->minKey,
- &pCommitter->maxKey);
code = tsdbCommitFileData(pCommitter);
if (code) goto _err;
}
@@ -1088,13 +1401,13 @@ static int32_t tsdbCommitDel(SCommitter *pCommitter) {
int32_t iDelIdx = 0;
int32_t nDelIdx = taosArrayGetSize(pCommitter->aDelIdx);
int32_t iTbData = 0;
- int32_t nTbData = taosArrayGetSize(pMemTable->aTbData);
+ int32_t nTbData = taosArrayGetSize(pCommitter->aTbDataP);
STbData *pTbData;
SDelIdx *pDelIdx;
ASSERT(nTbData > 0);
- pTbData = (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData);
+ pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData);
pDelIdx = (iDelIdx < nDelIdx) ? (SDelIdx *)taosArrayGet(pCommitter->aDelIdx, iDelIdx) : NULL;
while (true) {
if (pTbData == NULL && pDelIdx == NULL) break;
@@ -1120,7 +1433,7 @@ static int32_t tsdbCommitDel(SCommitter *pCommitter) {
if (code) goto _err;
iTbData++;
- pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL;
+ pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData) : NULL;
continue;
_commit_disk_del:
@@ -1136,7 +1449,7 @@ static int32_t tsdbCommitDel(SCommitter *pCommitter) {
if (code) goto _err;
iTbData++;
- pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL;
+ pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData) : NULL;
iDelIdx++;
pDelIdx = (iDelIdx < nDelIdx) ? (SDelIdx *)taosArrayGet(pCommitter->aDelIdx, iDelIdx) : NULL;
continue;
@@ -1184,6 +1497,7 @@ static int32_t tsdbEndCommit(SCommitter *pCommitter, int32_t eno) {
tsdbUnrefMemTable(pMemTable);
tsdbFSDestroy(&pCommitter->fs);
+ taosArrayDestroy(pCommitter->aTbDataP);
tsdbInfo("vgId:%d, tsdb end commit", TD_VID(pTsdb->pVnode));
return code;
diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c
index 74f1aef1fc..247de99338 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFS.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFS.c
@@ -576,10 +576,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- fSet.pHeadF->nRef = 0;
- fSet.pHeadF->commitID = pSet->pHeadF->commitID;
- fSet.pHeadF->size = pSet->pHeadF->size;
- fSet.pHeadF->offset = pSet->pHeadF->offset;
+ *fSet.pHeadF = *pSet->pHeadF;
// data
fSet.pDataF = (SDataFile *)taosMemoryMalloc(sizeof(SDataFile));
@@ -587,9 +584,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- fSet.pDataF->nRef = 0;
- fSet.pDataF->commitID = pSet->pDataF->commitID;
- fSet.pDataF->size = pSet->pDataF->size;
+ *fSet.pDataF = *pSet->pDataF;
// data
fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
@@ -597,9 +592,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- fSet.pLastF->nRef = 0;
- fSet.pLastF->commitID = pSet->pLastF->commitID;
- fSet.pLastF->size = pSet->pLastF->size;
+ *fSet.pLastF = *pSet->pLastF;
// last
fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
@@ -607,9 +600,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- fSet.pSmaF->nRef = 0;
- fSet.pSmaF->commitID = pSet->pSmaF->commitID;
- fSet.pSmaF->size = pSet->pSmaF->size;
+ *fSet.pSmaF = *pSet->pSmaF;
if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c
index 52a102f911..00d2ac848f 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFile.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFile.c
@@ -58,6 +58,7 @@ int32_t tPutLastFile(uint8_t *p, SLastFile *pLastFile) {
n += tPutI64v(p ? p + n : p, pLastFile->commitID);
n += tPutI64v(p ? p + n : p, pLastFile->size);
+ n += tPutI64v(p ? p + n : p, pLastFile->offset);
return n;
}
@@ -67,6 +68,7 @@ static int32_t tGetLastFile(uint8_t *p, SLastFile *pLastFile) {
n += tGetI64v(p + n, &pLastFile->commitID);
n += tGetI64v(p + n, &pLastFile->size);
+ n += tGetI64v(p + n, &pLastFile->offset);
return n;
}
@@ -186,11 +188,16 @@ int32_t tPutDFileSet(uint8_t *p, SDFileSet *pSet) {
n += tPutI32v(p ? p + n : p, pSet->diskId.level);
n += tPutI32v(p ? p + n : p, pSet->diskId.id);
n += tPutI32v(p ? p + n : p, pSet->fid);
+
+ // data
n += tPutHeadFile(p ? p + n : p, pSet->pHeadF);
n += tPutDataFile(p ? p + n : p, pSet->pDataF);
- n += tPutLastFile(p ? p + n : p, pSet->pLastF);
n += tPutSmaFile(p ? p + n : p, pSet->pSmaF);
+ // last
+ n += tPutU8(p ? p + n : p, 1); // for future compatibility
+ n += tPutLastFile(p ? p + n : p, pSet->pLastF);
+
return n;
}
@@ -200,11 +207,17 @@ int32_t tGetDFileSet(uint8_t *p, SDFileSet *pSet) {
n += tGetI32v(p + n, &pSet->diskId.level);
n += tGetI32v(p + n, &pSet->diskId.id);
n += tGetI32v(p + n, &pSet->fid);
+
+ // data
n += tGetHeadFile(p + n, pSet->pHeadF);
n += tGetDataFile(p + n, pSet->pDataF);
- n += tGetLastFile(p + n, pSet->pLastF);
n += tGetSmaFile(p + n, pSet->pSmaF);
+ // last
+ uint8_t nLast;
+ n += tGetU8(p + n, &nLast);
+ n += tGetLastFile(p + n, pSet->pLastF);
+
return n;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
index 8ae0e824cf..a6628463f8 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
@@ -15,6 +15,7 @@
#include "tsdb.h"
+#define MEM_MIN_HASH 1024
#define SL_MAX_LEVEL 5
#define SL_NODE_SIZE(l) (sizeof(SMemSkipListNode) + sizeof(SMemSkipListNode *) * (l)*2)
@@ -45,12 +46,12 @@ int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable) {
pMemTable->nRef = 1;
pMemTable->minKey = TSKEY_MAX;
pMemTable->maxKey = TSKEY_MIN;
- pMemTable->minVersion = VERSION_MAX;
- pMemTable->maxVersion = VERSION_MIN;
pMemTable->nRow = 0;
pMemTable->nDel = 0;
- pMemTable->aTbData = taosArrayInit(128, sizeof(STbData *));
- if (pMemTable->aTbData == NULL) {
+ pMemTable->nTbData = 0;
+ pMemTable->nBucket = MEM_MIN_HASH;
+ pMemTable->aBucket = (STbData **)taosMemoryCalloc(pMemTable->nBucket, sizeof(STbData *));
+ if (pMemTable->aBucket == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pMemTable);
goto _err;
@@ -68,37 +69,30 @@ _err:
void tsdbMemTableDestroy(SMemTable *pMemTable) {
if (pMemTable) {
vnodeBufPoolUnRef(pMemTable->pPool);
- taosArrayDestroy(pMemTable->aTbData);
+ taosMemoryFree(pMemTable->aBucket);
taosMemoryFree(pMemTable);
}
}
-static int32_t tbDataPCmprFn(const void *p1, const void *p2) {
- STbData *pTbData1 = *(STbData **)p1;
- STbData *pTbData2 = *(STbData **)p2;
+static FORCE_INLINE STbData *tsdbGetTbDataFromMemTableImpl(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid) {
+ STbData *pTbData = pMemTable->aBucket[TABS(uid) % pMemTable->nBucket];
- if (pTbData1->suid < pTbData2->suid) {
- return -1;
- } else if (pTbData1->suid > pTbData2->suid) {
- return 1;
+ while (pTbData) {
+ if (pTbData->uid == uid) break;
+ pTbData = pTbData->next;
}
- if (pTbData1->uid < pTbData2->uid) {
- return -1;
- } else if (pTbData1->uid > pTbData2->uid) {
- return 1;
- }
-
- return 0;
+ return pTbData;
}
-void tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData) {
- STbData *pTbData = &(STbData){.suid = suid, .uid = uid};
+
+STbData *tsdbGetTbDataFromMemTable(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid) {
+ STbData *pTbData;
taosRLockLatch(&pMemTable->latch);
- void *p = taosArraySearch(pMemTable->aTbData, &pTbData, tbDataPCmprFn, TD_EQ);
+ pTbData = tsdbGetTbDataFromMemTableImpl(pMemTable, suid, uid);
taosRUnLockLatch(&pMemTable->latch);
- *ppTbData = p ? *(STbData **)p : NULL;
+ return pTbData;
}
int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgIter, SSubmitBlk *pBlock,
@@ -108,29 +102,21 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgI
STbData *pTbData = NULL;
tb_uid_t suid = pMsgIter->suid;
tb_uid_t uid = pMsgIter->uid;
- int32_t sverNew;
- // check if table exists (todo: refact)
- SMetaReader mr = {0};
- // SMetaEntry me = {0};
- metaReaderInit(&mr, pTsdb->pVnode->pMeta, 0);
- if (metaGetTableEntryByUid(&mr, pMsgIter->uid) < 0) {
- metaReaderClear(&mr);
- code = TSDB_CODE_PAR_TABLE_NOT_EXIST;
+ SMetaInfo info;
+ code = metaGetInfo(pTsdb->pVnode->pMeta, uid, &info);
+ if (code) {
+ code = TSDB_CODE_TDB_TABLE_NOT_EXIST;
goto _err;
}
- if (pRsp->tblFName) strcat(pRsp->tblFName, mr.me.name);
-
- if (mr.me.type == TSDB_NORMAL_TABLE) {
- sverNew = mr.me.ntbEntry.schemaRow.version;
- } else {
- tDecoderClear(&mr.coder);
-
- metaGetTableEntryByUid(&mr, mr.me.ctbEntry.suid);
- sverNew = mr.me.stbEntry.schemaRow.version;
+ if (info.suid != suid) {
+ code = TSDB_CODE_INVALID_MSG;
+ goto _err;
}
- metaReaderClear(&mr);
- pRsp->sver = sverNew;
+ if (info.suid) {
+ metaGetInfo(pTsdb->pVnode->pMeta, info.suid, &info);
+ }
+ pRsp->sver = info.skmVer;
// create/get STbData to op
code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData);
@@ -157,7 +143,17 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
SVBufPool *pPool = pTsdb->pVnode->inUse;
TSDBKEY lastKey = {.version = version, .ts = eKey};
- // check if table exists (todo)
+ // check if table exists
+ SMetaInfo info;
+ code = metaGetInfo(pTsdb->pVnode->pMeta, uid, &info);
+ if (code) {
+ code = TSDB_CODE_TDB_TABLE_NOT_EXIST;
+ goto _err;
+ }
+ if (info.suid != suid) {
+ code = TSDB_CODE_INVALID_MSG;
+ goto _err;
+ }
code = tsdbGetOrCreateTbData(pMemTable, suid, uid, &pTbData);
if (code) {
@@ -182,10 +178,6 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
pTbData->pTail = pDelData;
}
- // update the state of pMemTable and other (todo)
-
- pMemTable->minVersion = TMIN(pMemTable->minVersion, version);
- pMemTable->maxVersion = TMAX(pMemTable->maxVersion, version);
pMemTable->nDel++;
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) {
@@ -196,9 +188,9 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey);
}
- tsdbError("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
- " since %s",
- TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code));
+ tsdbInfo("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
+ " since %s",
+ TD_VID(pTsdb->pVnode), suid, uid, sKey, eKey, tstrerror(code));
return code;
_err:
@@ -318,18 +310,44 @@ _exit:
return pIter->pRow;
}
+static int32_t tsdbMemTableRehash(SMemTable *pMemTable) {
+ int32_t code = 0;
+
+ int32_t nBucket = pMemTable->nBucket * 2;
+ STbData **aBucket = (STbData **)taosMemoryCalloc(nBucket, sizeof(STbData *));
+ if (aBucket == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ for (int32_t iBucket = 0; iBucket < pMemTable->nBucket; iBucket++) {
+ STbData *pTbData = pMemTable->aBucket[iBucket];
+
+ while (pTbData) {
+ STbData *pNext = pTbData->next;
+
+ int32_t idx = TABS(pTbData->uid) % nBucket;
+ pTbData->next = aBucket[idx];
+ aBucket[idx] = pTbData;
+
+ pTbData = pNext;
+ }
+ }
+
+ taosMemoryFree(pMemTable->aBucket);
+ pMemTable->nBucket = nBucket;
+ pMemTable->aBucket = aBucket;
+
+_exit:
+ return code;
+}
+
static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid_t uid, STbData **ppTbData) {
- int32_t code = 0;
- int32_t idx = 0;
- STbData *pTbData = NULL;
- STbData *pTbDataT = &(STbData){.suid = suid, .uid = uid};
+ int32_t code = 0;
// get
- idx = taosArraySearchIdx(pMemTable->aTbData, &pTbDataT, tbDataPCmprFn, TD_GE);
- if (idx >= 0) {
- pTbData = (STbData *)taosArrayGetP(pMemTable->aTbData, idx);
- if (tbDataPCmprFn(&pTbDataT, &pTbData) == 0) goto _exit;
- }
+ STbData *pTbData = tsdbGetTbDataFromMemTableImpl(pMemTable, suid, uid);
+ if (pTbData) goto _exit;
// create
SVBufPool *pPool = pMemTable->pTsdb->pVnode->inUse;
@@ -344,9 +362,6 @@ static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid
pTbData->uid = uid;
pTbData->minKey = TSKEY_MAX;
pTbData->maxKey = TSKEY_MIN;
- pTbData->minVersion = VERSION_MAX;
- pTbData->maxVersion = VERSION_MIN;
- pTbData->maxSkmVer = -1;
pTbData->pHead = NULL;
pTbData->pTail = NULL;
pTbData->sl.seed = taosRand();
@@ -365,21 +380,23 @@ static int32_t tsdbGetOrCreateTbData(SMemTable *pMemTable, tb_uid_t suid, tb_uid
SL_NODE_FORWARD(pTbData->sl.pTail, iLevel) = NULL;
}
- void *p;
- if (idx < 0) {
- idx = taosArrayGetSize(pMemTable->aTbData);
+ taosWLockLatch(&pMemTable->latch);
+
+ if (pMemTable->nTbData >= pMemTable->nBucket) {
+ code = tsdbMemTableRehash(pMemTable);
+ if (code) {
+ taosWUnLockLatch(&pMemTable->latch);
+ goto _err;
+ }
}
- taosWLockLatch(&pMemTable->latch);
- p = taosArrayInsert(pMemTable->aTbData, idx, &pTbData);
+ int32_t idx = TABS(uid) % pMemTable->nBucket;
+ pTbData->next = pMemTable->aBucket[idx];
+ pMemTable->aBucket[idx] = pTbData;
+ pMemTable->nTbData++;
+
taosWUnLockLatch(&pMemTable->latch);
- tsdbDebug("vgId:%d, add table data %p at idx:%d", TD_VID(pMemTable->pTsdb->pVnode), pTbData, idx);
-
- if (p == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
_exit:
*ppTbData = pTbData;
return code;
@@ -589,15 +606,9 @@ static int32_t tsdbInsertTableDataImpl(SMemTable *pMemTable, STbData *pTbData, i
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, pLastRow, pMemTable->pTsdb);
}
- pTbData->minVersion = TMIN(pTbData->minVersion, version);
- pTbData->maxVersion = TMAX(pTbData->maxVersion, version);
- pTbData->maxSkmVer = TMAX(pTbData->maxSkmVer, pMsgIter->sversion);
-
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);
pMemTable->maxKey = TMAX(pMemTable->maxKey, pTbData->maxKey);
- pMemTable->minVersion = TMIN(pMemTable->minVersion, pTbData->minVersion);
- pMemTable->maxVersion = TMAX(pMemTable->maxVersion, pTbData->maxVersion);
pMemTable->nRow += nRow;
pRsp->numOfRows = nRow;
@@ -622,3 +633,41 @@ void tsdbUnrefMemTable(SMemTable *pMemTable) {
tsdbMemTableDestroy(pMemTable);
}
}
+
+static FORCE_INLINE int32_t tbDataPCmprFn(const void *p1, const void *p2) {
+ STbData *pTbData1 = *(STbData **)p1;
+ STbData *pTbData2 = *(STbData **)p2;
+
+ if (pTbData1->suid < pTbData2->suid) {
+ return -1;
+ } else if (pTbData1->suid > pTbData2->suid) {
+ return 1;
+ }
+
+ if (pTbData1->uid < pTbData2->uid) {
+ return -1;
+ } else if (pTbData1->uid > pTbData2->uid) {
+ return 1;
+ }
+
+ return 0;
+}
+
+SArray *tsdbMemTableGetTbDataArray(SMemTable *pMemTable) {
+ SArray *aTbDataP = taosArrayInit(pMemTable->nTbData, sizeof(STbData *));
+ if (aTbDataP == NULL) goto _exit;
+
+ for (int32_t iBucket = 0; iBucket < pMemTable->nBucket; iBucket++) {
+ STbData *pTbData = pMemTable->aBucket[iBucket];
+
+ while (pTbData) {
+ taosArrayPush(aTbDataP, &pTbData);
+ pTbData = pTbData->next;
+ }
+ }
+
+ taosArraySort(aTbDataP, tbDataPCmprFn);
+
+_exit:
+ return aTbDataP;
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c
index be2828d187..ec760e3c57 100644
--- a/source/dnode/vnode/src/tsdb/tsdbOpen.c
+++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c
@@ -86,7 +86,7 @@ int tsdbClose(STsdb **pTsdb) {
if (*pTsdb) {
taosThreadRwlockDestroy(&(*pTsdb)->rwLock);
tsdbFSClose(*pTsdb);
- tsdbCloseCache((*pTsdb)->lruCache);
+ tsdbCloseCache(*pTsdb);
taosMemoryFreeClear(*pTsdb);
}
return 0;
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 0831f3d75a..336053911e 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -15,7 +15,10 @@
#include "osDef.h"
#include "tsdb.h"
+
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
+#define ALL_ROWS_CHECKED_INDEX (INT16_MIN)
+#define DEFAULT_ROW_INDEX_VAL (-1)
typedef enum {
EXTERNAL_ROWS_PREV = 0x1,
@@ -29,16 +32,23 @@ typedef struct {
bool hasVal;
} SIterInfo;
+typedef struct {
+ int32_t numOfBlocks;
+ int32_t numOfLastBlocks;
+} SBlockNumber;
+
typedef struct STableBlockScanInfo {
uint64_t uid;
TSKEY lastKey;
- SMapData mapData; // block info (compressed)
- SArray* pBlockList; // block data index list
- SIterInfo iter; // mem buffer skip list iterator
- SIterInfo iiter; // imem buffer skip list iterator
- SArray* delSkyline; // delete info for this table
- int32_t fileDelIndex;
- bool iterInit; // whether to initialize the in-memory skip list iterator or not
+ SMapData mapData; // block info (compressed)
+ SArray* pBlockList; // block data index list
+ SIterInfo iter; // mem buffer skip list iterator
+ SIterInfo iiter; // imem buffer skip list iterator
+ SArray* delSkyline; // delete info for this table
+ int32_t fileDelIndex; // file block delete index
+ int32_t lastBlockDelIndex;// delete index for last block
+ bool iterInit; // whether to initialize the in-memory skip list iterator or not
+ int16_t indexInBlockL;// row position in last block
} STableBlockScanInfo;
typedef struct SBlockOrderWrapper {
@@ -71,11 +81,28 @@ typedef struct SBlockLoadSuppInfo {
char** buildBuf; // build string tmp buffer, todo remove it later after all string format being updated.
} SBlockLoadSuppInfo;
+typedef struct SVersionRange {
+ uint64_t minVer;
+ uint64_t maxVer;
+} SVersionRange;
+
+typedef struct SLastBlockReader {
+ SArray* pBlockL;
+ int32_t currentBlockIndex;
+ SBlockData lastBlockData;
+ STimeWindow window;
+ SVersionRange verRange;
+ int32_t order;
+ uint64_t uid;
+ int16_t* rowIndex; // row index ptr, usually from the STableBlockScanInfo->indexInBlockL
+} SLastBlockReader;
+
typedef struct SFilesetIter {
- int32_t numOfFiles; // number of total files
- int32_t index; // current accessed index in the list
- SArray* pFileList; // data file list
+ int32_t numOfFiles; // number of total files
+ int32_t index; // current accessed index in the list
+ SArray* pFileList; // data file list
int32_t order;
+ SLastBlockReader* pLastBlockReader; // last file block reader
} SFilesetIter;
typedef struct SFileDataBlockInfo {
@@ -87,9 +114,9 @@ typedef struct SFileDataBlockInfo {
typedef struct SDataBlockIter {
int32_t numOfBlocks;
int32_t index;
- SArray* blockList; // SArray
+ SArray* blockList; // SArray
int32_t order;
- SBlock block; // current SBlock data
+ SBlock block; // current SBlock data
SHashObj* pTableMap;
} SDataBlockIter;
@@ -100,11 +127,6 @@ typedef struct SFileBlockDumpInfo {
bool allDumped;
} SFileBlockDumpInfo;
-typedef struct SVersionRange {
- uint64_t minVer;
- uint64_t maxVer;
-} SVersionRange;
-
typedef struct SReaderStatus {
bool loadFromFile; // check file stage
SHashObj* pTableMap; // SHash
@@ -145,10 +167,11 @@ static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, i
static TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader);
static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader,
SRowMerger* pMerger);
+static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger);
static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader);
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid);
-static int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
+static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
int32_t rowIndex);
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
@@ -162,6 +185,9 @@ static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdb
static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* retentions, const char* idstr,
int8_t* pLevel);
static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level);
+static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader);
+static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
+static int32_t doBuildDataBlock(STsdbReader* pReader);
static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
@@ -182,7 +208,6 @@ static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
pSupInfo->buildBuf[i] = taosMemoryMalloc(pCol->info.bytes);
- // tsdbInfo("-------------------%d\n", pCol->info.bytes);
}
}
@@ -199,7 +224,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
}
for (int32_t j = 0; j < numOfTables; ++j) {
- STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid};
+ STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid, .indexInBlockL = DEFAULT_ROW_INDEX_VAL};
if (ASCENDING_TRAVERSE(pTsdbReader->order)) {
if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReader->window.skey) {
info.lastKey = pTsdbReader->window.skey;
@@ -293,15 +318,36 @@ static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* cap
}
// init file iterator
-static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, int32_t order, const char* idstr) {
+static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader/*int32_t order, const char* idstr*/) {
size_t numOfFileset = taosArrayGetSize(aDFileSet);
- pIter->index = ASCENDING_TRAVERSE(order) ? -1 : numOfFileset;
- pIter->order = order;
+ pIter->index = ASCENDING_TRAVERSE(pReader->order) ? -1 : numOfFileset;
+ pIter->order = pReader->order;
pIter->pFileList = aDFileSet;
pIter->numOfFiles = numOfFileset;
- tsdbDebug("init fileset iterator, total files:%d %s", pIter->numOfFiles, idstr);
+ if (pIter->pLastBlockReader == NULL) {
+ pIter->pLastBlockReader = taosMemoryCalloc(1, sizeof(struct SLastBlockReader));
+ if (pIter->pLastBlockReader == NULL) {
+ int32_t code = TSDB_CODE_OUT_OF_MEMORY;
+ tsdbError("failed to prepare the last block iterator, code:%d %s", tstrerror(code), pReader->idStr);
+ return code;
+ }
+
+ SLastBlockReader* pLReader = pIter->pLastBlockReader;
+ pLReader->pBlockL = taosArrayInit(4, sizeof(SBlockL));
+ pLReader->order = pReader->order;
+ pLReader->window = pReader->window;
+ pLReader->verRange = pReader->verRange;
+ pLReader->currentBlockIndex = -1;
+
+ int32_t code = tBlockDataCreate(&pLReader->lastBlockData);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ }
+
+ tsdbDebug("init fileset iterator, total files:%d %s", pIter->numOfFiles, pReader->idStr);
return TSDB_CODE_SUCCESS;
}
@@ -361,7 +407,7 @@ _err:
static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, SHashObj* pTableMap) {
pIter->order = order;
pIter->index = -1;
- pIter->numOfBlocks = -1;
+ pIter->numOfBlocks = 0;
if (pIter->blockList == NULL) {
pIter->blockList = taosArrayInit(4, sizeof(SFileDataBlockInfo));
} else {
@@ -419,7 +465,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
pReader->suid = pCond->suid;
pReader->order = pCond->order;
- pReader->capacity = capacity;
+ pReader->capacity = 4096;
pReader->idStr = (idstr != NULL) ? strdup(idstr) : NULL;
pReader->verRange = getQueryVerRange(pVnode, pCond, level);
pReader->type = pCond->type;
@@ -440,7 +486,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
pSup->tsColAgg.colId = PRIMARYKEY_TIMESTAMP_COL_ID;
- code = tBlockDataInit(&pReader->status.fileBlockData);
+ code = tBlockDataCreate(&pReader->status.fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
goto _end;
@@ -547,14 +593,14 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader,
SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx));
int64_t st = taosGetTimestampUs();
- int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx, NULL);
+ int32_t code = tsdbReadBlockIdx(pFileReader, aBlockIdx);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
size_t num = taosArrayGetSize(aBlockIdx);
if (num == 0) {
- taosArrayClear(aBlockIdx);
+ taosArrayDestroy(aBlockIdx);
return TSDB_CODE_SUCCESS;
}
@@ -594,24 +640,29 @@ _end:
return code;
}
-static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_t* numOfValidTables,
- int32_t* numOfBlocks) {
- size_t numOfTables = taosArrayGetSize(pIndexList);
- *numOfValidTables = 0;
-
- int64_t st = taosGetTimestampUs();
- size_t size = 0;
-
+static void cleanupTableScanInfo(SHashObj* pTableMap) {
STableBlockScanInfo* px = NULL;
while (1) {
- px = taosHashIterate(pReader->status.pTableMap, px);
+ px = taosHashIterate(pTableMap, px);
if (px == NULL) {
break;
}
+ // reset the index in last block when handing a new file
+ px->indexInBlockL = -1;
tMapDataClear(&px->mapData);
taosArrayClear(px->pBlockList);
}
+}
+
+static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray* pLastBlockIndex,
+ SBlockNumber * pBlockNum, SArray* pQualifiedLastBlock) {
+ int32_t numOfQTable = 0;
+ size_t sizeInDisk = 0;
+ size_t numOfTables = taosArrayGetSize(pIndexList);
+
+ int64_t st = taosGetTimestampUs();
+ cleanupTableScanInfo(pReader->status.pTableMap);
for (int32_t i = 0; i < numOfTables; ++i) {
SBlockIdx* pBlockIdx = taosArrayGet(pIndexList, i);
@@ -619,9 +670,9 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_
STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(int64_t));
tMapDataReset(&pScanInfo->mapData);
- tsdbReadBlock(pReader->pFileReader, pBlockIdx, &pScanInfo->mapData, NULL);
+ tsdbReadBlock(pReader->pFileReader, pBlockIdx, &pScanInfo->mapData);
- size += pScanInfo->mapData.nData;
+ sizeInDisk += pScanInfo->mapData.nData;
for (int32_t j = 0; j < pScanInfo->mapData.nItem; ++j) {
SBlock block = {0};
tMapDataGetItemByIdx(&pScanInfo->mapData, j, &block, tGetBlock);
@@ -632,7 +683,7 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_
}
// 2. version range check
- if (block.minVersion > pReader->verRange.maxVer || block.maxVersion < pReader->verRange.minVer) {
+ if (block.minVer > pReader->verRange.maxVer || block.maxVer < pReader->verRange.minVer) {
continue;
}
@@ -642,30 +693,54 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_
return TSDB_CODE_OUT_OF_MEMORY;
}
- (*numOfBlocks) += 1;
+ pBlockNum->numOfBlocks += 1;
}
if (pScanInfo->pBlockList != NULL && taosArrayGetSize(pScanInfo->pBlockList) > 0) {
- (*numOfValidTables) += 1;
+ numOfQTable += 1;
}
}
- double el = (taosGetTimestampUs() - st) / 1000.0;
- tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, size:%.2f Kb, elapsed time:%.2f ms %s",
- numOfTables, *numOfBlocks, *numOfValidTables, size / 1000.0, el, pReader->idStr);
+ size_t numOfLast = taosArrayGetSize(pLastBlockIndex);
+ for(int32_t i = 0; i < numOfLast; ++i) {
+ SBlockL* pLastBlock = taosArrayGet(pLastBlockIndex, i);
+ if (pLastBlock->suid != pReader->suid) {
+ continue;
+ }
- pReader->cost.numOfBlocks += (*numOfBlocks);
+ {
+ // 1. time range check
+ if (pLastBlock->minKey > pReader->window.ekey || pLastBlock->maxKey < pReader->window.skey) {
+ continue;
+ }
+
+ // 2. version range check
+ if (pLastBlock->minVer > pReader->verRange.maxVer || pLastBlock->maxVer < pReader->verRange.minVer) {
+ continue;
+ }
+
+ pBlockNum->numOfLastBlocks += 1;
+ taosArrayPush(pQualifiedLastBlock, pLastBlock);
+ }
+ }
+
+ int32_t total = pBlockNum->numOfLastBlocks + pBlockNum->numOfBlocks;
+
+ double el = (taosGetTimestampUs() - st) / 1000.0;
+ tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, lastBlock:%d, size:%.2f Kb, elapsed time:%.2f ms %s",
+ numOfTables, total, numOfQTable, pBlockNum->numOfLastBlocks, sizeInDisk
+ / 1000.0, el, pReader->idStr);
+
+ pReader->cost.numOfBlocks += total;
pReader->cost.headFileLoadTime += el;
return TSDB_CODE_SUCCESS;
}
-// todo remove pblock parameter
-static void setBlockAllDumped(SFileBlockDumpInfo* pDumpInfo, SBlock* pBlock, int32_t order) {
+static void setBlockAllDumped(SFileBlockDumpInfo* pDumpInfo, int64_t maxKey, int32_t order) {
int32_t step = ASCENDING_TRAVERSE(order) ? 1 : -1;
-
pDumpInfo->allDumped = true;
- pDumpInfo->lastKey = pBlock->maxKey.ts + step;
+ pDumpInfo->lastKey = maxKey + step;
}
static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_t colIndex, SColVal* pColVal,
@@ -685,8 +760,13 @@ static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_
}
static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter) {
- SFileDataBlockInfo* pFBlockInfo = taosArrayGet(pBlockIter->blockList, pBlockIter->index);
- return pFBlockInfo;
+ if (taosArrayGetSize(pBlockIter->blockList) == 0) {
+ ASSERT(pBlockIter->numOfBlocks == taosArrayGetSize(pBlockIter->blockList));
+ return NULL;
+ }
+
+ SFileDataBlockInfo* pBlockInfo = taosArrayGet(pBlockIter->blockList, pBlockIter->index);
+ return pBlockInfo;
}
static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; }
@@ -736,19 +816,20 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
pColData = taosArrayGet(pResBlock->pDataBlock, i);
SColData* pData = tBlockDataGetColDataByIdx(pBlockData, colIndex);
-
- if (pData->cid == pColData->info.colId) {
+ if (pData->cid < pColData->info.colId) {
+ colIndex += 1;
+ } else if (pData->cid == pColData->info.colId) {
for (int32_t j = pDumpInfo->rowIndex; j < endIndex && j >= 0; j += step) {
tColDataGetValue(pData, j, &cv);
doCopyColVal(pColData, rowIndex++, i, &cv, pSupInfo);
}
colIndex += 1;
+ i += 1;
ASSERT(rowIndex == remain);
} else { // the specified column does not exist in file block, fill with null data
colDataAppendNNULL(pColData, 0, remain);
+ i += 1;
}
-
- i += 1;
}
while (i < numOfOutputCols) {
@@ -760,7 +841,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
pResBlock->info.rows = remain;
pDumpInfo->rowIndex += step * remain;
- setBlockAllDumped(pDumpInfo, pBlock, pReader->order);
+ setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
pReader->cost.blockLoadTime += elapsedTime;
@@ -769,47 +850,77 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
pReader, pBlockIter->index, pFBlock->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, remain, unDumpedRows,
- pBlock->minVersion, pBlock->maxVersion, elapsedTime, pReader->idStr);
+ pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
return TSDB_CODE_SUCCESS;
}
-static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter,
- STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
+static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData) {
int64_t st = taosGetTimestampUs();
+ double elapsedTime = 0;
+ int32_t code = 0;
- SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
- SBlock* pBlock = getCurrentBlock(pBlockIter);
-
- SSDataBlock* pResBlock = pReader->pResBlock;
- int32_t numOfCols = blockDataGetNumOfCols(pResBlock);
-
- SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SBlockIdx blockIdx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid};
- int32_t code =
- tsdbReadColData(pReader->pFileReader, &blockIdx, pBlock, pSupInfo->colIds, numOfCols, pBlockData, NULL, NULL);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
+ if (pBlockInfo != NULL) {
+ SBlock* pBlock = getCurrentBlock(pBlockIter);
+ code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
+ ", rows:%d, code:%s %s",
+ pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
+ tstrerror(code), pReader->idStr);
+ goto _error;
+ }
+
+ elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
+
+ tsdbDebug("%p load file block into buffer, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
+ ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
+ pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
+ pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
+ } else {
+#if 0
+ SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
+
+ uint64_t uid = pBlockInfo->uid;
+ SArray* pBlocks = pLastBlockReader->pBlockL;
+
+ pLastBlockReader->currentBlockIndex = -1;
+
+ // find the correct SBlockL
+ for(int32_t i = 0; i < taosArrayGetSize(pBlocks); ++i) {
+ SBlockL* pBlock = taosArrayGet(pBlocks, i);
+ if (pBlock->minUid >= uid && pBlock->maxUid <= uid) {
+ pLastBlockReader->currentBlockIndex = i;
+ break;
+ }
+ }
+
+// SBlockL* pBlockL = taosArrayGet(pLastBlockReader->pBlockL, *index);
+ code = tsdbReadLastBlock(pReader->pFileReader, pBlockL, pBlockData);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbDebug("%p error occurs in loading last block into buffer, last block index:%d, total:%d brange:%" PRId64 "-%" PRId64
+ ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", code:%s %s",
+ pReader, *index, pBlockIter->numOfBlocks.numOfLastBlocks, 0, 0, pBlockL->nRow,
+ pBlockL->minVer, pBlockL->maxVer, tstrerror(code), pReader->idStr);
+ goto _error;
+ }
+
+ tsdbDebug("%p load last file block into buffer, last block index:%d, total:%d brange:%" PRId64 "-%" PRId64
+ ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
+ pReader, *index, pBlockIter->numOfBlocks.numOfLastBlocks, 0, 0, pBlockL->nRow,
+ pBlockL->minVer, pBlockL->maxVer, elapsedTime, pReader->idStr);
+#endif
}
- double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
pReader->cost.blockLoadTime += elapsedTime;
-
pDumpInfo->allDumped = false;
- tsdbDebug("%p load file block into buffer, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
- ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
- pReader, pBlockIter->index, pFBlock->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
- pBlock->minVersion, pBlock->maxVersion, elapsedTime, pReader->idStr);
return TSDB_CODE_SUCCESS;
_error:
- tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
- ", rows:%d, %s",
- pReader, pBlockIter->index, pFBlock->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
- pReader->idStr);
return code;
}
@@ -865,10 +976,11 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v
static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) {
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
- STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
-
- int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx);
- tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetBlock);
+ if (pFBlock != NULL) {
+ STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
+ int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx);
+ tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetBlock);
+ }
#if 0
qDebug("check file block, table uid:%"PRIu64" index:%d offset:%"PRId64", ", pScanInfo->uid, *mapDataIndex, pBlockIter->block.aSubBlock[0].offset);
@@ -945,7 +1057,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
int64_t et = taosGetTimestampUs();
tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s",
- pReader, cnt, (et - st) / 1000.0, pReader->idStr);
+ pReader, numOfBlocks, (et - st) / 1000.0, pReader->idStr);
pBlockIter->index = asc ? 0 : (numOfBlocks - 1);
cleanupBlockOrderSupporter(&sup);
@@ -956,7 +1068,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %s", pReader, cnt, sup.numOfTables,
pReader->idStr);
- assert(cnt <= numOfBlocks && sup.numOfTables <= numOfTables);
+ ASSERT(cnt <= numOfBlocks && sup.numOfTables <= numOfTables);
SMultiwayMergeTreeInfo* pTree = NULL;
uint8_t ret = tMergeTreeCreate(&pTree, sup.numOfTables, &sup, fileDataBlockOrderCompar);
@@ -983,7 +1095,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
}
int64_t et = taosGetTimestampUs();
- tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et - st) / 1000.0,
+ tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, numOfBlocks, (et - st) / 1000.0,
pReader->idStr);
cleanupBlockOrderSupporter(&sup);
taosMemoryFree(pTree);
@@ -1014,8 +1126,8 @@ static bool blockIteratorNext(SDataBlockIter* pBlockIter) {
static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* pVerRange, SBlock* pBlock) {
return (pWindow->ekey < pBlock->maxKey.ts && pWindow->ekey >= pBlock->minKey.ts) ||
(pWindow->skey > pBlock->minKey.ts && pWindow->skey <= pBlock->maxKey.ts) ||
- (pVerRange->minVer > pBlock->minVersion && pVerRange->minVer <= pBlock->maxVersion) ||
- (pVerRange->maxVer < pBlock->maxVersion && pVerRange->maxVer >= pBlock->minVersion);
+ (pVerRange->minVer > pBlock->minVer && pVerRange->minVer <= pBlock->maxVer) ||
+ (pVerRange->maxVer < pBlock->maxVer && pVerRange->maxVer >= pBlock->minVer);
}
static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STableBlockScanInfo* pTableBlockScanInfo,
@@ -1095,8 +1207,8 @@ static bool bufferDataInFileBlockGap(int32_t order, TSDBKEY key, SBlock* pBlock)
}
static bool keyOverlapFileBlock(TSDBKEY key, SBlock* pBlock, SVersionRange* pVerRange) {
- return (key.ts >= pBlock->minKey.ts && key.ts <= pBlock->maxKey.ts) && (pBlock->maxVersion >= pVerRange->minVer) &&
- (pBlock->minVersion <= pVerRange->maxVer);
+ return (key.ts >= pBlock->minKey.ts && key.ts <= pBlock->maxKey.ts) && (pBlock->maxVer >= pVerRange->minVer) &&
+ (pBlock->minVer <= pVerRange->maxVer);
}
static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock) {
@@ -1105,11 +1217,11 @@ static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, cons
for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += 1) {
TSDBKEY* p = taosArrayGet(pBlockScanInfo->delSkyline, i);
if (p->ts >= pBlock->minKey.ts && p->ts <= pBlock->maxKey.ts) {
- if (p->version >= pBlock->minVersion) {
+ if (p->version >= pBlock->minVer) {
return true;
}
} else if (p->ts < pBlock->minKey.ts) { // p->ts < pBlock->minKey.ts
- if (p->version >= pBlock->minVersion) {
+ if (p->version >= pBlock->minVer) {
if (i < num - 1) {
TSDBKEY* pnext = taosArrayGet(pBlockScanInfo->delSkyline, i + 1);
if (i + 1 == num - 1) { // pnext is the last point
@@ -1117,7 +1229,7 @@ static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, cons
return true;
}
} else {
- if (pnext->ts >= pBlock->minKey.ts && pnext->version >= pBlock->minVersion) {
+ if (pnext->ts >= pBlock->minKey.ts && pnext->version >= pBlock->minVer) {
return true;
}
}
@@ -1169,7 +1281,7 @@ static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBl
// 4. output buffer should be large enough to hold all rows in current block
// 5. delete info should not overlap with current block data
static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBlock, SBlock* pBlock,
- STableBlockScanInfo* pScanInfo, TSDBKEY key) {
+ STableBlockScanInfo* pScanInfo, TSDBKEY key, SLastBlockReader* pLastBlockReader) {
int32_t neighborIndex = 0;
SBlock* pNeighbor = getNeighborBlockOfSameTable(pFBlock, pScanInfo, &neighborIndex, pReader->order);
@@ -1184,8 +1296,16 @@ static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBloc
bool hasDup = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true;
bool overlapWithDel = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order);
+ // todo here we need to each key in the last files to identify if it is really overlapped with last block
+ bool overlapWithlastBlock = false;
+ if (taosArrayGetSize(pLastBlockReader->pBlockL) > 0 && (pLastBlockReader->currentBlockIndex != -1)) {
+ SBlockL *pBlockL = taosArrayGet(pLastBlockReader->pBlockL, pLastBlockReader->currentBlockIndex);
+ overlapWithlastBlock = !(pBlock->maxKey.ts < pBlockL->minKey || pBlock->minKey.ts > pBlockL->maxKey);
+ }
+
return (overlapWithNeighbor || hasDup || dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock) ||
- keyOverlapFileBlock(key, pBlock, &pReader->verRange) || (pBlock->nRow > pReader->capacity) || overlapWithDel);
+ keyOverlapFileBlock(key, pBlock, &pReader->verRange) || (pBlock->nRow > pReader->capacity) ||
+ overlapWithDel || overlapWithlastBlock);
}
static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, int64_t endKey) {
@@ -1224,7 +1344,7 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
if (nextKey != key) { // merge is not needed
- doAppendRowFromBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
+ doAppendRowFromFileBlock(pReader->pResBlock, pReader, pBlockData, pDumpInfo->rowIndex);
pDumpInfo->rowIndex += step;
return true;
}
@@ -1258,8 +1378,124 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader*
return pReader->pMemSchema;
}
+static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
+ SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
+ SRowMerger merge = {0};
+ STSRow* pTSRow = NULL;
+ SBlockData* pBlockData = &pReader->status.fileBlockData;
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+
+ int64_t tsLast = INT64_MIN;
+ if (pLastBlockReader->lastBlockData.nRow > 0) {
+ tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
+ }
+
+ TSDBKEY k = TSDBROW_KEY(pRow);
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+
+ SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
+
+ int64_t minKey = 0;
+ if (pReader->order == TSDB_ORDER_ASC) {
+ minKey = INT64_MAX; // chosen the minimum value
+ if (minKey > tsLast && pLastBlockReader->lastBlockData.nRow > 0) {
+ minKey = tsLast;
+ }
+
+ if (minKey > k.ts) {
+ minKey = k.ts;
+ }
+
+ if (minKey > key && pBlockData->nRow > 0) {
+ minKey = key;
+ }
+ } else {
+ minKey = INT64_MIN;
+ if (minKey < tsLast && pLastBlockReader->lastBlockData.nRow > 0) {
+ minKey = tsLast;
+ }
+
+ if (minKey < k.ts) {
+ minKey = k.ts;
+ }
+
+ if (minKey < key && pBlockData->nRow > 0) {
+ minKey = key;
+ }
+ }
+
+ bool init = false;
+
+ // ASC: file block ---> last block -----> imem -----> mem
+ //DESC: mem -----> imem -----> last block -----> file block
+ if (pReader->order == TSDB_ORDER_ASC) {
+ if (minKey == key) {
+ init = true;
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ }
+
+ if (minKey == tsLast) {
+ TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ if (init) {
+ tRowMerge(&merge, &fRow1);
+ } else {
+ init = true;
+ tRowMergerInit(&merge, &fRow1, pReader->pSchema);
+ }
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge);
+ }
+
+ if (minKey == k.ts) {
+ if (init) {
+ tRowMerge(&merge, pRow);
+ } else {
+ init = true;
+ STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
+ tRowMergerInit(&merge, pRow, pSchema);
+ }
+ doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ }
+ } else {
+ if (minKey == k.ts) {
+ init = true;
+ STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
+ tRowMergerInit(&merge, pRow, pSchema);
+ doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ }
+
+ if (minKey == tsLast) {
+ TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ if (init) {
+ tRowMerge(&merge, &fRow1);
+ } else {
+ init = true;
+ tRowMergerInit(&merge, &fRow1, pReader->pSchema);
+ }
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge);
+ }
+
+ if (minKey == key) {
+ if (init) {
+ tRowMerge(&merge, &fRow);
+ } else {
+ init = true;
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ }
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ }
+ }
+
+ tRowMergerGetRow(&merge, &pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
- SIterInfo* pIter, int64_t key) {
+ SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
SRowMerger merge = {0};
STSRow* pTSRow = NULL;
SBlockData* pBlockData = &pReader->status.fileBlockData;
@@ -1331,12 +1567,159 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
return TSDB_CODE_SUCCESS;
}
-static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) {
+static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
+ SRowMerger merge = {0};
+ STSRow* pTSRow = NULL;
+
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ SArray* pDelList = pBlockScanInfo->delSkyline;
+
+ TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader);
+ TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pDelList, pReader);
+ ASSERT(pRow != NULL && piRow != NULL);
+
+ SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
+ int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
+
+ int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
+
+ TSDBKEY k = TSDBROW_KEY(pRow);
+ TSDBKEY ik = TSDBROW_KEY(piRow);
+
+ int64_t minKey = 0;//INT64_MAX;
+ if (ASCENDING_TRAVERSE(pReader->order)) {
+ minKey = INT64_MAX; // let's find the minimum
+ if (minKey > k.ts) {
+ minKey = k.ts;
+ }
+
+ if (minKey > ik.ts) {
+ minKey = ik.ts;
+ }
+
+ if (minKey > key && pBlockData->nRow > 0) {
+ minKey = key;
+ }
+
+ if (minKey > tsLast && pLastBlockData->nRow > 0) {
+ minKey = tsLast;
+ }
+ } else {
+ minKey = INT64_MIN; // let find the maximum ts value
+ if (minKey < k.ts) {
+ minKey = k.ts;
+ }
+
+ if (minKey < ik.ts) {
+ minKey = ik.ts;
+ }
+
+ if (minKey < key && pBlockData->nRow > 0) {
+ minKey = key;
+ }
+
+ if (minKey < tsLast && pLastBlockData->nRow > 0) {
+ minKey = tsLast;
+ }
+ }
+
+ bool init = false;
+
+ // ASC: file block -----> last block -----> imem -----> mem
+ // DESC: mem -----> imem -----> last block -----> file block
+ if (ASCENDING_TRAVERSE(pReader->order)) {
+ if (minKey == key) {
+ init = true;
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ }
+
+ if (minKey == tsLast) {
+ TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ if (init) {
+ tRowMerge(&merge, &fRow1);
+ } else {
+ init = true;
+ tRowMergerInit(&merge, &fRow1, pReader->pSchema);
+ }
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge);
+ }
+
+ if (minKey == ik.ts) {
+ if (init) {
+ tRowMerge(&merge, piRow);
+ } else {
+ init = true;
+ STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
+ tRowMergerInit(&merge, piRow, pSchema);
+ }
+ doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ }
+
+ if (minKey == k.ts) {
+ if (init) {
+ tRowMerge(&merge, pRow);
+ } else {
+ STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
+ tRowMergerInit(&merge, pRow, pSchema);
+ }
+ doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ }
+ } else {
+ if (minKey == k.ts) {
+ init = true;
+ STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
+ tRowMergerInit(&merge, pRow, pSchema);
+ doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ }
+
+ if (minKey == ik.ts) {
+ if (init) {
+ tRowMerge(&merge, piRow);
+ } else {
+ init = true;
+ STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(piRow), pReader, pBlockScanInfo->uid);
+ tRowMergerInit(&merge, piRow, pSchema);
+ }
+ doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ }
+
+ if (minKey == tsLast) {
+ TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ if (init) {
+ tRowMerge(&merge, &fRow1);
+ } else {
+ init = true;
+ tRowMergerInit(&merge, &fRow1, pReader->pSchema);
+ }
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, &merge);
+ }
+
+ if (minKey == key) {
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+ if (!init) {
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ } else {
+ tRowMerge(&merge, &fRow);
+ }
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ }
+ }
+
+ tRowMergerGetRow(&merge, &pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
SRowMerger merge = {0};
STSRow* pTSRow = NULL;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SBlockData* pBlockData = &pReader->status.fileBlockData;
SArray* pDelList = pBlockScanInfo->delSkyline;
TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader);
@@ -1477,6 +1860,14 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo,
STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
+ // it is an multi-table data block
+ if (pBlockData->aUid != NULL) {
+ uint64_t uid = pBlockData->aUid[pDumpInfo->rowIndex];
+ if (uid != pBlockScanInfo->uid) { // move to next row
+ return false;
+ }
+ }
+
// check for version and time range
int64_t ver = pBlockData->aVersion[pDumpInfo->rowIndex];
if (ver > pReader->verRange.maxVer || ver < pReader->verRange.minVer) {
@@ -1498,39 +1889,191 @@ static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDum
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
-static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) {
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SBlockData* pBlockData = &pReader->status.fileBlockData;
+static void initLastBlockReader(SLastBlockReader* pLastBlockReader, uint64_t uid, int16_t* startPos) {
+ pLastBlockReader->uid = uid;
+ pLastBlockReader->rowIndex = startPos;
- int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
+ if (*startPos == -1) {
+ if (ASCENDING_TRAVERSE(pLastBlockReader->order)) {
+ // do nothing
+ } else {
+ *startPos = pLastBlockReader->lastBlockData.nRow;
+ }
+ }
+}
+
+static void setAllRowsChecked(SLastBlockReader *pLastBlockReader) {
+ *pLastBlockReader->rowIndex = ALL_ROWS_CHECKED_INDEX;
+}
+
+static bool nextRowInLastBlock(SLastBlockReader *pLastBlockReader, STableBlockScanInfo* pBlockScanInfo) {
+ int32_t step = (pLastBlockReader->order == TSDB_ORDER_ASC) ? 1 : -1;
+ if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
+ return false;
+ }
+
+ *(pLastBlockReader->rowIndex) += step;
+
+ SBlockData* pBlockData = &pLastBlockReader->lastBlockData;
+ for(int32_t i = *(pLastBlockReader->rowIndex); i < pBlockData->nRow && i >= 0; i += step) {
+ if (pBlockData->aUid != NULL && pBlockData->aUid[i] != pLastBlockReader->uid) {
+ continue;
+ }
+
+ int64_t ts = pBlockData->aTSKEY[i];
+ if (ts < pLastBlockReader->window.skey) {
+ continue;
+ }
+
+ int64_t ver = pBlockData->aVersion[i];
+ if (ver < pLastBlockReader->verRange.minVer) {
+ continue;
+ }
+
+ // no data any more, todo opt handle desc case
+ if (ts > pLastBlockReader->window.ekey) {
+ continue;
+ }
+
+ // todo opt handle desc case
+ if (ver > pLastBlockReader->verRange.maxVer) {
+ continue;
+ }
+
+ TSDBKEY k = {.ts = ts, .version = ver};
+ if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order)) {
+ continue;
+ }
+
+ *(pLastBlockReader->rowIndex) = i;
+ return true;
+ }
+
+ // set all data is consumed in last block
+ setAllRowsChecked(pLastBlockReader);
+ return false;
+}
+
+static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) {
+ SBlockData* pBlockData = &pLastBlockReader->lastBlockData;
+ return pBlockData->aTSKEY[*pLastBlockReader->rowIndex];
+}
+
+static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) {
+ if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
+ return false;
+ }
+ return true;
+}
+
+// todo refactor
+static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo,
+ SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+
+ int64_t key = (pBlockData->nRow > 0)? pBlockData->aTSKEY[pDumpInfo->rowIndex]:INT64_MIN;
TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
if (pBlockScanInfo->iter.hasVal && pBlockScanInfo->iiter.hasVal) {
- return doMergeThreeLevelRows(pReader, pBlockScanInfo);
+ return doMergeMultiLevelRowsRv(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
} else {
- // imem + file
+ // imem + file + last block
if (pBlockScanInfo->iiter.hasVal) {
- return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key);
+ return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader);
}
// mem + file
if (pBlockScanInfo->iter.hasVal) {
- return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key);
+ return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader);
}
- // imem & mem are all empty, only file exist
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
+ if (pBlockData->nRow > 0) {
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+ // no last block
+ if (pLastBlockReader->lastBlockData.nRow == 0) {
+ if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
+ return TSDB_CODE_SUCCESS;
+ } else {
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
+
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ tRowMergerGetRow(&merge, &pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
+ // row in last file block
+ int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
+ if (ts < key) { // save rows in last block
+ SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
+
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
+
+ TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+
+ tRowMergerInit(&merge, &fRow1, pReader->pSchema);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
+ tRowMergerGetRow(&merge, &pTSRow);
+
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+ } else if (ts == key) {
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
+
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
+
+ tRowMergerGetRow(&merge, &pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+ } else { // ts > key, asc; todo handle desc
+ // imem & mem are all empty, only file exist
+ if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
+ return TSDB_CODE_SUCCESS;
+ } else {
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
+
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ tRowMergerGetRow(&merge, &pTSRow);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+ } else { // only last block exists
+ SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
+ int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
+
STSRow* pTSRow = NULL;
SRowMerger merge = {0};
+ TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+
tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
tRowMergerGetRow(&merge, &pTSRow);
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
@@ -1540,41 +2083,59 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
}
}
-static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) {
+static int32_t buildComposedDataBlock(STsdbReader* pReader) {
SSDataBlock* pResBlock = pReader->pResBlock;
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
+
+ STableBlockScanInfo* pBlockScanInfo = NULL;
+ if (pBlockInfo != NULL) {
+ pBlockScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
+ } else {
+ pBlockScanInfo = pReader->status.pTableIter;
+ }
+
+ SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SBlockData* pBlockData = &pReader->status.fileBlockData;
int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1;
- int32_t numOfSub = 1;
-
int64_t st = taosGetTimestampUs();
while (1) {
// todo check the validate of row in file block
{
- if (!isValidFileBlockRow(pBlockData, pDumpInfo, pBlockScanInfo, pReader)) {
- pDumpInfo->rowIndex += step;
+ bool hasBlockData = false;
- SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
- numOfSub = pBlock->nSubBlock;
-
- if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) {
- setBlockAllDumped(pDumpInfo, pBlock, pReader->order);
+ while (pBlockData->nRow > 0) { // find the first qualified row in data block
+ if (isValidFileBlockRow(pBlockData, pDumpInfo, pBlockScanInfo, pReader)) {
+ hasBlockData = true;
break;
}
- continue;
+ pDumpInfo->rowIndex += step;
+
+ SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) {
+ setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
+ break;
+ }
+ }
+
+ bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
+
+ // no data in last block and block, no need to proceed.
+ if ((hasBlockData == false) && (hasBlockLData == false)) {
+ break;
}
}
- buildComposedDataBlockImpl(pReader, pBlockScanInfo);
- SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ buildComposedDataBlockImpl(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
// currently loaded file data block is consumed
- if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) {
- setBlockAllDumped(pDumpInfo, pBlock, pReader->order);
+ if ((pBlockData->nRow > 0) && (pDumpInfo->rowIndex >= pBlockData->nRow || pDumpInfo->rowIndex < 0)) {
+ SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
break;
}
@@ -1589,9 +2150,8 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo*
setComposedBlockFlag(pReader, true);
int64_t et = taosGetTimestampUs();
- tsdbDebug("%p uid:%" PRIu64 ", composed data block created, subBlock:%d, brange:%" PRIu64 "-%" PRIu64
- " rows:%d, elapsed time:%.2f ms %s",
- pReader, pBlockScanInfo->uid, numOfSub, pResBlock->info.window.skey, pResBlock->info.window.ekey,
+ tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, elapsed time:%.2f ms %s",
+ pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr);
return TSDB_CODE_SUCCESS;
@@ -1617,7 +2177,7 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
STbData* d = NULL;
if (pReader->pReadSnap->pMem != NULL) {
- tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid, &d);
+ d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid);
if (d != NULL) {
code = tsdbTbDataIterCreate(d, &startKey, backward, &pBlockScanInfo->iter.iter);
if (code == TSDB_CODE_SUCCESS) {
@@ -1638,7 +2198,7 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
STbData* di = NULL;
if (pReader->pReadSnap->pIMem != NULL) {
- tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid, &di);
+ di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid);
if (di != NULL) {
code = tsdbTbDataIterCreate(di, &startKey, backward, &pBlockScanInfo->iiter.iter);
if (code == TSDB_CODE_SUCCESS) {
@@ -1677,7 +2237,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
SDelFile* pDelFile = pReader->pReadSnap->fs.pDelFile;
if (pDelFile) {
SDelFReader* pDelFReader = NULL;
- code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb, NULL);
+ code = tsdbDelFReaderOpen(&pDelFReader, pDelFile, pTsdb);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
@@ -1688,7 +2248,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
goto _err;
}
- code = tsdbReadDelIdx(pDelFReader, aDelIdx, NULL);
+ code = tsdbReadDelIdx(pDelFReader, aDelIdx);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(aDelIdx);
tsdbDelFReaderClose(&pDelFReader);
@@ -1699,7 +2259,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
SDelIdx* pIdx = taosArraySearch(aDelIdx, &idx, tCmprDelIdx, TD_EQ);
if (pIdx != NULL) {
- code = tsdbReadDelData(pDelFReader, pIdx, pDelData, NULL);
+ code = tsdbReadDelData(pDelFReader, pIdx, pDelData);
}
taosArrayDestroy(aDelIdx);
@@ -1737,6 +2297,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
ASCENDING_TRAVERSE(pReader->order) ? 0 : taosArrayGetSize(pBlockScanInfo->delSkyline) - 1;
pBlockScanInfo->iiter.index = pBlockScanInfo->iter.index;
pBlockScanInfo->fileDelIndex = pBlockScanInfo->iter.index;
+ pBlockScanInfo->lastBlockDelIndex = pBlockScanInfo->iter.index;
return code;
_err:
@@ -1744,12 +2305,9 @@ _err:
return code;
}
-static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pReader) {
+static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
- SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
- STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
-
initMemDataIterator(pScanInfo, pReader);
TSDBROW* pRow = getValidRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader);
if (pRow != NULL) {
@@ -1767,11 +2325,15 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead
return key;
}
-static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) {
+static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
SReaderStatus* pStatus = &pReader->status;
+ pBlockNum->numOfBlocks = 0;
+ pBlockNum->numOfLastBlocks = 0;
size_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx));
+ SArray* pLastBlocks = pStatus->fileIter.pLastBlockReader->pBlockL;
+ taosArrayClear(pLastBlocks);
while (1) {
bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader);
@@ -1786,18 +2348,34 @@ static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) {
return code;
}
- if (taosArrayGetSize(pIndexList) > 0) {
- uint32_t numOfValidTable = 0;
- code = doLoadFileBlock(pReader, pIndexList, &numOfValidTable, numOfBlocks);
+ code = tsdbReadBlockL(pReader->pFileReader, pLastBlocks);
+ if (code != TSDB_CODE_SUCCESS) {
+ taosArrayDestroy(pIndexList);
+ return code;
+ }
+
+ if (taosArrayGetSize(pIndexList) > 0 || taosArrayGetSize(pLastBlocks) > 0) {
+ SArray* pQLastBlock = taosArrayInit(4, sizeof(SBlockL));
+
+ code = doLoadFileBlock(pReader, pIndexList, pLastBlocks, pBlockNum, pQLastBlock);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(pIndexList);
+ taosArrayDestroy(pQLastBlock);
return code;
}
- if (numOfValidTable > 0) {
+ if (pBlockNum->numOfBlocks + pBlockNum->numOfLastBlocks > 0) {
+ ASSERT(taosArrayGetSize(pQLastBlock) == pBlockNum->numOfLastBlocks);
+ taosArrayClear(pLastBlocks);
+ taosArrayAddAll(pLastBlocks, pQLastBlock);
+
+ taosArrayDestroy(pQLastBlock);
break;
}
+
+ taosArrayDestroy(pQLastBlock);
}
+
// no blocks in current file, try next files
}
@@ -1805,28 +2383,172 @@ static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) {
return TSDB_CODE_SUCCESS;
}
+// todo add elapsed time results
+static int32_t doLoadRelatedLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo *pBlockScanInfo, STsdbReader* pReader) {
+ SArray* pBlocks = pLastBlockReader->pBlockL;
+ SBlockL* pBlock = NULL;
+
+ uint64_t uid = pBlockScanInfo->uid;
+ int32_t totalLastBlocks = (int32_t)taosArrayGetSize(pBlocks);
+
+ initMemDataIterator(pBlockScanInfo, pReader);
+
+ // find the correct SBlockL. todo binary search
+ int32_t index = -1;
+ for (int32_t i = 0; i < totalLastBlocks; ++i) {
+ SBlockL* p = taosArrayGet(pBlocks, i);
+ if (p->minUid <= uid && p->maxUid >= uid) {
+ index = i;
+ pBlock = p;
+ break;
+ }
+ }
+
+ if (index == -1) {
+ pLastBlockReader->currentBlockIndex = index;
+ tBlockDataReset(&pLastBlockReader->lastBlockData);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ // the required last datablock has already loaded
+ if (index == pLastBlockReader->currentBlockIndex) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t code = tBlockDataInit(&pLastBlockReader->lastBlockData, pReader->suid, pReader->suid ? 0 : uid, pReader->pSchema);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%p init block data failed, code:%s %s", pReader, tstrerror(code), pReader->idStr);
+ return code;
+ }
+
+ code = tsdbReadLastBlock(pReader->pFileReader, pBlock, &pLastBlockReader->lastBlockData);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%p error occurs in loading last block into buffer, last block index:%d, total:%d code:%s %s", pReader,
+ pLastBlockReader->currentBlockIndex, totalLastBlocks, tstrerror(code), pReader->idStr);
+ } else {
+ tsdbDebug("%p load last block completed, uid:%" PRIu64
+ " last block index:%d, total:%d rows:%d, minVer:%d, maxVer:%d, brange:%" PRId64 " - %" PRId64 " %s",
+ pReader, uid, pLastBlockReader->currentBlockIndex, totalLastBlocks, pBlock->nRow, pBlock->minVer,
+ pBlock->maxVer, pBlock->minKey, pBlock->maxKey, pReader->idStr);
+ }
+
+ pLastBlockReader->currentBlockIndex = index;
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
+ SReaderStatus* pStatus = &pReader->status;
+ SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader;
+
+ while(1) {
+ if (pStatus->pTableIter == NULL) {
+ pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
+ if (pStatus->pTableIter == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
+ // load the last data block of current table
+ // todo opt perf by avoiding load last block repeatly
+ STableBlockScanInfo* pScanInfo = pStatus->pTableIter;
+ int32_t code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ if (pLastBlockReader->currentBlockIndex != -1) {
+ initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL);
+ int32_t index = pScanInfo->indexInBlockL;
+ if (index == DEFAULT_ROW_INDEX_VAL || index == pLastBlockReader->lastBlockData.nRow) {
+ bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo);
+ if (!hasData) { // current table does not have rows in last block, try next table
+ pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
+ if (pStatus->pTableIter == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+ continue;
+ }
+ }
+ } else { // no data in last block, try next table
+ pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
+ if (pStatus->pTableIter == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+ continue;
+ }
+
+ code = doBuildDataBlock(pReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ if (pReader->pResBlock->info.rows > 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ // current table is exhausted, let's try next table
+ pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
+ if (pStatus->pTableIter == NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+}
+
static int32_t doBuildDataBlock(STsdbReader* pReader) {
int32_t code = TSDB_CODE_SUCCESS;
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
- SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
- STableBlockScanInfo* pScanInfo = taosHashGet(pStatus->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
+ TSDBKEY key = {0};
+ SBlock* pBlock = NULL;
+ STableBlockScanInfo* pScanInfo = NULL;
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
+ SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
- SBlock* pBlock = getCurrentBlock(pBlockIter);
+ if (pBlockInfo != NULL) {
+ pScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
+ } else {
+ pScanInfo = pReader->status.pTableIter;
+ }
- TSDBKEY key = getCurrentKeyInBuf(pBlockIter, pReader);
- if (fileBlockShouldLoad(pReader, pFBlock, pBlock, pScanInfo, key)) {
+ if (pBlockInfo != NULL) {
+ pBlock = getCurrentBlock(pBlockIter);
+ }
+
+ {
+ key = getCurrentKeyInBuf(pScanInfo, pReader);
+
+ // load the last data block of current table
+ code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // note: the lastblock may be null here
+ initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL);
+ if (pScanInfo->indexInBlockL == DEFAULT_ROW_INDEX_VAL || pScanInfo->indexInBlockL == pLastBlockReader->lastBlockData.nRow) {
+ bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo);
+ }
+ }
+
+ if (pBlockInfo == NULL) { // build data block from last data file
+ ASSERT(pBlockIter->numOfBlocks == 0);
+ code = buildComposedDataBlock(pReader);
+ } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, key, pLastBlockReader)) {
tBlockDataReset(&pStatus->fileBlockData);
- tBlockDataClearData(&pStatus->fileBlockData);
- code = doLoadFileBlockData(pReader, pBlockIter, pScanInfo, &pStatus->fileBlockData);
+ code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pScanInfo->uid, pReader->pSchema);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// build composed data block
- code = buildComposedDataBlock(pReader, pScanInfo);
+ code = buildComposedDataBlock(pReader);
} else if (bufferDataInFileBlockGap(pReader->order, key, pBlock)) {
// data in memory that are earlier than current file block
// todo rows in buffer should be less than the file block in asc, greater than file block in desc
@@ -1838,7 +2560,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
pInfo->uid = pScanInfo->uid;
pInfo->window = (STimeWindow){.skey = pBlock->minKey.ts, .ekey = pBlock->maxKey.ts};
setComposedBlockFlag(pReader, false);
- setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlock, pReader->order);
+ setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlock->maxKey.ts, pReader->order);
}
return code;
@@ -1890,20 +2612,29 @@ static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter)
}
static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
- int32_t numOfBlocks = 0;
- int32_t code = moveToNextFile(pReader, &numOfBlocks);
+ SBlockNumber num = {0};
+
+ int32_t code = moveToNextFile(pReader, &num);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// all data files are consumed, try data in buffer
- if (numOfBlocks == 0) {
+ if (num.numOfBlocks + num.numOfLastBlocks == 0) {
pReader->status.loadFromFile = false;
return code;
}
// initialize the block iterator for a new fileset
- code = initBlockIterator(pReader, pBlockIter, numOfBlocks);
+ if (num.numOfBlocks > 0) {
+ code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks);
+ } else {
+ tBlockDataReset(&pReader->status.fileBlockData);
+ resetDataBlockIterator(pBlockIter, pReader->order, pReader->status.pTableMap);
+ }
+
+ SLastBlockReader* pLReader = pReader->status.fileIter.pLastBlockReader;
+ pLReader->currentBlockIndex = -1;
// set the correct start position according to the query time window
initBlockDumpInfo(pReader, pBlockIter);
@@ -1921,14 +2652,47 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
- while (1) {
- SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter);
- STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
+ if (pBlockIter->numOfBlocks == 0) {
+ _begin:
+ code = doLoadLastBlockSequentially(pReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ if (pReader->pResBlock->info.rows > 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ // all data blocks are checked in this last block file, now let's try the next file
+ if (pReader->status.pTableIter == NULL) {
+ code = initForFirstBlockInFile(pReader, pBlockIter);
+
+ // error happens or all the data files are completely checked
+ if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
+ return code;
+ }
+
+ // this file does not have data files, let's start check the last block file if exists
+ if (pBlockIter->numOfBlocks == 0) {
+ goto _begin;
+ }
+ }
+
+ code = doBuildDataBlock(pReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ if (pReader->pResBlock->info.rows > 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+
+ while (1) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
if (fileBlockPartiallyRead(pDumpInfo, asc)) { // file data block is partially loaded
- code = buildComposedDataBlock(pReader, pScanInfo);
+ code = buildComposedDataBlock(pReader);
} else {
// current block are exhausted, try the next file block
if (pDumpInfo->allDumped) {
@@ -1936,17 +2700,26 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
bool hasNext = blockIteratorNext(&pReader->status.blockIter);
if (hasNext) { // check for the next block in the block accessed order list
initBlockDumpInfo(pReader, pBlockIter);
- } else { // data blocks in current file are exhausted, let's try the next file now
+ } else if (taosArrayGetSize(pReader->status.fileIter.pLastBlockReader->pBlockL) > 0) { // data blocks in current file are exhausted, let's try the next file now
+ // todo dump all data in last block if exists.
+ tBlockDataReset(&pReader->status.fileBlockData);
+ resetDataBlockIterator(pBlockIter, pReader->order, pReader->status.pTableMap);
+ goto _begin;
+ } else {
code = initForFirstBlockInFile(pReader, pBlockIter);
// error happens or all the data files are completely checked
if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
return code;
}
+
+ // this file does not have blocks, let's start check the last block file
+ if (pBlockIter->numOfBlocks == 0) {
+ goto _begin;
+ }
}
}
- // current block is not loaded yet, or data in buffer may overlap with the file block.
code = doBuildDataBlock(pReader);
}
@@ -2014,39 +2787,6 @@ SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_
return (SVersionRange){.minVer = startVer, .maxVer = endVer};
}
-// // todo not unref yet, since it is not support multi-group interpolation query
-// static UNUSED_FUNC void changeQueryHandleForInterpQuery(STsdbReader* pHandle) {
-// // filter the queried time stamp in the first place
-// STsdbReader* pTsdbReadHandle = (STsdbReader*)pHandle;
-
-// // starts from the buffer in case of descending timestamp order check data blocks
-// size_t numOfTables = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo);
-
-// int32_t i = 0;
-// while (i < numOfTables) {
-// STableBlockScanInfo* pCheckInfo = taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i);
-
-// // the first qualified table for interpolation query
-// // if ((pTsdbReadHandle->window.skey <= pCheckInfo->pTableObj->lastKey) &&
-// // (pCheckInfo->pTableObj->lastKey != TSKEY_INITIAL_VAL)) {
-// // break;
-// // }
-
-// i++;
-// }
-
-// // there are no data in all the tables
-// if (i == numOfTables) {
-// return;
-// }
-
-// STableBlockScanInfo info = *(STableBlockScanInfo*)taosArrayGet(pTsdbReadHandle->pTableCheckInfo, i);
-// taosArrayClear(pTsdbReadHandle->pTableCheckInfo);
-
-// info.lastKey = pTsdbReadHandle->window.skey;
-// taosArrayPush(pTsdbReadHandle->pTableCheckInfo, &info);
-// }
-
bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order) {
ASSERT(pKey != NULL);
if (pDelList == NULL) {
@@ -2265,8 +3005,7 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn
// 3. load the neighbor block, and set it to be the currently accessed file data block
tBlockDataReset(&pStatus->fileBlockData);
- tBlockDataClearData(&pStatus->fileBlockData);
- int32_t code = doLoadFileBlockData(pReader, pBlockIter, pScanInfo, &pStatus->fileBlockData);
+ int32_t code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2315,6 +3054,21 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
return TSDB_CODE_SUCCESS;
}
+// todo check if the rows are dropped or not
+int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger) {
+ while(nextRowInLastBlock(pLastBlockReader, pScanInfo)) {
+ int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
+ if (next1 == ts) {
+ TSDBROW fRow1 = tsdbRowFromBlockData(&pLastBlockReader->lastBlockData, *pLastBlockReader->rowIndex);
+ tRowMerge(pMerger, &fRow1);
+ } else {
+ break;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
STsdbReader* pReader, bool* freeTSRow) {
TSDBROW* pNextRow = NULL;
@@ -2487,7 +3241,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
return TSDB_CODE_SUCCESS;
}
-int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex) {
+int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex) {
int32_t i = 0, j = 0;
int32_t outputRowIndex = pResBlock->info.rows;
@@ -2564,7 +3318,7 @@ int32_t tsdbSetTableId(STsdbReader* pReader, int64_t uid) {
ASSERT(pReader != NULL);
taosHashClear(pReader->status.pTableMap);
- STableBlockScanInfo info = {.lastKey = 0, .uid = uid};
+ STableBlockScanInfo info = {.lastKey = 0, .uid = uid, .indexInBlockL = DEFAULT_ROW_INDEX_VAL};
taosHashPut(pReader->status.pTableMap, &info.uid, sizeof(uint64_t), &info, sizeof(info));
return TDB_CODE_SUCCESS;
}
@@ -2585,32 +3339,6 @@ void* tsdbGetIvtIdx(SMeta* pMeta) {
uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
-/**
- * @brief Get all suids since suid
- *
- * @param pMeta
- * @param suid return all suids in one vnode if suid is 0
- * @param list
- * @return int32_t
- */
-int32_t tsdbGetStbIdList(SMeta* pMeta, int64_t suid, SArray* list) {
- SMStbCursor* pCur = metaOpenStbCursor(pMeta, suid);
- if (!pCur) {
- return TSDB_CODE_FAILED;
- }
-
- while (1) {
- tb_uid_t id = metaStbCursorNext(pCur);
- if (id == 0) {
- break;
- }
-
- taosArrayPush(list, &id);
- }
-
- metaCloseStbCursor(pCur);
- return TSDB_CODE_SUCCESS;
-}
// ====================================== EXPOSED APIs ======================================
int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTableList, STsdbReader** ppReader,
@@ -2641,6 +3369,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
pCond->order = TSDB_ORDER_ASC;
}
+ // here we only need one more row, so the capacity is set to be ONE.
code = tsdbReaderCreate(pVnode, pCond, &pReader->innerReader[0], 1, idstr);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
@@ -2684,7 +3413,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
if (pReader->type == TIMEWINDOW_RANGE_CONTAINED) {
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
- initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader->order, pReader->idStr);
+ initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
// no data in files, let's try buffer in memory
@@ -2705,8 +3434,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
goto _err;
}
- initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order,
- pPrevReader->idStr);
+ initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader);
resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap);
// no data in files, let's try buffer in memory
@@ -2746,7 +3474,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
}
}
taosMemoryFree(pSupInfo->buildBuf);
- tBlockDataClear(&pReader->status.fileBlockData, true);
+ tBlockDataDestroy(&pReader->status.fileBlockData, true);
cleanupDataBlockIterator(&pReader->status.blockIter);
@@ -2758,6 +3486,13 @@ void tsdbReaderClose(STsdbReader* pReader) {
tsdbDataFReaderClose(&pReader->pFileReader);
}
+ SFilesetIter* pFilesetIter = &pReader->status.fileIter;
+ if (pFilesetIter->pLastBlockReader != NULL) {
+ tBlockDataDestroy(&pFilesetIter->pLastBlockReader->lastBlockData, true);
+ taosArrayDestroy(pFilesetIter->pLastBlockReader->pBlockL);
+ taosMemoryFree(pFilesetIter->pLastBlockReader);
+ }
+
SIOCostSummary* pCost = &pReader->cost;
tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
@@ -2883,7 +3618,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
if (tBlockHasSma(pBlock)) {
- code = tsdbReadBlockSma(pReader->pFileReader, pBlock, pSup->pColAgg, NULL);
+ code = tsdbReadBlockSma(pReader->pFileReader, pBlock, pSup->pColAgg);
if (code != TSDB_CODE_SUCCESS) {
tsdbDebug("vgId:%d, failed to load block SMA for uid %" PRIu64 ", code:%s, %s", 0, pFBlock->uid, tstrerror(code),
pReader->idStr);
@@ -2949,11 +3684,15 @@ static SArray* doRetrieveDataBlock(STsdbReader* pReader) {
STableBlockScanInfo* pBlockScanInfo = taosHashGet(pStatus->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
tBlockDataReset(&pStatus->fileBlockData);
- tBlockDataClearData(&pStatus->fileBlockData);
- int32_t code = doLoadFileBlockData(pReader, &pStatus->blockIter, pBlockScanInfo, &pStatus->fileBlockData);
+ int32_t code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pBlockScanInfo->uid, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
- tBlockDataClear(&pStatus->fileBlockData, 1);
+ terrno = code;
+ return NULL;
+ }
+ code = doLoadFileBlockData(pReader, &pStatus->blockIter, &pStatus->fileBlockData);
+ if (code != TSDB_CODE_SUCCESS) {
+ tBlockDataDestroy(&pStatus->fileBlockData, 1);
terrno = code;
return NULL;
}
@@ -2995,7 +3734,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
tsdbDataFReaderClose(&pReader->pFileReader);
- initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader->order, pReader->idStr);
+ initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
resetDataBlockScanInfo(pReader->status.pTableMap);
@@ -3104,7 +3843,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
STbData* d = NULL;
if (pReader->pTsdb->mem != NULL) {
- tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid, &d);
+ d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid);
if (d != NULL) {
rows += tsdbGetNRowsInTbData(d);
}
@@ -3112,7 +3851,7 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
STbData* di = NULL;
if (pReader->pTsdb->imem != NULL) {
- tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid, &di);
+ di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid);
if (di != NULL) {
rows += tsdbGetNRowsInTbData(di);
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
index ea9c3e5313..c8f3862071 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
@@ -15,8 +15,6 @@
#include "tsdb.h"
-#define TSDB_FILE_DLMT ((uint32_t)0xF00AFA0F)
-
// SDelFWriter ====================================================
int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb) {
int32_t code = 0;
@@ -63,6 +61,7 @@ _err:
int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) {
int32_t code = 0;
SDelFWriter *pWriter = *ppWriter;
+ STsdb *pTsdb = pWriter->pTsdb;
// sync
if (sync && taosFsyncFile(pWriter->pWriteH) < 0) {
@@ -76,47 +75,47 @@ int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) {
goto _err;
}
+ for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree(pWriter->aBuf[iBuf]);
+ }
+ taosMemoryFree(pWriter);
+
*ppWriter = NULL;
return code;
_err:
- tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, uint8_t **ppBuf, SDelIdx *pDelIdx) {
- int32_t code = 0;
- uint8_t *pBuf = NULL;
- int64_t size;
- int64_t n;
- SBlockDataHdr hdr = {.delimiter = TSDB_FILE_DLMT, .suid = pDelIdx->suid, .uid = pDelIdx->uid};
-
- if (!ppBuf) ppBuf = &pBuf;
+int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx) {
+ int32_t code = 0;
+ int64_t size;
+ int64_t n;
// prepare
- size = sizeof(hdr);
+ size = sizeof(uint32_t);
for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
size += tPutDelData(NULL, taosArrayGet(aDelData, iDelData));
}
size += sizeof(TSCKSUM);
// alloc
- code = tRealloc(ppBuf, size);
+ code = tRealloc(&pWriter->aBuf[0], size);
if (code) goto _err;
// build
n = 0;
- *(SBlockDataHdr *)(*ppBuf) = hdr;
- n += sizeof(hdr);
+ n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
- n += tPutDelData(*ppBuf + n, taosArrayGet(aDelData, iDelData));
+ n += tPutDelData(pWriter->aBuf[0] + n, taosArrayGet(aDelData, iDelData));
}
- taosCalcChecksumAppend(0, *ppBuf, size);
+ taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
ASSERT(n + sizeof(TSCKSUM) == size);
// write
- n = taosWriteFile(pWriter->pWriteH, *ppBuf, size);
+ n = taosWriteFile(pWriter->pWriteH, pWriter->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -129,48 +128,42 @@ int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, uint8_t **ppBuf
pDelIdx->size = size;
pWriter->fDel.size += size;
- tFree(pBuf);
return code;
_err:
tsdbError("vgId:%d, failed to write del data since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf);
return code;
}
-int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx, uint8_t **ppBuf) {
+int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx) {
int32_t code = 0;
int64_t size;
int64_t n;
- uint8_t *pBuf = NULL;
SDelIdx *pDelIdx;
- if (!ppBuf) ppBuf = &pBuf;
-
// prepare
- size = 0;
- size += tPutU32(NULL, TSDB_FILE_DLMT);
+ size = sizeof(uint32_t);
for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
size += tPutDelIdx(NULL, taosArrayGet(aDelIdx, iDelIdx));
}
size += sizeof(TSCKSUM);
// alloc
- code = tRealloc(ppBuf, size);
+ code = tRealloc(&pWriter->aBuf[0], size);
if (code) goto _err;
// build
n = 0;
- n += tPutU32(*ppBuf + n, TSDB_FILE_DLMT);
+ n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
- n += tPutDelIdx(*ppBuf + n, taosArrayGet(aDelIdx, iDelIdx));
+ n += tPutDelIdx(pWriter->aBuf[0] + n, taosArrayGet(aDelIdx, iDelIdx));
}
- taosCalcChecksumAppend(0, *ppBuf, size);
+ taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
ASSERT(n + sizeof(TSCKSUM) == size);
// write
- n = taosWriteFile(pWriter->pWriteH, *ppBuf, size);
+ n = taosWriteFile(pWriter->pWriteH, pWriter->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -180,12 +173,10 @@ int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx, uint8_t **ppBuf)
pWriter->fDel.offset = pWriter->fDel.size;
pWriter->fDel.size += size;
- tFree(pBuf);
return code;
_err:
tsdbError("vgId:%d, write del idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf);
return code;
}
@@ -225,9 +216,11 @@ struct SDelFReader {
STsdb *pTsdb;
SDelFile fDel;
TdFilePtr pReadH;
+
+ uint8_t *aBuf[1];
};
-int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb, uint8_t **ppBuf) {
+int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb) {
int32_t code = 0;
char fname[TSDB_FILENAME_LEN];
SDelFReader *pDelFReader;
@@ -252,32 +245,6 @@ int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb
goto _err;
}
-#if 0
- // load and check hdr if buffer is given
- if (ppBuf) {
- code = tRealloc(ppBuf, TSDB_FHDR_SIZE);
- if (code) {
- goto _err;
- }
-
- n = taosReadFile(pDelFReader->pReadH, *ppBuf, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < TSDB_FHDR_SIZE) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- if (!taosCheckChecksumWhole(*ppBuf, TSDB_FHDR_SIZE)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // TODO: check the content
- }
-#endif
-
_exit:
*ppReader = pDelFReader;
return code;
@@ -297,6 +264,9 @@ int32_t tsdbDelFReaderClose(SDelFReader **ppReader) {
code = TAOS_SYSTEM_ERROR(errno);
goto _exit;
}
+ for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree(pReader->aBuf[iBuf]);
+ }
taosMemoryFree(pReader);
}
*ppReader = NULL;
@@ -305,16 +275,13 @@ _exit:
return code;
}
-int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData, uint8_t **ppBuf) {
- int32_t code = 0;
- int64_t offset = pDelIdx->offset;
- int64_t size = pDelIdx->size;
- int64_t n;
- uint8_t *pBuf = NULL;
- SBlockDataHdr *pHdr;
- SDelData *pDelData = &(SDelData){0};
+int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData) {
+ int32_t code = 0;
+ int64_t offset = pDelIdx->offset;
+ int64_t size = pDelIdx->size;
+ int64_t n;
- if (!ppBuf) ppBuf = &pBuf;
+ taosArrayClear(aDelData);
// seek
if (taosLSeekFile(pReader->pReadH, offset, SEEK_SET) < 0) {
@@ -323,11 +290,11 @@ int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData
}
// alloc
- code = tRealloc(ppBuf, size);
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
// read
- n = taosReadFile(pReader->pReadH, *ppBuf, size);
+ n = taosReadFile(pReader->pReadH, pReader->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -337,23 +304,21 @@ int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData
}
// check
- if (!taosCheckChecksumWhole(*ppBuf, size)) {
+ if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
// // decode
n = 0;
- pHdr = (SBlockDataHdr *)(*ppBuf + n);
- ASSERT(pHdr->delimiter == TSDB_FILE_DLMT);
- ASSERT(pHdr->suid == pDelIdx->suid);
- ASSERT(pHdr->uid == pDelIdx->uid);
- n += sizeof(*pHdr);
- taosArrayClear(aDelData);
- while (n < size - sizeof(TSCKSUM)) {
- n += tGetDelData(*ppBuf + n, pDelData);
- if (taosArrayPush(aDelData, pDelData) == NULL) {
+ uint32_t delimiter;
+ n += tGetU32(pReader->aBuf[0] + n, &delimiter);
+ while (n < size - sizeof(TSCKSUM)) {
+ SDelData delData;
+ n += tGetDelData(pReader->aBuf[0] + n, &delData);
+
+ if (taosArrayPush(aDelData, &delData) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
@@ -361,25 +326,20 @@ int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData
ASSERT(n == size - sizeof(TSCKSUM));
- tFree(pBuf);
return code;
_err:
tsdbError("vgId:%d, read del data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf);
return code;
}
-int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf) {
- int32_t code = 0;
- int32_t n;
- int64_t offset = pReader->fDel.offset;
- int64_t size = pReader->fDel.size - offset;
- uint32_t delimiter;
- uint8_t *pBuf = NULL;
- SDelIdx *pDelIdx = &(SDelIdx){0};
+int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx) {
+ int32_t code = 0;
+ int32_t n;
+ int64_t offset = pReader->fDel.offset;
+ int64_t size = pReader->fDel.size - offset;
- if (!ppBuf) ppBuf = &pBuf;
+ taosArrayClear(aDelIdx);
// seek
if (taosLSeekFile(pReader->pReadH, offset, SEEK_SET) < 0) {
@@ -388,11 +348,11 @@ int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf) {
}
// alloc
- code = tRealloc(ppBuf, size);
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
// read
- n = taosReadFile(pReader->pReadH, *ppBuf, size);
+ n = taosReadFile(pReader->pReadH, pReader->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -402,21 +362,23 @@ int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf) {
}
// check
- if (!taosCheckChecksumWhole(*ppBuf, size)) {
+ if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
// decode
n = 0;
- n += tGetU32(*ppBuf + n, &delimiter);
+ uint32_t delimiter;
+ n += tGetU32(pReader->aBuf[0] + n, &delimiter);
ASSERT(delimiter == TSDB_FILE_DLMT);
- taosArrayClear(aDelIdx);
while (n < size - sizeof(TSCKSUM)) {
- n += tGetDelIdx(*ppBuf + n, pDelIdx);
+ SDelIdx delIdx;
- if (taosArrayPush(aDelIdx, pDelIdx) == NULL) {
+ n += tGetDelIdx(pReader->aBuf[0] + n, &delIdx);
+
+ if (taosArrayPush(aDelIdx, &delIdx) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
@@ -424,12 +386,10 @@ int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf) {
ASSERT(n == size - sizeof(TSCKSUM));
- tFree(pBuf);
return code;
_err:
tsdbError("vgId:%d, read del idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf);
return code;
}
@@ -441,6 +401,8 @@ struct SDataFReader {
TdFilePtr pDataFD;
TdFilePtr pLastFD;
TdFilePtr pSmaFD;
+
+ uint8_t *aBuf[3];
};
int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet) {
@@ -523,6 +485,10 @@ int32_t tsdbDataFReaderClose(SDataFReader **ppReader) {
goto _err;
}
+ for (int32_t iBuf = 0; iBuf < sizeof((*ppReader)->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree((*ppReader)->aBuf[iBuf]);
+ }
+
taosMemoryFree(*ppReader);
_exit:
@@ -534,19 +500,20 @@ _err:
return code;
}
-int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppBuf) {
- int32_t code = 0;
- int64_t offset = pReader->pSet->pHeadF->offset;
- int64_t size = pReader->pSet->pHeadF->size - offset;
- uint8_t *pBuf = NULL;
- int64_t n;
- uint32_t delimiter;
- SBlockIdx blockIdx;
+int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx) {
+ int32_t code = 0;
+ int64_t offset = pReader->pSet->pHeadF->offset;
+ int64_t size = pReader->pSet->pHeadF->size - offset;
+ int64_t n;
+ uint32_t delimiter;
- if (!ppBuf) ppBuf = &pBuf;
+ taosArrayClear(aBlockIdx);
+ if (size == 0) {
+ goto _exit;
+ }
// alloc
- code = tRealloc(ppBuf, size);
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
// seek
@@ -556,7 +523,7 @@ int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppB
}
// read
- n = taosReadFile(pReader->pHeadFD, *ppBuf, size);
+ n = taosReadFile(pReader->pHeadFD, pReader->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -566,19 +533,19 @@ int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppB
}
// check
- if (!taosCheckChecksumWhole(*ppBuf, size)) {
+ if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
// decode
n = 0;
- n = tGetU32(*ppBuf + n, &delimiter);
+ n = tGetU32(pReader->aBuf[0] + n, &delimiter);
ASSERT(delimiter == TSDB_FILE_DLMT);
- taosArrayClear(aBlockIdx);
while (n < size - sizeof(TSCKSUM)) {
- n += tGetBlockIdx(*ppBuf + n, &blockIdx);
+ SBlockIdx blockIdx;
+ n += tGetBlockIdx(pReader->aBuf[0] + n, &blockIdx);
if (taosArrayPush(aBlockIdx, &blockIdx) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -588,28 +555,86 @@ int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx, uint8_t **ppB
ASSERT(n + sizeof(TSCKSUM) == size);
- tFree(pBuf);
+_exit:
return code;
_err:
tsdbError("vgId:%d, read block idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf);
return code;
}
-int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBlock, uint8_t **ppBuf) {
- int32_t code = 0;
- int64_t offset = pBlockIdx->offset;
- int64_t size = pBlockIdx->size;
- uint8_t *pBuf = NULL;
- int64_t n;
- int64_t tn;
- SBlockDataHdr hdr;
+int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL) {
+ int32_t code = 0;
+ int64_t offset = pReader->pSet->pLastF->offset;
+ int64_t size = pReader->pSet->pLastF->size - offset;
+ int64_t n;
+ uint32_t delimiter;
- if (!ppBuf) ppBuf = &pBuf;
+ taosArrayClear(aBlockL);
+ if (size == 0) {
+ goto _exit;
+ }
// alloc
- code = tRealloc(ppBuf, size);
+ code = tRealloc(&pReader->aBuf[0], size);
+ if (code) goto _err;
+
+ // seek
+ if (taosLSeekFile(pReader->pLastFD, offset, SEEK_SET) < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ // read
+ n = taosReadFile(pReader->pLastFD, pReader->aBuf[0], size);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ } else if (n < size) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _err;
+ }
+
+ // check
+ if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _err;
+ }
+
+ // decode
+ n = 0;
+ n = tGetU32(pReader->aBuf[0] + n, &delimiter);
+ ASSERT(delimiter == TSDB_FILE_DLMT);
+
+ while (n < size - sizeof(TSCKSUM)) {
+ SBlockL blockl;
+ n += tGetBlockL(pReader->aBuf[0] + n, &blockl);
+
+ if (taosArrayPush(aBlockL, &blockl) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+
+ ASSERT(n + sizeof(TSCKSUM) == size);
+
+_exit:
+ return code;
+
+_err:
+ tsdbError("vgId:%d read blockl failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBlock) {
+ int32_t code = 0;
+ int64_t offset = pBlockIdx->offset;
+ int64_t size = pBlockIdx->size;
+ int64_t n;
+ int64_t tn;
+
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
// seek
@@ -619,7 +644,7 @@ int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBl
}
// read
- n = taosReadFile(pReader->pHeadFD, *ppBuf, size);
+ n = taosReadFile(pReader->pHeadFD, pReader->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -629,19 +654,19 @@ int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBl
}
// check
- if (!taosCheckChecksumWhole(*ppBuf, size)) {
+ if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
// decode
- hdr = *(SBlockDataHdr *)(*ppBuf);
- ASSERT(hdr.delimiter == TSDB_FILE_DLMT);
- ASSERT(hdr.suid == pBlockIdx->suid);
- ASSERT(hdr.uid == pBlockIdx->uid);
+ n = 0;
- n = sizeof(hdr);
- tn = tGetMapData(*ppBuf + n, mBlock);
+ uint32_t delimiter;
+ n += tGetU32(pReader->aBuf[0] + n, &delimiter);
+ ASSERT(delimiter == TSDB_FILE_DLMT);
+
+ tn = tGetMapData(pReader->aBuf[0] + n, mBlock);
if (tn < 0) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@@ -649,535 +674,38 @@ int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBl
n += tn;
ASSERT(n + sizeof(TSCKSUM) == size);
- tFree(pBuf);
return code;
_err:
tsdbError("vgId:%d, read block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf);
return code;
}
-static int32_t tsdbReadBlockDataKey(SBlockData *pBlockData, SSubBlock *pSubBlock, uint8_t *pBuf, uint8_t **ppBuf) {
- int32_t code = 0;
- int64_t size = pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM);
- int64_t n;
+int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg) {
+ int32_t code = 0;
+ SSmaInfo *pSmaInfo = &pBlock->smaInfo;
- if (!taosCheckChecksumWhole(pBuf, size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ ASSERT(pSmaInfo->size > 0);
- code = tRealloc((uint8_t **)&pBlockData->aVersion, sizeof(int64_t) * pSubBlock->nRow);
- if (code) goto _err;
- code = tRealloc((uint8_t **)&pBlockData->aTSKEY, sizeof(TSKEY) * pSubBlock->nRow);
- if (code) goto _err;
+ taosArrayClear(aColumnDataAgg);
- if (pSubBlock->cmprAlg == NO_COMPRESSION) {
- ASSERT(pSubBlock->szVersion == sizeof(int64_t) * pSubBlock->nRow);
- ASSERT(pSubBlock->szTSKEY == sizeof(TSKEY) * pSubBlock->nRow);
-
- // VERSION
- memcpy(pBlockData->aVersion, pBuf, pSubBlock->szVersion);
-
- // TSKEY
- memcpy(pBlockData->aTSKEY, pBuf + pSubBlock->szVersion, pSubBlock->szTSKEY);
- } else {
- size = sizeof(int64_t) * pSubBlock->nRow + COMP_OVERFLOW_BYTES;
- if (pSubBlock->cmprAlg == TWO_STAGE_COMP) {
- code = tRealloc(ppBuf, size);
- if (code) goto _err;
- }
-
- // VERSION
- n = tsDecompressBigint(pBuf, pSubBlock->szVersion, pSubBlock->nRow, (char *)pBlockData->aVersion,
- sizeof(int64_t) * pSubBlock->nRow, pSubBlock->cmprAlg, *ppBuf, size);
- if (n < 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
-
- // TSKEY
- n = tsDecompressTimestamp(pBuf + pSubBlock->szVersion, pSubBlock->szTSKEY, pSubBlock->nRow,
- (char *)pBlockData->aTSKEY, sizeof(TSKEY) * pSubBlock->nRow, pSubBlock->cmprAlg, *ppBuf,
- size);
- if (n < 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
- }
-
- return code;
-
-_err:
- return code;
-}
-
-static int32_t tsdbReadColDataImpl(SSubBlock *pSubBlock, SBlockCol *pBlockCol, SColData *pColData, uint8_t *pBuf,
- uint8_t **ppBuf) {
- int32_t code = 0;
- int64_t size;
- int64_t n;
-
- if (!taosCheckChecksumWhole(pBuf, pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM))) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- pColData->nVal = pSubBlock->nRow;
- pColData->flag = pBlockCol->flag;
-
- // BITMAP
- if (pBlockCol->flag != HAS_VALUE) {
- ASSERT(pBlockCol->szBitmap);
-
- size = BIT2_SIZE(pColData->nVal);
- code = tRealloc(&pColData->pBitMap, size);
- if (code) goto _err;
-
- code = tRealloc(ppBuf, size + COMP_OVERFLOW_BYTES);
- if (code) goto _err;
-
- n = tsDecompressTinyint(pBuf, pBlockCol->szBitmap, size, pColData->pBitMap, size, TWO_STAGE_COMP, *ppBuf,
- size + COMP_OVERFLOW_BYTES);
- if (n <= 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
-
- ASSERT(n == size);
- } else {
- ASSERT(pBlockCol->szBitmap == 0);
- }
- pBuf = pBuf + pBlockCol->szBitmap;
-
- // OFFSET
- if (IS_VAR_DATA_TYPE(pColData->type)) {
- ASSERT(pBlockCol->szOffset);
-
- size = sizeof(int32_t) * pColData->nVal;
- code = tRealloc((uint8_t **)&pColData->aOffset, size);
- if (code) goto _err;
-
- code = tRealloc(ppBuf, size + COMP_OVERFLOW_BYTES);
- if (code) goto _err;
-
- n = tsDecompressInt(pBuf, pBlockCol->szOffset, pColData->nVal, (char *)pColData->aOffset, size, TWO_STAGE_COMP,
- *ppBuf, size + COMP_OVERFLOW_BYTES);
- if (n <= 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
-
- ASSERT(n == size);
- } else {
- ASSERT(pBlockCol->szOffset == 0);
- }
- pBuf = pBuf + pBlockCol->szOffset;
-
- // VALUE
- pColData->nData = pBlockCol->szOrigin;
-
- code = tRealloc(&pColData->pData, pColData->nData);
- if (code) goto _err;
-
- if (pSubBlock->cmprAlg == NO_COMPRESSION) {
- memcpy(pColData->pData, pBuf, pColData->nData);
- } else {
- if (pSubBlock->cmprAlg == TWO_STAGE_COMP) {
- code = tRealloc(ppBuf, pColData->nData + COMP_OVERFLOW_BYTES);
- if (code) goto _err;
- }
-
- n = tDataTypes[pBlockCol->type].decompFunc(pBuf, pBlockCol->szValue, pSubBlock->nRow, pColData->pData,
- pColData->nData, pSubBlock->cmprAlg, *ppBuf,
- pColData->nData + COMP_OVERFLOW_BYTES);
- if (n < 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
-
- ASSERT(n == pColData->nData);
- }
-
- return code;
-
-_err:
- return code;
-}
-
-static int32_t tsdbReadBlockCol(SSubBlock *pSubBlock, uint8_t *p, SArray *aBlockCol) {
- int32_t code = 0;
- int32_t n = 0;
- SBlockCol blockCol;
- SBlockCol *pBlockCol = &blockCol;
-
- if (!taosCheckChecksumWhole(p, pSubBlock->szBlockCol + sizeof(TSCKSUM))) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- n += sizeof(SBlockDataHdr);
- while (n < pSubBlock->szBlockCol) {
- n += tGetBlockCol(p + n, pBlockCol);
-
- if (taosArrayPush(aBlockCol, pBlockCol) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
-
- ASSERT(n == pSubBlock->szBlockCol);
-
- return code;
-
-_err:
- return code;
-}
-
-static int32_t tsdbReadSubColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int32_t iSubBlock,
- int16_t *aColId, int32_t nCol, SBlockData *pBlockData, uint8_t **ppBuf1,
- uint8_t **ppBuf2) {
- TdFilePtr pFD = pBlock->last ? pReader->pLastFD : pReader->pDataFD;
- SSubBlock *pSubBlock = &pBlock->aSubBlock[iSubBlock];
- SArray *aBlockCol = NULL;
- int32_t code = 0;
- int64_t offset;
- int64_t size;
- int64_t n;
-
- tBlockDataReset(pBlockData);
- pBlockData->nRow = pSubBlock->nRow;
-
- // TSDBKEY and SBlockCol
- if (nCol == 1) {
- offset = pSubBlock->offset + pSubBlock->szBlockCol + sizeof(TSCKSUM);
- size = pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM);
- } else {
- offset = pSubBlock->offset;
- size = pSubBlock->szBlockCol + sizeof(TSCKSUM) + pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM);
- }
-
- code = tRealloc(ppBuf1, size);
- if (code) goto _err;
-
- n = taosLSeekFile(pFD, offset, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosReadFile(pFD, *ppBuf1, size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- if (nCol == 1) {
- code = tsdbReadBlockDataKey(pBlockData, pSubBlock, *ppBuf1, ppBuf2);
- if (code) goto _err;
-
- goto _exit;
- } else {
- aBlockCol = taosArrayInit(0, sizeof(SBlockCol));
- if (aBlockCol == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- code = tsdbReadBlockCol(pSubBlock, *ppBuf1, aBlockCol);
- if (code) goto _err;
-
- code = tsdbReadBlockDataKey(pBlockData, pSubBlock, *ppBuf1 + pSubBlock->szBlockCol + sizeof(TSCKSUM), ppBuf2);
- if (code) goto _err;
- }
-
- for (int32_t iCol = 1; iCol < nCol; iCol++) {
- void *p = taosArraySearch(aBlockCol, &(SBlockCol){.cid = aColId[iCol]}, tBlockColCmprFn, TD_EQ);
-
- if (p) {
- SBlockCol *pBlockCol = (SBlockCol *)p;
- SColData *pColData;
-
- ASSERT(pBlockCol->flag && pBlockCol->flag != HAS_NONE);
-
- code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
- if (code) goto _err;
-
- tColDataInit(pColData, pBlockCol->cid, pBlockCol->type, pBlockCol->smaOn);
- if (pBlockCol->flag == HAS_NULL) {
- for (int32_t iRow = 0; iRow < pSubBlock->nRow; iRow++) {
- code = tColDataAppendValue(pColData, &COL_VAL_NULL(pBlockCol->cid, pBlockCol->type));
- if (code) goto _err;
- }
- } else {
- offset = pSubBlock->offset + pSubBlock->szBlockCol + sizeof(TSCKSUM) + pSubBlock->szVersion +
- pSubBlock->szTSKEY + sizeof(TSCKSUM) + pBlockCol->offset;
- size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM);
-
- code = tRealloc(ppBuf1, size);
- if (code) goto _err;
-
- // seek
- n = taosLSeekFile(pFD, offset, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- // read
- n = taosReadFile(pFD, *ppBuf1, size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- code = tsdbReadColDataImpl(pSubBlock, pBlockCol, pColData, *ppBuf1, ppBuf2);
- if (code) goto _err;
- }
- }
- }
-
-_exit:
- taosArrayDestroy(aBlockCol);
- return code;
-
-_err:
- taosArrayDestroy(aBlockCol);
- return code;
-}
-
-int32_t tsdbReadColData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int16_t *aColId, int32_t nCol,
- SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2) {
- int32_t code = 0;
- uint8_t *pBuf1 = NULL;
- uint8_t *pBuf2 = NULL;
-
- ASSERT(aColId[0] == PRIMARYKEY_TIMESTAMP_COL_ID);
-
- if (!ppBuf1) ppBuf1 = &pBuf1;
- if (!ppBuf2) ppBuf2 = &pBuf2;
-
- code = tsdbReadSubColData(pReader, pBlockIdx, pBlock, 0, aColId, nCol, pBlockData, ppBuf1, ppBuf2);
- if (code) goto _err;
-
- if (pBlock->nSubBlock > 1) {
- SBlockData *pBlockData1 = &(SBlockData){0};
- SBlockData *pBlockData2 = &(SBlockData){0};
-
- tBlockDataInit(pBlockData1);
- tBlockDataInit(pBlockData2);
- for (int32_t iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- code = tsdbReadSubColData(pReader, pBlockIdx, pBlock, iSubBlock, aColId, nCol, pBlockData1, ppBuf1, ppBuf2);
- if (code) goto _err;
-
- code = tBlockDataCopy(pBlockData, pBlockData2);
- if (code) {
- tBlockDataClear(pBlockData1, 1);
- tBlockDataClear(pBlockData2, 1);
- goto _err;
- }
-
- code = tBlockDataMerge(pBlockData1, pBlockData2, pBlockData);
- if (code) {
- tBlockDataClear(pBlockData1, 1);
- tBlockDataClear(pBlockData2, 1);
- goto _err;
- }
- }
-
- tBlockDataClear(pBlockData1, 1);
- tBlockDataClear(pBlockData2, 1);
- }
-
- tFree(pBuf1);
- tFree(pBuf2);
- return code;
-
-_err:
- tsdbError("vgId:%d, tsdb read col data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf1);
- tFree(pBuf2);
- return code;
-}
-
-static int32_t tsdbReadSubBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, int32_t iSubBlock,
- SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2) {
- int32_t code = 0;
- uint8_t *p;
- int64_t size;
- int64_t n;
- TdFilePtr pFD = pBlock->last ? pReader->pLastFD : pReader->pDataFD;
- SSubBlock *pSubBlock = &pBlock->aSubBlock[iSubBlock];
- SArray *aBlockCol = NULL;
-
- tBlockDataReset(pBlockData);
-
- // realloc
- code = tRealloc(ppBuf1, pSubBlock->szBlock);
+ // alloc
+ int32_t size = pSmaInfo->size + sizeof(TSCKSUM);
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
// seek
- n = taosLSeekFile(pFD, pSubBlock->offset, SEEK_SET);
+ int64_t n = taosLSeekFile(pReader->pSmaFD, pSmaInfo->offset, SEEK_SET);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
- }
-
- // read
- n = taosReadFile(pFD, *ppBuf1, pSubBlock->szBlock);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < pSubBlock->szBlock) {
+ } else if (n < pSmaInfo->offset) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
- pBlockData->nRow = pSubBlock->nRow;
-
- // TSDBKEY
- p = *ppBuf1 + pSubBlock->szBlockCol + sizeof(TSCKSUM);
- code = tsdbReadBlockDataKey(pBlockData, pSubBlock, p, ppBuf2);
- if (code) goto _err;
-
- // COLUMNS
- aBlockCol = taosArrayInit(0, sizeof(SBlockCol));
- if (aBlockCol == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- code = tsdbReadBlockCol(pSubBlock, *ppBuf1, aBlockCol);
- if (code) goto _err;
-
- for (int32_t iBlockCol = 0; iBlockCol < taosArrayGetSize(aBlockCol); iBlockCol++) {
- SColData *pColData;
- SBlockCol *pBlockCol = (SBlockCol *)taosArrayGet(aBlockCol, iBlockCol);
-
- ASSERT(pBlockCol->flag && pBlockCol->flag != HAS_NONE);
-
- code = tBlockDataAddColData(pBlockData, iBlockCol, &pColData);
- if (code) goto _err;
-
- tColDataInit(pColData, pBlockCol->cid, pBlockCol->type, pBlockCol->smaOn);
- if (pBlockCol->flag == HAS_NULL) {
- for (int32_t iRow = 0; iRow < pSubBlock->nRow; iRow++) {
- code = tColDataAppendValue(pColData, &COL_VAL_NULL(pBlockCol->cid, pBlockCol->type));
- if (code) goto _err;
- }
- } else {
- p = *ppBuf1 + pSubBlock->szBlockCol + sizeof(TSCKSUM) + pSubBlock->szVersion + pSubBlock->szTSKEY +
- sizeof(TSCKSUM) + pBlockCol->offset;
- code = tsdbReadColDataImpl(pSubBlock, pBlockCol, pColData, p, ppBuf2);
- if (code) goto _err;
- }
- }
-
- taosArrayDestroy(aBlockCol);
- return code;
-
-_err:
- tsdbError("vgId:%d, tsdb read sub block data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
- taosArrayDestroy(aBlockCol);
- return code;
-}
-
-int32_t tsdbReadBlockData(SDataFReader *pReader, SBlockIdx *pBlockIdx, SBlock *pBlock, SBlockData *pBlockData,
- uint8_t **ppBuf1, uint8_t **ppBuf2) {
- int32_t code = 0;
- TdFilePtr pFD = pBlock->last ? pReader->pLastFD : pReader->pDataFD;
- uint8_t *pBuf1 = NULL;
- uint8_t *pBuf2 = NULL;
- int32_t iSubBlock;
-
- if (!ppBuf1) ppBuf1 = &pBuf1;
- if (!ppBuf2) ppBuf2 = &pBuf2;
-
- // read the first sub-block
- iSubBlock = 0;
- code = tsdbReadSubBlockData(pReader, pBlockIdx, pBlock, iSubBlock, pBlockData, ppBuf1, ppBuf2);
- if (code) goto _err;
-
- // read remain block data and do merg
- if (pBlock->nSubBlock > 1) {
- SBlockData *pBlockData1 = &(SBlockData){0};
- SBlockData *pBlockData2 = &(SBlockData){0};
-
- tBlockDataInit(pBlockData1);
- tBlockDataInit(pBlockData2);
- for (iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- code = tsdbReadSubBlockData(pReader, pBlockIdx, pBlock, iSubBlock, pBlockData1, ppBuf1, ppBuf2);
- if (code) {
- tBlockDataClear(pBlockData1, 1);
- tBlockDataClear(pBlockData2, 1);
- goto _err;
- }
-
- code = tBlockDataCopy(pBlockData, pBlockData2);
- if (code) {
- tBlockDataClear(pBlockData1, 1);
- tBlockDataClear(pBlockData2, 1);
- goto _err;
- }
-
- // merge two block data
- code = tBlockDataMerge(pBlockData1, pBlockData2, pBlockData);
- if (code) {
- tBlockDataClear(pBlockData1, 1);
- tBlockDataClear(pBlockData2, 1);
- goto _err;
- }
- }
-
- tBlockDataClear(pBlockData1, 1);
- tBlockDataClear(pBlockData2, 1);
- }
-
- ASSERT(pBlock->nRow == pBlockData->nRow);
- ASSERT(tsdbKeyCmprFn(&pBlock->minKey, &TSDBROW_KEY(&tBlockDataFirstRow(pBlockData))) == 0);
- ASSERT(tsdbKeyCmprFn(&pBlock->maxKey, &TSDBROW_KEY(&tBlockDataLastRow(pBlockData))) == 0);
-
- if (pBuf1) tFree(pBuf1);
- if (pBuf2) tFree(pBuf2);
- return code;
-
-_err:
- tsdbError("vgId:%d, tsdb read block data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
- if (pBuf1) tFree(pBuf1);
- if (pBuf2) tFree(pBuf2);
- return code;
-}
-
-int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg, uint8_t **ppBuf) {
- int32_t code = 0;
- TdFilePtr pFD = pReader->pSmaFD;
- int64_t offset = pBlock->aSubBlock[0].sOffset;
- int64_t size = pBlock->aSubBlock[0].nSma * sizeof(SColumnDataAgg) + sizeof(TSCKSUM);
- uint8_t *pBuf = NULL;
- int64_t n;
-
- ASSERT(tBlockHasSma(pBlock));
-
- if (!ppBuf) ppBuf = &pBuf;
- code = tRealloc(ppBuf, size);
- if (code) goto _err;
-
- // lseek
- n = taosLSeekFile(pFD, offset, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
// read
- n = taosReadFile(pFD, *ppBuf, size);
+ n = taosReadFile(pReader->pSmaFD, pReader->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -1187,26 +715,202 @@ int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnD
}
// check
- if (!taosCheckChecksumWhole(*ppBuf, size)) {
+ if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
// decode
- taosArrayClear(aColumnDataAgg);
- for (int32_t iSma = 0; iSma < pBlock->aSubBlock[0].nSma; iSma++) {
- if (taosArrayPush(aColumnDataAgg, &((SColumnDataAgg *)(*ppBuf))[iSma]) == NULL) {
+ n = 0;
+ while (n < pSmaInfo->size) {
+ SColumnDataAgg sma;
+
+ n += tGetColumnDataAgg(pReader->aBuf[0] + n, &sma);
+ if (taosArrayPush(aColumnDataAgg, &sma) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
}
- tFree(pBuf);
return code;
_err:
- tsdbError("vgId:%d, read block sma failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf);
+ tsdbError("vgId:%d tsdb read block sma failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo, int8_t fromLast,
+ SBlockData *pBlockData) {
+ int32_t code = 0;
+
+ tBlockDataClear(pBlockData);
+
+ TdFilePtr pFD = fromLast ? pReader->pLastFD : pReader->pDataFD;
+
+ // uid + version + tskey
+ code = tsdbReadAndCheck(pFD, pBlkInfo->offset, &pReader->aBuf[0], pBlkInfo->szKey, 1);
+ if (code) goto _err;
+ SDiskDataHdr hdr;
+ uint8_t *p = pReader->aBuf[0] + tGetDiskDataHdr(pReader->aBuf[0], &hdr);
+
+ ASSERT(hdr.delimiter == TSDB_FILE_DLMT);
+ ASSERT(pBlockData->suid == hdr.suid);
+ ASSERT(pBlockData->uid == hdr.uid);
+
+ pBlockData->nRow = hdr.nRow;
+
+ // uid
+ if (hdr.uid == 0) {
+ ASSERT(hdr.szUid);
+ code = tsdbDecmprData(p, hdr.szUid, TSDB_DATA_TYPE_BIGINT, hdr.cmprAlg, (uint8_t **)&pBlockData->aUid,
+ sizeof(int64_t) * hdr.nRow, &pReader->aBuf[1]);
+ if (code) goto _err;
+ } else {
+ ASSERT(!hdr.szUid);
+ }
+ p += hdr.szUid;
+
+ // version
+ code = tsdbDecmprData(p, hdr.szVer, TSDB_DATA_TYPE_BIGINT, hdr.cmprAlg, (uint8_t **)&pBlockData->aVersion,
+ sizeof(int64_t) * hdr.nRow, &pReader->aBuf[1]);
+ if (code) goto _err;
+ p += hdr.szVer;
+
+ // TSKEY
+ code = tsdbDecmprData(p, hdr.szKey, TSDB_DATA_TYPE_TIMESTAMP, hdr.cmprAlg, (uint8_t **)&pBlockData->aTSKEY,
+ sizeof(TSKEY) * hdr.nRow, &pReader->aBuf[1]);
+ if (code) goto _err;
+ p += hdr.szKey;
+
+ ASSERT(p - pReader->aBuf[0] == pBlkInfo->szKey - sizeof(TSCKSUM));
+
+ // read and decode columns
+ if (taosArrayGetSize(pBlockData->aIdx) == 0) goto _exit;
+
+ if (hdr.szBlkCol > 0) {
+ int64_t offset = pBlkInfo->offset + pBlkInfo->szKey;
+ code = tsdbReadAndCheck(pFD, offset, &pReader->aBuf[0], hdr.szBlkCol + sizeof(TSCKSUM), 1);
+ if (code) goto _err;
+ }
+
+ SBlockCol blockCol = {.cid = 0};
+ SBlockCol *pBlockCol = &blockCol;
+ int32_t n = 0;
+
+ for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
+ SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
+
+ while (pBlockCol && pBlockCol->cid < pColData->cid) {
+ if (n < hdr.szBlkCol) {
+ n += tGetBlockCol(pReader->aBuf[0] + n, pBlockCol);
+ } else {
+ ASSERT(n == hdr.szBlkCol);
+ pBlockCol = NULL;
+ }
+ }
+
+ if (pBlockCol == NULL || pBlockCol->cid > pColData->cid) {
+ // add a lot of NONE
+ for (int32_t iRow = 0; iRow < hdr.nRow; iRow++) {
+ code = tColDataAppendValue(pColData, &COL_VAL_NONE(pColData->cid, pColData->type));
+ if (code) goto _err;
+ }
+ } else {
+ ASSERT(pBlockCol->type == pColData->type);
+ ASSERT(pBlockCol->flag && pBlockCol->flag != HAS_NONE);
+
+ if (pBlockCol->flag == HAS_NULL) {
+ // add a lot of NULL
+ for (int32_t iRow = 0; iRow < hdr.nRow; iRow++) {
+ code = tColDataAppendValue(pColData, &COL_VAL_NULL(pBlockCol->cid, pBlockCol->type));
+ if (code) goto _err;
+ }
+ } else {
+ // decode from binary
+ int64_t offset = pBlkInfo->offset + pBlkInfo->szKey + hdr.szBlkCol + sizeof(TSCKSUM) + pBlockCol->offset;
+ int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM);
+
+ code = tsdbReadAndCheck(pFD, offset, &pReader->aBuf[1], size, 0);
+ if (code) goto _err;
+
+ code = tsdbDecmprColData(pReader->aBuf[1], pBlockCol, hdr.cmprAlg, hdr.nRow, pColData, &pReader->aBuf[2]);
+ if (code) goto _err;
+ }
+ }
+ }
+
+_exit:
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb read block data impl failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData) {
+ int32_t code = 0;
+
+ code = tsdbReadBlockDataImpl(pReader, &pBlock->aSubBlock[0], 0, pBlockData);
+ if (code) goto _err;
+
+ if (pBlock->nSubBlock > 1) {
+ SBlockData bData1;
+ SBlockData bData2;
+
+ // create
+ code = tBlockDataCreate(&bData1);
+ if (code) goto _err;
+ code = tBlockDataCreate(&bData2);
+ if (code) goto _err;
+
+ // init
+ tBlockDataInitEx(&bData1, pBlockData);
+ tBlockDataInitEx(&bData2, pBlockData);
+
+ for (int32_t iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
+ code = tsdbReadBlockDataImpl(pReader, &pBlock->aSubBlock[iSubBlock], 0, &bData1);
+ if (code) {
+ tBlockDataDestroy(&bData1, 1);
+ tBlockDataDestroy(&bData2, 1);
+ goto _err;
+ }
+
+ code = tBlockDataCopy(pBlockData, &bData2);
+ if (code) {
+ tBlockDataDestroy(&bData1, 1);
+ tBlockDataDestroy(&bData2, 1);
+ goto _err;
+ }
+
+ code = tBlockDataMerge(&bData1, &bData2, pBlockData);
+ if (code) {
+ tBlockDataDestroy(&bData1, 1);
+ tBlockDataDestroy(&bData2, 1);
+ goto _err;
+ }
+ }
+
+ tBlockDataDestroy(&bData1, 1);
+ tBlockDataDestroy(&bData2, 1);
+ }
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb read data block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData) {
+ int32_t code = 0;
+
+ code = tsdbReadBlockDataImpl(pReader, &pBlockL->bInfo, 1, pBlockData);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb read last block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
return code;
}
@@ -1225,6 +929,7 @@ int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pS
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
+ if (code) goto _err;
pWriter->pTsdb = pTsdb;
pWriter->wSet = (SDFileSet){.diskId = pSet->diskId,
.fid = pSet->fid,
@@ -1357,10 +1062,11 @@ _err:
int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) {
int32_t code = 0;
- STsdb *pTsdb = (*ppWriter)->pTsdb;
+ STsdb *pTsdb = NULL;
if (*ppWriter == NULL) goto _exit;
+ pTsdb = (*ppWriter)->pTsdb;
if (sync) {
if (taosFsyncFile((*ppWriter)->pHeadFD) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
@@ -1403,6 +1109,9 @@ int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) {
goto _err;
}
+ for (int32_t iBuf = 0; iBuf < sizeof((*ppWriter)->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree((*ppWriter)->aBuf[iBuf]);
+ }
taosMemoryFree(*ppWriter);
_exit:
*ppWriter = NULL;
@@ -1493,38 +1202,41 @@ _err:
return code;
}
-int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx, uint8_t **ppBuf) {
+int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) {
int32_t code = 0;
SHeadFile *pHeadFile = &pWriter->fHead;
- uint8_t *pBuf = NULL;
- int64_t size;
+ int64_t size = 0;
int64_t n;
- if (!ppBuf) ppBuf = &pBuf;
+ // check
+ if (taosArrayGetSize(aBlockIdx) == 0) {
+ pHeadFile->offset = pHeadFile->size;
+ goto _exit;
+ }
// prepare
- size = tPutU32(NULL, TSDB_FILE_DLMT);
+ size = sizeof(uint32_t);
for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
size += tPutBlockIdx(NULL, taosArrayGet(aBlockIdx, iBlockIdx));
}
size += sizeof(TSCKSUM);
// alloc
- code = tRealloc(ppBuf, size);
+ code = tRealloc(&pWriter->aBuf[0], size);
if (code) goto _err;
// build
n = 0;
- n = tPutU32(*ppBuf + n, TSDB_FILE_DLMT);
+ n = tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
- n += tPutBlockIdx(*ppBuf + n, taosArrayGet(aBlockIdx, iBlockIdx));
+ n += tPutBlockIdx(pWriter->aBuf[0] + n, taosArrayGet(aBlockIdx, iBlockIdx));
}
- taosCalcChecksumAppend(0, *ppBuf, size);
+ taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
ASSERT(n + sizeof(TSCKSUM) == size);
// write
- n = taosWriteFile(pWriter->pHeadFD, *ppBuf, size);
+ n = taosWriteFile(pWriter->pHeadFD, pWriter->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -1534,44 +1246,39 @@ int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx, uint8_t **pp
pHeadFile->offset = pHeadFile->size;
pHeadFile->size += size;
- tFree(pBuf);
+_exit:
+ tsdbTrace("vgId:%d write block idx, offset:%" PRId64 " size:%" PRId64 " nBlockIdx:%d", TD_VID(pWriter->pTsdb->pVnode),
+ pHeadFile->offset, size, taosArrayGetSize(aBlockIdx));
return code;
_err:
tsdbError("vgId:%d, write block idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf);
return code;
}
-int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *mBlock, uint8_t **ppBuf, SBlockIdx *pBlockIdx) {
- int32_t code = 0;
- SHeadFile *pHeadFile = &pWriter->fHead;
- SBlockDataHdr hdr = {.delimiter = TSDB_FILE_DLMT, .suid = pBlockIdx->suid, .uid = pBlockIdx->uid};
- uint8_t *pBuf = NULL;
- int64_t size;
- int64_t n;
+int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *mBlock, SBlockIdx *pBlockIdx) {
+ int32_t code = 0;
+ SHeadFile *pHeadFile = &pWriter->fHead;
+ int64_t size;
+ int64_t n;
ASSERT(mBlock->nItem > 0);
- // prepare
- size = sizeof(SBlockDataHdr) + tPutMapData(NULL, mBlock) + sizeof(TSCKSUM);
-
// alloc
- if (!ppBuf) ppBuf = &pBuf;
- code = tRealloc(ppBuf, size);
+ size = sizeof(uint32_t) + tPutMapData(NULL, mBlock) + sizeof(TSCKSUM);
+ code = tRealloc(&pWriter->aBuf[0], size);
if (code) goto _err;
// build
n = 0;
- *(SBlockDataHdr *)(*ppBuf) = hdr;
- n += sizeof(hdr);
- n += tPutMapData(*ppBuf + n, mBlock);
- taosCalcChecksumAppend(0, *ppBuf, size);
+ n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
+ n += tPutMapData(pWriter->aBuf[0] + n, mBlock);
+ taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
ASSERT(n + sizeof(TSCKSUM) == size);
// write
- n = taosWriteFile(pWriter->pHeadFD, *ppBuf, size);
+ n = taosWriteFile(pWriter->pHeadFD, pWriter->aBuf[0], size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -1582,17 +1289,71 @@ int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *mBlock, uint8_t **ppBuf,
pBlockIdx->size = size;
pHeadFile->size += size;
- tFree(pBuf);
- tsdbTrace("vgId:%d, write block, offset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), pBlockIdx->offset,
- pBlockIdx->size);
+ tsdbTrace("vgId:%d, write block, file ID:%d commit ID:%d suid:%" PRId64 " uid:%" PRId64 " offset:%" PRId64
+ " size:%" PRId64 " nItem:%d",
+ TD_VID(pWriter->pTsdb->pVnode), pWriter->wSet.fid, pHeadFile->commitID, pBlockIdx->suid, pBlockIdx->uid,
+ pBlockIdx->offset, pBlockIdx->size, mBlock->nItem);
return code;
_err:
- tFree(pBuf);
tsdbError("vgId:%d, write block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
+int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL) {
+ int32_t code = 0;
+ SLastFile *pLastFile = &pWriter->fLast;
+ int64_t size;
+ int64_t n;
+
+ // check
+ if (taosArrayGetSize(aBlockL) == 0) {
+ pLastFile->offset = pLastFile->size;
+ goto _exit;
+ }
+
+ // size
+ size = sizeof(uint32_t); // TSDB_FILE_DLMT
+ for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aBlockL); iBlockL++) {
+ size += tPutBlockL(NULL, taosArrayGet(aBlockL, iBlockL));
+ }
+ size += sizeof(TSCKSUM);
+
+ // alloc
+ code = tRealloc(&pWriter->aBuf[0], size);
+ if (code) goto _err;
+
+ // encode
+ n = 0;
+ n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
+ for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aBlockL); iBlockL++) {
+ n += tPutBlockL(pWriter->aBuf[0] + n, taosArrayGet(aBlockL, iBlockL));
+ }
+ taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
+
+ ASSERT(n + sizeof(TSCKSUM) == size);
+
+ // write
+ n = taosWriteFile(pWriter->pLastFD, pWriter->aBuf[0], size);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ // update
+ pLastFile->offset = pLastFile->size;
+ pLastFile->size += size;
+
+_exit:
+ tsdbTrace("vgId:%d tsdb write blockl, loffset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode),
+ pLastFile->offset, size);
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb write blockl failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
static void tsdbUpdateBlockInfo(SBlockData *pBlockData, SBlock *pBlock) {
for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]};
@@ -1611,357 +1372,127 @@ static void tsdbUpdateBlockInfo(SBlockData *pBlockData, SBlock *pBlock) {
pBlock->maxKey = key;
}
- pBlock->minVersion = TMIN(pBlock->minVersion, key.version);
- pBlock->maxVersion = TMAX(pBlock->maxVersion, key.version);
+ pBlock->minVer = TMIN(pBlock->minVer, key.version);
+ pBlock->maxVer = TMAX(pBlock->maxVer, key.version);
}
pBlock->nRow += pBlockData->nRow;
}
-static int32_t tsdbWriteBlockDataKey(SSubBlock *pSubBlock, SBlockData *pBlockData, uint8_t **ppBuf1, int64_t *nDataP,
- uint8_t **ppBuf2) {
+static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData, SSmaInfo *pSmaInfo) {
int32_t code = 0;
- int64_t size;
- int64_t tsize;
- if (pSubBlock->cmprAlg == NO_COMPRESSION) {
- pSubBlock->szVersion = sizeof(int64_t) * pSubBlock->nRow;
- pSubBlock->szTSKEY = sizeof(TSKEY) * pSubBlock->nRow;
+ pSmaInfo->offset = 0;
+ pSmaInfo->size = 0;
- code = tRealloc(ppBuf1, *nDataP + pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM));
- if (code) goto _err;
-
- // VERSION
- memcpy(*ppBuf1 + *nDataP, pBlockData->aVersion, pSubBlock->szVersion);
-
- // TSKEY
- memcpy(*ppBuf1 + *nDataP + pSubBlock->szVersion, pBlockData->aTSKEY, pSubBlock->szTSKEY);
- } else {
- size = (sizeof(int64_t) + sizeof(TSKEY)) * pSubBlock->nRow + COMP_OVERFLOW_BYTES * 2;
-
- code = tRealloc(ppBuf1, *nDataP + size + sizeof(TSCKSUM));
- if (code) goto _err;
-
- tsize = sizeof(int64_t) * pSubBlock->nRow + COMP_OVERFLOW_BYTES;
- if (pSubBlock->cmprAlg == TWO_STAGE_COMP) {
- code = tRealloc(ppBuf2, tsize);
- if (code) goto _err;
- }
-
- // VERSION
- pSubBlock->szVersion =
- tsCompressBigint((char *)pBlockData->aVersion, sizeof(int64_t) * pBlockData->nRow, pBlockData->nRow,
- *ppBuf1 + *nDataP, size, pSubBlock->cmprAlg, *ppBuf2, tsize);
- if (pSubBlock->szVersion <= 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
-
- // TSKEY
- pSubBlock->szTSKEY = tsCompressTimestamp((char *)pBlockData->aTSKEY, sizeof(TSKEY) * pBlockData->nRow,
- pBlockData->nRow, *ppBuf1 + *nDataP + pSubBlock->szVersion,
- size - pSubBlock->szVersion, pSubBlock->cmprAlg, *ppBuf2, tsize);
- if (pSubBlock->szTSKEY <= 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
-
- ASSERT(pSubBlock->szVersion + pSubBlock->szTSKEY <= size);
- }
-
- // checksum
- size = pSubBlock->szVersion + pSubBlock->szTSKEY + sizeof(TSCKSUM);
- taosCalcChecksumAppend(0, *ppBuf1 + *nDataP, size);
-
- *nDataP += size;
- return code;
-
-_err:
- return code;
-}
-
-static int32_t tsdbWriteColData(SColData *pColData, SBlockCol *pBlockCol, SSubBlock *pSubBlock, uint8_t **ppBuf1,
- int64_t *nDataP, uint8_t **ppBuf2) {
- int32_t code = 0;
- int64_t size;
- int64_t n = 0;
-
- // BITMAP
- if (pColData->flag != HAS_VALUE) {
- size = BIT2_SIZE(pColData->nVal) + COMP_OVERFLOW_BYTES;
-
- code = tRealloc(ppBuf1, *nDataP + n + size);
- if (code) goto _err;
-
- code = tRealloc(ppBuf2, size);
- if (code) goto _err;
-
- pBlockCol->szBitmap =
- tsCompressTinyint((char *)pColData->pBitMap, BIT2_SIZE(pColData->nVal), BIT2_SIZE(pColData->nVal),
- *ppBuf1 + *nDataP + n, size, TWO_STAGE_COMP, *ppBuf2, size);
- if (pBlockCol->szBitmap <= 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
- } else {
- pBlockCol->szBitmap = 0;
- }
- n += pBlockCol->szBitmap;
-
- // OFFSET
- if (IS_VAR_DATA_TYPE(pColData->type)) {
- size = sizeof(int32_t) * pColData->nVal + COMP_OVERFLOW_BYTES;
-
- code = tRealloc(ppBuf1, *nDataP + n + size);
- if (code) goto _err;
-
- code = tRealloc(ppBuf2, size);
- if (code) goto _err;
-
- pBlockCol->szOffset = tsCompressInt((char *)pColData->aOffset, sizeof(int32_t) * pColData->nVal, pColData->nVal,
- *ppBuf1 + *nDataP + n, size, TWO_STAGE_COMP, *ppBuf2, size);
- if (pBlockCol->szOffset <= 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
- } else {
- pBlockCol->szOffset = 0;
- }
- n += pBlockCol->szOffset;
-
- // VALUE
- if (pSubBlock->cmprAlg == NO_COMPRESSION) {
- pBlockCol->szValue = pColData->nData;
-
- code = tRealloc(ppBuf1, *nDataP + n + pBlockCol->szValue + sizeof(TSCKSUM));
- if (code) goto _err;
-
- memcpy(*ppBuf1 + *nDataP + n, pColData->pData, pBlockCol->szValue);
- } else {
- size = pColData->nData + COMP_OVERFLOW_BYTES;
-
- code = tRealloc(ppBuf1, *nDataP + n + size + sizeof(TSCKSUM));
- if (code) goto _err;
-
- if (pSubBlock->cmprAlg == TWO_STAGE_COMP) {
- code = tRealloc(ppBuf2, size);
- if (code) goto _err;
- }
-
- pBlockCol->szValue =
- tDataTypes[pColData->type].compFunc((char *)pColData->pData, pColData->nData, pColData->nVal,
- *ppBuf1 + *nDataP + n, size, pSubBlock->cmprAlg, *ppBuf2, size);
- if (pBlockCol->szValue <= 0) {
- code = TSDB_CODE_COMPRESS_ERROR;
- goto _err;
- }
- }
- n += pBlockCol->szValue;
- pBlockCol->szOrigin = pColData->nData;
-
- // checksum
- n += sizeof(TSCKSUM);
- taosCalcChecksumAppend(0, *ppBuf1 + *nDataP, n);
-
- *nDataP += n;
-
- return code;
-
-_err:
- return code;
-}
-
-static int32_t tsdbWriteBlockDataImpl(TdFilePtr pFD, SSubBlock *pSubBlock, SBlockDataHdr hdr, SArray *aBlockCol,
- uint8_t *pData, int64_t nData, uint8_t **ppBuf) {
- int32_t code = 0;
- int32_t nBlockCol = taosArrayGetSize(aBlockCol);
- int64_t size;
- int64_t n;
-
- // HDR + SArray
- pSubBlock->szBlockCol = sizeof(hdr);
- for (int32_t iBlockCol = 0; iBlockCol < nBlockCol; iBlockCol++) {
- pSubBlock->szBlockCol += tPutBlockCol(NULL, taosArrayGet(aBlockCol, iBlockCol));
- }
-
- code = tRealloc(ppBuf, pSubBlock->szBlockCol + sizeof(TSCKSUM));
- if (code) goto _err;
-
- n = 0;
- memcpy(*ppBuf, &hdr, sizeof(hdr));
- n += sizeof(hdr);
- for (int32_t iBlockCol = 0; iBlockCol < nBlockCol; iBlockCol++) {
- n += tPutBlockCol(*ppBuf + n, taosArrayGet(aBlockCol, iBlockCol));
- }
- taosCalcChecksumAppend(0, *ppBuf, pSubBlock->szBlockCol + sizeof(TSCKSUM));
-
- ASSERT(n == pSubBlock->szBlockCol);
-
- n = taosWriteFile(pFD, *ppBuf, pSubBlock->szBlockCol + sizeof(TSCKSUM));
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- // SBlockData
- n = taosWriteFile(pFD, pData, nData);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- return code;
-
-_err:
- return code;
-}
-
-static int32_t tsdbWriteBlockSma(TdFilePtr pFD, SBlockData *pBlockData, SSubBlock *pSubBlock, uint8_t **ppBuf) {
- int32_t code = 0;
- int64_t n;
- SColData *pColData;
-
- // prepare
- pSubBlock->nSma = 0;
+ // encode
for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
- pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
+ SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
- if (IS_VAR_DATA_TYPE(pColData->type) || (!pColData->smaOn)) continue;
+ if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type)) continue;
- pSubBlock->nSma++;
- }
- if (pSubBlock->nSma == 0) goto _exit;
+ SColumnDataAgg sma;
+ tsdbCalcColDataSMA(pColData, &sma);
- // calc
- code = tRealloc(ppBuf, sizeof(SColumnDataAgg) * pSubBlock->nSma + sizeof(TSCKSUM));
- if (code) goto _err;
- n = 0;
- for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
- pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
-
- if (IS_VAR_DATA_TYPE(pColData->type) || (!pColData->smaOn)) continue;
-
- tsdbCalcColDataSMA(pColData, &((SColumnDataAgg *)(*ppBuf))[n]);
- n++;
- }
- taosCalcChecksumAppend(0, *ppBuf, sizeof(SColumnDataAgg) * pSubBlock->nSma + sizeof(TSCKSUM));
-
- // write
- n = taosWriteFile(pFD, *ppBuf, sizeof(SColumnDataAgg) * pSubBlock->nSma + sizeof(TSCKSUM));
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
-_exit:
- return code;
-
-_err:
- return code;
-}
-
-int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, uint8_t **ppBuf1, uint8_t **ppBuf2,
- SBlockIdx *pBlockIdx, SBlock *pBlock, int8_t cmprAlg) {
- int32_t code = 0;
- SSubBlock *pSubBlock = &pBlock->aSubBlock[pBlock->nSubBlock++];
- SBlockCol blockCol;
- SBlockCol *pBlockCol = &blockCol;
- int64_t n;
- TdFilePtr pFileFD = pBlock->last ? pWriter->pLastFD : pWriter->pDataFD;
- SBlockDataHdr hdr = {.delimiter = TSDB_FILE_DLMT, .suid = pBlockIdx->suid, .uid = pBlockIdx->uid};
- uint8_t *p;
- int64_t nData;
- uint8_t *pBuf1 = NULL;
- uint8_t *pBuf2 = NULL;
- SArray *aBlockCol = NULL;
-
- if (!ppBuf1) ppBuf1 = &pBuf1;
- if (!ppBuf2) ppBuf2 = &pBuf2;
-
- tsdbUpdateBlockInfo(pBlockData, pBlock);
-
- pSubBlock->nRow = pBlockData->nRow;
- pSubBlock->cmprAlg = cmprAlg;
- if (pBlock->last) {
- pSubBlock->offset = pWriter->fLast.size;
- } else {
- pSubBlock->offset = pWriter->fData.size;
- }
-
- // ======================= BLOCK DATA =======================
- // TSDBKEY
- nData = 0;
- code = tsdbWriteBlockDataKey(pSubBlock, pBlockData, ppBuf1, &nData, ppBuf2);
- if (code) goto _err;
-
- // COLUMNS
- aBlockCol = taosArrayInit(taosArrayGetSize(pBlockData->aIdx), sizeof(SBlockCol));
- if (aBlockCol == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- int32_t offset = 0;
- for (int32_t iCol = 0; iCol < taosArrayGetSize(pBlockData->aIdx); iCol++) {
- SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iCol);
-
- ASSERT(pColData->flag);
-
- if (pColData->flag == HAS_NONE) continue;
-
- pBlockCol->cid = pColData->cid;
- pBlockCol->type = pColData->type;
- pBlockCol->smaOn = pColData->smaOn;
- pBlockCol->flag = pColData->flag;
-
- if (pColData->flag != HAS_NULL) {
- code = tsdbWriteColData(pColData, pBlockCol, pSubBlock, ppBuf1, &nData, ppBuf2);
- if (code) goto _err;
-
- pBlockCol->offset = offset;
- offset = offset + pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM);
- }
-
- if (taosArrayPush(aBlockCol, pBlockCol) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &sma));
+ if (code) goto _err;
+ pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &sma);
}
// write
- code = tsdbWriteBlockDataImpl(pFileFD, pSubBlock, hdr, aBlockCol, *ppBuf1, nData, ppBuf2);
- if (code) goto _err;
+ if (pSmaInfo->size) {
+ int32_t size = pSmaInfo->size + sizeof(TSCKSUM);
- pSubBlock->szBlock = pSubBlock->szBlockCol + sizeof(TSCKSUM) + nData;
- if (pBlock->last) {
- pWriter->fLast.size += pSubBlock->szBlock;
- } else {
- pWriter->fData.size += pSubBlock->szBlock;
+ code = tRealloc(&pWriter->aBuf[0], size);
+ if (code) goto _err;
+
+ taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
+
+ int64_t n = taosWriteFile(pWriter->pSmaFD, pWriter->aBuf[0], size);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ pSmaInfo->offset = pWriter->fSma.size;
+ pWriter->fSma.size += size;
}
- // ======================= BLOCK SMA =======================
- pSubBlock->sOffset = 0;
- pSubBlock->nSma = 0;
-
- if (pBlock->nSubBlock > 1 || pBlock->last || pBlock->hasDup) goto _exit;
-
- code = tsdbWriteBlockSma(pWriter->pSmaFD, pBlockData, pSubBlock, ppBuf1);
- if (code) goto _err;
-
- if (pSubBlock->nSma > 0) {
- pSubBlock->sOffset = pWriter->fSma.size;
- pWriter->fSma.size += (sizeof(SColumnDataAgg) * pSubBlock->nSma + sizeof(TSCKSUM));
- }
-
-_exit:
- tFree(pBuf1);
- tFree(pBuf2);
- taosArrayDestroy(aBlockCol);
return code;
_err:
- tsdbError("vgId:%d, write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
- tFree(pBuf1);
- tFree(pBuf2);
- taosArrayDestroy(aBlockCol);
+ tsdbError("vgId:%d tsdb write block sma failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
+ int8_t cmprAlg, int8_t toLast) {
+ int32_t code = 0;
+
+ ASSERT(pBlockData->nRow > 0);
+
+ pBlkInfo->offset = toLast ? pWriter->fLast.size : pWriter->fData.size;
+ pBlkInfo->szBlock = 0;
+ pBlkInfo->szKey = 0;
+
+ int32_t aBufN[4] = {0};
+ code = tCmprBlockData(pBlockData, cmprAlg, NULL, NULL, pWriter->aBuf, aBufN);
+ if (code) goto _err;
+
+ // write =================
+ TdFilePtr pFD = toLast ? pWriter->pLastFD : pWriter->pDataFD;
+
+ pBlkInfo->szKey = aBufN[3] + aBufN[2];
+ pBlkInfo->szBlock = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
+
+ int64_t n = taosWriteFile(pFD, pWriter->aBuf[3], aBufN[3]);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ n = taosWriteFile(pFD, pWriter->aBuf[2], aBufN[2]);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+
+ if (aBufN[1]) {
+ n = taosWriteFile(pFD, pWriter->aBuf[1], aBufN[1]);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ }
+
+ if (aBufN[0]) {
+ n = taosWriteFile(pFD, pWriter->aBuf[0], aBufN[0]);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ }
+
+ // update info
+ if (toLast) {
+ pWriter->fLast.size += pBlkInfo->szBlock;
+ } else {
+ pWriter->fData.size += pBlkInfo->szBlock;
+ }
+
+ // ================= SMA ====================
+ if (pSmaInfo) {
+ code = tsdbWriteBlockSma(pWriter, pBlockData, pSmaInfo);
+ if (code) goto _err;
+ }
+
+_exit:
+ tsdbTrace("vgId:%d tsdb write block data, suid:%" PRId64 " uid:%" PRId64 " nRow:%d, offset:%" PRId64 " size:%d",
+ TD_VID(pWriter->pTsdb->pVnode), pBlockData->suid, pBlockData->uid, pBlockData->nRow, pBlkInfo->offset,
+ pBlkInfo->szBlock);
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
@@ -2075,4 +1606,4 @@ int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) {
_err:
tsdbError("vgId:%d, tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index c40fb98d62..ab2b2b617a 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -27,12 +27,16 @@ struct STsdbSnapReader {
int32_t fid;
SDataFReader* pDataFReader;
SArray* aBlockIdx; // SArray
- int32_t iBlockIdx;
+ SArray* aBlockL; // SArray
SBlockIdx* pBlockIdx;
- SMapData mBlock; // SMapData
- int32_t iBlock;
- SBlockData oBlockData;
- SBlockData nBlockData;
+ SBlockL* pBlockL;
+
+ int32_t iBlockIdx;
+ int32_t iBlockL;
+ SMapData mBlock; // SMapData
+ int32_t iBlock;
+ SBlockData oBlockData;
+ SBlockData nBlockData;
// for del file
int8_t delDone;
SDelFReader* pDelFReader;
@@ -47,114 +51,116 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
while (true) {
if (pReader->pDataFReader == NULL) {
- SDFileSet* pSet =
- taosArraySearch(pReader->fs.aDFileSet, &(SDFileSet){.fid = pReader->fid}, tDFileSetCmprFn, TD_GT);
-
+ // next
+ SDFileSet dFileSet = {.fid = pReader->fid};
+ SDFileSet* pSet = taosArraySearch(pReader->fs.aDFileSet, &dFileSet, tDFileSetCmprFn, TD_GT);
if (pSet == NULL) goto _exit;
-
pReader->fid = pSet->fid;
- code = tsdbDataFReaderOpen(&pReader->pDataFReader, pReader->pTsdb, pSet);
+
+ // load
+ code = tsdbDataFReaderOpen(&pReader->pDataFReader, pTsdb, pSet);
if (code) goto _err;
- // SBlockIdx
- code = tsdbReadBlockIdx(pReader->pDataFReader, pReader->aBlockIdx, NULL);
+ code = tsdbReadBlockIdx(pReader->pDataFReader, pReader->aBlockIdx);
if (code) goto _err;
+ code = tsdbReadBlockL(pReader->pDataFReader, pReader->aBlockL);
+ if (code) goto _err;
+
+ // init
pReader->iBlockIdx = 0;
- pReader->pBlockIdx = NULL;
+ if (pReader->iBlockIdx < taosArrayGetSize(pReader->aBlockIdx)) {
+ pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx);
+
+ code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock);
+ if (code) goto _err;
+
+ pReader->iBlock = 0;
+ } else {
+ pReader->pBlockIdx = NULL;
+ }
+
+ pReader->iBlockL = 0;
+ while (true) {
+ if (pReader->iBlockL >= taosArrayGetSize(pReader->aBlockL)) {
+ pReader->pBlockL = NULL;
+ break;
+ }
+
+ pReader->pBlockL = (SBlockL*)taosArrayGet(pReader->aBlockL, pReader->iBlockL);
+ if (pReader->pBlockL->minVer <= pReader->ever && pReader->pBlockL->maxVer >= pReader->sver) {
+ // TODO
+ break;
+ }
+
+ pReader->iBlockL++;
+ }
tsdbInfo("vgId:%d, vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path,
pReader->fid);
}
while (true) {
- if (pReader->pBlockIdx == NULL) {
- if (pReader->iBlockIdx >= taosArrayGetSize(pReader->aBlockIdx)) {
- tsdbDataFReaderClose(&pReader->pDataFReader);
- break;
+ if (pReader->pBlockIdx && pReader->pBlockL) {
+ TABLEID id = {.suid = pReader->pBlockL->suid, .uid = pReader->pBlockL->minUid};
+
+ ASSERT(0);
+
+ // if (tTABLEIDCmprFn(pReader->pBlockIdx, &minId) < 0) {
+ // // TODO
+ // } else if (tTABLEIDCmprFn(pReader->pBlockIdx, &maxId) < 0) {
+ // // TODO
+ // } else {
+ // // TODO
+ // }
+ } else if (pReader->pBlockIdx) {
+ while (pReader->iBlock < pReader->mBlock.nItem) {
+ SBlock block;
+ tMapDataGetItemByIdx(&pReader->mBlock, pReader->iBlock, &block, tGetBlock);
+
+ if (block.minVer <= pReader->ever && block.maxVer >= pReader->sver) {
+ // load data (todo)
+ }
+
+ // next
+ pReader->iBlock++;
+ if (*ppData) break;
}
- pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx);
- pReader->iBlockIdx++;
-
- code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock, NULL);
- if (code) goto _err;
-
- pReader->iBlock = 0;
- }
-
- SBlock block;
- SBlock* pBlock = █
- while (true) {
if (pReader->iBlock >= pReader->mBlock.nItem) {
- pReader->pBlockIdx = NULL;
- break;
+ pReader->iBlockIdx++;
+ if (pReader->iBlockIdx < taosArrayGetSize(pReader->aBlockIdx)) {
+ pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx);
+
+ code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock);
+ if (code) goto _err;
+
+ pReader->iBlock = 0;
+ } else {
+ pReader->pBlockIdx = NULL;
+ }
}
- tMapDataGetItemByIdx(&pReader->mBlock, pReader->iBlock, pBlock, tGetBlock);
- pReader->iBlock++;
+ if (*ppData) goto _exit;
+ } else if (pReader->pBlockL) {
+ while (pReader->pBlockL) {
+ if (pReader->pBlockL->minVer <= pReader->ever && pReader->pBlockL->maxVer >= pReader->sver) {
+ // load data (todo)
+ }
- if (pBlock->minVersion > pReader->ever || pBlock->maxVersion < pReader->sver) continue;
+ // next
+ pReader->iBlockL++;
+ if (pReader->iBlockL < taosArrayGetSize(pReader->aBlockL)) {
+ pReader->pBlockL = (SBlockL*)taosArrayGetSize(pReader->aBlockL);
+ } else {
+ pReader->pBlockL = NULL;
+ }
- code = tsdbReadBlockData(pReader->pDataFReader, pReader->pBlockIdx, pBlock, &pReader->oBlockData, NULL, NULL);
- if (code) goto _err;
-
- // filter
- tBlockDataReset(&pReader->nBlockData);
- for (int32_t iColData = 0; iColData < taosArrayGetSize(pReader->oBlockData.aIdx); iColData++) {
- SColData* pColDataO = tBlockDataGetColDataByIdx(&pReader->oBlockData, iColData);
- SColData* pColDataN = NULL;
-
- code = tBlockDataAddColData(&pReader->nBlockData, taosArrayGetSize(pReader->nBlockData.aIdx), &pColDataN);
- if (code) goto _err;
-
- tColDataInit(pColDataN, pColDataO->cid, pColDataO->type, pColDataO->smaOn);
+ if (*ppData) goto _exit;
}
-
- for (int32_t iRow = 0; iRow < pReader->oBlockData.nRow; iRow++) {
- TSDBROW row = tsdbRowFromBlockData(&pReader->oBlockData, iRow);
- int64_t version = TSDBROW_VERSION(&row);
-
- tsdbTrace("vgId:%d, vnode snapshot tsdb read for %s, %" PRId64 "(%" PRId64 " , %" PRId64 ")",
- TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path, version, pReader->sver, pReader->ever);
-
- if (version < pReader->sver || version > pReader->ever) continue;
-
- code = tBlockDataAppendRow(&pReader->nBlockData, &row, NULL);
- if (code) goto _err;
- }
-
- if (pReader->nBlockData.nRow <= 0) {
- continue;
- }
-
- // org data
- // compress data (todo)
- int32_t size = sizeof(TABLEID) + tPutBlockData(NULL, &pReader->nBlockData);
-
- *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + size);
- if (*ppData == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
- pHdr->type = pReader->type;
- pHdr->size = size;
-
- TABLEID* pId = (TABLEID*)(&pHdr[1]);
- pId->suid = pReader->pBlockIdx->suid;
- pId->uid = pReader->pBlockIdx->uid;
-
- tPutBlockData((uint8_t*)(&pId[1]), &pReader->nBlockData);
-
- tsdbInfo("vgId:%d, vnode snapshot read data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64
- " iBlock:%d minVersion:%d maxVersion:%d nRow:%d out of %d size:%d",
- TD_VID(pTsdb->pVnode), pTsdb->path, pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid,
- pReader->iBlock - 1, pBlock->minVersion, pBlock->maxVersion, pReader->nBlockData.nRow, pBlock->nRow,
- size);
-
- goto _exit;
+ } else {
+ tsdbDataFReaderClose(&pReader->pDataFReader);
+ break;
}
}
}
@@ -179,11 +185,11 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
}
// open
- code = tsdbDelFReaderOpen(&pReader->pDelFReader, pDelFile, pTsdb, NULL);
+ code = tsdbDelFReaderOpen(&pReader->pDelFReader, pDelFile, pTsdb);
if (code) goto _err;
// read index
- code = tsdbReadDelIdx(pReader->pDelFReader, pReader->aDelIdx, NULL);
+ code = tsdbReadDelIdx(pReader->pDelFReader, pReader->aDelIdx);
if (code) goto _err;
pReader->iDelIdx = 0;
@@ -199,7 +205,7 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
pReader->iDelIdx++;
- code = tsdbReadDelData(pReader->pDelFReader, pDelIdx, pReader->aDelData, NULL);
+ code = tsdbReadDelData(pReader->pDelFReader, pDelIdx, pReader->aDelData);
if (code) goto _err;
int32_t size = 0;
@@ -292,10 +298,15 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
+ pReader->aBlockL = taosArrayInit(0, sizeof(SBlockL));
+ if (pReader->aBlockL == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
pReader->mBlock = tMapDataInit();
- code = tBlockDataInit(&pReader->oBlockData);
+ code = tBlockDataCreate(&pReader->oBlockData);
if (code) goto _err;
- code = tBlockDataInit(&pReader->nBlockData);
+ code = tBlockDataCreate(&pReader->nBlockData);
if (code) goto _err;
pReader->aDelIdx = taosArrayInit(0, sizeof(SDelIdx));
@@ -327,10 +338,11 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) {
if (pReader->pDataFReader) {
tsdbDataFReaderClose(&pReader->pDataFReader);
}
+ taosArrayDestroy(pReader->aBlockL);
taosArrayDestroy(pReader->aBlockIdx);
tMapDataClear(&pReader->mBlock);
- tBlockDataClear(&pReader->oBlockData, 1);
- tBlockDataClear(&pReader->nBlockData, 1);
+ tBlockDataDestroy(&pReader->oBlockData, 1);
+ tBlockDataDestroy(&pReader->nBlockData, 1);
if (pReader->pDelFReader) {
tsdbDelFReaderClose(&pReader->pDelFReader);
@@ -405,6 +417,7 @@ struct STsdbSnapWriter {
int8_t cmprAlg;
int64_t commitID;
+ uint8_t* aBuf[5];
// for data file
SBlockData bData;
@@ -418,6 +431,9 @@ struct STsdbSnapWriter {
SBlockData* pBlockData;
int32_t iRow;
SBlockData bDataR;
+ SArray* aBlockL; // SArray
+ int32_t iBlockL;
+ SBlockData lDataR;
SDataFWriter* pDataFWriter;
SBlockIdx* pBlockIdxW; // NULL when no committing table
@@ -427,6 +443,7 @@ struct STsdbSnapWriter {
SMapData mBlockW; // SMapData
SArray* aBlockIdxW; // SArray
+ SArray* aBlockLW; // SArray
// for del file
SDelFReader* pDelFReader;
@@ -437,25 +454,6 @@ struct STsdbSnapWriter {
SArray* aDelIdxW;
};
-static int32_t tsdbSnapWriteAppendData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
- int32_t code = 0;
- int32_t iRow = 0; // todo
- int32_t nRow = 0; // todo
- SBlockData* pBlockData = NULL; // todo
-
- while (iRow < nRow) {
- code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pBlockData, iRow), NULL);
- if (code) goto _err;
- }
-
- return code;
-
-_err:
- tsdbError("vgId:%d, tsdb snapshot write append data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
- return code;
-}
-
static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
int32_t code = 0;
@@ -467,20 +465,21 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
if (pWriter->pBlockData) {
ASSERT(pWriter->iRow < pWriter->pBlockData->nRow);
while (pWriter->iRow < pWriter->pBlockData->nRow) {
- code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), NULL);
+ code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), NULL,
+ 0); // todo
if (code) goto _err;
if (pWriter->bDataW.nRow >= pWriter->maxRow * 4 / 5) {
- pWriter->blockW.last = 0;
- code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- &pWriter->blockW, pWriter->cmprAlg);
+ // pWriter->blockW.last = 0;
+ // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
+ // &pWriter->blockW, pWriter->cmprAlg);
if (code) goto _err;
code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
if (code) goto _err;
tBlockReset(&pWriter->blockW);
- tBlockDataClearData(&pWriter->bDataW);
+ tBlockDataClear(&pWriter->bDataW);
}
pWriter->iRow++;
@@ -489,16 +488,16 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
// write remain data if has
if (pWriter->bDataW.nRow > 0) {
- pWriter->blockW.last = 0;
+ // pWriter->blockW.last = 0;
if (pWriter->bDataW.nRow < pWriter->minRow) {
if (pWriter->iBlock > pWriter->mBlock.nItem) {
- pWriter->blockW.last = 1;
+ // pWriter->blockW.last = 1;
}
}
- code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- &pWriter->blockW, pWriter->cmprAlg);
- if (code) goto _err;
+ // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
+ // &pWriter->blockW, pWriter->cmprAlg);
+ // if (code) goto _err;
code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
if (code) goto _err;
@@ -510,16 +509,16 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
SBlock block;
tMapDataGetItemByIdx(&pWriter->mBlock, pWriter->iBlock, &block, tGetBlock);
- if (block.last) {
- code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, &pWriter->bDataR, NULL, NULL);
- if (code) goto _err;
+ // if (block.last) {
+ // code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, &pWriter->bDataR, NULL, NULL);
+ // if (code) goto _err;
- tBlockReset(&block);
- block.last = 1;
- code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pWriter->pBlockIdxW, &block,
- pWriter->cmprAlg);
- if (code) goto _err;
- }
+ // tBlockReset(&block);
+ // block.last = 1;
+ // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pWriter->pBlockIdxW, &block,
+ // pWriter->cmprAlg);
+ // if (code) goto _err;
+ // }
code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
if (code) goto _err;
@@ -528,8 +527,8 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
}
// SBlock
- code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, NULL, pWriter->pBlockIdxW);
- if (code) goto _err;
+ // code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, NULL, pWriter->pBlockIdxW);
+ // if (code) goto _err;
// SBlockIdx
if (taosArrayPush(pWriter->aBlockIdxW, pWriter->pBlockIdxW) == NULL) {
@@ -550,7 +549,7 @@ _err:
static int32_t tsdbSnapMoveWriteTableData(STsdbSnapWriter* pWriter, SBlockIdx* pBlockIdx) {
int32_t code = 0;
- code = tsdbReadBlock(pWriter->pDataFReader, pBlockIdx, &pWriter->mBlock, NULL);
+ code = tsdbReadBlock(pWriter->pDataFReader, pBlockIdx, &pWriter->mBlock);
if (code) goto _err;
// SBlockData
@@ -559,16 +558,17 @@ static int32_t tsdbSnapMoveWriteTableData(STsdbSnapWriter* pWriter, SBlockIdx* p
for (int32_t iBlock = 0; iBlock < pWriter->mBlock.nItem; iBlock++) {
tMapDataGetItemByIdx(&pWriter->mBlock, iBlock, &block, tGetBlock);
- if (block.last) {
- code = tsdbReadBlockData(pWriter->pDataFReader, pBlockIdx, &block, &pWriter->bDataR, NULL, NULL);
- if (code) goto _err;
+ // if (block.last) {
+ // code = tsdbReadBlockData(pWriter->pDataFReader, pBlockIdx, &block, &pWriter->bDataR, NULL, NULL);
+ // if (code) goto _err;
- tBlockReset(&block);
- block.last = 1;
- code =
- tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pBlockIdx, &block, pWriter->cmprAlg);
- if (code) goto _err;
- }
+ // tBlockReset(&block);
+ // block.last = 1;
+ // code =
+ // tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pBlockIdx, &block,
+ // pWriter->cmprAlg);
+ // if (code) goto _err;
+ // }
code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
if (code) goto _err;
@@ -576,7 +576,7 @@ static int32_t tsdbSnapMoveWriteTableData(STsdbSnapWriter* pWriter, SBlockIdx* p
// SBlock
SBlockIdx blockIdx = {.suid = pBlockIdx->suid, .uid = pBlockIdx->uid};
- code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, NULL, &blockIdx);
+ code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, &blockIdx);
if (code) goto _err;
// SBlockIdx
@@ -601,9 +601,9 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
TSDBROW row;
TSDBROW* pRow = &row;
- // correct schema
- code = tBlockDataCorrectSchema(&pWriter->bDataW, pBlockData);
- if (code) goto _err;
+ // // correct schema
+ // code = tBlockDataCorrectSchema(&pWriter->bDataW, pBlockData);
+ // if (code) goto _err;
// loop to merge
*pRow = tsdbRowFromBlockData(pBlockData, iRow);
@@ -618,8 +618,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
ASSERT(c);
if (c < 0) {
- code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL);
- if (code) goto _err;
+ // code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL);
+ // if (code) goto _err;
iRow++;
if (iRow < pWriter->pBlockData->nRow) {
@@ -628,8 +628,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
pRow = NULL;
}
} else if (c > 0) {
- code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), NULL);
- if (code) goto _err;
+ // code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow),
+ // NULL); if (code) goto _err;
pWriter->iRow++;
if (pWriter->iRow >= pWriter->pBlockData->nRow) {
@@ -647,16 +647,15 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
tMapDataGetItemByIdx(&pWriter->mBlock, pWriter->iBlock, &block, tGetBlock);
- if (block.last) {
- pWriter->pBlockData = &pWriter->bDataR;
+ // if (block.last) {
+ // pWriter->pBlockData = &pWriter->bDataR;
- code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL, NULL);
- if (code) goto _err;
- pWriter->iRow = 0;
+ // code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL,
+ // NULL); if (code) goto _err; pWriter->iRow = 0;
- pWriter->iBlock++;
- break;
- }
+ // pWriter->iBlock++;
+ // break;
+ // }
c = tsdbKeyCmprFn(&block.maxKey, &key);
@@ -664,16 +663,16 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
if (c < 0) {
if (pWriter->bDataW.nRow) {
- pWriter->blockW.last = 0;
- code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- &pWriter->blockW, pWriter->cmprAlg);
- if (code) goto _err;
+ // pWriter->blockW.last = 0;
+ // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
+ // &pWriter->blockW, pWriter->cmprAlg);
+ // if (code) goto _err;
code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
if (code) goto _err;
tBlockReset(&pWriter->blockW);
- tBlockDataClearData(&pWriter->bDataW);
+ tBlockDataClear(&pWriter->bDataW);
}
code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
@@ -687,9 +686,10 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
if (c > 0) {
pWriter->pBlockData = &pWriter->bDataR;
- code =
- tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL, NULL);
- if (code) goto _err;
+ // code =
+ // tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL,
+ // NULL);
+ // if (code) goto _err;
pWriter->iRow = 0;
pWriter->iBlock++;
@@ -700,8 +700,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
if (pWriter->pBlockData) continue;
- code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL);
- if (code) goto _err;
+ // code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL);
+ // if (code) goto _err;
iRow++;
if (iRow < pBlockData->nRow) {
@@ -715,15 +715,15 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
if (pWriter->bDataW.nRow < pWriter->maxRow * 4 / 5) continue;
_write_block:
- code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- &pWriter->blockW, pWriter->cmprAlg);
- if (code) goto _err;
+ // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
+ // &pWriter->blockW, pWriter->cmprAlg);
+ // if (code) goto _err;
code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
if (code) goto _err;
tBlockReset(&pWriter->blockW);
- tBlockDataClearData(&pWriter->bDataW);
+ tBlockDataClear(&pWriter->bDataW);
}
return code;
@@ -789,7 +789,7 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) {
}
if (pWriter->pBlockIdx) {
- code = tsdbReadBlock(pWriter->pDataFReader, pWriter->pBlockIdx, &pWriter->mBlock, NULL);
+ code = tsdbReadBlock(pWriter->pDataFReader, pWriter->pBlockIdx, &pWriter->mBlock);
if (code) goto _err;
} else {
tMapDataReset(&pWriter->mBlock);
@@ -831,9 +831,11 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) {
if (pWriter->pDataFWriter == NULL) goto _exit;
+ // finish current table
code = tsdbSnapWriteTableDataEnd(pWriter);
if (code) goto _err;
+ // move remain table
while (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) {
code = tsdbSnapMoveWriteTableData(pWriter, (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx));
if (code) goto _err;
@@ -841,8 +843,16 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) {
pWriter->iBlockIdx++;
}
- code = tsdbWriteBlockIdx(pWriter->pDataFWriter, pWriter->aBlockIdxW, NULL);
- if (code) goto _err;
+ // write remain stuff
+ if (taosArrayGetSize(pWriter->aBlockLW) > 0) {
+ code = tsdbWriteBlockL(pWriter->pDataFWriter, pWriter->aBlockIdxW);
+ if (code) goto _err;
+ }
+
+ if (taosArrayGetSize(pWriter->aBlockIdx) > 0) {
+ code = tsdbWriteBlockIdx(pWriter->pDataFWriter, pWriter->aBlockIdxW);
+ if (code) goto _err;
+ }
code = tsdbFSUpsertFSet(&pWriter->fs, &pWriter->pDataFWriter->wSet);
if (code) goto _err;
@@ -866,19 +876,22 @@ _err:
}
static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
- int32_t code = 0;
- STsdb* pTsdb = pWriter->pTsdb;
- TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr));
- int64_t n;
+ int32_t code = 0;
+ STsdb* pTsdb = pWriter->pTsdb;
+ SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
+ TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr));
+ int64_t n;
// decode
SBlockData* pBlockData = &pWriter->bData;
- n = tGetBlockData(pData + sizeof(SSnapDataHdr) + sizeof(TABLEID), pBlockData);
- ASSERT(n + sizeof(SSnapDataHdr) + sizeof(TABLEID) == nData);
+ code = tDecmprBlockData(pData + sizeof(SSnapDataHdr) + sizeof(TABLEID), pHdr->size - sizeof(TABLEID), pBlockData,
+ pWriter->aBuf);
+ if (code) goto _err;
// open file
- TSDBKEY keyFirst = tBlockDataFirstKey(pBlockData);
- TSDBKEY keyLast = tBlockDataLastKey(pBlockData);
+ TSDBKEY keyFirst = {.version = pBlockData->aVersion[0], .ts = pBlockData->aTSKEY[0]};
+ TSDBKEY keyLast = {.version = pBlockData->aVersion[pBlockData->nRow - 1],
+ .ts = pBlockData->aTSKEY[pBlockData->nRow - 1]};
int32_t fid = tsdbKeyFid(keyFirst.ts, pWriter->minutes, pWriter->precision);
ASSERT(fid == tsdbKeyFid(keyLast.ts, pWriter->minutes, pWriter->precision));
@@ -895,11 +908,15 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
code = tsdbDataFReaderOpen(&pWriter->pDataFReader, pTsdb, pSet);
if (code) goto _err;
- code = tsdbReadBlockIdx(pWriter->pDataFReader, pWriter->aBlockIdx, NULL);
+ code = tsdbReadBlockIdx(pWriter->pDataFReader, pWriter->aBlockIdx);
+ if (code) goto _err;
+
+ code = tsdbReadBlockL(pWriter->pDataFReader, pWriter->aBlockL);
if (code) goto _err;
} else {
ASSERT(pWriter->pDataFReader == NULL);
taosArrayClear(pWriter->aBlockIdx);
+ taosArrayClear(pWriter->aBlockL);
}
pWriter->iBlockIdx = 0;
pWriter->pBlockIdx = NULL;
@@ -907,7 +924,9 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
pWriter->iBlock = 0;
pWriter->pBlockData = NULL;
pWriter->iRow = 0;
+ pWriter->iBlockL = 0;
tBlockDataReset(&pWriter->bDataR);
+ tBlockDataReset(&pWriter->lDataR);
// write
SHeadFile fHead;
@@ -928,7 +947,7 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
wSet.fid = fid;
fHead = (SHeadFile){.commitID = pWriter->commitID, .offset = 0, .size = 0};
fData = (SDataFile){.commitID = pWriter->commitID, .size = 0};
- fLast = (SLastFile){.commitID = pWriter->commitID, .size = 0};
+ fLast = (SLastFile){.commitID = pWriter->commitID, .size = 0, .offset = 0};
fSma = (SSmaFile){.commitID = pWriter->commitID, .size = 0};
}
@@ -936,6 +955,7 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
if (code) goto _err;
taosArrayClear(pWriter->aBlockIdxW);
+ taosArrayClear(pWriter->aBlockLW);
tMapDataReset(&pWriter->mBlockW);
pWriter->pBlockIdxW = NULL;
tBlockDataReset(&pWriter->bDataW);
@@ -963,10 +983,10 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32
// reader
if (pDelFile) {
- code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb, NULL);
+ code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb);
if (code) goto _err;
- code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR, NULL);
+ code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR);
if (code) goto _err;
}
@@ -980,52 +1000,16 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32
TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr));
while (true) {
- SDelIdx* pDelIdx = NULL;
- int64_t n = sizeof(SSnapDataHdr) + sizeof(TABLEID);
- SDelData delData;
- SDelIdx delIdx;
- int8_t toBreak = 0;
+ if (pWriter->iDelIdx >= taosArrayGetSize(pWriter->aDelIdxR)) break;
+ if (tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) >= 0) break;
- if (pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR)) {
- pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
- }
+ SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
- if (pDelIdx) {
- int32_t c = tTABLEIDCmprFn(&id, pDelIdx);
- if (c < 0) {
- goto _new_del;
- } else {
- code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData, NULL);
- if (code) goto _err;
+ code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
+ if (code) goto _err;
- pWriter->iDelIdx++;
- if (c == 0) {
- toBreak = 1;
- delIdx = (SDelIdx){.suid = id.suid, .uid = id.uid};
- goto _merge_del;
- } else {
- delIdx = (SDelIdx){.suid = pDelIdx->suid, .uid = pDelIdx->uid};
- goto _write_del;
- }
- }
- }
-
- _new_del:
- toBreak = 1;
- delIdx = (SDelIdx){.suid = id.suid, .uid = id.uid};
- taosArrayClear(pWriter->aDelData);
-
- _merge_del:
- while (n < nData) {
- n += tGetDelData(pData + n, &delData);
- if (taosArrayPush(pWriter->aDelData, &delData) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
-
- _write_del:
- code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, NULL, &delIdx);
+ SDelIdx delIdx = *pDelIdx;
+ code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
if (code) goto _err;
if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) {
@@ -1033,7 +1017,40 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32
goto _err;
}
- if (toBreak) break;
+ pWriter->iDelIdx++;
+ }
+
+ if (pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR) &&
+ tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) == 0) {
+ SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
+
+ code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
+ if (code) goto _err;
+
+ pWriter->iDelIdx++;
+ } else {
+ taosArrayClear(pWriter->aDelData);
+ }
+
+ int64_t n = sizeof(SSnapDataHdr) + sizeof(TABLEID);
+ while (n < nData) {
+ SDelData delData;
+
+ n += tGetDelData(pData + n, &delData);
+
+ if (taosArrayPush(pWriter->aDelData, &delData) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+
+ SDelIdx delIdx = {.suid = id.suid, .uid = id.uid};
+ code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
+ if (code) goto _err;
+
+ if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
}
_exit:
@@ -1054,11 +1071,11 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) {
for (; pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR); pWriter->iDelIdx++) {
SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
- code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData, NULL);
+ code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
if (code) goto _err;
- SDelIdx delIdx = (SDelIdx){.suid = pDelIdx->suid, .uid = pDelIdx->uid};
- code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, NULL, &delIdx);
+ SDelIdx delIdx = *pDelIdx;
+ code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
if (code) goto _err;
if (taosArrayPush(pWriter->aDelIdxR, &delIdx) == NULL) {
@@ -1117,7 +1134,7 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
pWriter->commitID = pTsdb->pVnode->state.commitID;
// for data file
- code = tBlockDataInit(&pWriter->bData);
+ code = tBlockDataCreate(&pWriter->bData);
if (code) goto _err;
pWriter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
@@ -1125,17 +1142,29 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- code = tBlockDataInit(&pWriter->bDataR);
+ code = tBlockDataCreate(&pWriter->bDataR);
if (code) goto _err;
+ pWriter->aBlockL = taosArrayInit(0, sizeof(SBlockL));
+ if (pWriter->aBlockL == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
pWriter->aBlockIdxW = taosArrayInit(0, sizeof(SBlockIdx));
if (pWriter->aBlockIdxW == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- code = tBlockDataInit(&pWriter->bDataW);
+ code = tBlockDataCreate(&pWriter->bDataW);
if (code) goto _err;
+ pWriter->aBlockLW = taosArrayInit(0, sizeof(SBlockL));
+ if (pWriter->aBlockLW == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+
// for del file
pWriter->aDelIdxR = taosArrayInit(0, sizeof(SDelIdx));
if (pWriter->aDelIdxR == NULL) {
@@ -1186,6 +1215,10 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
if (code) goto _err;
}
+ for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t*); iBuf++) {
+ tFree(pWriter->aBuf[iBuf]);
+ }
+
tsdbInfo("vgId:%d, vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
taosMemoryFree(pWriter);
*ppWriter = NULL;
@@ -1224,6 +1257,7 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
_exit:
tsdbDebug("vgId:%d, tsdb snapshot write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
+
return code;
_err:
diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c
index 60f0b18a62..6db9d5e6f4 100644
--- a/source/dnode/vnode/src/tsdb/tsdbUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -152,25 +152,6 @@ int32_t tTABLEIDCmprFn(const void *p1, const void *p2) {
return 0;
}
-// TSDBKEY ======================================================
-static FORCE_INLINE int32_t tPutTSDBKEY(uint8_t *p, TSDBKEY *pKey) {
- int32_t n = 0;
-
- n += tPutI64v(p ? p + n : p, pKey->version);
- n += tPutI64(p ? p + n : p, pKey->ts);
-
- return n;
-}
-
-static FORCE_INLINE int32_t tGetTSDBKEY(uint8_t *p, TSDBKEY *pKey) {
- int32_t n = 0;
-
- n += tGetI64v(p + n, &pKey->version);
- n += tGetI64(p + n, &pKey->ts);
-
- return n;
-}
-
// SBlockIdx ======================================================
int32_t tPutBlockIdx(uint8_t *p, void *ph) {
int32_t n = 0;
@@ -215,34 +196,51 @@ int32_t tCmprBlockIdx(void const *lhs, void const *rhs) {
return 0;
}
+int32_t tCmprBlockL(void const *lhs, void const *rhs) {
+ SBlockIdx *lBlockIdx = (SBlockIdx *)lhs;
+ SBlockL *rBlockL = (SBlockL *)rhs;
+
+ if (lBlockIdx->suid < rBlockL->suid) {
+ return -1;
+ } else if (lBlockIdx->suid > rBlockL->suid) {
+ return 1;
+ }
+
+ if (lBlockIdx->uid < rBlockL->minUid) {
+ return -1;
+ } else if (lBlockIdx->uid > rBlockL->maxUid) {
+ return 1;
+ }
+
+ return 0;
+}
+
// SBlock ======================================================
void tBlockReset(SBlock *pBlock) {
- *pBlock =
- (SBlock){.minKey = TSDBKEY_MAX, .maxKey = TSDBKEY_MIN, .minVersion = VERSION_MAX, .maxVersion = VERSION_MIN};
+ *pBlock = (SBlock){.minKey = TSDBKEY_MAX, .maxKey = TSDBKEY_MIN, .minVer = VERSION_MAX, .maxVer = VERSION_MIN};
}
int32_t tPutBlock(uint8_t *p, void *ph) {
int32_t n = 0;
SBlock *pBlock = (SBlock *)ph;
- n += tPutTSDBKEY(p ? p + n : p, &pBlock->minKey);
- n += tPutTSDBKEY(p ? p + n : p, &pBlock->maxKey);
- n += tPutI64v(p ? p + n : p, pBlock->minVersion);
- n += tPutI64v(p ? p + n : p, pBlock->maxVersion);
+ n += tPutI64v(p ? p + n : p, pBlock->minKey.version);
+ n += tPutI64v(p ? p + n : p, pBlock->minKey.ts);
+ n += tPutI64v(p ? p + n : p, pBlock->maxKey.version);
+ n += tPutI64v(p ? p + n : p, pBlock->maxKey.ts);
+ n += tPutI64v(p ? p + n : p, pBlock->minVer);
+ n += tPutI64v(p ? p + n : p, pBlock->maxVer);
n += tPutI32v(p ? p + n : p, pBlock->nRow);
- n += tPutI8(p ? p + n : p, pBlock->last);
n += tPutI8(p ? p + n : p, pBlock->hasDup);
n += tPutI8(p ? p + n : p, pBlock->nSubBlock);
for (int8_t iSubBlock = 0; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].nRow);
- n += tPutI8(p ? p + n : p, pBlock->aSubBlock[iSubBlock].cmprAlg);
n += tPutI64v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].offset);
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szBlockCol);
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szVersion);
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szTSKEY);
n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szBlock);
- n += tPutI64v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].sOffset);
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].nSma);
+ n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szKey);
+ }
+ if (pBlock->nSubBlock == 1 && !pBlock->hasDup) {
+ n += tPutI64v(p ? p + n : p, pBlock->smaInfo.offset);
+ n += tPutI32v(p ? p + n : p, pBlock->smaInfo.size);
}
return n;
@@ -252,24 +250,26 @@ int32_t tGetBlock(uint8_t *p, void *ph) {
int32_t n = 0;
SBlock *pBlock = (SBlock *)ph;
- n += tGetTSDBKEY(p + n, &pBlock->minKey);
- n += tGetTSDBKEY(p + n, &pBlock->maxKey);
- n += tGetI64v(p + n, &pBlock->minVersion);
- n += tGetI64v(p + n, &pBlock->maxVersion);
+ n += tGetI64v(p + n, &pBlock->minKey.version);
+ n += tGetI64v(p + n, &pBlock->minKey.ts);
+ n += tGetI64v(p + n, &pBlock->maxKey.version);
+ n += tGetI64v(p + n, &pBlock->maxKey.ts);
+ n += tGetI64v(p + n, &pBlock->minVer);
+ n += tGetI64v(p + n, &pBlock->maxVer);
n += tGetI32v(p + n, &pBlock->nRow);
- n += tGetI8(p + n, &pBlock->last);
n += tGetI8(p + n, &pBlock->hasDup);
n += tGetI8(p + n, &pBlock->nSubBlock);
for (int8_t iSubBlock = 0; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].nRow);
- n += tGetI8(p + n, &pBlock->aSubBlock[iSubBlock].cmprAlg);
n += tGetI64v(p + n, &pBlock->aSubBlock[iSubBlock].offset);
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szBlockCol);
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szVersion);
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szTSKEY);
n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szBlock);
- n += tGetI64v(p + n, &pBlock->aSubBlock[iSubBlock].sOffset);
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].nSma);
+ n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szKey);
+ }
+ if (pBlock->nSubBlock == 1 && !pBlock->hasDup) {
+ n += tGetI64v(p + n, &pBlock->smaInfo.offset);
+ n += tGetI32v(p + n, &pBlock->smaInfo.size);
+ } else {
+ pBlock->smaInfo.offset = 0;
+ pBlock->smaInfo.size = 0;
}
return n;
@@ -290,10 +290,48 @@ int32_t tBlockCmprFn(const void *p1, const void *p2) {
bool tBlockHasSma(SBlock *pBlock) {
if (pBlock->nSubBlock > 1) return false;
- if (pBlock->last) return false;
if (pBlock->hasDup) return false;
- return pBlock->aSubBlock[0].nSma > 0;
+ return pBlock->smaInfo.size > 0;
+}
+
+// SBlockL ======================================================
+int32_t tPutBlockL(uint8_t *p, void *ph) {
+ int32_t n = 0;
+ SBlockL *pBlockL = (SBlockL *)ph;
+
+ n += tPutI64(p ? p + n : p, pBlockL->suid);
+ n += tPutI64(p ? p + n : p, pBlockL->minUid);
+ n += tPutI64(p ? p + n : p, pBlockL->maxUid);
+ n += tPutI64v(p ? p + n : p, pBlockL->minKey);
+ n += tPutI64v(p ? p + n : p, pBlockL->maxKey);
+ n += tPutI64v(p ? p + n : p, pBlockL->minVer);
+ n += tPutI64v(p ? p + n : p, pBlockL->maxVer);
+ n += tPutI32v(p ? p + n : p, pBlockL->nRow);
+ n += tPutI64v(p ? p + n : p, pBlockL->bInfo.offset);
+ n += tPutI32v(p ? p + n : p, pBlockL->bInfo.szBlock);
+ n += tPutI32v(p ? p + n : p, pBlockL->bInfo.szKey);
+
+ return n;
+}
+
+int32_t tGetBlockL(uint8_t *p, void *ph) {
+ int32_t n = 0;
+ SBlockL *pBlockL = (SBlockL *)ph;
+
+ n += tGetI64(p + n, &pBlockL->suid);
+ n += tGetI64(p + n, &pBlockL->minUid);
+ n += tGetI64(p + n, &pBlockL->maxUid);
+ n += tGetI64v(p + n, &pBlockL->minKey);
+ n += tGetI64v(p + n, &pBlockL->maxKey);
+ n += tGetI64v(p + n, &pBlockL->minVer);
+ n += tGetI64v(p + n, &pBlockL->maxVer);
+ n += tGetI32v(p + n, &pBlockL->nRow);
+ n += tGetI64v(p + n, &pBlockL->bInfo.offset);
+ n += tGetI32v(p + n, &pBlockL->bInfo.szBlock);
+ n += tGetI32v(p + n, &pBlockL->bInfo.szKey);
+
+ return n;
}
// SBlockCol ======================================================
@@ -307,15 +345,25 @@ int32_t tPutBlockCol(uint8_t *p, void *ph) {
n += tPutI8(p ? p + n : p, pBlockCol->type);
n += tPutI8(p ? p + n : p, pBlockCol->smaOn);
n += tPutI8(p ? p + n : p, pBlockCol->flag);
+ n += tPutI32v(p ? p + n : p, pBlockCol->szOrigin);
if (pBlockCol->flag != HAS_NULL) {
+ if (pBlockCol->flag != HAS_VALUE) {
+ n += tPutI32v(p ? p + n : p, pBlockCol->szBitmap);
+ }
+
+ if (IS_VAR_DATA_TYPE(pBlockCol->type)) {
+ n += tPutI32v(p ? p + n : p, pBlockCol->szOffset);
+ }
+
+ if (pBlockCol->flag != (HAS_NULL | HAS_NONE)) {
+ n += tPutI32v(p ? p + n : p, pBlockCol->szValue);
+ }
+
n += tPutI32v(p ? p + n : p, pBlockCol->offset);
- n += tPutI32v(p ? p + n : p, pBlockCol->szBitmap);
- n += tPutI32v(p ? p + n : p, pBlockCol->szOffset);
- n += tPutI32v(p ? p + n : p, pBlockCol->szValue);
- n += tPutI32v(p ? p + n : p, pBlockCol->szOrigin);
}
+_exit:
return n;
}
@@ -327,15 +375,29 @@ int32_t tGetBlockCol(uint8_t *p, void *ph) {
n += tGetI8(p + n, &pBlockCol->type);
n += tGetI8(p + n, &pBlockCol->smaOn);
n += tGetI8(p + n, &pBlockCol->flag);
+ n += tGetI32v(p + n, &pBlockCol->szOrigin);
ASSERT(pBlockCol->flag && (pBlockCol->flag != HAS_NONE));
+ pBlockCol->szBitmap = 0;
+ pBlockCol->szOffset = 0;
+ pBlockCol->szValue = 0;
+ pBlockCol->offset = 0;
+
if (pBlockCol->flag != HAS_NULL) {
+ if (pBlockCol->flag != HAS_VALUE) {
+ n += tGetI32v(p + n, &pBlockCol->szBitmap);
+ }
+
+ if (IS_VAR_DATA_TYPE(pBlockCol->type)) {
+ n += tGetI32v(p + n, &pBlockCol->szOffset);
+ }
+
+ if (pBlockCol->flag != (HAS_NULL | HAS_NONE)) {
+ n += tGetI32v(p + n, &pBlockCol->szValue);
+ }
+
n += tGetI32v(p + n, &pBlockCol->offset);
- n += tGetI32v(p + n, &pBlockCol->szBitmap);
- n += tGetI32v(p + n, &pBlockCol->szOffset);
- n += tGetI32v(p + n, &pBlockCol->szValue);
- n += tGetI32v(p + n, &pBlockCol->szOrigin);
}
return n;
@@ -866,6 +928,9 @@ int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal) {
size = BIT2_SIZE(pColData->nVal + 1);
code = tRealloc(&pColData->pBitMap, size);
if (code) goto _exit;
+ if ((pColData->nVal & 3) == 0) {
+ pColData->pBitMap[pColData->nVal >> 2] = 0;
+ }
// put value
if (pColVal->isNone) {
@@ -910,13 +975,14 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) {
int32_t size;
ASSERT(pColDataSrc->nVal > 0);
+ ASSERT(pColDataDest->cid = pColDataSrc->cid);
+ ASSERT(pColDataDest->type = pColDataSrc->type);
- pColDataDest->cid = pColDataSrc->cid;
- pColDataDest->type = pColDataSrc->type;
pColDataDest->smaOn = pColDataSrc->smaOn;
pColDataDest->nVal = pColDataSrc->nVal;
pColDataDest->flag = pColDataSrc->flag;
+ // bitmap
if (pColDataSrc->flag != HAS_NONE && pColDataSrc->flag != HAS_NULL && pColDataSrc->flag != HAS_VALUE) {
size = BIT2_SIZE(pColDataSrc->nVal);
code = tRealloc(&pColDataDest->pBitMap, size);
@@ -924,6 +990,7 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) {
memcpy(pColDataDest->pBitMap, pColDataSrc->pBitMap, size);
}
+ // offset
if (IS_VAR_DATA_TYPE(pColDataDest->type)) {
size = sizeof(int32_t) * pColDataSrc->nVal;
@@ -933,9 +1000,10 @@ int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) {
memcpy(pColDataDest->aOffset, pColDataSrc->aOffset, size);
}
+ // value
+ pColDataDest->nData = pColDataSrc->nData;
code = tRealloc(&pColDataDest->pData, pColDataSrc->nData);
if (code) goto _exit;
- pColDataDest->nData = pColDataSrc->nData;
memcpy(pColDataDest->pData, pColDataSrc->pData, pColDataDest->nData);
_exit:
@@ -1068,10 +1136,13 @@ static FORCE_INLINE int32_t tColDataCmprFn(const void *p1, const void *p2) {
}
// SBlockData ======================================================
-int32_t tBlockDataInit(SBlockData *pBlockData) {
+int32_t tBlockDataCreate(SBlockData *pBlockData) {
int32_t code = 0;
+ pBlockData->suid = 0;
+ pBlockData->uid = 0;
pBlockData->nRow = 0;
+ pBlockData->aUid = NULL;
pBlockData->aVersion = NULL;
pBlockData->aTSKEY = NULL;
pBlockData->aIdx = taosArrayInit(0, sizeof(int32_t));
@@ -1090,42 +1161,77 @@ _exit:
return code;
}
-void tBlockDataReset(SBlockData *pBlockData) {
- pBlockData->nRow = 0;
- taosArrayClear(pBlockData->aIdx);
-}
-
-void tBlockDataClear(SBlockData *pBlockData, int8_t deepClear) {
+void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear) {
+ tFree((uint8_t *)pBlockData->aUid);
tFree((uint8_t *)pBlockData->aVersion);
tFree((uint8_t *)pBlockData->aTSKEY);
taosArrayDestroy(pBlockData->aIdx);
taosArrayDestroyEx(pBlockData->aColData, deepClear ? tColDataClear : NULL);
- pBlockData->aColData = NULL;
- pBlockData->aIdx = NULL;
- pBlockData->aTSKEY = NULL;
+ pBlockData->aUid = NULL;
pBlockData->aVersion = NULL;
+ pBlockData->aTSKEY = NULL;
+ pBlockData->aIdx = NULL;
+ pBlockData->aColData = NULL;
}
-int32_t tBlockDataSetSchema(SBlockData *pBlockData, STSchema *pTSchema) {
- int32_t code = 0;
- SColData *pColData;
- STColumn *pTColumn;
+int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema) {
+ int32_t code = 0;
- tBlockDataReset(pBlockData);
+ ASSERT(suid || uid);
+
+ pBlockData->suid = suid;
+ pBlockData->uid = uid;
+ pBlockData->nRow = 0;
+
+ taosArrayClear(pBlockData->aIdx);
for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) {
- pTColumn = &pTSchema->columns[iColumn];
+ STColumn *pTColumn = &pTSchema->columns[iColumn];
+ SColData *pColData;
code = tBlockDataAddColData(pBlockData, iColumn - 1, &pColData);
if (code) goto _exit;
- tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) != 0);
+ tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0);
}
_exit:
return code;
}
-void tBlockDataClearData(SBlockData *pBlockData) {
+int32_t tBlockDataInitEx(SBlockData *pBlockData, SBlockData *pBlockDataFrom) {
+ int32_t code = 0;
+
+ ASSERT(pBlockDataFrom->suid || pBlockDataFrom->uid);
+
+ pBlockData->suid = pBlockDataFrom->suid;
+ pBlockData->uid = pBlockDataFrom->uid;
+ pBlockData->nRow = 0;
+
+ taosArrayClear(pBlockData->aIdx);
+ for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockDataFrom->aIdx); iColData++) {
+ SColData *pColDataFrom = tBlockDataGetColDataByIdx(pBlockDataFrom, iColData);
+
+ SColData *pColData;
+ code = tBlockDataAddColData(pBlockData, iColData, &pColData);
+ if (code) goto _exit;
+
+ tColDataInit(pColData, pColDataFrom->cid, pColDataFrom->type, pColDataFrom->smaOn);
+ }
+
+_exit:
+ return code;
+}
+
+void tBlockDataReset(SBlockData *pBlockData) {
+ pBlockData->suid = 0;
+ pBlockData->uid = 0;
+ pBlockData->nRow = 0;
+ taosArrayClear(pBlockData->aIdx);
+}
+
+void tBlockDataClear(SBlockData *pBlockData) {
+ ASSERT(pBlockData->suid || pBlockData->uid);
+
pBlockData->nRow = 0;
for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
@@ -1159,52 +1265,47 @@ _err:
return code;
}
-int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema) {
+int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid) {
int32_t code = 0;
- // TSDBKEY
+ ASSERT(pBlockData->suid || pBlockData->uid);
+
+ // uid
+ if (pBlockData->uid == 0) {
+ ASSERT(uid);
+ code = tRealloc((uint8_t **)&pBlockData->aUid, sizeof(int64_t) * (pBlockData->nRow + 1));
+ if (code) goto _err;
+ pBlockData->aUid[pBlockData->nRow] = uid;
+ }
+ // version
code = tRealloc((uint8_t **)&pBlockData->aVersion, sizeof(int64_t) * (pBlockData->nRow + 1));
if (code) goto _err;
+ pBlockData->aVersion[pBlockData->nRow] = TSDBROW_VERSION(pRow);
+ // timestamp
code = tRealloc((uint8_t **)&pBlockData->aTSKEY, sizeof(TSKEY) * (pBlockData->nRow + 1));
if (code) goto _err;
- pBlockData->aVersion[pBlockData->nRow] = TSDBROW_VERSION(pRow);
pBlockData->aTSKEY[pBlockData->nRow] = TSDBROW_TS(pRow);
// OTHER
- int32_t iColData = 0;
- int32_t nColData = taosArrayGetSize(pBlockData->aIdx);
- SRowIter iter = {0};
- SRowIter *pIter = &iter;
- SColData *pColData;
- SColVal *pColVal;
+ SRowIter rIter = {0};
+ SColVal *pColVal;
- if (nColData == 0) goto _exit;
+ tRowIterInit(&rIter, pRow, pTSchema);
+ pColVal = tRowIterNext(&rIter);
+ for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
+ SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
- tRowIterInit(pIter, pRow, pTSchema);
- pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
- pColVal = tRowIterNext(pIter);
+ while (pColVal && pColVal->cid < pColData->cid) {
+ pColVal = tRowIterNext(&rIter);
+ }
- while (pColData) {
- if (pColVal) {
- if (pColData->cid == pColVal->cid) {
- code = tColDataAppendValue(pColData, pColVal);
- if (code) goto _err;
-
- pColVal = tRowIterNext(pIter);
- pColData = ((++iColData) < nColData) ? tBlockDataGetColDataByIdx(pBlockData, iColData) : NULL;
- } else if (pColData->cid < pColVal->cid) {
- code = tColDataAppendValue(pColData, &COL_VAL_NONE(pColData->cid, pColData->type));
- if (code) goto _err;
-
- pColData = ((++iColData) < nColData) ? tBlockDataGetColDataByIdx(pBlockData, iColData) : NULL;
- } else {
- pColVal = tRowIterNext(pIter);
- }
- } else {
+ if (pColVal == NULL || pColVal->cid > pColData->cid) {
code = tColDataAppendValue(pColData, &COL_VAL_NONE(pColData->cid, pColData->type));
if (code) goto _err;
-
- pColData = ((++iColData) < nColData) ? tBlockDataGetColDataByIdx(pBlockData, iColData) : NULL;
+ } else {
+ code = tColDataAppendValue(pColData, pColVal);
+ if (code) goto _err;
+ pColVal = tRowIterNext(&rIter);
}
}
@@ -1259,128 +1360,111 @@ _exit:
int32_t tBlockDataMerge(SBlockData *pBlockData1, SBlockData *pBlockData2, SBlockData *pBlockData) {
int32_t code = 0;
- // set target
- int32_t iColData1 = 0;
- int32_t nColData1 = taosArrayGetSize(pBlockData1->aIdx);
- int32_t iColData2 = 0;
- int32_t nColData2 = taosArrayGetSize(pBlockData2->aIdx);
- SColData *pColData1;
- SColData *pColData2;
- SColData *pColData;
+ ASSERT(pBlockData->suid == pBlockData1->suid);
+ ASSERT(pBlockData->uid == pBlockData1->uid);
+ ASSERT(pBlockData1->nRow > 0);
+ ASSERT(pBlockData2->nRow > 0);
- tBlockDataReset(pBlockData);
- while (iColData1 < nColData1 && iColData2 < nColData2) {
- pColData1 = tBlockDataGetColDataByIdx(pBlockData1, iColData1);
- pColData2 = tBlockDataGetColDataByIdx(pBlockData2, iColData2);
+ tBlockDataClear(pBlockData);
- if (pColData1->cid == pColData2->cid) {
- code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
- if (code) goto _exit;
- tColDataInit(pColData, pColData2->cid, pColData2->type, pColData2->smaOn);
+ TSDBROW row1 = tsdbRowFromBlockData(pBlockData1, 0);
+ TSDBROW row2 = tsdbRowFromBlockData(pBlockData2, 0);
+ TSDBROW *pRow1 = &row1;
+ TSDBROW *pRow2 = &row2;
- iColData1++;
- iColData2++;
- } else if (pColData1->cid < pColData2->cid) {
- code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
- if (code) goto _exit;
- tColDataInit(pColData, pColData1->cid, pColData1->type, pColData1->smaOn);
+ while (pRow1 && pRow2) {
+ int32_t c = tsdbRowCmprFn(pRow1, pRow2);
- iColData1++;
- } else {
- code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
- if (code) goto _exit;
- tColDataInit(pColData, pColData2->cid, pColData2->type, pColData2->smaOn);
-
- iColData2++;
- }
- }
-
- while (iColData1 < nColData1) {
- code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
- if (code) goto _exit;
- tColDataInit(pColData, pColData1->cid, pColData1->type, pColData1->smaOn);
-
- iColData1++;
- }
-
- while (iColData2 < nColData2) {
- code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
- if (code) goto _exit;
- tColDataInit(pColData, pColData2->cid, pColData2->type, pColData2->smaOn);
-
- iColData2++;
- }
-
- // loop to merge
- int32_t iRow1 = 0;
- int32_t nRow1 = pBlockData1->nRow;
- int32_t iRow2 = 0;
- int32_t nRow2 = pBlockData2->nRow;
- TSDBROW row1;
- TSDBROW row2;
- int32_t c;
-
- while (iRow1 < nRow1 && iRow2 < nRow2) {
- row1 = tsdbRowFromBlockData(pBlockData1, iRow1);
- row2 = tsdbRowFromBlockData(pBlockData2, iRow2);
-
- c = tsdbKeyCmprFn(&TSDBROW_KEY(&row1), &TSDBROW_KEY(&row2));
if (c < 0) {
- code = tBlockDataAppendRow(pBlockData, &row1, NULL);
+ code = tBlockDataAppendRow(pBlockData, pRow1, NULL,
+ pBlockData1->uid ? pBlockData1->uid : pBlockData1->aUid[pRow1->iRow]);
if (code) goto _exit;
- iRow1++;
+
+ pRow1->iRow++;
+ if (pRow1->iRow < pBlockData1->nRow) {
+ *pRow1 = tsdbRowFromBlockData(pBlockData1, pRow1->iRow);
+ } else {
+ pRow1 = NULL;
+ }
} else if (c > 0) {
- code = tBlockDataAppendRow(pBlockData, &row2, NULL);
+ code = tBlockDataAppendRow(pBlockData, pRow2, NULL,
+ pBlockData2->uid ? pBlockData2->uid : pBlockData2->aUid[pRow2->iRow]);
if (code) goto _exit;
- iRow2++;
+
+ pRow2->iRow++;
+ if (pRow2->iRow < pBlockData2->nRow) {
+ *pRow2 = tsdbRowFromBlockData(pBlockData2, pRow2->iRow);
+ } else {
+ pRow2 = NULL;
+ }
} else {
ASSERT(0);
}
}
- while (iRow1 < nRow1) {
- row1 = tsdbRowFromBlockData(pBlockData1, iRow1);
- code = tBlockDataAppendRow(pBlockData, &row1, NULL);
+ while (pRow1) {
+ code = tBlockDataAppendRow(pBlockData, pRow1, NULL,
+ pBlockData1->uid ? pBlockData1->uid : pBlockData1->aUid[pRow1->iRow]);
if (code) goto _exit;
- iRow1++;
+
+ pRow1->iRow++;
+ if (pRow1->iRow < pBlockData1->nRow) {
+ *pRow1 = tsdbRowFromBlockData(pBlockData1, pRow1->iRow);
+ } else {
+ pRow1 = NULL;
+ }
}
- while (iRow2 < nRow2) {
- row2 = tsdbRowFromBlockData(pBlockData2, iRow2);
- code = tBlockDataAppendRow(pBlockData, &row2, NULL);
+ while (pRow2) {
+ code = tBlockDataAppendRow(pBlockData, pRow2, NULL,
+ pBlockData2->uid ? pBlockData2->uid : pBlockData2->aUid[pRow2->iRow]);
if (code) goto _exit;
- iRow2++;
+
+ pRow2->iRow++;
+ if (pRow2->iRow < pBlockData2->nRow) {
+ *pRow2 = tsdbRowFromBlockData(pBlockData2, pRow2->iRow);
+ } else {
+ pRow2 = NULL;
+ }
}
_exit:
return code;
}
-int32_t tBlockDataCopy(SBlockData *pBlockDataSrc, SBlockData *pBlockDataDest) {
- int32_t code = 0;
- SColData *pColDataSrc;
- SColData *pColDataDest;
+int32_t tBlockDataCopy(SBlockData *pSrc, SBlockData *pDest) {
+ int32_t code = 0;
- ASSERT(pBlockDataSrc->nRow > 0);
+ tBlockDataClear(pDest);
- tBlockDataReset(pBlockDataDest);
+ ASSERT(pDest->suid == pSrc->suid);
+ ASSERT(pDest->uid == pSrc->uid);
+ ASSERT(taosArrayGetSize(pSrc->aIdx) == taosArrayGetSize(pDest->aIdx));
- pBlockDataDest->nRow = pBlockDataSrc->nRow;
- // TSDBKEY
- code = tRealloc((uint8_t **)&pBlockDataDest->aVersion, sizeof(int64_t) * pBlockDataSrc->nRow);
- if (code) goto _exit;
- code = tRealloc((uint8_t **)&pBlockDataDest->aTSKEY, sizeof(TSKEY) * pBlockDataSrc->nRow);
- if (code) goto _exit;
- memcpy(pBlockDataDest->aVersion, pBlockDataSrc->aVersion, sizeof(int64_t) * pBlockDataSrc->nRow);
- memcpy(pBlockDataDest->aTSKEY, pBlockDataSrc->aTSKEY, sizeof(TSKEY) * pBlockDataSrc->nRow);
+ pDest->nRow = pSrc->nRow;
- // other
- for (size_t iColData = 0; iColData < taosArrayGetSize(pBlockDataSrc->aIdx); iColData++) {
- pColDataSrc = tBlockDataGetColDataByIdx(pBlockDataSrc, iColData);
- code = tBlockDataAddColData(pBlockDataDest, iColData, &pColDataDest);
+ if (pSrc->uid == 0) {
+ code = tRealloc((uint8_t **)&pDest->aUid, sizeof(int64_t) * pDest->nRow);
if (code) goto _exit;
+ memcpy(pDest->aUid, pSrc->aUid, sizeof(int64_t) * pDest->nRow);
+ }
- code = tColDataCopy(pColDataSrc, pColDataDest);
+ code = tRealloc((uint8_t **)&pDest->aVersion, sizeof(int64_t) * pDest->nRow);
+ if (code) goto _exit;
+ memcpy(pDest->aVersion, pSrc->aVersion, sizeof(int64_t) * pDest->nRow);
+
+ code = tRealloc((uint8_t **)&pDest->aTSKEY, sizeof(TSKEY) * pDest->nRow);
+ if (code) goto _exit;
+ memcpy(pDest->aTSKEY, pSrc->aTSKEY, sizeof(TSKEY) * pDest->nRow);
+
+ for (int32_t iColData = 0; iColData < taosArrayGetSize(pSrc->aIdx); iColData++) {
+ SColData *pColSrc = tBlockDataGetColDataByIdx(pSrc, iColData);
+ SColData *pColDest = tBlockDataGetColDataByIdx(pDest, iColData);
+
+ ASSERT(pColSrc->cid == pColDest->cid);
+ ASSERT(pColSrc->type == pColDest->type);
+
+ code = tColDataCopy(pColSrc, pColDest);
if (code) goto _exit;
}
@@ -1416,57 +1500,249 @@ void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColD
*ppColData = NULL;
}
-int32_t tPutBlockData(uint8_t *p, SBlockData *pBlockData) {
- int32_t n = 0;
+int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut, int32_t *szOut, uint8_t *aBuf[],
+ int32_t aBufN[]) {
+ int32_t code = 0;
- n += tPutI32v(p ? p + n : p, pBlockData->nRow);
- if (p) {
- memcpy(p + n, pBlockData->aVersion, sizeof(int64_t) * pBlockData->nRow);
- }
- n = n + sizeof(int64_t) * pBlockData->nRow;
- if (p) {
- memcpy(p + n, pBlockData->aTSKEY, sizeof(TSKEY) * pBlockData->nRow);
- }
- n = n + sizeof(TSKEY) * pBlockData->nRow;
+ SDiskDataHdr hdr = {.delimiter = TSDB_FILE_DLMT,
+ .fmtVer = 0,
+ .suid = pBlockData->suid,
+ .uid = pBlockData->uid,
+ .nRow = pBlockData->nRow,
+ .cmprAlg = cmprAlg};
- int32_t nCol = taosArrayGetSize(pBlockData->aIdx);
- n += tPutI32v(p ? p + n : p, nCol);
- for (int32_t iCol = 0; iCol < nCol; iCol++) {
- SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iCol);
- n += tPutColData(p ? p + n : p, pColData);
+ // encode =================
+ // columns AND SBlockCol
+ aBufN[0] = 0;
+ for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
+ SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
+
+ ASSERT(pColData->flag);
+
+ if (pColData->flag == HAS_NONE) continue;
+
+ SBlockCol blockCol = {.cid = pColData->cid,
+ .type = pColData->type,
+ .smaOn = pColData->smaOn,
+ .flag = pColData->flag,
+ .szOrigin = pColData->nData};
+
+ if (pColData->flag != HAS_NULL) {
+ code = tsdbCmprColData(pColData, cmprAlg, &blockCol, &aBuf[0], aBufN[0], &aBuf[2]);
+ if (code) goto _exit;
+
+ blockCol.offset = aBufN[0];
+ aBufN[0] = aBufN[0] + blockCol.szBitmap + blockCol.szOffset + blockCol.szValue + sizeof(TSCKSUM);
+ }
+
+ code = tRealloc(&aBuf[1], hdr.szBlkCol + tPutBlockCol(NULL, &blockCol));
+ if (code) goto _exit;
+ hdr.szBlkCol += tPutBlockCol(aBuf[1] + hdr.szBlkCol, &blockCol);
}
+ aBufN[1] = 0;
+ if (hdr.szBlkCol > 0) {
+ aBufN[1] = hdr.szBlkCol + sizeof(TSCKSUM);
+
+ code = tRealloc(&aBuf[1], aBufN[1]);
+ if (code) goto _exit;
+
+ taosCalcChecksumAppend(0, aBuf[1], aBufN[1]);
+ }
+
+ // uid + version + tskey
+ aBufN[2] = 0;
+ if (pBlockData->uid == 0) {
+ code = tsdbCmprData((uint8_t *)pBlockData->aUid, sizeof(int64_t) * pBlockData->nRow, TSDB_DATA_TYPE_BIGINT, cmprAlg,
+ &aBuf[2], aBufN[2], &hdr.szUid, &aBuf[3]);
+ if (code) goto _exit;
+ }
+ aBufN[2] += hdr.szUid;
+
+ code = tsdbCmprData((uint8_t *)pBlockData->aVersion, sizeof(int64_t) * pBlockData->nRow, TSDB_DATA_TYPE_BIGINT,
+ cmprAlg, &aBuf[2], aBufN[2], &hdr.szVer, &aBuf[3]);
+ if (code) goto _exit;
+ aBufN[2] += hdr.szVer;
+
+ code = tsdbCmprData((uint8_t *)pBlockData->aTSKEY, sizeof(TSKEY) * pBlockData->nRow, TSDB_DATA_TYPE_TIMESTAMP,
+ cmprAlg, &aBuf[2], aBufN[2], &hdr.szKey, &aBuf[3]);
+ if (code) goto _exit;
+ aBufN[2] += hdr.szKey;
+
+ aBufN[2] += sizeof(TSCKSUM);
+ code = tRealloc(&aBuf[2], aBufN[2]);
+ if (code) goto _exit;
+
+ // hdr
+ aBufN[3] = tPutDiskDataHdr(NULL, &hdr);
+ code = tRealloc(&aBuf[3], aBufN[3]);
+ if (code) goto _exit;
+ tPutDiskDataHdr(aBuf[3], &hdr);
+ taosCalcChecksumAppend(taosCalcChecksum(0, aBuf[3], aBufN[3]), aBuf[2], aBufN[2]);
+
+ // aggragate
+ if (ppOut) {
+ *szOut = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
+ code = tRealloc(ppOut, *szOut);
+ if (code) goto _exit;
+
+ memcpy(*ppOut, aBuf[3], aBufN[3]);
+ memcpy(*ppOut + aBufN[3], aBuf[2], aBufN[2]);
+ if (aBufN[1]) {
+ memcpy(*ppOut + aBufN[3] + aBufN[2], aBuf[1], aBufN[1]);
+ }
+ if (aBufN[0]) {
+ memcpy(*ppOut + aBufN[3] + aBufN[2] + aBufN[1], aBuf[0], aBufN[0]);
+ }
+ }
+
+_exit:
+ return code;
+}
+
+int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]) {
+ int32_t code = 0;
+
+ tBlockDataClear(pBlockData);
+
+ int32_t n = 0;
+ SDiskDataHdr hdr = {0};
+
+ // SDiskDataHdr
+ n += tGetDiskDataHdr(pIn + n, &hdr);
+ if (!taosCheckChecksumWhole(pIn, n + hdr.szUid + hdr.szVer + hdr.szKey + sizeof(TSCKSUM))) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _exit;
+ }
+ ASSERT(hdr.delimiter == TSDB_FILE_DLMT);
+
+ pBlockData->suid = hdr.suid;
+ pBlockData->uid = hdr.uid;
+ pBlockData->nRow = hdr.nRow;
+
+ // uid
+ if (hdr.uid == 0) {
+ ASSERT(hdr.szUid);
+ code = tsdbDecmprData(pIn + n, hdr.szUid, TSDB_DATA_TYPE_BIGINT, hdr.cmprAlg, (uint8_t **)&pBlockData->aUid,
+ sizeof(int64_t) * hdr.nRow, &aBuf[0]);
+ if (code) goto _exit;
+ } else {
+ ASSERT(!hdr.szUid);
+ }
+ n += hdr.szUid;
+
+ // version
+ code = tsdbDecmprData(pIn + n, hdr.szVer, TSDB_DATA_TYPE_BIGINT, hdr.cmprAlg, (uint8_t **)&pBlockData->aVersion,
+ sizeof(int64_t) * hdr.nRow, &aBuf[0]);
+ if (code) goto _exit;
+ n += hdr.szVer;
+
+ // TSKEY
+ code = tsdbDecmprData(pIn + n, hdr.szKey, TSDB_DATA_TYPE_TIMESTAMP, hdr.cmprAlg, (uint8_t **)&pBlockData->aTSKEY,
+ sizeof(TSKEY) * hdr.nRow, &aBuf[0]);
+ if (code) goto _exit;
+ n = n + hdr.szKey + sizeof(TSCKSUM);
+
+ // loop to decode each column data
+ if (hdr.szBlkCol == 0) goto _exit;
+
+ int32_t nt = 0;
+ while (nt < hdr.szBlkCol) {
+ SBlockCol blockCol = {0};
+ nt += tGetBlockCol(pIn + n + nt, &blockCol);
+ ASSERT(nt <= hdr.szBlkCol);
+
+ SColData *pColData;
+ code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
+ if (code) goto _exit;
+
+ tColDataInit(pColData, blockCol.cid, blockCol.type, blockCol.smaOn);
+ if (blockCol.flag == HAS_NULL) {
+ for (int32_t iRow = 0; iRow < hdr.nRow; iRow++) {
+ code = tColDataAppendValue(pColData, &COL_VAL_NULL(blockCol.cid, blockCol.type));
+ if (code) goto _exit;
+ }
+ } else {
+ code = tsdbDecmprColData(pIn + n + hdr.szBlkCol + sizeof(TSCKSUM) + blockCol.offset, &blockCol, hdr.cmprAlg,
+ hdr.nRow, pColData, &aBuf[0]);
+ if (code) goto _exit;
+ }
+ }
+
+_exit:
+ return code;
+}
+
+// SDiskDataHdr ==============================
+int32_t tPutDiskDataHdr(uint8_t *p, void *ph) {
+ int32_t n = 0;
+ SDiskDataHdr *pHdr = (SDiskDataHdr *)ph;
+
+ n += tPutU32(p ? p + n : p, pHdr->delimiter);
+ n += tPutU32v(p ? p + n : p, pHdr->fmtVer);
+ n += tPutI64(p ? p + n : p, pHdr->suid);
+ n += tPutI64(p ? p + n : p, pHdr->uid);
+ n += tPutI32v(p ? p + n : p, pHdr->szUid);
+ n += tPutI32v(p ? p + n : p, pHdr->szVer);
+ n += tPutI32v(p ? p + n : p, pHdr->szKey);
+ n += tPutI32v(p ? p + n : p, pHdr->szBlkCol);
+ n += tPutI32v(p ? p + n : p, pHdr->nRow);
+ n += tPutI8(p ? p + n : p, pHdr->cmprAlg);
+
return n;
}
-int32_t tGetBlockData(uint8_t *p, SBlockData *pBlockData) {
- int32_t n = 0;
+int32_t tGetDiskDataHdr(uint8_t *p, void *ph) {
+ int32_t n = 0;
+ SDiskDataHdr *pHdr = (SDiskDataHdr *)ph;
- tBlockDataReset(pBlockData);
-
- n += tGetI32v(p + n, &pBlockData->nRow);
- pBlockData->aVersion = (int64_t *)(p + n);
- n = n + sizeof(int64_t) * pBlockData->nRow;
- pBlockData->aTSKEY = (TSKEY *)(p + n);
- n = n + sizeof(TSKEY) * pBlockData->nRow;
-
- int32_t nCol;
- n += tGetI32v(p + n, &nCol);
- for (int32_t iCol = 0; iCol < nCol; iCol++) {
- SColData *pColData;
-
- if (tBlockDataAddColData(pBlockData, iCol, &pColData)) return -1;
- n += tGetColData(p + n, pColData);
- }
+ n += tGetU32(p + n, &pHdr->delimiter);
+ n += tGetU32v(p + n, &pHdr->fmtVer);
+ n += tGetI64(p + n, &pHdr->suid);
+ n += tGetI64(p + n, &pHdr->uid);
+ n += tGetI32v(p + n, &pHdr->szUid);
+ n += tGetI32v(p + n, &pHdr->szVer);
+ n += tGetI32v(p + n, &pHdr->szKey);
+ n += tGetI32v(p + n, &pHdr->szBlkCol);
+ n += tGetI32v(p + n, &pHdr->nRow);
+ n += tGetI8(p + n, &pHdr->cmprAlg);
return n;
}
// ALGORITHM ==============================
+int32_t tPutColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg) {
+ int32_t n = 0;
+
+ n += tPutI16v(p ? p + n : p, pColAgg->colId);
+ n += tPutI16v(p ? p + n : p, pColAgg->numOfNull);
+ n += tPutI64(p ? p + n : p, pColAgg->sum);
+ n += tPutI64(p ? p + n : p, pColAgg->max);
+ n += tPutI64(p ? p + n : p, pColAgg->min);
+
+ return n;
+}
+
+int32_t tGetColumnDataAgg(uint8_t *p, SColumnDataAgg *pColAgg) {
+ int32_t n = 0;
+
+ n += tGetI16v(p + n, &pColAgg->colId);
+ n += tGetI16v(p + n, &pColAgg->numOfNull);
+ n += tGetI64(p + n, &pColAgg->sum);
+ n += tGetI64(p + n, &pColAgg->max);
+ n += tGetI64(p + n, &pColAgg->min);
+
+ return n;
+}
+
void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
SColVal colVal;
SColVal *pColVal = &colVal;
+ memset(pColAgg, 0, sizeof(*pColAgg));
+ bool minAssigned = false;
+ bool maxAssigned = false;
+
*pColAgg = (SColumnDataAgg){.colId = pColData->cid};
for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
tColDataGetValue(pColData, iVal, pColVal);
@@ -1481,72 +1757,86 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
break;
case TSDB_DATA_TYPE_TINYINT: {
pColAgg->sum += colVal.value.i8;
- if (pColAgg->min > colVal.value.i8) {
+ if (!minAssigned || pColAgg->min > colVal.value.i8) {
pColAgg->min = colVal.value.i8;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.i8) {
+ if (!maxAssigned || pColAgg->max < colVal.value.i8) {
pColAgg->max = colVal.value.i8;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
pColAgg->sum += colVal.value.i16;
- if (pColAgg->min > colVal.value.i16) {
+ if (!minAssigned || pColAgg->min > colVal.value.i16) {
pColAgg->min = colVal.value.i16;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.i16) {
+ if (!maxAssigned || pColAgg->max < colVal.value.i16) {
pColAgg->max = colVal.value.i16;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_INT: {
pColAgg->sum += colVal.value.i32;
- if (pColAgg->min > colVal.value.i32) {
+ if (!minAssigned || pColAgg->min > colVal.value.i32) {
pColAgg->min = colVal.value.i32;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.i32) {
+ if (!maxAssigned || pColAgg->max < colVal.value.i32) {
pColAgg->max = colVal.value.i32;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_BIGINT: {
pColAgg->sum += colVal.value.i64;
- if (pColAgg->min > colVal.value.i64) {
+ if (!minAssigned || pColAgg->min > colVal.value.i64) {
pColAgg->min = colVal.value.i64;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.i64) {
+ if (!maxAssigned || pColAgg->max < colVal.value.i64) {
pColAgg->max = colVal.value.i64;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_FLOAT: {
- *(double*)(&pColAgg->sum) += colVal.value.f;
- if (*(double*)(&pColAgg->min) > colVal.value.f) {
- *(double*)(&pColAgg->min) = colVal.value.f;
+ *(double *)(&pColAgg->sum) += colVal.value.f;
+ if (!minAssigned || *(double *)(&pColAgg->min) > colVal.value.f) {
+ *(double *)(&pColAgg->min) = colVal.value.f;
+ minAssigned = true;
}
- if (*(double*)(&pColAgg->max) < colVal.value.f) {
- *(double*)(&pColAgg->max) = colVal.value.f;
+ if (!maxAssigned || *(double *)(&pColAgg->max) < colVal.value.f) {
+ *(double *)(&pColAgg->max) = colVal.value.f;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
- *(double*)(&pColAgg->sum) += colVal.value.d;
- if (*(double*)(&pColAgg->min) > colVal.value.d) {
- *(double*)(&pColAgg->min) = colVal.value.d;
+ *(double *)(&pColAgg->sum) += colVal.value.d;
+ if (!minAssigned || *(double *)(&pColAgg->min) > colVal.value.d) {
+ *(double *)(&pColAgg->min) = colVal.value.d;
+ minAssigned = true;
}
- if (*(double*)(&pColAgg->max) < colVal.value.d) {
- *(double*)(&pColAgg->max) = colVal.value.d;
+ if (!maxAssigned || *(double *)(&pColAgg->max) < colVal.value.d) {
+ *(double *)(&pColAgg->max) = colVal.value.d;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_VARCHAR:
break;
case TSDB_DATA_TYPE_TIMESTAMP: {
- if (pColAgg->min > colVal.value.i64) {
+ if (!minAssigned || pColAgg->min > colVal.value.i64) {
pColAgg->min = colVal.value.i64;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.i64) {
+ if (!maxAssigned || pColAgg->max < colVal.value.i64) {
pColAgg->max = colVal.value.i64;
+ maxAssigned = true;
}
break;
}
@@ -1554,41 +1844,49 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
break;
case TSDB_DATA_TYPE_UTINYINT: {
pColAgg->sum += colVal.value.u8;
- if (pColAgg->min > colVal.value.u8) {
+ if (!minAssigned || pColAgg->min > colVal.value.u8) {
pColAgg->min = colVal.value.u8;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.u8) {
+ if (!maxAssigned || pColAgg->max < colVal.value.u8) {
pColAgg->max = colVal.value.u8;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
pColAgg->sum += colVal.value.u16;
- if (pColAgg->min > colVal.value.u16) {
+ if (!minAssigned || pColAgg->min > colVal.value.u16) {
pColAgg->min = colVal.value.u16;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.u16) {
+ if (!maxAssigned || pColAgg->max < colVal.value.u16) {
pColAgg->max = colVal.value.u16;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_UINT: {
pColAgg->sum += colVal.value.u32;
- if (pColAgg->min > colVal.value.u32) {
+ if (!minAssigned || pColAgg->min > colVal.value.u32) {
pColAgg->min = colVal.value.u32;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.u32) {
+ if (!minAssigned || pColAgg->max < colVal.value.u32) {
pColAgg->max = colVal.value.u32;
+ maxAssigned = true;
}
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
pColAgg->sum += colVal.value.u64;
- if (pColAgg->min > colVal.value.u64) {
+ if (!minAssigned || pColAgg->min > colVal.value.u64) {
pColAgg->min = colVal.value.u64;
+ minAssigned = true;
}
- if (pColAgg->max < colVal.value.u64) {
+ if (!maxAssigned || pColAgg->max < colVal.value.u64) {
pColAgg->max = colVal.value.u64;
+ maxAssigned = true;
}
break;
}
@@ -1608,3 +1906,268 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
}
}
}
+
+int32_t tsdbCmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t nOut,
+ int32_t *szOut, uint8_t **ppBuf) {
+ int32_t code = 0;
+
+ ASSERT(szIn > 0 && ppOut);
+
+ if (cmprAlg == NO_COMPRESSION) {
+ code = tRealloc(ppOut, nOut + szIn);
+ if (code) goto _exit;
+
+ memcpy(*ppOut + nOut, pIn, szIn);
+ *szOut = szIn;
+ } else {
+ int32_t size = szIn + COMP_OVERFLOW_BYTES;
+
+ code = tRealloc(ppOut, nOut + size);
+ if (code) goto _exit;
+
+ if (cmprAlg == TWO_STAGE_COMP) {
+ ASSERT(ppBuf);
+ code = tRealloc(ppBuf, size);
+ if (code) goto _exit;
+ }
+
+ *szOut =
+ tDataTypes[type].compFunc(pIn, szIn, szIn / tDataTypes[type].bytes, *ppOut + nOut, size, cmprAlg, *ppBuf, size);
+ if (*szOut <= 0) {
+ code = TSDB_CODE_COMPRESS_ERROR;
+ goto _exit;
+ }
+ }
+
+_exit:
+ return code;
+}
+
+int32_t tsdbDecmprData(uint8_t *pIn, int32_t szIn, int8_t type, int8_t cmprAlg, uint8_t **ppOut, int32_t szOut,
+ uint8_t **ppBuf) {
+ int32_t code = 0;
+
+ code = tRealloc(ppOut, szOut);
+ if (code) goto _exit;
+
+ if (cmprAlg == NO_COMPRESSION) {
+ ASSERT(szIn == szOut);
+ memcpy(*ppOut, pIn, szOut);
+ } else {
+ if (cmprAlg == TWO_STAGE_COMP) {
+ code = tRealloc(ppBuf, szOut + COMP_OVERFLOW_BYTES);
+ if (code) goto _exit;
+ }
+
+ int32_t size = tDataTypes[type].decompFunc(pIn, szIn, szOut / tDataTypes[type].bytes, *ppOut, szOut, cmprAlg,
+ *ppBuf, szOut + COMP_OVERFLOW_BYTES);
+ if (size <= 0) {
+ code = TSDB_CODE_COMPRESS_ERROR;
+ goto _exit;
+ }
+
+ ASSERT(size == szOut);
+ }
+
+_exit:
+ return code;
+}
+
+int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol, uint8_t **ppOut, int32_t nOut,
+ uint8_t **ppBuf) {
+ int32_t code = 0;
+
+ ASSERT(pColData->flag && (pColData->flag != HAS_NONE) && (pColData->flag != HAS_NULL));
+
+ pBlockCol->szBitmap = 0;
+ pBlockCol->szOffset = 0;
+ pBlockCol->szValue = 0;
+
+ int32_t size = 0;
+ // bitmap
+ if (pColData->flag != HAS_VALUE) {
+ uint8_t *pBitMap = pColData->pBitMap;
+ int32_t szBitMap = BIT2_SIZE(pColData->nVal);
+
+ // BIT2 to BIT1
+ if (pColData->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) {
+ szBitMap = BIT1_SIZE(pColData->nVal);
+ pBitMap = taosMemoryCalloc(1, szBitMap);
+ if (pBitMap == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
+ uint8_t v = GET_BIT2(pColData->pBitMap, iVal);
+ switch (pColData->flag) {
+ case (HAS_NULL | HAS_NONE):
+ SET_BIT1(pBitMap, iVal, v);
+ break;
+ case (HAS_VALUE | HAS_NONE):
+ if (v) {
+ SET_BIT1(pBitMap, iVal, 1);
+ } else {
+ SET_BIT1(pBitMap, iVal, 0);
+ }
+ break;
+ case (HAS_VALUE | HAS_NULL):
+ SET_BIT1(pBitMap, iVal, v - 1);
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
+ }
+
+ code = tsdbCmprData(pBitMap, szBitMap, TSDB_DATA_TYPE_TINYINT, cmprAlg, ppOut, nOut + size, &pBlockCol->szBitmap,
+ ppBuf);
+ if (code) goto _exit;
+
+ if (pColData->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) {
+ taosMemoryFree(pBitMap);
+ }
+ }
+ size += pBlockCol->szBitmap;
+
+ // offset
+ if (IS_VAR_DATA_TYPE(pColData->type)) {
+ code = tsdbCmprData((uint8_t *)pColData->aOffset, sizeof(int32_t) * pColData->nVal, TSDB_DATA_TYPE_INT, cmprAlg,
+ ppOut, nOut + size, &pBlockCol->szOffset, ppBuf);
+ if (code) goto _exit;
+ }
+ size += pBlockCol->szOffset;
+
+ // value
+ if (pColData->flag != (HAS_NULL | HAS_NONE)) {
+ code = tsdbCmprData((uint8_t *)pColData->pData, pColData->nData, pColData->type, cmprAlg, ppOut, nOut + size,
+ &pBlockCol->szValue, ppBuf);
+ if (code) goto _exit;
+ }
+ size += pBlockCol->szValue;
+
+ // checksum
+ size += sizeof(TSCKSUM);
+ code = tRealloc(ppOut, nOut + size);
+ if (code) goto _exit;
+ taosCalcChecksumAppend(0, *ppOut + nOut, size);
+
+_exit:
+ return code;
+}
+
+int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData,
+ uint8_t **ppBuf) {
+ int32_t code = 0;
+
+ int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM);
+ if (!taosCheckChecksumWhole(pIn, size)) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _exit;
+ }
+
+ ASSERT(pColData->cid == pBlockCol->cid);
+ ASSERT(pColData->type == pBlockCol->type);
+ pColData->smaOn = pBlockCol->smaOn;
+ pColData->flag = pBlockCol->flag;
+ pColData->nVal = nVal;
+ pColData->nData = pBlockCol->szOrigin;
+
+ uint8_t *p = pIn;
+ // bitmap
+ if (pBlockCol->szBitmap) {
+ if (pBlockCol->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) {
+ uint8_t *pBitMap = NULL;
+ code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pBitMap,
+ BIT1_SIZE(pColData->nVal), ppBuf);
+ if (code) goto _exit;
+
+ code = tRealloc(&pColData->pBitMap, BIT2_SIZE(pColData->nVal));
+ if (code) {
+ tFree(pBitMap);
+ goto _exit;
+ }
+
+ // BIT1 to BIT2
+ for (int32_t iVal = 0; iVal < nVal; iVal++) {
+ uint8_t v = GET_BIT1(pBitMap, iVal);
+ switch (pBlockCol->flag) {
+ case (HAS_NULL | HAS_NONE):
+ SET_BIT2(pColData->pBitMap, iVal, v);
+ break;
+ case (HAS_VALUE | HAS_NONE):
+ if (v) {
+ SET_BIT2(pColData->pBitMap, iVal, 2);
+ } else {
+ SET_BIT2(pColData->pBitMap, iVal, 0);
+ }
+ break;
+ case (HAS_VALUE | HAS_NULL):
+ SET_BIT2(pColData->pBitMap, iVal, v + 1);
+ break;
+ default:
+ ASSERT(0);
+ }
+ }
+
+ tFree(pBitMap);
+ } else {
+ code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pColData->pBitMap,
+ BIT2_SIZE(pColData->nVal), ppBuf);
+ if (code) goto _exit;
+ }
+ }
+ p += pBlockCol->szBitmap;
+
+ // offset
+ if (pBlockCol->szOffset) {
+ code = tsdbDecmprData(p, pBlockCol->szOffset, TSDB_DATA_TYPE_INT, cmprAlg, (uint8_t **)&pColData->aOffset,
+ sizeof(int32_t) * pColData->nVal, ppBuf);
+ if (code) goto _exit;
+ }
+ p += pBlockCol->szOffset;
+
+ // value
+ if (pBlockCol->szValue) {
+ code = tsdbDecmprData(p, pBlockCol->szValue, pColData->type, cmprAlg, &pColData->pData, pColData->nData, ppBuf);
+ if (code) goto _exit;
+ }
+ p += pBlockCol->szValue;
+
+_exit:
+ return code;
+}
+
+int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck) {
+ int32_t code = 0;
+
+ // alloc
+ code = tRealloc(ppOut, size);
+ if (code) goto _exit;
+
+ // seek
+ int64_t n = taosLSeekFile(pFD, offset, SEEK_SET);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _exit;
+ }
+
+ // read
+ n = taosReadFile(pFD, *ppOut, size);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _exit;
+ } else if (n < size) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _exit;
+ }
+
+ // check
+ if (toCheck && !taosCheckChecksumWhole(*ppOut, size)) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _exit;
+ }
+
+_exit:
+ return code;
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeBufPool.c b/source/dnode/vnode/src/vnd/vnodeBufPool.c
index 0623b3bd10..5a22114ab4 100644
--- a/source/dnode/vnode/src/vnd/vnodeBufPool.c
+++ b/source/dnode/vnode/src/vnd/vnodeBufPool.c
@@ -78,7 +78,7 @@ void vnodeBufPoolReset(SVBufPool *pPool) {
void *vnodeBufPoolMalloc(SVBufPool *pPool, int size) {
SVBufPoolNode *pNode;
void *p;
-
+ taosThreadSpinLock(&pPool->lock);
if (pPool->node.size >= pPool->ptr - pPool->node.data + size) {
// allocate from the anchor node
p = pPool->ptr;
@@ -89,6 +89,7 @@ void *vnodeBufPoolMalloc(SVBufPool *pPool, int size) {
pNode = taosMemoryMalloc(sizeof(*pNode) + size);
if (pNode == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosThreadSpinUnlock(&pPool->lock);
return NULL;
}
@@ -101,7 +102,7 @@ void *vnodeBufPoolMalloc(SVBufPool *pPool, int size) {
pPool->size = pPool->size + sizeof(*pNode) + size;
}
-
+ taosThreadSpinUnlock(&pPool->lock);
return p;
}
@@ -129,6 +130,12 @@ static int vnodeBufPoolCreate(SVnode *pVnode, int64_t size, SVBufPool **ppPool)
return -1;
}
+ if (taosThreadSpinInit(&pPool->lock, 0) != 0) {
+ taosMemoryFree(pPool);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+
pPool->next = NULL;
pPool->pVnode = pVnode;
pPool->nRef = 0;
@@ -145,6 +152,7 @@ static int vnodeBufPoolCreate(SVnode *pVnode, int64_t size, SVBufPool **ppPool)
static int vnodeBufPoolDestroy(SVBufPool *pPool) {
vnodeBufPoolReset(pPool);
+ taosThreadSpinDestroy(&pPool->lock);
taosMemoryFree(pPool);
return 0;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c
index c8dc07af0a..8c73499229 100644
--- a/source/dnode/vnode/src/vnd/vnodeCommit.c
+++ b/source/dnode/vnode/src/vnd/vnodeCommit.c
@@ -220,9 +220,6 @@ int vnodeCommit(SVnode *pVnode) {
vInfo("vgId:%d, start to commit, commit ID:%" PRId64 " version:%" PRId64, TD_VID(pVnode), pVnode->state.commitID,
pVnode->state.applied);
- vnodeBufPoolUnRef(pVnode->inUse);
- pVnode->inUse = NULL;
-
pVnode->state.commitTerm = pVnode->state.applyTerm;
// save info
@@ -239,7 +236,13 @@ int vnodeCommit(SVnode *pVnode) {
// preCommit
// smaSyncPreCommit(pVnode->pSma);
- smaAsyncPreCommit(pVnode->pSma);
+ if(smaAsyncPreCommit(pVnode->pSma) < 0){
+ ASSERT(0);
+ return -1;
+ }
+
+ vnodeBufPoolUnRef(pVnode->inUse);
+ pVnode->inUse = NULL;
// commit each sub-system
if (metaCommit(pVnode->pMeta) < 0) {
@@ -248,7 +251,10 @@ int vnodeCommit(SVnode *pVnode) {
}
if (VND_IS_RSMA(pVnode)) {
- smaAsyncCommit(pVnode->pSma);
+ if (smaAsyncCommit(pVnode->pSma) < 0) {
+ ASSERT(0);
+ return -1;
+ }
if (tsdbCommit(VND_RSMA0(pVnode)) < 0) {
ASSERT(0);
@@ -285,7 +291,10 @@ int vnodeCommit(SVnode *pVnode) {
// postCommit
// smaSyncPostCommit(pVnode->pSma);
- smaAsyncPostCommit(pVnode->pSma);
+ if (smaAsyncPostCommit(pVnode->pSma) < 0) {
+ ASSERT(0);
+ return -1;
+ }
// apply the commit (TODO)
walEndSnapshot(pVnode->pWal);
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index a4fd984fb7..dcfbd33b90 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -87,6 +87,7 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
pVnode->msgCb = msgCb;
taosThreadMutexInit(&pVnode->lock, NULL);
pVnode->blocked = false;
+ pVnode->inClose = false;
tsem_init(&pVnode->syncSem, 0, 0);
tsem_init(&(pVnode->canCommit), 0, 1);
@@ -181,6 +182,8 @@ _err:
void vnodePreClose(SVnode *pVnode) {
if (pVnode) {
syncLeaderTransfer(pVnode->sync);
+ pVnode->inClose = true;
+ smaPreClose(pVnode->pSma);
}
}
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index d55f1796ad..8d799e919d 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -424,6 +424,25 @@ int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list) {
return TSDB_CODE_SUCCESS;
}
+int32_t vnodeGetStbIdList(SVnode* pVnode, int64_t suid, SArray* list) {
+ SMStbCursor* pCur = metaOpenStbCursor(pVnode->pMeta, suid);
+ if (!pCur) {
+ return TSDB_CODE_FAILED;
+ }
+
+ while (1) {
+ tb_uid_t id = metaStbCursorNext(pCur);
+ if (id == 0) {
+ break;
+ }
+
+ taosArrayPush(list, &id);
+ }
+
+ metaCloseStbCursor(pCur);
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid);
if (!pCur) {
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index d5c5e18668..7a8d168f4f 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -196,36 +196,42 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
break;
/* TQ */
case TDMT_VND_MQ_VG_CHANGE:
- if (tqProcessVgChangeReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
+ if (tqProcessVgChangeReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
pMsg->contLen - sizeof(SMsgHead)) < 0) {
goto _err;
}
break;
case TDMT_VND_MQ_VG_DELETE:
- if (tqProcessVgDeleteReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) {
+ if (tqProcessVgDeleteReq(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) {
goto _err;
}
break;
case TDMT_VND_MQ_COMMIT_OFFSET:
- if (tqProcessOffsetCommitReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
- pMsg->contLen - sizeof(SMsgHead), version) < 0) {
+ if (tqProcessOffsetCommitReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
+ pMsg->contLen - sizeof(SMsgHead)) < 0) {
goto _err;
}
break;
- case TDMT_VND_CHECK_ALTER_INFO:
- if (tqProcessCheckAlterInfoReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
- pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ case TDMT_VND_ADD_CHECK_INFO:
+ if (tqProcessAddCheckInfoReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
+ pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ goto _err;
+ }
+ break;
+ case TDMT_VND_DELETE_CHECK_INFO:
+ if (tqProcessDelCheckInfoReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
+ pMsg->contLen - sizeof(SMsgHead)) < 0) {
goto _err;
}
break;
case TDMT_STREAM_TASK_DEPLOY: {
- if (tqProcessTaskDeployReq(pVnode->pTq, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
+ if (tqProcessTaskDeployReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
pMsg->contLen - sizeof(SMsgHead)) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_DROP: {
- if (tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) {
+ if (tqProcessTaskDropReq(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) {
goto _err;
}
} break;
@@ -247,6 +253,8 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
vTrace("vgId:%d, process %s request success, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), version);
+ walApplyVer(pVnode->pWal, version);
+
if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
@@ -281,7 +289,7 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
vTrace("message in vnode query queue is processing");
- if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsLeader(pVnode)) {
+ if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsReadyForRead(pVnode)) {
vnodeRedirectRpcMsg(pVnode, pMsg);
return 0;
}
@@ -293,8 +301,8 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_SCH_QUERY_CONTINUE:
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
- case TDMT_VND_FETCH_RSMA:
- return smaProcessFetch(pVnode->pSma, pMsg);
+ case TDMT_VND_EXEC_RSMA:
+ return smaProcessExec(pVnode->pSma, pMsg);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -305,7 +313,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
vTrace("vgId:%d, msg:%p in fetch queue is processing", pVnode->config.vgId, pMsg);
if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG ||
pMsg->msgType == TDMT_VND_BATCH_META) &&
- !vnodeIsLeader(pVnode)) {
+ !vnodeIsReadyForRead(pVnode)) {
vnodeRedirectRpcMsg(pVnode, pMsg);
return 0;
}
@@ -522,7 +530,9 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
}
tqUpdateTbUidList(pVnode->pTq, tbUids, true);
- tdUpdateTbUidList(pVnode->pSma, pStore);
+ if (tdUpdateTbUidList(pVnode->pSma, pStore) < 0) {
+ goto _exit;
+ }
tdUidStoreFree(pStore);
// prepare rsp
@@ -867,7 +877,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
- sprintf(submitBlkRsp.tblFName, "%s.", pVnode->config.dbname);
+ sprintf(submitBlkRsp.tblFName, "%s.%s", pVnode->config.dbname, createTbReq.name);
msgIter.uid = createTbReq.uid;
if (createTbReq.type == TSDB_CHILD_TABLE) {
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 9703ed27ae..65d4e9aaf1 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -764,6 +764,8 @@ void vnodeSyncStart(SVnode *pVnode) {
void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); }
+bool vnodeIsRoleLeader(SVnode *pVnode) { return syncGetMyRole(pVnode->sync) == TAOS_SYNC_STATE_LEADER; }
+
bool vnodeIsLeader(SVnode *pVnode) {
if (!syncIsReady(pVnode->sync)) {
vDebug("vgId:%d, vnode not ready, state:%s, restore:%d", pVnode->config.vgId, syncGetMyRoleStr(pVnode->sync),
@@ -779,3 +781,17 @@ bool vnodeIsLeader(SVnode *pVnode) {
return true;
}
+
+bool vnodeIsReadyForRead(SVnode *pVnode) {
+ if (syncIsReady(pVnode->sync)) {
+ return true;
+ }
+
+ if (syncIsReadyForRead(pVnode->sync)) {
+ return true;
+ }
+
+ vDebug("vgId:%d, vnode not ready for read, state:%s, last:%ld, cmt:%ld", pVnode->config.vgId,
+ syncGetMyRoleStr(pVnode->sync), syncGetLastIndex(pVnode->sync), syncGetCommitIndex(pVnode->sync));
+ return false;
+}
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 311d82c8a2..5e339eb113 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -205,7 +205,7 @@ typedef struct SExprSupp {
} SExprSupp;
typedef struct SOperatorInfo {
- uint8_t operatorType;
+ uint16_t operatorType;
bool blocking; // block operator or not
uint8_t status; // denote if current operator is completed
char* name; // name, for debug purpose
@@ -434,7 +434,7 @@ typedef struct SStreamAggSupporter {
typedef struct SessionWindowSupporter {
SStreamAggSupporter* pStreamAggSup;
int64_t gap;
- uint8_t parentType;
+ uint16_t parentType;
SAggSupporter* pIntervalAggSup;
} SessionWindowSupporter;
@@ -924,9 +924,6 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
-SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp *pTwAggSupp, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode,
SExecTaskInfo* pTaskInfo);
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
diff --git a/source/libs/executor/inc/tsimplehash.h b/source/libs/executor/inc/tsimplehash.h
index a56f8e8c04..4c5a80e2f1 100644
--- a/source/libs/executor/inc/tsimplehash.h
+++ b/source/libs/executor/inc/tsimplehash.h
@@ -17,7 +17,6 @@
#define TDENGINE_TSIMPLEHASH_H
#include "tarray.h"
-#include "tlockfree.h"
#ifdef __cplusplus
extern "C" {
@@ -27,6 +26,10 @@ typedef uint32_t (*_hash_fn_t)(const char *, uint32_t);
typedef int32_t (*_equal_fn_t)(const void *, const void *, size_t len);
typedef void (*_hash_free_fn_t)(void *);
+/**
+ * @brief single thread hash
+ *
+ */
typedef struct SSHashObj SSHashObj;
/**
@@ -36,7 +39,7 @@ typedef struct SSHashObj SSHashObj;
* @param fn hash function to generate the hash value
* @return
*/
-SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t dataLen);
+SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn);
/**
* return the size of hash table
@@ -48,22 +51,26 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj);
int32_t tSimpleHashPrint(const SSHashObj *pHashObj);
/**
- * put element into hash table, if the element with the same key exists, update it
- * @param pHashObj
- * @param key
- * @param data
- * @return
+ * @brief put element into hash table, if the element with the same key exists, update it
+ *
+ * @param pHashObj
+ * @param key
+ * @param keyLen
+ * @param data
+ * @param dataLen
+ * @return int32_t
*/
-int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data);
+int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen);
/**
* return the payload data with the specified key
*
* @param pHashObj
* @param key
+ * @param keyLen
* @return
*/
-void *tSimpleHashGet(SSHashObj *pHashObj, const void *key);
+void *tSimpleHashGet(SSHashObj *pHashObj, const void *key, size_t keyLen);
/**
* remove item with the specified key
@@ -71,7 +78,7 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key);
* @param key
* @param keyLen
*/
-int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key);
+int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen);
/**
* Clear the hash table.
@@ -98,7 +105,7 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj);
* @param keyLen
* @return
*/
-void *tSimpleHashGetKey(const SSHashObj* pHashObj, void *data, size_t* keyLen);
+void *tSimpleHashGetKey(void *data, size_t* keyLen);
/**
* Create the hash table iterator
@@ -109,7 +116,18 @@ void *tSimpleHashGetKey(const SSHashObj* pHashObj, void *data, size_t* keyLen);
*/
void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter);
+/**
+ * Create the hash table iterator
+ *
+ * @param pHashObj
+ * @param data
+ * @param key
+ * @param iter
+ * @return void*
+ */
+void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, int32_t *iter);
+
#ifdef __cplusplus
}
#endif
-#endif // TDENGINE_TSIMPLEHASH_H
+#endif // TDENGINE_TSIMPLEHASH_H
\ No newline at end of file
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 34247d3b47..bf969bf2e4 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -221,7 +221,7 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
STagVal tagVal = {0};
tagVal.cid = pSColumnNode->colId;
- const char* p = metaGetTableTagVal(&mr->me, pSColumnNode->node.resType.type, &tagVal);
+ const char* p = metaGetTableTagVal(mr->me.ctbEntry.pTags, pSColumnNode->node.resType.type, &tagVal);
if (p == NULL) {
res->node.resType.type = TSDB_DATA_TYPE_NULL;
} else if (pSColumnNode->node.resType.type == TSDB_DATA_TYPE_JSON) {
@@ -298,6 +298,211 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle,
return TSDB_CODE_SUCCESS;
}
+typedef struct tagFilterAssist{
+ SHashObj *colHash;
+ int32_t index;
+ SArray *cInfoList;
+}tagFilterAssist;
+
+static EDealRes getColumn(SNode** pNode, void* pContext) {
+ SColumnNode* pSColumnNode = NULL;
+ if (QUERY_NODE_COLUMN == nodeType((*pNode))) {
+ pSColumnNode = *(SColumnNode**)pNode;
+ }else if(QUERY_NODE_FUNCTION == nodeType((*pNode))){
+ SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode);
+ if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) {
+ pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
+ if (NULL == pSColumnNode) {
+ return DEAL_RES_ERROR;
+ }
+ pSColumnNode->colId = -1;
+ pSColumnNode->colType = COLUMN_TYPE_TBNAME;
+ pSColumnNode->node.resType.type = TSDB_DATA_TYPE_VARCHAR;
+ pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE;
+ nodesDestroyNode(*pNode);
+ *pNode = (SNode*)pSColumnNode;
+ }else{
+ return DEAL_RES_CONTINUE;
+ }
+ }else{
+ return DEAL_RES_CONTINUE;
+ }
+
+ tagFilterAssist *pData = (tagFilterAssist *)pContext;
+ void *data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId));
+ if(!data){
+ taosHashPut(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode)));
+ pSColumnNode->slotId = pData->index++;
+ SColumnInfo cInfo = {.colId = pSColumnNode->colId, .type = pSColumnNode->node.resType.type, .bytes = pSColumnNode->node.resType.bytes};
+#if TAG_FILTER_DEBUG
+ qDebug("tagfilter build column info, slotId:%d, colId:%d, type:%d", pSColumnNode->slotId, cInfo.colId, cInfo.type);
+#endif
+ taosArrayPush(pData->cInfoList, &cInfo);
+ }else{
+ SColumnNode* col = *(SColumnNode**)data;
+ pSColumnNode->slotId = col->slotId;
+ }
+
+ return DEAL_RES_CONTINUE;
+}
+
+static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarParam* pParam) {
+ SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData));
+ if (pColumnData == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return terrno;
+ }
+
+ pColumnData->info.type = pType->type;
+ pColumnData->info.bytes = pType->bytes;
+ pColumnData->info.scale = pType->scale;
+ pColumnData->info.precision = pType->precision;
+
+ int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pColumnData);
+ return terrno;
+ }
+
+ pParam->columnData = pColumnData;
+ pParam->colAlloced = true;
+ return TSDB_CODE_SUCCESS;
+}
+
+static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond){
+ int32_t code = TSDB_CODE_SUCCESS;
+ SArray* pBlockList = NULL;
+ SSDataBlock* pResBlock = NULL;
+ SHashObj * tags = NULL;
+ SScalarParam output = {0};
+
+ tagFilterAssist ctx = {0};
+ ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
+ if(ctx.colHash == NULL){
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ ctx.index = 0;
+ ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
+ if(ctx.cInfoList == NULL){
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ nodesRewriteExprPostOrder(&pTagCond, getColumn, (void *)&ctx);
+
+ pResBlock = createDataBlock();
+ if (pResBlock == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+
+ for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) {
+ SColumnInfoData colInfo = {{0}, 0};
+ colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i);
+ blockDataAppendColInfo(pResBlock, &colInfo);
+ }
+
+// int64_t stt = taosGetTimestampUs();
+ tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ code = metaGetTableTags(metaHandle, suid, uidList, tags);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ goto end;
+ }
+
+ int32_t rows = taosArrayGetSize(uidList);
+ if(rows == 0){
+ goto end;
+ }
+// int64_t stt1 = taosGetTimestampUs();
+// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
+
+ code = blockDataEnsureCapacity(pResBlock, rows);
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ goto end;
+ }
+
+// int64_t st = taosGetTimestampUs();
+ for (int32_t i = 0; i < rows; i++) {
+ int64_t* uid = taosArrayGet(uidList, i);
+ void* tag = taosHashGet(tags, uid, sizeof(int64_t));
+ if (suid != 0) {
+ ASSERT(tag);
+ }
+ for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){
+ SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
+
+ if(pColInfo->info.colId == -1){ // tbname
+ char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
+ metaGetTableNameByUid(metaHandle, *uid, str);
+ colDataAppend(pColInfo, i, str, false);
+#if TAG_FILTER_DEBUG
+ qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2);
+#endif
+ }else{
+ STagVal tagVal = {0};
+ tagVal.cid = pColInfo->info.colId;
+ const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
+
+ if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){
+ colDataAppend(pColInfo, i, p, true);
+ } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
+ colDataAppend(pColInfo, i, p, false);
+ } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
+ char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
+ varDataSetLen(tmp, tagVal.nData);
+ memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
+ colDataAppend(pColInfo, i, tmp, false);
+#if TAG_FILTER_DEBUG
+ qDebug("tagfilter varch:%s", tmp+2);
+#endif
+ taosMemoryFree(tmp);
+ } else {
+ colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
+#if TAG_FILTER_DEBUG
+ if(pColInfo->info.type == TSDB_DATA_TYPE_INT){
+ qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
+ }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){
+ qDebug("tagfilter double:%f", *(double *)(&tagVal.i64));
+ }
+#endif
+ }
+ }
+ }
+ }
+ pResBlock->info.rows = rows;
+
+// int64_t st1 = taosGetTimestampUs();
+// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
+
+ pBlockList = taosArrayInit(2, POINTER_BYTES);
+ taosArrayPush(pBlockList, &pResBlock);
+
+ SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
+ code = createResultData(&type, rows, &output);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
+
+ code = scalarCalculate(pTagCond, pBlockList, &output);
+ if(code != TSDB_CODE_SUCCESS){
+ terrno = code;
+ }
+// int64_t st2 = taosGetTimestampUs();
+// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
+
+end:
+ taosHashCleanup(tags);
+ taosHashCleanup(ctx.colHash);
+ taosArrayDestroy(ctx.cInfoList);
+ blockDataDestroy(pResBlock);
+ taosArrayDestroy(pBlockList);
+ return output.columnData;
+}
+
int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
STableListInfo* pListInfo) {
int32_t code = TSDB_CODE_SUCCESS;
@@ -308,63 +513,70 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
}
uint64_t tableUid = pScanNode->uid;
-
pListInfo->suid = pScanNode->suid;
+ SArray* res = taosArrayInit(8, sizeof(uint64_t));
if (pScanNode->tableType == TSDB_SUPER_TABLE) {
if (pTagIndexCond) {
SIndexMetaArg metaArg = {
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
- SArray* res = taosArrayInit(8, sizeof(uint64_t));
+// int64_t stt = taosGetTimestampUs();
SIdxFltStatus status = SFLT_NOT_INDEX;
code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
if (code != 0 || status == SFLT_NOT_INDEX) {
qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
- // code = TSDB_CODE_INDEX_REBUILDING;
- code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList);
- } else {
- qDebug("success to get tableIds, size:%d, suid:%" PRIu64, (int)taosArrayGetSize(res), tableUid);
+ code = TDB_CODE_SUCCESS;
}
- for (int i = 0; i < taosArrayGetSize(res); i++) {
- STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
- taosArrayPush(pListInfo->pTableList, &info);
- }
- taosArrayDestroy(res);
- } else {
- code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList);
- }
-
- if (code != TSDB_CODE_SUCCESS) {
- qError("failed to get tableIds, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
- terrno = code;
- return code;
+// int64_t stt1 = taosGetTimestampUs();
+// qDebug("generate table list, cost:%ld us", stt1-stt);
+ }else if(!pTagCond){
+ vnodeGetCtbIdList(pVnode, pScanNode->suid, res);
}
} else { // Create one table group.
- STableKeyInfo info = {.uid = tableUid, .groupId = 0};
- taosArrayPush(pListInfo->pTableList, &info);
+ if(metaIsTableExist(metaHandle, tableUid)){
+ taosArrayPush(res, &tableUid);
+ }
}
if (pTagCond) {
+ SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond);
+ if(terrno != TDB_CODE_SUCCESS){
+ colDataDestroy(pColInfoData);
+ taosMemoryFreeClear(pColInfoData);
+ taosArrayDestroy(res);
+ return terrno;
+ }
+
int32_t i = 0;
- while (i < taosArrayGetSize(pListInfo->pTableList)) {
- STableKeyInfo* info = taosArrayGet(pListInfo->pTableList, i);
+ int32_t j = 0;
+ int32_t len = taosArrayGetSize(res);
+ while (i < taosArrayGetSize(res) && j < len && pColInfoData) {
+ void* var = POINTER_SHIFT(pColInfoData->pData, j * pColInfoData->info.bytes);
- bool qualified = true;
- code = isQualifiedTable(info, pTagCond, metaHandle, &qualified);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- if (!qualified) {
- taosArrayRemove(pListInfo->pTableList, i);
+ int64_t* uid = taosArrayGet(res, i);
+ qDebug("tagfilter get uid:%ld, res:%d", *uid, *(bool*)var);
+ if (*(bool*)var == false) {
+ taosArrayRemove(res, i);
+ j++;
continue;
}
i++;
+ j++;
}
+ colDataDestroy(pColInfoData);
+ taosMemoryFreeClear(pColInfoData);
}
+ for (int i = 0; i < taosArrayGetSize(res); i++) {
+ STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
+ taosArrayPush(pListInfo->pTableList, &info);
+ qDebug("tagfilter get uid:%ld", info.uid);
+ }
+
+ taosArrayDestroy(res);
+
pListInfo->pGroupList = taosArrayInit(4, POINTER_BYTES);
if (pListInfo->pGroupList == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -391,7 +603,10 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) {
int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) {
SMetaReader mr = {0};
metaReaderInit(&mr, pMeta, 0);
- metaGetTableEntryByUid(&mr, uid);
+ if(metaGetTableEntryByUid(&mr, uid) != 0){ // table not exist
+ metaReaderClear(&mr);
+ return TSDB_CODE_PAR_TABLE_NOT_EXIST;
+ }
SNodeList* groupNew = nodesCloneList(pGroupNode);
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 7115ad85a5..d8f63cb008 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -55,7 +55,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
taosArrayClear(pInfo->pBlockLists);
if (type == STREAM_INPUT__MERGED_SUBMIT) {
- ASSERT(numOfBlocks > 1);
+ // ASSERT(numOfBlocks > 1);
for (int32_t i = 0; i < numOfBlocks; i++) {
SSubmitReq* pReq = *(void**)POINTER_SHIFT(input, i * sizeof(void*));
taosArrayPush(pInfo->pBlockLists, &pReq);
@@ -348,7 +348,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
taosThreadOnce(&initPoolOnce, initRefPool);
atexit(cleanupRefPool);
- qDebug("start to create subplan task, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId);
+ qDebug("start to create subplan task, TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, pSubplan->id.queryId);
int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
if (code != TSDB_CODE_SUCCESS) {
@@ -374,7 +374,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
}
}
- qDebug("subplan task create completed, TID:0x%"PRIx64 " QID:0x%"PRIx64, taskId, pSubplan->id.queryId);
+ qDebug("subplan task create completed, TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, pSubplan->id.queryId);
_error:
// if failed to add ref for all tables in this query, abort current query
@@ -427,7 +427,7 @@ int waitMoment(SQInfo* pQInfo) {
#endif
static void freeBlock(void* param) {
- SSDataBlock* pBlock = *(SSDataBlock**) param;
+ SSDataBlock* pBlock = *(SSDataBlock**)param;
blockDataDestroy(pBlock);
}
@@ -467,12 +467,12 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
qDebug("%s execTask is launched", GET_TASKID(pTaskInfo));
- int32_t current = 0;
+ int32_t current = 0;
SSDataBlock* pRes = NULL;
int64_t st = taosGetTimestampUs();
- while((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) {
+ while ((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) {
SSDataBlock* p = createOneDataBlock(pRes, true);
current += p->info.rows;
ASSERT(p->info.rows > 0);
@@ -494,7 +494,7 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
qDebug("%s task suspended, %d rows in %d blocks returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
- GET_TASKID(pTaskInfo), current, (int32_t) taosArrayGetSize(pResList), total, 0, el / 1000.0);
+ GET_TASKID(pTaskInfo), current, (int32_t)taosArrayGetSize(pResList), total, 0, el / 1000.0);
atomic_store_64(&pTaskInfo->owner, 0);
return pTaskInfo->code;
@@ -632,7 +632,7 @@ int32_t qExtractStreamScanner(qTaskInfo_t tinfo, void** scanner) {
SOperatorInfo* pOperator = pTaskInfo->pRoot;
while (1) {
- uint8_t type = pOperator->operatorType;
+ uint16_t type = pOperator->operatorType;
if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
*scanner = pOperator->info;
return 0;
@@ -691,7 +691,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
pTaskInfo->streamInfo.prepareStatus = *pOffset;
if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
while (1) {
- uint8_t type = pOperator->operatorType;
+ uint16_t type = pOperator->operatorType;
pOperator->status = OP_OPENED;
// TODO add more check
if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 86b9036454..f7fb6cd405 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -76,6 +76,12 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define realloc u_realloc
#endif
+#define T_LONG_JMP(_obj, _c) \
+ do { \
+ assert((_c) != -1); \
+ longjmp((_obj), (_c)); \
+ } while (0);
+
#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
@@ -3131,6 +3137,7 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
initResultRow(resultRow);
pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
+ // releaseBufPage(pSup->pResultBuf, getBufPage(pSup->pResultBuf, pageId));
}
if (offset != length) {
@@ -3217,8 +3224,8 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa
}
static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag);
-static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo *pOperator, SFillOperatorInfo* pInfo, SResultInfo* pResultInfo,
- SExecTaskInfo* pTaskInfo) {
+static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo,
+ SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) {
pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
SSDataBlock* pResBlock = pInfo->pFinalRes;
@@ -3242,8 +3249,8 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo *pOperator, SFillOp
pInfo->existNewGroupBlock = NULL;
}
-static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo, SResultInfo* pResultInfo,
- SExecTaskInfo* pTaskInfo) {
+static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo,
+ SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) {
if (taosFillHasMoreResults(pInfo->pFillInfo)) {
int32_t numOfResultRows = pResultInfo->capacity - pInfo->pFinalRes->info.rows;
taosFillResultDataBlock(pInfo->pFillInfo, pInfo->pFinalRes, numOfResultRows);
@@ -3259,8 +3266,8 @@ static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOpera
static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag) {
SFillOperatorInfo* pInfo = pOperator->info;
- SExprSupp* pSup = &pOperator->exprSupp;
- SSDataBlock* pResBlock = pInfo->pFinalRes;
+ SExprSupp* pSup = &pOperator->exprSupp;
+ SSDataBlock* pResBlock = pInfo->pFinalRes;
setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false);
projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs, NULL);
@@ -3270,13 +3277,13 @@ static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlo
SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, pInfo->primarySrcSlotId);
colDataAssign(pDst, pSrc, pInfo->pRes->info.rows, &pResBlock->info);
- for(int32_t i = 0; i < pInfo->numOfNotFillExpr; ++i) {
+ for (int32_t i = 0; i < pInfo->numOfNotFillExpr; ++i) {
SFillColInfo* pCol = &pInfo->pFillInfo->pFillCol[i + pInfo->numOfExpr];
ASSERT(pCol->notFillCol);
SExprInfo* pExpr = pCol->pExpr;
- int32_t srcSlotId = pExpr->base.pParam[0].pCol->slotId;
- int32_t dstSlotId = pExpr->base.resSchema.slotId;
+ int32_t srcSlotId = pExpr->base.pParam[0].pCol->slotId;
+ int32_t dstSlotId = pExpr->base.resSchema.slotId;
SColumnInfoData* pDst1 = taosArrayGet(pInfo->pRes->pDataBlock, dstSlotId);
SColumnInfoData* pSrc1 = taosArrayGet(pBlock->pDataBlock, srcSlotId);
@@ -3315,7 +3322,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey);
} else {
- blockDataUpdateTsWindow(pBlock, pInfo->primaryTsCol);
+ blockDataUpdateTsWindow(pBlock, pInfo->primarySrcSlotId);
doApplyScalarCalculation(pOperator, pBlock, order, scanFlag);
if (pInfo->curGroupId == 0 || pInfo->curGroupId == pInfo->pRes->info.groupId) {
@@ -3664,7 +3671,7 @@ void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput) {
taosRemoveRef(exchangeObjRefPool, pExInfo->self);
}
-void freeSourceDataInfo(void *p) {
+void freeSourceDataInfo(void* p) {
SSourceDataInfo* pInfo = (SSourceDataInfo*)p;
taosMemoryFreeClear(pInfo->pRsp);
}
@@ -3694,8 +3701,8 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, win.skey);
w = getFirstQualifiedTimeWindow(win.skey, &w, pInterval, TSDB_ORDER_ASC);
- pInfo->pFillInfo =
- taosCreateFillInfo(w.skey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo, pInfo->primaryTsCol, order, id);
+ pInfo->pFillInfo = taosCreateFillInfo(w.skey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo,
+ pInfo->primaryTsCol, order, id);
pInfo->win = win;
pInfo->p = taosMemoryCalloc(numOfCols, POINTER_BYTES);
@@ -3721,10 +3728,10 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pInfo->numOfExpr);
pInfo->pNotFillExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pInfo->numOfNotFillExpr);
- SInterval* pInterval =
+ SInterval* pInterval =
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType
- ? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval
- : &((SIntervalAggOperatorInfo*)downstream->info)->interval;
+ ? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval
+ : &((SIntervalAggOperatorInfo*)downstream->info)->interval;
int32_t order = (pPhyFillNode->inputTsOrder == ORDER_ASC) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
int32_t type = convertFillType(pPhyFillNode->mode);
@@ -3741,9 +3748,9 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc,
&numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
- int32_t code =
- initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr, (SNodeListNode*)pPhyFillNode->pValues,
- pPhyFillNode->timeRange, pResultInfo->capacity, pTaskInfo->id.str, pInterval, type, order);
+ int32_t code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr,
+ (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity,
+ pTaskInfo->id.str, pInterval, type, order);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 77c5d073a3..7b13aa8ad8 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -13,11 +13,11 @@
* along with this program. If not, see .
*/
-#include "os.h"
#include "executorimpl.h"
#include "filter.h"
#include "function.h"
#include "functionMgt.h"
+#include "os.h"
#include "querynodes.h"
#include "systable.h"
#include "tname.h"
@@ -128,7 +128,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.skey);
assert(w.ekey >= pBlockInfo->window.skey);
- if (w.ekey < pBlockInfo->window.ekey) {
+ if (TMAX(w.skey, pBlockInfo->window.skey) <= TMIN(w.ekey, pBlockInfo->window.ekey)) {
return true;
}
@@ -139,7 +139,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
}
assert(w.ekey > pBlockInfo->window.ekey);
- if (w.skey <= pBlockInfo->window.ekey && w.skey > pBlockInfo->window.skey) {
+ if (TMAX(w.skey, pBlockInfo->window.skey) <= pBlockInfo->window.ekey) {
return true;
}
}
@@ -147,7 +147,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.ekey);
assert(w.skey <= pBlockInfo->window.ekey);
- if (w.skey > pBlockInfo->window.skey) {
+ if (TMAX(w.skey, pBlockInfo->window.skey) <= TMIN(w.ekey, pBlockInfo->window.ekey)) {
return true;
}
@@ -158,7 +158,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
}
assert(w.skey < pBlockInfo->window.skey);
- if (w.ekey < pBlockInfo->window.ekey && w.ekey >= pBlockInfo->window.skey) {
+ if (pBlockInfo->window.skey <= TMIN(w.ekey, pBlockInfo->window.ekey)) {
return true;
}
}
@@ -178,8 +178,8 @@ static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t gro
STableScanInfo* pTableScanInfo = pOperator->info;
- SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf, GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
+ SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
+ GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
if (p1 == NULL) {
return NULL;
@@ -238,7 +238,7 @@ static FORCE_INLINE bool doFilterByBlockSMA(const SNode* pFilterNode, SColumnDat
// todo move to the initialization function
int32_t code = filterInitFromNode((SNode*)pFilterNode, &filter, 0);
- bool keep = filterRangeExecute(filter, pColsAgg, numOfCols, numOfRows);
+ bool keep = filterRangeExecute(filter, pColsAgg, numOfCols, numOfRows);
filterFreeInfo(filter);
return keep;
@@ -312,9 +312,9 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) {
pCost->loadBlockStatis += 1;
- loadSMA = true; // mark the operation of load sma;
+ loadSMA = true; // mark the operation of load sma;
bool success = doLoadBlockSMA(pTableScanInfo, pBlock, pTaskInfo);
- if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead
+ if (success) { // failed to load the block sma data, data block statistics does not exist, load data block instead
qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
return TSDB_CODE_SUCCESS;
@@ -440,7 +440,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int
} else { // these are tags
STagVal tagVal = {0};
tagVal.cid = pExpr->base.pParam[0].pCol->colId;
- const char* p = metaGetTableTagVal(&mr.me, pColInfoData->info.type, &tagVal);
+ const char* p = metaGetTableTagVal(mr.me.ctbEntry.pTags, pColInfoData->info.type, &tagVal);
char* data = NULL;
if (pColInfoData->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
@@ -454,7 +454,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int
colDataAppendNNULL(pColInfoData, 0, pBlock->info.rows);
} else if (pColInfoData->info.type != TSDB_DATA_TYPE_JSON) {
colDataAppendNItems(pColInfoData, 0, data, pBlock->info.rows);
- } else { // todo opt for json tag
+ } else { // todo opt for json tag
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
colDataAppend(pColInfoData, i, data, false);
}
@@ -571,7 +571,10 @@ static SSDataBlock* doTableScanGroup(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
pTableScanInfo->scanFlag = REPEAT_SCAN;
- qDebug("%s start to repeat ascending order scan data SELECT last_row(*),hostname from cpu group by hostname;blocks due to query func required", GET_TASKID(pTaskInfo));
+ qDebug(
+ "%s start to repeat ascending order scan data SELECT last_row(*),hostname from cpu group by hostname;blocks "
+ "due to query func required",
+ GET_TASKID(pTaskInfo));
// do prepare for the next round table scan operation
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
@@ -1172,19 +1175,21 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP);
TSKEY* tsCol = (TSKEY*)pColDataInfo->pData;
+ bool tableInserted = updateInfoIsTableInserted(pInfo->pUpdateInfo, pBlock->info.uid);
for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
- bool isClosed = false;
+ bool isClosed = false;
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
- if (isOverdue(tsCol[rowId], &pInfo->twAggSup)) {
+ if (tableInserted && isOverdue(tsCol[rowId], &pInfo->twAggSup)) {
win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC);
isClosed = isCloseWindow(&win, &pInfo->twAggSup);
}
// must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
- if ((update || (isSignleIntervalWindow(pInfo) && isClosed &&
- isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup))) && out) {
+ bool closedWin = isClosed && isSignleIntervalWindow(pInfo) &&
+ isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup);
+ if ((update || closedWin) && out) {
appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid);
}
}
@@ -1391,8 +1396,8 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader);
- updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId,version);
+ uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader);
+ updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId, version);
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
return pSDB;
@@ -1446,7 +1451,8 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
setBlockIntoRes(pInfo, &block);
- if (updateInfoIgnore(pInfo->pUpdateInfo, &pInfo->pRes->info.window, pInfo->pRes->info.groupId, pInfo->pRes->info.version)) {
+ if (updateInfoIgnore(pInfo->pUpdateInfo, &pInfo->pRes->info.window, pInfo->pRes->info.groupId,
+ pInfo->pRes->info.version)) {
printDataBlock(pInfo->pRes, "stream scan ignore");
blockDataCleanup(pInfo->pRes);
continue;
@@ -1643,6 +1649,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
}
taosArrayDestroy(tableIdList);
memcpy(&pTaskInfo->streamInfo.tableCond, &pTSInfo->cond, sizeof(SQueryTableDataCond));
+ } else {
+ taosArrayDestroy(pColIds);
}
// create the pseduo columns info
@@ -2032,10 +2040,34 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
metaReaderClear(&smr);
if (numOfRows >= pOperator->resultInfo.capacity) {
- break;
+ p->info.rows = numOfRows;
+ pInfo->pRes->info.rows = numOfRows;
+
+ relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
+ doFilterResult(pInfo);
+
+ blockDataCleanup(p);
+ numOfRows = 0;
+
+ if (pInfo->pRes->info.rows > 0) {
+ break;
+ }
}
}
+ if (numOfRows > 0) {
+ p->info.rows = numOfRows;
+ pInfo->pRes->info.rows = numOfRows;
+
+ relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
+ doFilterResult(pInfo);
+
+ blockDataCleanup(p);
+ numOfRows = 0;
+ }
+
+ blockDataDestroy(p);
+
// todo temporarily free the cursor here, the true reason why the free is not valid needs to be found
if (ret != 0) {
metaCloseTbCursor(pInfo->pCur);
@@ -2043,14 +2075,6 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}
- p->info.rows = numOfRows;
- pInfo->pRes->info.rows = numOfRows;
-
- relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
- doFilterResult(pInfo);
-
- blockDataDestroy(p);
-
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
@@ -2207,10 +2231,34 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
colDataAppend(pColInfoData, numOfRows, n, false);
if (++numOfRows >= pOperator->resultInfo.capacity) {
- break;
+ p->info.rows = numOfRows;
+ pInfo->pRes->info.rows = numOfRows;
+
+ relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
+ doFilterResult(pInfo);
+
+ blockDataCleanup(p);
+ numOfRows = 0;
+
+ if (pInfo->pRes->info.rows > 0) {
+ break;
+ }
}
}
+ if (numOfRows > 0) {
+ p->info.rows = numOfRows;
+ pInfo->pRes->info.rows = numOfRows;
+
+ relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
+ doFilterResult(pInfo);
+
+ blockDataCleanup(p);
+ numOfRows = 0;
+ }
+
+ blockDataDestroy(p);
+
// todo temporarily free the cursor here, the true reason why the free is not valid needs to be found
if (ret != 0) {
metaCloseTbCursor(pInfo->pCur);
@@ -2218,14 +2266,6 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
doSetOperatorCompleted(pOperator);
}
- p->info.rows = numOfRows;
- pInfo->pRes->info.rows = numOfRows;
-
- relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
- doFilterResult(pInfo);
-
- blockDataDestroy(p);
-
pInfo->loadInfo.totalRows += pInfo->pRes->info.rows;
return (pInfo->pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
@@ -2249,7 +2289,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
// build message and send to mnode to fetch the content of system tables.
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SSysTableScanInfo* pInfo = pOperator->info;
- char dbName[TSDB_DB_NAME_LEN] = {0};
+ char dbName[TSDB_DB_NAME_LEN] = {0};
const char* name = tNameGetTableName(&pInfo->name);
if (pInfo->showRewrite) {
@@ -2261,8 +2301,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
return sysTableScanUserTables(pOperator);
} else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) {
return sysTableScanUserTags(pOperator);
- } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 &&
- pInfo->showRewrite && IS_SYS_DBNAME(dbName)) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && pInfo->showRewrite &&
+ IS_SYS_DBNAME(dbName)) {
return sysTableScanUserSTables(pOperator);
} else { // load the meta from mnode of the given epset
if (pOperator->status == OP_EXEC_DONE) {
@@ -2500,7 +2540,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
} else { // it is a tag value
STagVal val = {0};
val.cid = pExprInfo[j].base.pParam[0].pCol->colId;
- const char* p = metaGetTableTagVal(&mr.me, pDst->info.type, &val);
+ const char* p = metaGetTableTagVal(mr.me.ctbEntry.pTags, pDst->info.type, &val);
char* data = NULL;
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
@@ -2542,7 +2582,7 @@ static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
pInfo->pRes = blockDataDestroy(pInfo->pRes);
taosArrayDestroy(pInfo->pColMatchInfo);
-
+
taosMemoryFreeClear(param);
}
@@ -2598,7 +2638,6 @@ _error:
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
const char* idStr) {
-
int64_t st = taosGetTimestampUs();
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
@@ -2607,7 +2646,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
}
int64_t st1 = taosGetTimestampUs();
- qDebug("generate queried table list completed, elapsed time:%.2f ms %s", (st1-st)/1000.0, idStr);
+ qDebug("generate queried table list completed, elapsed time:%.2f ms %s", (st1 - st) / 1000.0, idStr);
if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
qDebug("no table qualified for query, %s" PRIx64, idStr);
@@ -2621,7 +2660,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
}
int64_t st2 = taosGetTimestampUs();
- qDebug("generate group id map completed, elapsed time:%.2f ms %s", (st2-st1)/1000.0, idStr);
+ qDebug("generate group id map completed, elapsed time:%.2f ms %s", (st2 - st1) / 1000.0, idStr);
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 6418f5305c..9b9a38c7ea 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -871,7 +871,6 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_
static int32_t saveWinResultRow(SResultRow* result, uint64_t groupId, SHashObj* pUpdatedMap) {
return saveWinResult(result->win.skey, result->pageId, result->offset, groupId, pUpdatedMap);
- ;
}
static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpdated) {
@@ -910,11 +909,11 @@ int32_t compareWinRes(void* pKey, void* data, int32_t index) {
}
static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) {
- if (!pUpdatedMap || taosHashGetSize(pUpdatedMap) == 0) {
+ int32_t delSize = taosArrayGetSize(pDelWins);
+ if (taosHashGetSize(pUpdatedMap) == 0 || delSize == 0) {
return;
}
- int32_t delSize = taosArrayGetSize(pDelWins);
- void* pIte = NULL;
+ void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
SResKeyPos* pResKey = (SResKeyPos*)pIte;
int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes);
@@ -1595,7 +1594,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* downstream = pOperator->pDownstream[0];
SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
@@ -1706,14 +1705,19 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
blockDataDestroy(pInfo->pPullDataRes);
taosArrayDestroy(pInfo->pRecycledPages);
blockDataDestroy(pInfo->pUpdateRes);
+ taosArrayDestroy(pInfo->pDelWins);
+ blockDataDestroy(pInfo->pDelRes);
if (pInfo->pChildren) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i);
destroyStreamFinalIntervalOperatorInfo(pChildOp->info, numOfOutput);
+ taosMemoryFree(pChildOp->pDownstream);
+ cleanupExprSupp(&pChildOp->exprSupp);
taosMemoryFreeClear(pChildOp);
}
+ taosArrayDestroy(pInfo->pChildren);
}
nodesDestroyNode((SNode*)pInfo->pPhyNode);
colDataDestroy(&pInfo->twAggSup.timeWindowData);
@@ -1780,7 +1784,7 @@ void increaseTs(SqlFunctionCtx* pCtx) {
}
}
-void initIntervalDownStream(SOperatorInfo* downstream, uint8_t type, SAggSupporter* pSup) {
+void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) {
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
// Todo(liuyao) support partition by column
return;
@@ -1881,62 +1885,6 @@ _error:
return NULL;
}
-SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp* pTwAggSupp, SExecTaskInfo* pTaskInfo) {
- SIntervalAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SIntervalAggOperatorInfo));
- SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
- if (pInfo == NULL || pOperator == NULL) {
- goto _error;
- }
-
- pOperator->pTaskInfo = pTaskInfo;
- pInfo->inputOrder = TSDB_ORDER_ASC;
- pInfo->interval = *pInterval;
- pInfo->execModel = OPTR_EXEC_MODEL_STREAM;
- pInfo->win = pTaskInfo->window;
- pInfo->twAggSup = *pTwAggSupp;
- pInfo->primaryTsIndex = primaryTsSlotId;
-
- int32_t numOfRows = 4096;
- size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
-
- initResultSizeInfo(&pOperator->resultInfo, numOfRows);
- int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&pInfo->binfo, pResBlock);
- initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pInfo->win);
-
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- initResultRowInfo(&pInfo->binfo.resultRowInfo);
-
- pOperator->name = "StreamTimeIntervalAggOperator";
- pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL;
- pOperator->blocking = true;
- pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->info = pInfo;
-
- pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doStreamIntervalAgg, doStreamIntervalAgg, NULL,
- destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
-
- code = appendDownstream(pOperator, &downstream, 1);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- return pOperator;
-
-_error:
- destroyIntervalOperatorInfo(pInfo, numOfCols);
- taosMemoryFreeClear(pOperator);
- pTaskInfo->code = code;
- return NULL;
-}
-
// todo handle multiple timeline cases. assume no timeline interweaving
static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperatorInfo* pInfo, SSDataBlock* pBlock) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -2154,7 +2102,7 @@ static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlo
static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock) {
int32_t rows = pResBlock->info.rows;
-
+ blockDataEnsureCapacity(pResBlock, rows + 1);
// todo set the correct primary timestamp column
// output the result
@@ -2408,11 +2356,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
break;
}
}
+ }
- if (pSliceInfo->current > pSliceInfo->win.ekey) {
- doSetOperatorCompleted(pOperator);
- break;
- }
+ if (pSliceInfo->current > pSliceInfo->win.ekey) {
+ doSetOperatorCompleted(pOperator);
+ break;
}
if (ts == pSliceInfo->current) {
@@ -2644,7 +2592,6 @@ void destroyTimeSliceOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-
SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo) {
STimeSliceOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(STimeSliceOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
@@ -3105,11 +3052,12 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
- SOperatorInfo* downstream = pOperator->pDownstream[0];
- SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP);
- SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
- TSKEY maxTs = INT64_MIN;
+
+ SOperatorInfo* downstream = pOperator->pDownstream[0];
+ SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
+ TSKEY maxTs = INT64_MIN;
SExprSupp* pSup = &pOperator->exprSupp;
@@ -3542,7 +3490,7 @@ void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t num
}
void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int64_t gap, int64_t waterMark,
- uint8_t type) {
+ uint16_t type) {
ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
SStreamScanInfo* pScanInfo = downstream->info;
pScanInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = pAggSup, .gap = gap, .parentType = type};
diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c
index 7989ad2b5a..6b2edf0d5e 100644
--- a/source/libs/executor/src/tsimplehash.c
+++ b/source/libs/executor/src/tsimplehash.c
@@ -14,8 +14,8 @@
*/
#include "tsimplehash.h"
-#include "os.h"
#include "taoserror.h"
+#include "tlog.h"
#define SHASH_DEFAULT_LOAD_FACTOR 0.75
#define HASH_MAX_CAPACITY (1024 * 1024 * 16)
@@ -31,19 +31,21 @@
taosMemoryFreeClear(_n); \
} while (0);
+#pragma pack(push, 4)
typedef struct SHNode {
struct SHNode *next;
+ uint32_t keyLen : 20;
+ uint32_t dataLen : 12;
char data[];
} SHNode;
+#pragma pack(pop)
struct SSHashObj {
SHNode **hashList;
size_t capacity; // number of slots
- int64_t size; // number of elements in hash table
- _hash_fn_t hashFp; // hash function
- _equal_fn_t equalFp; // equal function
- int32_t keyLen;
- int32_t dataLen;
+ int64_t size; // number of elements in hash table
+ _hash_fn_t hashFp; // hash function
+ _equal_fn_t equalFp; // equal function
};
static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
@@ -54,7 +56,7 @@ static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
return i;
}
-SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t dataLen) {
+SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) {
ASSERT(fn != NULL);
if (capacity == 0) {
@@ -74,8 +76,6 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn, size_t keyLen, size_t
pHashObj->hashFp = fn;
ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0);
- pHashObj->keyLen = keyLen;
- pHashObj->dataLen = dataLen;
pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *));
if (!pHashObj->hashList) {
@@ -93,40 +93,41 @@ int32_t tSimpleHashGetSize(const SSHashObj *pHashObj) {
return (int32_t)atomic_load_64((int64_t *)&pHashObj->size);
}
-static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *pData, size_t dsize, uint32_t hashVal) {
- SHNode *pNewNode = taosMemoryMalloc(sizeof(SHNode) + keyLen + dsize);
+static SHNode *doCreateHashNode(const void *key, size_t keyLen, const void *data, size_t dataLen, uint32_t hashVal) {
+ SHNode *pNewNode = taosMemoryMalloc(sizeof(SHNode) + keyLen + dataLen);
if (!pNewNode) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
-
+ pNewNode->keyLen = keyLen;
+ pNewNode->dataLen = dataLen;
pNewNode->next = NULL;
- memcpy(GET_SHASH_NODE_DATA(pNewNode), pData, dsize);
- memcpy(GET_SHASH_NODE_KEY(pNewNode, dsize), key, keyLen);
+ memcpy(GET_SHASH_NODE_DATA(pNewNode), data, dataLen);
+ memcpy(GET_SHASH_NODE_KEY(pNewNode, dataLen), key, keyLen);
return pNewNode;
}
-static void taosHashTableResize(SSHashObj *pHashObj) {
+static void tSimpleHashTableResize(SSHashObj *pHashObj) {
if (!SHASH_NEED_RESIZE(pHashObj)) {
return;
}
int32_t newCapacity = (int32_t)(pHashObj->capacity << 1u);
if (newCapacity > HASH_MAX_CAPACITY) {
- // uDebug("current capacity:%zu, maximum capacity:%d, no resize applied due to limitation is reached",
- // pHashObj->capacity, HASH_MAX_CAPACITY);
+ uDebug("current capacity:%zu, maximum capacity:%" PRIu64 ", no resize applied due to limitation is reached",
+ pHashObj->capacity, HASH_MAX_CAPACITY);
return;
}
int64_t st = taosGetTimestampUs();
void *pNewEntryList = taosMemoryRealloc(pHashObj->hashList, sizeof(void *) * newCapacity);
if (!pNewEntryList) {
- // qWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity);
+ uWarn("hash resize failed due to out of memory, capacity remain:%zu", pHashObj->capacity);
return;
}
size_t inc = newCapacity - pHashObj->capacity;
- memset((char *)pNewEntryList + pHashObj->capacity * sizeof(void *), 0, inc);
+ memset((char *)pNewEntryList + pHashObj->capacity * sizeof(void *), 0, inc * sizeof(void *));
pHashObj->hashList = pNewEntryList;
pHashObj->capacity = newCapacity;
@@ -141,8 +142,8 @@ static void taosHashTableResize(SSHashObj *pHashObj) {
SHNode *pPrev = NULL;
while (pNode != NULL) {
- void *key = GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen);
- uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen);
+ void *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pNode->keyLen);
int32_t newIdx = HASH_INDEX(hashVal, pHashObj->capacity);
pNext = pNode->next;
@@ -170,23 +171,23 @@ static void taosHashTableResize(SSHashObj *pHashObj) {
// ((double)pHashObj->size) / pHashObj->capacity, (et - st) / 1000.0);
}
-int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) {
+int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen) {
if (!pHashObj || !key) {
return -1;
}
- uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen);
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen);
// need the resize process, write lock applied
if (SHASH_NEED_RESIZE(pHashObj)) {
- taosHashTableResize(pHashObj);
+ tSimpleHashTableResize(pHashObj);
}
int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
SHNode *pNode = pHashObj->hashList[slot];
if (!pNode) {
- SHNode *pNewNode = doCreateHashNode(key, pHashObj->keyLen, data, pHashObj->dataLen, hashVal);
+ SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal);
if (!pNewNode) {
return -1;
}
@@ -197,14 +198,14 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) {
}
while (pNode) {
- if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen), key, pHashObj->keyLen) == 0) {
+ if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) {
break;
}
pNode = pNode->next;
}
if (!pNode) {
- SHNode *pNewNode = doCreateHashNode(key, pHashObj->keyLen, data, pHashObj->dataLen, hashVal);
+ SHNode *pNewNode = doCreateHashNode(key, keyLen, data, dataLen, hashVal);
if (!pNewNode) {
return -1;
}
@@ -212,16 +213,16 @@ int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, const void *data) {
pHashObj->hashList[slot] = pNewNode;
atomic_add_fetch_64(&pHashObj->size, 1);
} else { // update data
- memcpy(GET_SHASH_NODE_DATA(pNode), data, pHashObj->dataLen);
+ memcpy(GET_SHASH_NODE_DATA(pNode), data, dataLen);
}
return 0;
}
-static FORCE_INLINE SHNode *doSearchInEntryList(SSHashObj *pHashObj, const void *key, int32_t index) {
+static FORCE_INLINE SHNode *doSearchInEntryList(SSHashObj *pHashObj, const void *key, size_t keyLen, int32_t index) {
SHNode *pNode = pHashObj->hashList[index];
while (pNode) {
- if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen), key, pHashObj->keyLen) == 0) {
+ if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) {
break;
}
@@ -233,12 +234,12 @@ static FORCE_INLINE SHNode *doSearchInEntryList(SSHashObj *pHashObj, const void
static FORCE_INLINE bool taosHashTableEmpty(const SSHashObj *pHashObj) { return tSimpleHashGetSize(pHashObj) == 0; }
-void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) {
+void *tSimpleHashGet(SSHashObj *pHashObj, const void *key, size_t keyLen) {
if (!pHashObj || taosHashTableEmpty(pHashObj) || !key) {
return NULL;
}
- uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen);
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen);
int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
SHNode *pNode = pHashObj->hashList[slot];
@@ -247,7 +248,7 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) {
}
char *data = NULL;
- pNode = doSearchInEntryList(pHashObj, key, slot);
+ pNode = doSearchInEntryList(pHashObj, key, keyLen, slot);
if (pNode != NULL) {
data = GET_SHASH_NODE_DATA(pNode);
}
@@ -255,19 +256,19 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key) {
return data;
}
-int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key) {
+int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) {
if (!pHashObj || !key) {
return TSDB_CODE_FAILED;
}
- uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)pHashObj->keyLen);
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen);
int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
SHNode *pNode = pHashObj->hashList[slot];
SHNode *pPrev = NULL;
while (pNode) {
- if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pHashObj->dataLen), key, pHashObj->keyLen) == 0) {
+ if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) {
if (!pPrev) {
pHashObj->hashList[slot] = pNode->next;
} else {
@@ -312,6 +313,7 @@ void tSimpleHashCleanup(SSHashObj *pHashObj) {
tSimpleHashClear(pHashObj);
taosMemoryFreeClear(pHashObj->hashList);
+ taosMemoryFree(pHashObj);
}
size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) {
@@ -322,23 +324,13 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) {
return (pHashObj->capacity * sizeof(void *)) + sizeof(SHNode) * tSimpleHashGetSize(pHashObj) + sizeof(SSHashObj);
}
-void *tSimpleHashGetKey(const SSHashObj *pHashObj, void *data, size_t *keyLen) {
-#if 0
- int32_t offset = offsetof(SHNode, data);
- SHNode *node = ((SHNode *)(char *)data - offset);
+void *tSimpleHashGetKey(void *data, size_t *keyLen) {
+ SHNode *node = (SHNode *)((char *)data - offsetof(SHNode, data));
if (keyLen) {
- *keyLen = pHashObj->keyLen;
+ *keyLen = node->keyLen;
}
- return POINTER_SHIFT(data, pHashObj->dataLen);
-
- return GET_SHASH_NODE_KEY(node, pHashObj->dataLen);
-#endif
- if (keyLen) {
- *keyLen = pHashObj->keyLen;
- }
-
- return POINTER_SHIFT(data, pHashObj->dataLen);
+ return POINTER_SHIFT(data, node->dataLen);
}
void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
@@ -376,5 +368,52 @@ void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
return GET_SHASH_NODE_DATA(pNode);
}
+ return NULL;
+}
+
+void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, int32_t *iter) {
+ if (!pHashObj) {
+ return NULL;
+ }
+
+ SHNode *pNode = NULL;
+
+ if (!data) {
+ for (int32_t i = 0; i < pHashObj->capacity; ++i) {
+ pNode = pHashObj->hashList[i];
+ if (!pNode) {
+ continue;
+ }
+ *iter = i;
+ if (key) {
+ *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
+ }
+ return GET_SHASH_NODE_DATA(pNode);
+ }
+ return NULL;
+ }
+
+ pNode = (SHNode *)((char *)data - offsetof(SHNode, data));
+
+ if (pNode->next) {
+ if (key) {
+ *key = GET_SHASH_NODE_KEY(pNode->next, pNode->next->dataLen);
+ }
+ return GET_SHASH_NODE_DATA(pNode->next);
+ }
+
+ ++(*iter);
+ for (int32_t i = *iter; i < pHashObj->capacity; ++i) {
+ pNode = pHashObj->hashList[i];
+ if (!pNode) {
+ continue;
+ }
+ *iter = i;
+ if (key) {
+ *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
+ }
+ return GET_SHASH_NODE_DATA(pNode);
+ }
+
return NULL;
}
\ No newline at end of file
diff --git a/source/libs/executor/test/tSimpleHashTests.cpp b/source/libs/executor/test/tSimpleHashTests.cpp
index a17a7146ea..acb6d434b4 100644
--- a/source/libs/executor/test/tSimpleHashTests.cpp
+++ b/source/libs/executor/test/tSimpleHashTests.cpp
@@ -32,31 +32,33 @@
TEST(testCase, tSimpleHashTest) {
SSHashObj *pHashObj =
- tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), sizeof(int64_t), sizeof(int64_t));
+ tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
assert(pHashObj != nullptr);
ASSERT_EQ(0, tSimpleHashGetSize(pHashObj));
+ size_t keyLen = sizeof(int64_t);
+ size_t dataLen = sizeof(int64_t);
+
int64_t originKeySum = 0;
for (int64_t i = 1; i <= 100; ++i) {
originKeySum += i;
- tSimpleHashPut(pHashObj, (const void *)&i, (const void *)&i);
+ tSimpleHashPut(pHashObj, (const void *)&i, keyLen, (const void *)&i, dataLen);
ASSERT_EQ(i, tSimpleHashGetSize(pHashObj));
}
for (int64_t i = 1; i <= 100; ++i) {
- void *data = tSimpleHashGet(pHashObj, (const void *)&i);
+ void *data = tSimpleHashGet(pHashObj, (const void *)&i, keyLen);
ASSERT_EQ(i, *(int64_t *)data);
}
-
void *data = NULL;
int32_t iter = 0;
int64_t keySum = 0;
int64_t dataSum = 0;
while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
- void *key = tSimpleHashGetKey(pHashObj, data, NULL);
+ void *key = tSimpleHashGetKey(data, NULL);
keySum += *(int64_t *)key;
dataSum += *(int64_t *)data;
}
@@ -65,7 +67,7 @@ TEST(testCase, tSimpleHashTest) {
ASSERT_EQ(keySum, originKeySum);
for (int64_t i = 1; i <= 100; ++i) {
- tSimpleHashRemove(pHashObj, (const void *)&i);
+ tSimpleHashRemove(pHashObj, (const void *)&i, keyLen);
ASSERT_EQ(100 - i, tSimpleHashGetSize(pHashObj));
}
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index b234ff97c9..ed82e4cb50 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -192,6 +192,24 @@ static bool validateTimezoneFormat(const SValueNode* pVal) {
return true;
}
+static int32_t countTrailingSpaces(const SValueNode* pVal, bool isLtrim) {
+ int32_t numOfSpaces = 0;
+ int32_t len = varDataLen(pVal->datum.p);
+ char* str = varDataVal(pVal->datum.p);
+
+ int32_t startPos = isLtrim ? 0 : len - 1;
+ int32_t step = isLtrim ? 1 : -1;
+ for (int32_t i = startPos; i < len || i >= 0; i += step) {
+ if (!isspace(str[i])) {
+ break;
+ }
+ numOfSpaces++;
+ }
+
+ return numOfSpaces;
+
+}
+
void static addTimezoneParam(SNodeList* pList) {
char buf[6] = {0};
time_t t = taosTime(NULL);
@@ -293,6 +311,40 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
return TSDB_CODE_SUCCESS;
}
+static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) {
+ if (1 != LIST_LENGTH(pFunc->pParameterList)) {
+ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
+ if (!IS_VAR_DATA_TYPE(pPara1->resType.type)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ int32_t numOfSpaces = 0;
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 0);
+ // for select trim functions with constant value from table,
+ // need to set the proper result result schema bytes to avoid
+ // trailing garbage characters
+ if (nodeType(pParamNode1) == QUERY_NODE_VALUE) {
+ SValueNode* pValue = (SValueNode*)pParamNode1;
+ numOfSpaces = countTrailingSpaces(pValue, isLtrim);
+ }
+
+
+ int32_t resBytes = pPara1->resType.bytes - numOfSpaces;
+ pFunc->node.resType = (SDataType){.bytes = resBytes, .type = pPara1->resType.type};
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t translateLtrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+ return translateTrimStr(pFunc, pErrBuf, len, true);
+}
+
+static int32_t translateRtrim(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+ return translateTrimStr(pFunc, pErrBuf, len, false);
+}
+
static int32_t translateLogarithm(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
if (1 != numOfParams && 2 != numOfParams) {
@@ -1451,11 +1503,17 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
+ uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 0));
+ uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ if (!IS_NUMERIC_TYPE(paraType) || QUERY_NODE_VALUE == nodeType) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
if (3 <= numOfParams) {
int64_t timeVal[2] = {0};
for (int32_t i = 1; i < 3; ++i) {
- uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i));
- uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
+ nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i));
+ paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
if (!IS_VAR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1473,8 +1531,8 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
if (4 == numOfParams) {
- uint8_t nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 3));
- uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type;
+ nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, 3));
+ paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type;
if (!IS_INTEGER_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -2827,7 +2885,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "ltrim",
.type = FUNCTION_TYPE_LTRIM,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
- .translateFunc = translateInOutStr,
+ .translateFunc = translateLtrim,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = ltrimFunction,
@@ -2837,7 +2895,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "rtrim",
.type = FUNCTION_TYPE_RTRIM,
.classification = FUNC_MGT_SCALAR_FUNC | FUNC_MGT_STRING_FUNC,
- .translateFunc = translateInOutStr,
+ .translateFunc = translateRtrim,
.getEnvFunc = NULL,
.initFunc = NULL,
.sprocessFunc = rtrimFunction,
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index de72c32fa1..5051dcd65c 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -1210,7 +1210,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
int64_t val = GET_INT64_VAL(tval);
if ((prev < val) ^ isMinFunc) {
- pBuf->v = val;
+ *(int64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
@@ -1223,7 +1223,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
uint64_t val = GET_UINT64_VAL(tval);
if ((prev < val) ^ isMinFunc) {
- pBuf->v = val;
+ *(uint64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
@@ -1231,11 +1231,11 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
double prev = 0;
- GET_TYPED_DATA(prev, int64_t, type, &pBuf->v);
+ GET_TYPED_DATA(prev, double, type, &pBuf->v);
double val = GET_DOUBLE_VAL(tval);
if ((prev < val) ^ isMinFunc) {
- pBuf->v = val;
+ *(double*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
@@ -1243,11 +1243,11 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
double prev = 0;
- GET_TYPED_DATA(prev, int64_t, type, &pBuf->v);
+ GET_TYPED_DATA(prev, double, type, &pBuf->v);
double val = GET_DOUBLE_VAL(tval);
if ((prev < val) ^ isMinFunc) {
- pBuf->v = val;
+ *(double*)&pBuf->v = val;
}
if (pCtx->subsidiaries.num > 0) {
@@ -3970,16 +3970,16 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
TSKEY* ptsList = (int64_t*)colDataGetData(pCol, 0);
if (pCtx->order == TSDB_ORDER_DESC) {
if (pCtx->start.key == INT64_MIN) {
- pInfo->max =
- (pInfo->max < ptsList[start + pInput->numOfRows - 1]) ? ptsList[start + pInput->numOfRows - 1] : pInfo->max;
+ pInfo->max = (pInfo->max < ptsList[start]) ? ptsList[start] : pInfo->max;
} else {
pInfo->max = pCtx->start.key + 1;
}
- if (pCtx->end.key != INT64_MIN) {
- pInfo->min = pCtx->end.key;
+ if (pCtx->end.key == INT64_MIN) {
+ pInfo->min = (pInfo->min > ptsList[start + pInput->numOfRows - 1]) ?
+ ptsList[start + pInput->numOfRows - 1] : pInfo->min;
} else {
- pInfo->min = ptsList[start];
+ pInfo->min = pCtx->end.key;
}
} else {
if (pCtx->start.key == INT64_MIN) {
@@ -3988,10 +3988,11 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
pInfo->min = pCtx->start.key;
}
- if (pCtx->end.key != INT64_MIN) {
- pInfo->max = pCtx->end.key + 1;
+ if (pCtx->end.key == INT64_MIN) {
+ pInfo->max = (pInfo->max < ptsList[start + pInput->numOfRows - 1]) ?
+ ptsList[start + pInput->numOfRows - 1] : pInfo->max;
} else {
- pInfo->max = ptsList[start + pInput->numOfRows - 1];
+ pInfo->max = pCtx->end.key + 1;
}
}
}
@@ -4918,6 +4919,16 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {
return numOfElems;
}
+static SSampleInfo* getSampleOutputInfo(SqlFunctionCtx* pCtx) {
+ SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
+ SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
+
+ pInfo->data = (char*)pInfo + sizeof(SSampleInfo);
+ pInfo->tuplePos = (STuplePos*)((char*)pInfo + sizeof(SSampleInfo) + pInfo->samples * pInfo->colBytes);
+
+ return pInfo;
+}
+
bool getSampleFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0);
SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
@@ -4972,7 +4983,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
int32_t sampleFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
- SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
+ SSampleInfo* pInfo = getSampleOutputInfo(pCtx);
SInputColumnInfoData* pInput = &pCtx->input;
@@ -4998,7 +5009,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
- SSampleInfo* pInfo = GET_ROWCELL_INTERBUF(pEntryInfo);
+ SSampleInfo* pInfo = getSampleOutputInfo(pCtx);
pEntryInfo->complete = true;
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
@@ -5563,6 +5574,7 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
if (pCtx->end.key != INT64_MIN) {
pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end);
pInfo->p = pCtx->end;
+ numOfElems += 1;
}
pInfo->win.ekey = pInfo->p.key;
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index a6546f3299..0f32001c47 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -4673,7 +4673,6 @@ static int32_t jsonToNode(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkNodeType, pNode->type, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, nodesNodeName(pNode->type), jsonToSpecificNode, pNode);
if (TSDB_CODE_SUCCESS != code) {
diff --git a/source/libs/nodes/src/nodesToSQLFuncs.c b/source/libs/nodes/src/nodesToSQLFuncs.c
index 3b129740e8..e521c57c3d 100644
--- a/source/libs/nodes/src/nodesToSQLFuncs.c
+++ b/source/libs/nodes/src/nodesToSQLFuncs.c
@@ -21,36 +21,89 @@
#include "taoserror.h"
#include "thash.h"
-char *gOperatorStr[] = {NULL,
- "+",
- "-",
- "*",
- "/",
- "%",
- "-",
- "&",
- "|",
- ">",
- ">=",
- "<",
- "<=",
- "=",
- "<>",
- "IN",
- "NOT IN",
- "LIKE",
- "NOT LIKE",
- "MATCH",
- "NMATCH",
- "IS NULL",
- "IS NOT NULL",
- "IS TRUE",
- "IS FALSE",
- "IS UNKNOWN",
- "IS NOT TRUE",
- "IS NOT FALSE",
- "IS NOT UNKNOWN"};
-char *gLogicConditionStr[] = {"AND", "OR", "NOT"};
+const char *operatorTypeStr(EOperatorType type) {
+ switch (type) {
+ case OP_TYPE_ADD:
+ return "+";
+ case OP_TYPE_SUB:
+ return "-";
+ case OP_TYPE_MULTI:
+ return "*";
+ case OP_TYPE_DIV:
+ return "/";
+ case OP_TYPE_REM:
+ return "%";
+ case OP_TYPE_MINUS:
+ return "-";
+ case OP_TYPE_BIT_AND:
+ return "&";
+ case OP_TYPE_BIT_OR:
+ return "|";
+ case OP_TYPE_GREATER_THAN:
+ return ">";
+ case OP_TYPE_GREATER_EQUAL:
+ return ">=";
+ case OP_TYPE_LOWER_THAN:
+ return "<";
+ case OP_TYPE_LOWER_EQUAL:
+ return "<=";
+ case OP_TYPE_EQUAL:
+ return "=";
+ case OP_TYPE_NOT_EQUAL:
+ return "<>";
+ case OP_TYPE_IN:
+ return "IN";
+ case OP_TYPE_NOT_IN:
+ return "NOT IN";
+ case OP_TYPE_LIKE:
+ return "LIKE";
+ case OP_TYPE_NOT_LIKE:
+ return "NOT LIKE";
+ case OP_TYPE_MATCH:
+ return "MATCH";
+ case OP_TYPE_NMATCH:
+ return "NMATCH";
+ case OP_TYPE_IS_NULL:
+ return "IS NULL";
+ case OP_TYPE_IS_NOT_NULL:
+ return "IS NOT NULL";
+ case OP_TYPE_IS_TRUE:
+ return "IS TRUE";
+ case OP_TYPE_IS_FALSE:
+ return "IS FALSE";
+ case OP_TYPE_IS_UNKNOWN:
+ return "IS UNKNOWN";
+ case OP_TYPE_IS_NOT_TRUE:
+ return "IS NOT TRUE";
+ case OP_TYPE_IS_NOT_FALSE:
+ return "IS NOT FALSE";
+ case OP_TYPE_IS_NOT_UNKNOWN:
+ return "IS NOT UNKNOWN";
+ case OP_TYPE_JSON_GET_VALUE:
+ return "=>";
+ case OP_TYPE_JSON_CONTAINS:
+ return "CONTAINS";
+ case OP_TYPE_ASSIGN:
+ return "=";
+ default:
+ break;
+ }
+ return "UNKNOWN";
+}
+
+const char *logicConditionTypeStr(ELogicConditionType type) {
+ switch (type) {
+ case LOGIC_COND_TYPE_AND:
+ return "AND";
+ case LOGIC_COND_TYPE_OR:
+ return "OR";
+ case LOGIC_COND_TYPE_NOT:
+ return "NOT";
+ default:
+ break;
+ }
+ return "UNKNOWN";
+}
int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
switch (pNode->type) {
@@ -94,12 +147,7 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
NODES_ERR_RET(nodesNodeToSQL(pOpNode->pLeft, buf, bufSize, len));
}
- if (pOpNode->opType >= (sizeof(gOperatorStr) / sizeof(gOperatorStr[0]))) {
- nodesError("unknown operation type:%d", pOpNode->opType);
- NODES_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
- }
-
- *len += snprintf(buf + *len, bufSize - *len, " %s ", gOperatorStr[pOpNode->opType]);
+ *len += snprintf(buf + *len, bufSize - *len, " %s ", operatorTypeStr(pOpNode->opType));
if (pOpNode->pRight) {
NODES_ERR_RET(nodesNodeToSQL(pOpNode->pRight, buf, bufSize, len));
@@ -118,7 +166,7 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
FOREACH(node, pLogicNode->pParameterList) {
if (!first) {
- *len += snprintf(buf + *len, bufSize - *len, " %s ", gLogicConditionStr[pLogicNode->condType]);
+ *len += snprintf(buf + *len, bufSize - *len, " %s ", logicConditionTypeStr(pLogicNode->condType));
}
NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len));
first = false;
diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h
index 2249bc7823..308afd467f 100644
--- a/source/libs/parser/inc/parUtil.h
+++ b/source/libs/parser/inc/parUtil.h
@@ -22,6 +22,7 @@ extern "C" {
#include "catalog.h"
#include "os.h"
+#include "parser.h"
#include "query.h"
#define parserFatal(param, ...) qFatal("PARSER: " param, ##__VA_ARGS__)
@@ -44,18 +45,37 @@ typedef struct SParseTablesMetaReq {
SHashObj* pTables;
} SParseTablesMetaReq;
+typedef enum ECatalogReqType {
+ CATALOG_REQ_TYPE_META = 1,
+ CATALOG_REQ_TYPE_VGROUP,
+ CATALOG_REQ_TYPE_BOTH
+} ECatalogReqType;
+
+typedef struct SInsertTablesMetaReq {
+ char dbFName[TSDB_DB_FNAME_LEN];
+ SArray* pTableMetaPos;
+ SArray* pTableMetaReq; // element is SName
+ SArray* pTableVgroupPos;
+ SArray* pTableVgroupReq; // element is SName
+} SInsertTablesMetaReq;
+
typedef struct SParseMetaCache {
- SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
- SHashObj* pDbVgroup; // key is dbFName, element is SArray*
- SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
- SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
- SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
- SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
- SHashObj* pUdf; // key is funcName, element is SFuncInfo*
- SHashObj* pTableIndex; // key is tbFName, element is SArray*
- SHashObj* pTableCfg; // key is tbFName, element is STableCfg*
- SArray* pDnodes; // element is SEpSet
- bool dnodeRequired;
+ SHashObj* pTableMeta; // key is tbFName, element is STableMeta*
+ SHashObj* pDbVgroup; // key is dbFName, element is SArray*
+ SHashObj* pTableVgroup; // key is tbFName, element is SVgroupInfo*
+ SHashObj* pDbCfg; // key is tbFName, element is SDbCfgInfo*
+ SHashObj* pDbInfo; // key is tbFName, element is SDbInfo*
+ SHashObj* pUserAuth; // key is SUserAuthInfo serialized string, element is bool indicating whether or not to pass
+ SHashObj* pUdf; // key is funcName, element is SFuncInfo*
+ SHashObj* pTableIndex; // key is tbFName, element is SArray*
+ SHashObj* pTableCfg; // key is tbFName, element is STableCfg*
+ SArray* pDnodes; // element is SEpSet
+ bool dnodeRequired;
+ SHashObj* pInsertTables; // key is dbName, element is SInsertTablesMetaReq*, for insert
+ const char* pUser;
+ const SArray* pTableMetaData; // pRes = STableMeta*
+ const SArray* pTableVgroupData; // pRes = SVgroupInfo*
+ int32_t sqlTableNum;
} SParseMetaCache;
int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
@@ -72,8 +92,9 @@ STableMeta* tableMetaDup(const STableMeta* pTableMeta);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
-int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
-int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache);
+int32_t buildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq);
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool insertValuesStmt);
int32_t reserveTableMetaInCache(int32_t acctId, const char* pDb, const char* pTable, SParseMetaCache* pMetaCache);
int32_t reserveTableMetaInCacheExt(const SName* pName, SParseMetaCache* pMetaCache);
int32_t reserveDbVgInfoInCache(int32_t acctId, const char* pDb, SParseMetaCache* pMetaCache);
@@ -100,6 +121,12 @@ int32_t getUdfInfoFromCache(SParseMetaCache* pMetaCache, const char* pFunc, SFun
int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName, SArray** pIndexes);
int32_t getTableCfgFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableCfg** pOutput);
int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes);
+int32_t reserveTableMetaInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SParseMetaCache* pMetaCache);
+int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ STableMeta** pMeta);
+int32_t getTableVgroupFromCacheForInsert(SArray* pTableVgroupPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ SVgroupInfo* pVgroup);
void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request);
#ifdef __cplusplus
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index b7532173c8..0922cdb6b9 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -73,6 +73,9 @@ typedef struct SInsertParseContext {
SStmtCallback* pStmtCb;
SParseMetaCache* pMetaCache;
char sTableName[TSDB_TABLE_NAME_LEN];
+ char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW];
+ int64_t memElapsed;
+ int64_t parRowElapsed;
} SInsertParseContext;
typedef struct SInsertParseSyntaxCxt {
@@ -140,9 +143,9 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
}
char name[TSDB_DB_FNAME_LEN] = {0};
strncpy(name, pTableName->z, dbLen);
- dbLen = strdequote(name);
+ int32_t actualDbLen = strdequote(name);
- code = tNameSetDbName(pName, acctId, name, dbLen);
+ code = tNameSetDbName(pName, acctId, name, actualDbLen);
if (code != TSDB_CODE_SUCCESS) {
return buildInvalidOperationMsg(pMsgBuf, msg1);
}
@@ -203,10 +206,11 @@ static int32_t checkAuth(SInsertParseContext* pCxt, char* pDbFname, bool* pPass)
return catalogChkAuth(pBasicCtx->pCatalog, &conn, pBasicCtx->pUser, pDbFname, AUTH_TYPE_WRITE, pPass);
}
-static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool isStb, STableMeta** pTableMeta) {
+static int32_t getTableSchema(SInsertParseContext* pCxt, int32_t tbNo, SName* pTbName, bool isStb,
+ STableMeta** pTableMeta) {
SParseContext* pBasicCtx = pCxt->pComCxt;
if (pBasicCtx->async) {
- return getTableMetaFromCache(pCxt->pMetaCache, pTbName, pTableMeta);
+ return getTableMetaFromCacheForInsert(pBasicCtx->pTableMetaPos, pCxt->pMetaCache, tbNo, pTableMeta);
}
SRequestConnInfo conn = {.pTrans = pBasicCtx->pTransporter,
.requestId = pBasicCtx->requestId,
@@ -219,10 +223,10 @@ static int32_t getTableSchema(SInsertParseContext* pCxt, SName* pTbName, bool is
return catalogGetTableMeta(pBasicCtx->pCatalog, &conn, pTbName, pTableMeta);
}
-static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroupInfo* pVg) {
+static int32_t getTableVgroup(SInsertParseContext* pCxt, int32_t tbNo, SName* pTbName, SVgroupInfo* pVg) {
SParseContext* pBasicCtx = pCxt->pComCxt;
if (pBasicCtx->async) {
- return getTableVgroupFromCache(pCxt->pMetaCache, pTbName, pVg);
+ return getTableVgroupFromCacheForInsert(pBasicCtx->pTableVgroupPos, pCxt->pMetaCache, tbNo, pVg);
}
SRequestConnInfo conn = {.pTrans = pBasicCtx->pTransporter,
.requestId = pBasicCtx->requestId,
@@ -231,28 +235,22 @@ static int32_t getTableVgroup(SInsertParseContext* pCxt, SName* pTbName, SVgroup
return catalogGetTableHashVgroup(pBasicCtx->pCatalog, &conn, pTbName, pVg);
}
-static int32_t getTableMetaImpl(SInsertParseContext* pCxt, SName* name, char* dbFname, bool isStb) {
- bool pass = false;
- CHECK_CODE(checkAuth(pCxt, dbFname, &pass));
- if (!pass) {
- return TSDB_CODE_PAR_PERMISSION_DENIED;
- }
-
- CHECK_CODE(getTableSchema(pCxt, name, isStb, &pCxt->pTableMeta));
+static int32_t getTableMetaImpl(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname, bool isStb) {
+ CHECK_CODE(getTableSchema(pCxt, tbNo, name, isStb, &pCxt->pTableMeta));
if (!isStb) {
SVgroupInfo vg;
- CHECK_CODE(getTableVgroup(pCxt, name, &vg));
+ CHECK_CODE(getTableVgroup(pCxt, tbNo, name, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
}
return TSDB_CODE_SUCCESS;
}
-static int32_t getTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) {
- return getTableMetaImpl(pCxt, name, dbFname, false);
+static int32_t getTableMeta(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname) {
+ return getTableMetaImpl(pCxt, tbNo, name, dbFname, false);
}
-static int32_t getSTableMeta(SInsertParseContext* pCxt, SName* name, char* dbFname) {
- return getTableMetaImpl(pCxt, name, dbFname, true);
+static int32_t getSTableMeta(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* dbFname) {
+ return getTableMetaImpl(pCxt, tbNo, name, dbFname, true);
}
static int32_t getDBCfg(SInsertParseContext* pCxt, const char* pDbFName, SDbCfgInfo* pInfo) {
@@ -1028,13 +1026,13 @@ end:
return code;
}
-static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName* pTableName, const char* pName,
- int32_t len, STableMeta* pMeta) {
+static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, int32_t tbNo, SName* pTableName,
+ const char* pName, int32_t len, STableMeta* pMeta) {
SVgroupInfo vg;
- CHECK_CODE(getTableVgroup(pCxt, pTableName, &vg));
+ CHECK_CODE(getTableVgroup(pCxt, tbNo, pTableName, &vg));
CHECK_CODE(taosHashPut(pCxt->pVgroupsHashObj, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg)));
- pMeta->uid = 0;
+ pMeta->uid = tbNo;
pMeta->vgId = vg.vgId;
pMeta->tableType = TSDB_CHILD_TABLE;
@@ -1084,7 +1082,7 @@ static int32_t ignoreAutoCreateTableClause(SInsertParseContext* pCxt) {
}
// pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)
-static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tbFName) {
+static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* tbFName) {
int32_t len = strlen(tbFName);
STableMeta** pMeta = taosHashGet(pCxt->pSubTableHashObj, tbFName, len);
if (NULL != pMeta) {
@@ -1102,11 +1100,11 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, SName* name, char* tb
tNameGetFullDbName(&sname, dbFName);
strcpy(pCxt->sTableName, sname.tname);
- CHECK_CODE(getSTableMeta(pCxt, &sname, dbFName));
+ CHECK_CODE(getSTableMeta(pCxt, tbNo, &sname, dbFName));
if (TSDB_SUPER_TABLE != pCxt->pTableMeta->tableType) {
return buildInvalidOperationMsg(&pCxt->msg, "create table only from super table is allowed");
}
- CHECK_CODE(storeTableMeta(pCxt, pCxt->pSubTableHashObj, name, tbFName, len, pCxt->pTableMeta));
+ CHECK_CODE(storeTableMeta(pCxt, pCxt->pSubTableHashObj, tbNo, name, tbFName, len, pCxt->pTableMeta));
SSchema* pTagsSchema = getTableTagSchema(pCxt->pTableMeta);
setBoundColumnInfo(&pCxt->tags, pTagsSchema, getNumOfTags(pCxt->pTableMeta));
@@ -1195,7 +1193,7 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks,
tdSRowEnd(pBuilder);
*gotRow = true;
-
+
#ifdef TD_DEBUG_PRINT_ROW
STSchema* pSTSchema = tdGetSTSChemaFromSSChema(schema, spd->numOfCols, 1);
tdSRowPrint(row, pSTSchema, __func__);
@@ -1214,7 +1212,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo
CHECK_CODE(initRowBuilder(&pDataBlock->rowBuilder, pDataBlock->pTableMeta->sversion, &pDataBlock->boundColumnInfo));
(*numOfRows) = 0;
- char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
+ // char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
SToken sToken;
while (1) {
int32_t index = 0;
@@ -1232,7 +1230,7 @@ static int32_t parseValues(SInsertParseContext* pCxt, STableDataBlocks* pDataBlo
}
bool gotRow = false;
- CHECK_CODE(parseOneRow(pCxt, pDataBlock, tinfo.precision, &gotRow, tmpTokenBuf));
+ CHECK_CODE(parseOneRow(pCxt, pDataBlock, tinfo.precision, &gotRow, pCxt->tmpTokenBuf));
if (gotRow) {
pDataBlock->size += extendedRowSize; // len;
}
@@ -1347,7 +1345,9 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa
}
static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) {
- taosMemoryFreeClear(pCxt->pTableMeta);
+ if (!pCxt->pComCxt->async) {
+ taosMemoryFreeClear(pCxt->pTableMeta);
+ }
destroyBoundColumnInfo(&pCxt->tags);
tdDestroySVCreateTbReq(&pCxt->createTblReq);
}
@@ -1365,6 +1365,20 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
destroyBlockArrayList(pCxt->pVgDataBlocks);
}
+static int32_t parseTableName(SInsertParseContext* pCxt, SToken* pTbnameToken, SName* pName, char* pDbFName,
+ char* pTbFName) {
+ int32_t code = createSName(pName, pTbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg);
+ if (TSDB_CODE_SUCCESS == code) {
+ tNameExtractFullName(pName, pTbFName);
+ code = taosHashPut(pCxt->pTableNameHashObj, pTbFName, strlen(pTbFName), pName, sizeof(SName));
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ tNameGetFullDbName(pName, pDbFName);
+ code = taosHashPut(pCxt->pDbFNameHashObj, pDbFName, strlen(pDbFName), pDbFName, TSDB_DB_FNAME_LEN);
+ }
+ return code;
+}
+
// tb_name
// [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)]
// [(field1_name, ...)]
@@ -1372,7 +1386,9 @@ static void destroyInsertParseContext(SInsertParseContext* pCxt) {
// [...];
static int32_t parseInsertBody(SInsertParseContext* pCxt) {
int32_t tbNum = 0;
+ SName name;
char tbFName[TSDB_TABLE_FNAME_LEN];
+ char dbFName[TSDB_DB_FNAME_LEN];
bool autoCreateTbl = false;
// for each table
@@ -1415,20 +1431,15 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
SToken tbnameToken = sToken;
NEXT_TOKEN(pCxt->pSql, sToken);
- SName name;
- CHECK_CODE(createSName(&name, &tbnameToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
-
- tNameExtractFullName(&name, tbFName);
- CHECK_CODE(taosHashPut(pCxt->pTableNameHashObj, tbFName, strlen(tbFName), &name, sizeof(SName)));
- char dbFName[TSDB_DB_FNAME_LEN];
- tNameGetFullDbName(&name, dbFName);
- CHECK_CODE(taosHashPut(pCxt->pDbFNameHashObj, dbFName, strlen(dbFName), dbFName, sizeof(dbFName)));
+ if (!pCxt->pComCxt->async || TK_USING == sToken.type) {
+ CHECK_CODE(parseTableName(pCxt, &tbnameToken, &name, dbFName, tbFName));
+ }
bool existedUsing = false;
// USING clause
if (TK_USING == sToken.type) {
existedUsing = true;
- CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
+ CHECK_CODE(parseUsingClause(pCxt, tbNum, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
}
@@ -1438,22 +1449,31 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
// pSql -> field1_name, ...)
pBoundColsStart = pCxt->pSql;
CHECK_CODE(ignoreBoundColumns(pCxt));
- // CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta)));
NEXT_TOKEN(pCxt->pSql, sToken);
}
if (TK_USING == sToken.type) {
- CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
+ if (pCxt->pComCxt->async) {
+ CHECK_CODE(parseTableName(pCxt, &tbnameToken, &name, dbFName, tbFName));
+ }
+ CHECK_CODE(parseUsingClause(pCxt, tbNum, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
} else if (!existedUsing) {
- CHECK_CODE(getTableMeta(pCxt, &name, dbFName));
+ CHECK_CODE(getTableMeta(pCxt, tbNum, &name, dbFName));
}
STableDataBlocks* dataBuf = NULL;
- CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE,
- sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
- &dataBuf, NULL, &pCxt->createTblReq));
+ if (pCxt->pComCxt->async) {
+ CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, &pCxt->pTableMeta->uid, sizeof(pCxt->pTableMeta->uid),
+ TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk),
+ getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta, &dataBuf, NULL,
+ &pCxt->createTblReq));
+ } else {
+ CHECK_CODE(getDataBlockFromList(pCxt->pTableBlockHashObj, tbFName, strlen(tbFName), TSDB_DEFAULT_PAYLOAD_SIZE,
+ sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
+ &dataBuf, NULL, &pCxt->createTblReq));
+ }
if (NULL != pBoundColsStart) {
char* pCurrPos = pCxt->pSql;
@@ -1532,7 +1552,9 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
.totalNum = 0,
.pOutput = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT),
.pStmtCb = pContext->pStmtCb,
- .pMetaCache = pMetaCache};
+ .pMetaCache = pMetaCache,
+ .memElapsed = 0,
+ .parRowElapsed = 0};
if (pContext->pStmtCb && *pQuery) {
(*pContext->pStmtCb->getExecInfoFn)(pContext->pStmtCb->pStmt, &context.pVgroupsHashObj,
@@ -1547,7 +1569,7 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
} else {
context.pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
context.pTableBlockHashObj =
- taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
}
if (NULL == context.pVgroupsHashObj || NULL == context.pTableBlockHashObj || NULL == context.pSubTableHashObj ||
@@ -1656,24 +1678,24 @@ static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
return TSDB_CODE_SUCCESS;
}
-static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, bool isStable, int32_t tableNo, SToken* pTbToken) {
SName name;
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
- CHECK_CODE(reserveUserAuthInCacheExt(pCxt->pComCxt->pUser, &name, AUTH_TYPE_WRITE, pCxt->pMetaCache));
- CHECK_CODE(reserveTableMetaInCacheExt(&name, pCxt->pMetaCache));
- CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+ CHECK_CODE(reserveTableMetaInCacheForInsert(&name, isStable ? CATALOG_REQ_TYPE_META : CATALOG_REQ_TYPE_BOTH, tableNo,
+ pCxt->pMetaCache));
return TSDB_CODE_SUCCESS;
}
-static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, SToken* pTbToken) {
+static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, int32_t tableNo, SToken* pTbToken) {
SName name;
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
- CHECK_CODE(reserveTableVgroupInCacheExt(&name, pCxt->pMetaCache));
+ CHECK_CODE(reserveTableMetaInCacheForInsert(&name, CATALOG_REQ_TYPE_VGROUP, tableNo, pCxt->pMetaCache));
return TSDB_CODE_SUCCESS;
}
static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
- bool hasData = false;
+ bool hasData = false;
+ int32_t tableNo = 0;
// for each table
while (1) {
SToken sToken;
@@ -1702,9 +1724,9 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
// USING clause
if (TK_USING == sToken.type) {
existedUsing = true;
- CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken));
+ CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, tableNo, &tbnameToken));
NEXT_TOKEN(pCxt->pSql, sToken);
- CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+ CHECK_CODE(collectTableMetaKey(pCxt, true, tableNo, &sToken));
CHECK_CODE(skipUsingClause(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
}
@@ -1717,15 +1739,17 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
if (TK_USING == sToken.type && !existedUsing) {
existedUsing = true;
- CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken));
+ CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, tableNo, &tbnameToken));
NEXT_TOKEN(pCxt->pSql, sToken);
- CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
+ CHECK_CODE(collectTableMetaKey(pCxt, true, tableNo, &sToken));
CHECK_CODE(skipUsingClause(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
- } else {
- CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken));
+ } else if (!existedUsing) {
+ CHECK_CODE(collectTableMetaKey(pCxt, false, tableNo, &tbnameToken));
}
+ ++tableNo;
+
if (TK_VALUES == sToken.type) {
// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
CHECK_CODE(skipValuesClause(pCxt));
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index ef985a3894..3c0d9a5f63 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1399,7 +1399,7 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu
"%s function must be used in select statements", pFunc->functionName);
}
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
- if (QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
+ if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
!isTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"%s function requires valid time series input", pFunc->functionName);
@@ -2037,16 +2037,13 @@ static int32_t setVnodeSysTableVgroupList(STranslateContext* pCxt, SName* pName,
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
- if (TSDB_CODE_SUCCESS == code &&
- 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
- 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) &&
- isSelectStmt(pCxt->pCurrStmt) &&
+ if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
+ 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) && isSelectStmt(pCxt->pCurrStmt) &&
0 == taosArrayGetSize(vgroupList)) {
((SSelectStmt*)pCxt->pCurrStmt)->isEmptyResult = true;
}
- if (TSDB_CODE_SUCCESS == code &&
- 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
+ if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TABLES)) {
code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
}
@@ -2825,6 +2822,29 @@ static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** pOutput) {
return TSDB_CODE_SUCCESS;
}
+static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) {
+ int32_t len = strlen(pInterval->literal);
+
+ char *unit = &pInterval->literal[len - 1];
+ if (*unit == 'n' || *unit == 'y') {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE,
+ "Unsupported time unit in EVERY clause");
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t translateInterpEvery(STranslateContext* pCxt, SNode** pEvery) {
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ code = checkEvery(pCxt, (SValueNode *)(*pEvery));
+ if (TSDB_CODE_SUCCESS == code) {
+ code = translateExpr(pCxt, pEvery);
+ }
+
+ return code;
+}
+
static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect) {
int32_t code = TSDB_CODE_SUCCESS;
@@ -2859,7 +2879,7 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
int32_t code = translateExpr(pCxt, &pSelect->pRange);
if (TSDB_CODE_SUCCESS == code) {
- code = translateExpr(pCxt, &pSelect->pEvery);
+ code = translateInterpEvery(pCxt, &pSelect->pEvery);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateInterpFill(pCxt, pSelect);
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index ae5a281aab..17e78e7806 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -476,9 +476,11 @@ static int32_t buildDbReq(SHashObj* pDbsHash, SArray** pDbs) {
static int32_t buildTableReqFromDb(SHashObj* pDbsHash, SArray** pDbs) {
if (NULL != pDbsHash) {
- *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), sizeof(STablesReq));
if (NULL == *pDbs) {
- return TSDB_CODE_OUT_OF_MEMORY;
+ *pDbs = taosArrayInit(taosHashGetSize(pDbsHash), sizeof(STablesReq));
+ if (NULL == *pDbs) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
}
SParseTablesMetaReq* p = taosHashIterate(pDbsHash, NULL);
while (NULL != p) {
@@ -530,7 +532,62 @@ static int32_t buildUdfReq(SHashObj* pUdfHash, SArray** pUdf) {
return TSDB_CODE_SUCCESS;
}
-int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+static int32_t buildCatalogReqForInsert(SParseContext* pCxt, const SParseMetaCache* pMetaCache,
+ SCatalogReq* pCatalogReq) {
+ int32_t ndbs = taosHashGetSize(pMetaCache->pInsertTables);
+ pCatalogReq->pTableMeta = taosArrayInit(ndbs, sizeof(STablesReq));
+ if (NULL == pCatalogReq->pTableMeta) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCatalogReq->pTableHash = taosArrayInit(ndbs, sizeof(STablesReq));
+ if (NULL == pCatalogReq->pTableHash) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCatalogReq->pUser = taosArrayInit(ndbs, sizeof(SUserAuthInfo));
+ if (NULL == pCatalogReq->pUser) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ pCxt->pTableMetaPos = taosArrayInit(pMetaCache->sqlTableNum, sizeof(int32_t));
+ pCxt->pTableVgroupPos = taosArrayInit(pMetaCache->sqlTableNum, sizeof(int32_t));
+
+ int32_t metaReqNo = 0;
+ int32_t vgroupReqNo = 0;
+ SInsertTablesMetaReq* p = taosHashIterate(pMetaCache->pInsertTables, NULL);
+ while (NULL != p) {
+ STablesReq req = {0};
+ strcpy(req.dbFName, p->dbFName);
+ TSWAP(req.pTables, p->pTableMetaReq);
+ taosArrayPush(pCatalogReq->pTableMeta, &req);
+
+ req.pTables = NULL;
+ TSWAP(req.pTables, p->pTableVgroupReq);
+ taosArrayPush(pCatalogReq->pTableHash, &req);
+
+ int32_t ntables = taosArrayGetSize(p->pTableMetaPos);
+ for (int32_t i = 0; i < ntables; ++i) {
+ taosArrayInsert(pCxt->pTableMetaPos, *(int32_t*)taosArrayGet(p->pTableMetaPos, i), &metaReqNo);
+ ++metaReqNo;
+ }
+
+ ntables = taosArrayGetSize(p->pTableVgroupPos);
+ for (int32_t i = 0; i < ntables; ++i) {
+ taosArrayInsert(pCxt->pTableVgroupPos, *(int32_t*)taosArrayGet(p->pTableVgroupPos, i), &vgroupReqNo);
+ ++vgroupReqNo;
+ }
+
+ SUserAuthInfo auth = {0};
+ strcpy(auth.user, pCxt->pUser);
+ strcpy(auth.dbFName, p->dbFName);
+ auth.type = AUTH_TYPE_WRITE;
+ taosArrayPush(pCatalogReq->pUser, &auth);
+
+ p = taosHashIterate(pMetaCache->pInsertTables, p);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t buildCatalogReqForQuery(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
int32_t code = buildTableReqFromDb(pMetaCache->pTableMeta, &pCatalogReq->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = buildDbReq(pMetaCache->pDbVgroup, &pCatalogReq->pDbVgroup);
@@ -560,6 +617,13 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
return code;
}
+int32_t buildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+ if (NULL != pMetaCache->pInsertTables) {
+ return buildCatalogReqForInsert(pCxt, pMetaCache, pCatalogReq);
+ }
+ return buildCatalogReqForQuery(pMetaCache, pCatalogReq);
+}
+
static int32_t putMetaDataToHash(const char* pKey, int32_t len, const SArray* pData, int32_t index, SHashObj** pHash) {
if (NULL == *pHash) {
*pHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
@@ -647,7 +711,8 @@ static int32_t putUdfToCache(const SArray* pUdfReq, const SArray* pUdfData, SHas
return TSDB_CODE_SUCCESS;
}
-int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+int32_t putMetaDataToCacheForQuery(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
+ SParseMetaCache* pMetaCache) {
int32_t code = putDbTableDataToCache(pCatalogReq->pTableMeta, pMetaData->pTableMeta, &pMetaCache->pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = putDbDataToCache(pCatalogReq->pDbVgroup, pMetaData->pDbVgroup, &pMetaCache->pDbVgroup);
@@ -677,6 +742,30 @@ int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMet
return code;
}
+int32_t putMetaDataToCacheForInsert(const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
+ int32_t ndbs = taosArrayGetSize(pMetaData->pUser);
+ for (int32_t i = 0; i < ndbs; ++i) {
+ SMetaRes* pRes = taosArrayGet(pMetaData->pUser, i);
+ if (TSDB_CODE_SUCCESS != pRes->code) {
+ return pRes->code;
+ }
+ if (!(*(bool*)pRes->pRes)) {
+ return TSDB_CODE_PAR_PERMISSION_DENIED;
+ }
+ }
+ pMetaCache->pTableMetaData = pMetaData->pTableMeta;
+ pMetaCache->pTableVgroupData = pMetaData->pTableHash;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t putMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool insertValuesStmt) {
+ if (insertValuesStmt) {
+ return putMetaDataToCacheForInsert(pMetaData, pMetaCache);
+ }
+ return putMetaDataToCacheForQuery(pCatalogReq, pMetaData, pMetaCache);
+}
+
static int32_t reserveTableReqInCacheImpl(const char* pTbFName, int32_t len, SHashObj** pTables) {
if (NULL == *pTables) {
*pTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
@@ -977,6 +1066,82 @@ int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes) {
return TSDB_CODE_SUCCESS;
}
+static int32_t reserveTableReqInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SInsertTablesMetaReq* pReq) {
+ switch (reqType) {
+ case CATALOG_REQ_TYPE_META:
+ taosArrayPush(pReq->pTableMetaReq, pName);
+ taosArrayPush(pReq->pTableMetaPos, &tableNo);
+ break;
+ case CATALOG_REQ_TYPE_VGROUP:
+ taosArrayPush(pReq->pTableVgroupReq, pName);
+ taosArrayPush(pReq->pTableVgroupPos, &tableNo);
+ break;
+ case CATALOG_REQ_TYPE_BOTH:
+ taosArrayPush(pReq->pTableMetaReq, pName);
+ taosArrayPush(pReq->pTableMetaPos, &tableNo);
+ taosArrayPush(pReq->pTableVgroupReq, pName);
+ taosArrayPush(pReq->pTableVgroupPos, &tableNo);
+ break;
+ default:
+ break;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t reserveTableReqInDbCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SHashObj* pDbs) {
+ SInsertTablesMetaReq req = {.pTableMetaReq = taosArrayInit(4, sizeof(SName)),
+ .pTableMetaPos = taosArrayInit(4, sizeof(int32_t)),
+ .pTableVgroupReq = taosArrayInit(4, sizeof(SName)),
+ .pTableVgroupPos = taosArrayInit(4, sizeof(int32_t))};
+ tNameGetFullDbName(pName, req.dbFName);
+ int32_t code = reserveTableReqInCacheForInsert(pName, reqType, tableNo, &req);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = taosHashPut(pDbs, pName->dbname, strlen(pName->dbname), &req, sizeof(SInsertTablesMetaReq));
+ }
+ return code;
+}
+
+int32_t reserveTableMetaInCacheForInsert(const SName* pName, ECatalogReqType reqType, int32_t tableNo,
+ SParseMetaCache* pMetaCache) {
+ if (NULL == pMetaCache->pInsertTables) {
+ pMetaCache->pInsertTables = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ if (NULL == pMetaCache->pInsertTables) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ pMetaCache->sqlTableNum = tableNo;
+ SInsertTablesMetaReq* pReq = taosHashGet(pMetaCache->pInsertTables, pName->dbname, strlen(pName->dbname));
+ if (NULL == pReq) {
+ return reserveTableReqInDbCacheForInsert(pName, reqType, tableNo, pMetaCache->pInsertTables);
+ }
+ return reserveTableReqInCacheForInsert(pName, reqType, tableNo, pReq);
+}
+
+int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ STableMeta** pMeta) {
+ int32_t reqIndex = *(int32_t*)taosArrayGet(pTableMetaPos, tableNo);
+ SMetaRes* pRes = taosArrayGet(pMetaCache->pTableMetaData, reqIndex);
+ if (TSDB_CODE_SUCCESS == pRes->code) {
+ *pMeta = pRes->pRes;
+ if (NULL == *pMeta) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ return pRes->code;
+}
+
+int32_t getTableVgroupFromCacheForInsert(SArray* pTableVgroupPos, SParseMetaCache* pMetaCache, int32_t tableNo,
+ SVgroupInfo* pVgroup) {
+ int32_t reqIndex = *(int32_t*)taosArrayGet(pTableVgroupPos, tableNo);
+ SMetaRes* pRes = taosArrayGet(pMetaCache->pTableVgroupData, reqIndex);
+ if (TSDB_CODE_SUCCESS == pRes->code) {
+ memcpy(pVgroup, pRes->pRes, sizeof(SVgroupInfo));
+ }
+ return pRes->code;
+}
+
void destoryParseTablesMetaReqHash(SHashObj* pHash) {
SParseTablesMetaReq* p = taosHashIterate(pHash, NULL);
while (NULL != p) {
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 34cd783ace..7e27132f3c 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -185,7 +185,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
code = parseSqlSyntax(pCxt, pQuery, &metaCache);
}
if (TSDB_CODE_SUCCESS == code) {
- code = buildCatalogReq(&metaCache, pCatalogReq);
+ code = buildCatalogReq(pCxt, &metaCache, pCatalogReq);
}
destoryParseMetaCache(&metaCache, true);
terrno = code;
@@ -195,7 +195,7 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
const struct SMetaData* pMetaData, SQuery* pQuery) {
SParseMetaCache metaCache = {0};
- int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache);
+ int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache, NULL == pQuery->pRoot);
if (TSDB_CODE_SUCCESS == code) {
if (NULL == pQuery->pRoot) {
code = parseInsertSql(pCxt, &pQuery, &metaCache);
diff --git a/source/libs/parser/test/parInsertTest.cpp b/source/libs/parser/test/parInsertTest.cpp
index 7302491ba7..ddf15ec67b 100644
--- a/source/libs/parser/test/parInsertTest.cpp
+++ b/source/libs/parser/test/parInsertTest.cpp
@@ -13,21 +13,13 @@
* along with this program. If not, see .
*/
-#include
-
#include
-#include "mockCatalogService.h"
-#include "os.h"
-#include "parInt.h"
+#include "parTestUtil.h"
using namespace std;
-using namespace std::placeholders;
-using namespace testing;
-namespace {
-string toString(int32_t code) { return tstrerror(code); }
-} // namespace
+namespace ParserTest {
// syntax:
// INSERT INTO
@@ -36,259 +28,60 @@ string toString(int32_t code) { return tstrerror(code); }
// [(field1_name, ...)]
// VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
// [...];
-class InsertTest : public Test {
- protected:
- InsertTest() : res_(nullptr) {}
- ~InsertTest() { reset(); }
-
- void setDatabase(const string& acctId, const string& db) {
- acctId_ = acctId;
- db_ = db;
- }
-
- void bind(const char* sql) {
- reset();
- cxt_.acctId = atoi(acctId_.c_str());
- cxt_.db = (char*)db_.c_str();
- strcpy(sqlBuf_, sql);
- cxt_.sqlLen = strlen(sql);
- sqlBuf_[cxt_.sqlLen] = '\0';
- cxt_.pSql = sqlBuf_;
- }
-
- int32_t run() {
- code_ = parseInsertSql(&cxt_, &res_, nullptr);
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- }
- return code_;
- }
-
- int32_t runAsync() {
- cxt_.async = true;
- bool request = true;
- unique_ptr > metaCache(
- new SParseMetaCache(), std::bind(_destoryParseMetaCache, _1, cref(request)));
- code_ = parseInsertSyntax(&cxt_, &res_, metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "parseInsertSyntax code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- unique_ptr catalogReq(new SCatalogReq(),
- MockCatalogService::destoryCatalogReq);
- code_ = buildCatalogReq(metaCache.get(), catalogReq.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "buildCatalogReq code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- unique_ptr metaData(new SMetaData(), MockCatalogService::destoryMetaData);
- g_mockCatalogService->catalogGetAllMeta(catalogReq.get(), metaData.get());
-
- metaCache.reset(new SParseMetaCache());
- request = false;
- code_ = putMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "putMetaDataToCache code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- code_ = parseInsertSql(&cxt_, &res_, metaCache.get());
- if (code_ != TSDB_CODE_SUCCESS) {
- cout << "parseInsertSql code:" << toString(code_) << ", msg:" << errMagBuf_ << endl;
- return code_;
- }
-
- return code_;
- }
-
- void dumpReslut() {
- SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_);
- size_t num = taosArrayGetSize(pStmt->pDataBlocks);
- cout << "payloadType:" << (int32_t)pStmt->payloadType << ", insertType:" << pStmt->insertType
- << ", numOfVgs:" << num << endl;
- for (size_t i = 0; i < num; ++i) {
- SVgDataBlocks* vg = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, i);
- cout << "vgId:" << vg->vg.vgId << ", numOfTables:" << vg->numOfTables << ", dataSize:" << vg->size << endl;
- SSubmitReq* submit = (SSubmitReq*)vg->pData;
- cout << "length:" << ntohl(submit->length) << ", numOfBlocks:" << ntohl(submit->numOfBlocks) << endl;
- int32_t numOfBlocks = ntohl(submit->numOfBlocks);
- SSubmitBlk* blk = (SSubmitBlk*)(submit + 1);
- for (int32_t i = 0; i < numOfBlocks; ++i) {
- cout << "Block:" << i << endl;
- cout << "\tuid:" << be64toh(blk->uid) << ", tid:" << be64toh(blk->suid) << ", sversion:" << ntohl(blk->sversion)
- << ", dataLen:" << ntohl(blk->dataLen) << ", schemaLen:" << ntohl(blk->schemaLen)
- << ", numOfRows:" << ntohl(blk->numOfRows) << endl;
- blk = (SSubmitBlk*)(blk->data + ntohl(blk->dataLen));
- }
- }
- }
-
- void checkReslut(int32_t numOfTables, int32_t numOfRows1, int32_t numOfRows2 = -1) {
- SVnodeModifOpStmt* pStmt = getVnodeModifStmt(res_);
- ASSERT_EQ(pStmt->payloadType, PAYLOAD_TYPE_KV);
- ASSERT_EQ(pStmt->insertType, TSDB_QUERY_TYPE_INSERT);
- size_t num = taosArrayGetSize(pStmt->pDataBlocks);
- ASSERT_GE(num, 0);
- for (size_t i = 0; i < num; ++i) {
- SVgDataBlocks* vg = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, i);
- ASSERT_EQ(vg->numOfTables, numOfTables);
- ASSERT_GE(vg->size, 0);
- SSubmitReq* submit = (SSubmitReq*)vg->pData;
- ASSERT_GE(ntohl(submit->length), 0);
- ASSERT_GE(ntohl(submit->numOfBlocks), 0);
- int32_t numOfBlocks = ntohl(submit->numOfBlocks);
- SSubmitBlk* blk = (SSubmitBlk*)(submit + 1);
- for (int32_t i = 0; i < numOfBlocks; ++i) {
- ASSERT_EQ(ntohl(blk->numOfRows), (0 == i ? numOfRows1 : (numOfRows2 > 0 ? numOfRows2 : numOfRows1)));
- blk = (SSubmitBlk*)(blk->data + ntohl(blk->dataLen));
- }
- }
- }
-
- private:
- static const int max_err_len = 1024;
- static const int max_sql_len = 1024 * 1024;
-
- static void _destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) {
- destoryParseMetaCache(pMetaCache, request);
- delete pMetaCache;
- }
-
- void reset() {
- memset(&cxt_, 0, sizeof(cxt_));
- memset(errMagBuf_, 0, max_err_len);
- cxt_.pMsg = errMagBuf_;
- cxt_.msgLen = max_err_len;
- code_ = TSDB_CODE_SUCCESS;
- qDestroyQuery(res_);
- res_ = nullptr;
- }
-
- SVnodeModifOpStmt* getVnodeModifStmt(SQuery* pQuery) { return (SVnodeModifOpStmt*)pQuery->pRoot; }
-
- string acctId_;
- string db_;
- char errMagBuf_[max_err_len];
- char sqlBuf_[max_sql_len];
- SParseContext cxt_;
- int32_t code_;
- SQuery* res_;
-};
+class ParserInsertTest : public ParserTestBase {};
// INSERT INTO tb_name [(field1_name, ...)] VALUES (field1_value, ...)
-TEST_F(InsertTest, singleTableSingleRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, singleTableSingleRowTest) {
+ useDb("root", "test");
- bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 1);
+ run("INSERT INTO t1 VALUES (now, 1, 'beijing', 3, 4, 5)");
- bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
-
- bind("insert into t1 values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 1);
-
- bind("insert into t1 (ts, c1, c2, c3, c4, c5) values (now, 1, 'beijing', 3, 4, 5)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO t1 (ts, c1, c2, c3, c4, c5) VALUES (now, 1, 'beijing', 3, 4, 5)");
}
// INSERT INTO tb_name VALUES (field1_value, ...)(field1_value, ...)
-TEST_F(InsertTest, singleTableMultiRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, singleTableMultiRowTest) {
+ useDb("root", "test");
- bind(
- "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
+ run("INSERT INTO t1 VALUES (now, 1, 'beijing', 3, 4, 5)"
+ "(now+1s, 2, 'shanghai', 6, 7, 8)"
"(now+2s, 3, 'guangzhou', 9, 10, 11)");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 3);
-
- bind(
- "insert into t1 values (now, 1, 'beijing', 3, 4, 5)(now+1s, 2, 'shanghai', 6, 7, 8)"
- "(now+2s, 3, 'guangzhou', 9, 10, 11)");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
}
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
-TEST_F(InsertTest, multiTableSingleRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, multiTableSingleRowTest) {
+ useDb("root", "test");
- bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(2, 1);
-
- bind("insert into st1s1 values (now, 1, \"beijing\") st1s2 values (now, 10, \"131028\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 VALUES (now, 1, 'beijing') st1s2 VALUES (now, 10, '131028')");
}
// INSERT INTO tb1_name VALUES (field1_value, ...) tb2_name VALUES (field1_value, ...)
-TEST_F(InsertTest, multiTableMultiRowTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, multiTableMultiRowTest) {
+ useDb("root", "test");
- bind(
- "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"
- " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(2, 3, 2);
-
- bind(
- "insert into st1s1 values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")"
- " st1s2 values (now, 10, \"131028\")(now+1s, 20, \"132028\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO "
+ "st1s1 VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou') "
+ "st1s2 VALUES (now, 10, '131028')(now+1s, 20, '132028')");
}
// INSERT INTO
// tb1_name USING st1_name [(tag1_name, ...)] TAGS (tag1_value, ...) VALUES (field1_value, ...)
// tb2_name USING st2_name [(tag1_name, ...)] TAGS (tag1_value, ...) VALUES (field1_value, ...)
-TEST_F(InsertTest, autoCreateTableTest) {
- setDatabase("root", "test");
+TEST_F(ParserInsertTest, autoCreateTableTest) {
+ useDb("root", "test");
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) "
- "values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
- dumpReslut();
- checkReslut(1, 3);
+ run("INSERT INTO st1s1 USING st1 TAGS(1, 'wxy', now) "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
- "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 USING st1 (tag1, tag2) TAGS(1, 'wxy') (ts, c1, c2) "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) "
- "values (now, 1, \"beijing\")(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO st1s1 (ts, c1, c2) USING st1 (tag1, tag2) TAGS(1, 'wxy') "
+ "VALUES (now, 1, 'beijing')(now+1s, 2, 'shanghai')(now+2s, 3, 'guangzhou')");
- bind(
- "insert into st1s1 using st1 (tag1, tag2) tags(1, 'wxy') values (now, 1, \"beijing\")"
- "(now+1s, 2, \"shanghai\")(now+2s, 3, \"guangzhou\")");
- ASSERT_EQ(runAsync(), TSDB_CODE_SUCCESS);
-
- bind(
- "insert into st1s1 using st1 tags(1, 'wxy', now) values (now, 1, \"beijing\")"
- "st1s1 using st1 tags(1, 'wxy', now) values (now+1s, 2, \"shanghai\")");
- ASSERT_EQ(run(), TSDB_CODE_SUCCESS);
+ run("INSERT INTO "
+ "st1s1 USING st1 (tag1, tag2) TAGS(1, 'wxy') (ts, c1, c2) VALUES (now, 1, 'beijing') "
+ "st1s2 (ts, c1, c2) USING st1 TAGS(2, 'abc', now) VALUES (now+1s, 2, 'shanghai')");
}
-TEST_F(InsertTest, toleranceTest) {
- setDatabase("root", "test");
-
- bind("insert into");
- ASSERT_NE(run(), TSDB_CODE_SUCCESS);
- bind("insert into t");
- ASSERT_NE(run(), TSDB_CODE_SUCCESS);
-
- bind("insert into");
- ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
- bind("insert into t");
- ASSERT_NE(runAsync(), TSDB_CODE_SUCCESS);
-}
+} // namespace ParserTest
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 3fe4b533e4..98281b7bf0 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -225,16 +225,17 @@ class ParserTestBaseImpl {
DO_WITH_THROW(collectMetaKey, pCxt, pQuery, pMetaCache);
}
- void doBuildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
- DO_WITH_THROW(buildCatalogReq, pMetaCache, pCatalogReq);
+ void doBuildCatalogReq(SParseContext* pCxt, const SParseMetaCache* pMetaCache, SCatalogReq* pCatalogReq) {
+ DO_WITH_THROW(buildCatalogReq, pCxt, pMetaCache, pCatalogReq);
}
void doGetAllMeta(const SCatalogReq* pCatalogReq, SMetaData* pMetaData) {
DO_WITH_THROW(g_mockCatalogService->catalogGetAllMeta, pCatalogReq, pMetaData);
}
- void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache) {
- DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache);
+ void doPutMetaDataToCache(const SCatalogReq* pCatalogReq, const SMetaData* pMetaData, SParseMetaCache* pMetaCache,
+ bool isInsertValues) {
+ DO_WITH_THROW(putMetaDataToCache, pCatalogReq, pMetaData, pMetaCache, isInsertValues);
}
void doAuthenticate(SParseContext* pCxt, SQuery* pQuery, SParseMetaCache* pMetaCache) {
@@ -261,7 +262,9 @@ class ParserTestBaseImpl {
void doParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatalogReq) {
DO_WITH_THROW(qParseSqlSyntax, pCxt, pQuery, pCatalogReq);
ASSERT_NE(*pQuery, nullptr);
- res_.parsedAst_ = toString((*pQuery)->pRoot);
+ if (nullptr != (*pQuery)->pRoot) {
+ res_.parsedAst_ = toString((*pQuery)->pRoot);
+ }
}
void doAnalyseSqlSemantic(SParseContext* pCxt, const SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
@@ -270,6 +273,17 @@ class ParserTestBaseImpl {
res_.calcConstAst_ = toString(pQuery->pRoot);
}
+ void doParseInsertSql(SParseContext* pCxt, SQuery** pQuery, SParseMetaCache* pMetaCache) {
+ DO_WITH_THROW(parseInsertSql, pCxt, pQuery, pMetaCache);
+ ASSERT_NE(*pQuery, nullptr);
+ res_.parsedAst_ = toString((*pQuery)->pRoot);
+ }
+
+ void doParseInsertSyntax(SParseContext* pCxt, SQuery** pQuery, SParseMetaCache* pMetaCache) {
+ DO_WITH_THROW(parseInsertSyntax, pCxt, pQuery, pMetaCache);
+ ASSERT_NE(*pQuery, nullptr);
+ }
+
string toString(const SNode* pRoot) {
char* pStr = NULL;
int32_t len = 0;
@@ -287,15 +301,20 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
- doParse(&cxt, query.get());
- SQuery* pQuery = *(query.get());
+ if (qIsInsertValuesSql(cxt.pSql, cxt.sqlLen)) {
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParseInsertSql(&cxt, query.get(), nullptr);
+ } else {
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ doParse(&cxt, query.get());
+ SQuery* pQuery = *(query.get());
- doAuthenticate(&cxt, pQuery, nullptr);
+ doAuthenticate(&cxt, pQuery, nullptr);
- doTranslate(&cxt, pQuery, nullptr);
+ doTranslate(&cxt, pQuery, nullptr);
- doCalculateConstant(&cxt, pQuery);
+ doCalculateConstant(&cxt, pQuery);
+ }
if (g_dump) {
dump();
@@ -338,17 +357,22 @@ class ParserTestBaseImpl {
setParseContext(sql, &cxt, true);
unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
- doParse(&cxt, query.get());
- SQuery* pQuery = *(query.get());
-
- bool request = true;
+ bool request = true;
unique_ptr > metaCache(
new SParseMetaCache(), bind(_destoryParseMetaCache, _1, cref(request)));
- doCollectMetaKey(&cxt, pQuery, metaCache.get());
+ bool isInsertValues = qIsInsertValuesSql(cxt.pSql, cxt.sqlLen);
+ if (isInsertValues) {
+ doParseInsertSyntax(&cxt, query.get(), metaCache.get());
+ } else {
+ doParse(&cxt, query.get());
+ doCollectMetaKey(&cxt, *(query.get()), metaCache.get());
+ }
+
+ SQuery* pQuery = *(query.get());
unique_ptr catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
- doBuildCatalogReq(metaCache.get(), catalogReq.get());
+ doBuildCatalogReq(&cxt, metaCache.get(), catalogReq.get());
string err;
thread t1([&]() {
@@ -358,13 +382,17 @@ class ParserTestBaseImpl {
metaCache.reset(new SParseMetaCache());
request = false;
- doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get());
+ doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get(), isInsertValues);
- doAuthenticate(&cxt, pQuery, metaCache.get());
+ if (isInsertValues) {
+ doParseInsertSql(&cxt, query.get(), metaCache.get());
+ } else {
+ doAuthenticate(&cxt, pQuery, metaCache.get());
- doTranslate(&cxt, pQuery, metaCache.get());
+ doTranslate(&cxt, pQuery, metaCache.get());
- doCalculateConstant(&cxt, pQuery);
+ doCalculateConstant(&cxt, pQuery);
+ }
} catch (const TerminateFlag& e) {
// success and terminate
} catch (const runtime_error& e) {
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index c843dd0a67..71f084d412 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -1002,7 +1002,7 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
int32_t code =
nodesCollectColumns(pSelect, SQL_CLAUSE_PARTITION_BY, NULL, COLLECT_COL_TYPE_ALL, &pPartition->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pPartition->node.pTargets) {
- code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesListGetNode(pCxt->pCurrRoot->pTargets, 0));
+ code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
}
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index e06b752862..862d142100 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -1,3 +1,5 @@
+#include "qworker.h"
+
#include "dataSinkMgt.h"
#include "executor.h"
#include "planner.h"
@@ -7,7 +9,6 @@
#include "tcommon.h"
#include "tmsg.h"
#include "tname.h"
-#include "qworker.h"
SQWorkerMgmt gQwMgmt = {
.lock = 0,
@@ -15,7 +16,6 @@ SQWorkerMgmt gQwMgmt = {
.qwNum = 0,
};
-
int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) {
int32_t code = 0;
SSchedulerHbRsp rsp = {0};
@@ -26,7 +26,7 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re
QW_LOCK(QW_WRITE, &sch->hbConnLock);
sch->hbBrokenTs = taosGetTimestampMs();
-
+
if (qwMsg->connInfo.handle == sch->hbConnInfo.handle) {
tmsgReleaseHandle(&sch->hbConnInfo, TAOS_CONN_SERVER);
sch->hbConnInfo.handle = NULL;
@@ -44,8 +44,8 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re
QW_RET(TSDB_CODE_SUCCESS);
}
-static void freeItem(void* param) {
- SExplainExecInfo* pInfo = param;
+static void freeItem(void *param) {
+ SExplainExecInfo *pInfo = param;
taosMemoryFree(pInfo->verboseInfo);
}
@@ -54,7 +54,7 @@ int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
if (TASK_TYPE_TEMP == ctx->taskType && taskHandle) {
if (ctx->explain) {
- SArray* execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo));
+ SArray *execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo));
QW_ERR_RET(qGetExplainExecInfo(taskHandle, execInfoList));
SRpcHandleInfo connInfo = ctx->ctrlConnInfo;
@@ -81,7 +81,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
qTaskInfo_t taskHandle = ctx->taskHandle;
DataSinkHandle sinkHandle = ctx->sinkHandle;
- SArray* pResList = taosArrayInit(4, POINTER_BYTES);
+ SArray *pResList = taosArrayInit(4, POINTER_BYTES);
while (true) {
QW_TASK_DLOG("start to execTask, loopIdx:%d", i++);
@@ -95,7 +95,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
} else {
QW_TASK_DLOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
}
- QW_ERR_RET(code);
+ QW_ERR_JRET(code);
}
}
@@ -105,7 +105,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
QW_TASK_DLOG("qExecTask end with empty res, useconds:%" PRIu64, useconds);
dsEndPut(sinkHandle, useconds);
- QW_ERR_RET(qwHandleTaskComplete(QW_FPARAMS(), ctx));
+ QW_ERR_JRET(qwHandleTaskComplete(QW_FPARAMS(), ctx));
if (queryStop) {
*queryStop = true;
@@ -114,7 +114,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
break;
}
- for(int32_t j = 0; j < taosArrayGetSize(pResList); ++j) {
+ for (int32_t j = 0; j < taosArrayGetSize(pResList); ++j) {
SSDataBlock *pRes = taosArrayGetP(pResList, j);
ASSERT(pRes->info.rows > 0);
@@ -122,7 +122,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
code = dsPutDataBlock(sinkHandle, &inputData, &qcontinue);
if (code) {
QW_TASK_ELOG("dsPutDataBlock failed, code:%x - %s", code, tstrerror(code));
- QW_ERR_RET(code);
+ QW_ERR_JRET(code);
}
QW_TASK_DLOG("data put into sink, rows:%d, continueExecTask:%d", pRes->info.rows, qcontinue);
@@ -132,7 +132,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
if (queryStop) {
*queryStop = true;
}
-
+
break;
}
@@ -151,6 +151,11 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
taosArrayDestroy(pResList);
QW_RET(code);
+
+_return:
+ taosArrayDestroy(pResList);
+
+ return code;
}
int32_t qwGenerateSchHbRsp(SQWorker *mgmt, SQWSchStatus *sch, SQWHbInfo *hbInfo) {
@@ -222,7 +227,8 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
QW_ERR_RET(code);
}
- QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %d", pOutput->numOfBlocks, pOutput->numOfRows);
+ QW_TASK_DLOG("no more data in sink and query end, fetched blocks %d rows %d", pOutput->numOfBlocks,
+ pOutput->numOfRows);
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
if (NULL == rsp) {
@@ -266,7 +272,8 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
pOutput->numOfBlocks++;
if (DS_BUF_EMPTY == pOutput->bufStatus && pOutput->queryEnd) {
- QW_TASK_DLOG("task all data fetched and done, fetched blocks %d rows %d", pOutput->numOfBlocks, pOutput->numOfRows);
+ QW_TASK_DLOG("task all data fetched and done, fetched blocks %d rows %d", pOutput->numOfBlocks,
+ pOutput->numOfRows);
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
break;
}
@@ -288,10 +295,10 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
}
int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes) {
- int64_t len = 0;
- bool queryEnd = false;
- int32_t code = 0;
- SOutputData output = {0};
+ int64_t len = 0;
+ bool queryEnd = false;
+ int32_t code = 0;
+ SOutputData output = {0};
dsGetDataLength(ctx->sinkHandle, &len, &queryEnd);
@@ -304,7 +311,7 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes
if (NULL == output.pData) {
QW_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
code = dsGetDataBlock(ctx->sinkHandle, &output);
if (code) {
QW_TASK_ELOG("dsGetDataBlock failed, code:%x - %s", code, tstrerror(code));
@@ -312,8 +319,8 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes
QW_ERR_RET(code);
}
- SDeleterRes* pDelRes = (SDeleterRes*)output.pData;
-
+ SDeleterRes *pDelRes = (SDeleterRes *)output.pData;
+
pRes->suid = pDelRes->suid;
pRes->uidList = pDelRes->uidList;
pRes->skey = pDelRes->skey;
@@ -322,14 +329,13 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, SDeleteRes *pRes
strcpy(pRes->tableFName, pDelRes->tableName);
strcpy(pRes->tsColName, pDelRes->tsColName);
taosMemoryFree(output.pData);
-
+
return TSDB_CODE_SUCCESS;
}
-
int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) {
- int32_t code = 0;
- SQWTaskCtx *ctx = NULL;
+ int32_t code = 0;
+ SQWTaskCtx *ctx = NULL;
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
@@ -355,8 +361,8 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
- //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
break;
@@ -391,8 +397,8 @@ int32_t qwHandlePrePhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inpu
if (QW_EVENT_RECEIVED(ctx, QW_EVENT_DROP)) {
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
- //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
}
@@ -428,9 +434,9 @@ _return:
}
int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *input, SQWPhaseOutput *output) {
- int32_t code = 0;
- SQWTaskCtx *ctx = NULL;
- SRpcHandleInfo connInfo = {0};
+ int32_t code = 0;
+ SQWTaskCtx *ctx = NULL;
+ SRpcHandleInfo connInfo = {0};
QW_TASK_DLOG("start to handle event at phase %s", qwPhaseStr(phase));
@@ -449,8 +455,8 @@ int32_t qwHandlePostPhaseEvents(QW_FPARAMS_DEF, int8_t phase, SQWPhaseInput *inp
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}
- //qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
- //QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ // qwBuildAndSendDropRsp(&ctx->ctrlConnInfo, code);
+ // QW_TASK_DLOG("drop rsp send, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
QW_ERR_JRET(qwDropTask(QW_FPARAMS()));
QW_ERR_JRET(TSDB_CODE_QRY_TASK_DROPPED);
@@ -473,14 +479,14 @@ _return:
if (QW_PHASE_POST_QUERY == phase && ctx) {
ctx->queryRsped = true;
- bool rsped = false;
+ bool rsped = false;
SQWMsg qwMsg = {.msgType = ctx->msgType, .connInfo = ctx->ctrlConnInfo};
qwDbgSimulateRedirect(&qwMsg, ctx, &rsped);
qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
if (!rsped) {
qwBuildAndSendQueryRsp(input->msgType + 1, &ctx->ctrlConnInfo, code, ctx);
QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
- }
+ }
}
if (ctx) {
@@ -507,7 +513,6 @@ int32_t qwAbortPrerocessQuery(QW_FPARAMS_DEF) {
QW_RET(TSDB_CODE_SUCCESS);
}
-
int32_t qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
int32_t code = 0;
bool queryRsped = false;
@@ -537,8 +542,7 @@ _return:
QW_RET(TSDB_CODE_SUCCESS);
}
-
-int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) {
+int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) {
int32_t code = 0;
bool queryRsped = false;
SSubplan *plan = NULL;
@@ -556,7 +560,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) {
ctx->needFetch = qwMsg->msgInfo.needFetch;
ctx->msgType = qwMsg->msgType;
- //QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg);
+ // QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg);
code = qStringToSubplan(qwMsg->msg, &plan);
if (TSDB_CODE_SUCCESS != code) {
@@ -594,7 +598,7 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char* sql) {
_return:
taosMemoryFree(sql);
-
+
input.code = code;
input.msgType = qwMsg->msgType;
code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_QUERY, &input, NULL);
@@ -648,7 +652,7 @@ int32_t qwProcessCQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
qwBuildAndSendFetchRsp(ctx->fetchType, &qwMsg->connInfo, rsp, dataLen, code);
rsp = NULL;
-
+
QW_TASK_DLOG("fetch rsp send, handle:%p, code:%x - %s, dataLen:%d", qwMsg->connInfo.handle, code,
tstrerror(code), dataLen);
} else {
@@ -754,13 +758,13 @@ _return:
if (code || rsp) {
bool rsped = false;
if (ctx) {
- qwDbgSimulateRedirect(qwMsg, ctx, &rsped);
+ qwDbgSimulateRedirect(qwMsg, ctx, &rsped);
qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
}
if (!rsped) {
qwBuildAndSendFetchRsp(qwMsg->msgType + 1, &qwMsg->connInfo, rsp, dataLen, code);
- QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1), qwMsg->connInfo.handle, code, tstrerror(code),
- dataLen);
+ QW_TASK_DLOG("%s send, handle:%p, code:%x - %s, dataLen:%d", TMSG_INFO(qwMsg->msgType + 1),
+ qwMsg->connInfo.handle, code, tstrerror(code), dataLen);
}
}
@@ -919,10 +923,11 @@ void qwProcessHbTimerEvent(void *param, void *tmrId) {
uint64_t *sId = taosHashGetKey(pIter, NULL);
QW_TLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId);
- if (sch->hbBrokenTs > 0 && ((currentMs - sch->hbBrokenTs) > QW_SCH_TIMEOUT_MSEC) && taosHashGetSize(sch->tasksHash) <= 0) {
+ if (sch->hbBrokenTs > 0 && ((currentMs - sch->hbBrokenTs) > QW_SCH_TIMEOUT_MSEC) &&
+ taosHashGetSize(sch->tasksHash) <= 0) {
taosArrayPush(pExpiredSch, sId);
}
-
+
pIter = taosHashIterate(mgmt->schHash, pIter);
continue;
}
@@ -998,7 +1003,6 @@ _return:
QW_RET(TSDB_CODE_SUCCESS);
}
-
int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) {
if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) {
qError("invalid param to init qworker");
@@ -1119,12 +1123,12 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt
QW_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
- SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
+ SQWorker *mgmt = (SQWorker *)qWorkerMgmt;
SDataSinkStat sinkStat = {0};
-
+
dsDataSinkGetCacheSize(&sinkStat);
pStat->cacheDataSize = sinkStat.cachedSize;
-
+
pStat->queryProcessed = QW_STAT_GET(mgmt->stat.msgStat.queryProcessed);
pStat->cqueryProcessed = QW_STAT_GET(mgmt->stat.msgStat.cqueryProcessed);
pStat->fetchProcessed = QW_STAT_GET(mgmt->stat.msgStat.fetchProcessed);
@@ -1139,6 +1143,3 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt
return TSDB_CODE_SUCCESS;
}
-
-
-
diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h
index 54e873065b..23693c785a 100644
--- a/source/libs/scalar/inc/filterInt.h
+++ b/source/libs/scalar/inc/filterInt.h
@@ -350,7 +350,6 @@ struct SFilterInfo {
extern bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right);
extern __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr);
-extern OptrStr gOptrStr[];
#ifdef __cplusplus
}
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 1664a4d612..4377dbf14e 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -24,46 +24,6 @@
#include "ttime.h"
#include "functionMgt.h"
-OptrStr gOptrStr[] = {
- {0, "invalid"},
- {OP_TYPE_ADD, "+"},
- {OP_TYPE_SUB, "-"},
- {OP_TYPE_MULTI, "*"},
- {OP_TYPE_DIV, "/"},
- {OP_TYPE_REM, "%"},
- {OP_TYPE_MINUS, "minus"},
- {OP_TYPE_ASSIGN, "assign"},
- // bit operator
- {OP_TYPE_BIT_AND, "&"},
- {OP_TYPE_BIT_OR, "|"},
-
- // comparison operator
- {OP_TYPE_GREATER_THAN, ">"},
- {OP_TYPE_GREATER_EQUAL, ">="},
- {OP_TYPE_LOWER_THAN, "<"},
- {OP_TYPE_LOWER_EQUAL, "<="},
- {OP_TYPE_EQUAL, "=="},
- {OP_TYPE_NOT_EQUAL, "!="},
- {OP_TYPE_IN, "in"},
- {OP_TYPE_NOT_IN, "not in"},
- {OP_TYPE_LIKE, "like"},
- {OP_TYPE_NOT_LIKE, "not like"},
- {OP_TYPE_MATCH, "match"},
- {OP_TYPE_NMATCH, "nmatch"},
- {OP_TYPE_IS_NULL, "is null"},
- {OP_TYPE_IS_NOT_NULL, "not null"},
- {OP_TYPE_IS_TRUE, "is true"},
- {OP_TYPE_IS_FALSE, "is false"},
- {OP_TYPE_IS_UNKNOWN, "is unknown"},
- {OP_TYPE_IS_NOT_TRUE, "not true"},
- {OP_TYPE_IS_NOT_FALSE, "not false"},
- {OP_TYPE_IS_NOT_UNKNOWN, "not unknown"},
-
- // json operator
- {OP_TYPE_JSON_GET_VALUE, "->"},
- {OP_TYPE_JSON_CONTAINS, "json contains"}
-};
-
bool filterRangeCompGi (const void *minv, const void *maxv, const void *minr, const void *maxr, __compar_fn_t cfunc) {
int32_t result = cfunc(maxv, minr);
return result >= 0;
@@ -986,7 +946,7 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
} else {
int32_t paramNum = scalarGetOperatorParamNum(optr);
if (1 != paramNum) {
- fltError("invalid right field in unit, operator:%s, rightType:%d", gOptrStr[optr].str, u->right.type);
+ fltError("invalid right field in unit, operator:%s, rightType:%d", operatorTypeStr(optr), u->right.type);
return TSDB_CODE_QRY_APP_ERROR;
}
}
@@ -1517,7 +1477,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit);
SColumnNode *refNode = (SColumnNode *)left->desc;
if (unit->compare.optr >= 0 && unit->compare.optr <= OP_TYPE_JSON_CONTAINS){
- len = sprintf(str, "UNIT[%d] => [%d][%d] %s [", i, refNode->dataBlockId, refNode->slotId, gOptrStr[unit->compare.optr].str);
+ len = sprintf(str, "UNIT[%d] => [%d][%d] %s [", i, refNode->dataBlockId, refNode->slotId, operatorTypeStr(unit->compare.optr));
}
if (unit->right.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != OP_TYPE_IN) {
@@ -1536,7 +1496,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
if (unit->compare.optr2) {
strcat(str, " && ");
if (unit->compare.optr2 >= 0 && unit->compare.optr2 <= OP_TYPE_JSON_CONTAINS){
- sprintf(str + strlen(str), "[%d][%d] %s [", refNode->dataBlockId, refNode->slotId, gOptrStr[unit->compare.optr2].str);
+ sprintf(str + strlen(str), "[%d][%d] %s [", refNode->dataBlockId, refNode->slotId, operatorTypeStr(unit->compare.optr2));
}
if (unit->right2.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != OP_TYPE_IN) {
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index d0c5a76f4b..6634a29f40 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -292,6 +292,9 @@ int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t
}
SColumnInfoData *columnData = (SColumnInfoData *)taosArrayGet(block->pDataBlock, ref->slotId);
+#if TAG_FILTER_DEBUG
+ qDebug("tagfilter column info, slotId:%d, colId:%d, type:%d", ref->slotId, columnData->info.colId, columnData->info.type);
+#endif
param->numOfRows = block->info.rows;
param->columnData = columnData;
break;
@@ -758,7 +761,9 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) {
res->datum.p = taosMemoryCalloc(len, 1);
memcpy(res->datum.p, output.columnData->pData, len);
} else if (IS_VAR_DATA_TYPE(type)) {
- res->datum.p = taosMemoryCalloc(res->node.resType.bytes + VARSTR_HEADER_SIZE + 1, 1);
+ //res->datum.p = taosMemoryCalloc(res->node.resType.bytes + VARSTR_HEADER_SIZE + 1, 1);
+ res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData), 1);
+ res->node.resType.bytes = varDataTLen(output.columnData->pData);
memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData));
} else {
nodesSetValueNodeValue(res, output.columnData->pData);
diff --git a/source/libs/scalar/test/scalar/scalarTests.cpp b/source/libs/scalar/test/scalar/scalarTests.cpp
index 9b40f0a465..7229fdec38 100644
--- a/source/libs/scalar/test/scalar/scalarTests.cpp
+++ b/source/libs/scalar/test/scalar/scalarTests.cpp
@@ -1089,16 +1089,16 @@ void makeCalculate(void *json, void *key, int32_t rightType, void *rightData, do
}else if(opType == OP_TYPE_ADD || opType == OP_TYPE_SUB || opType == OP_TYPE_MULTI || opType == OP_TYPE_DIV ||
opType == OP_TYPE_REM || opType == OP_TYPE_MINUS){
- printf("op:%s,1result:%f,except:%f\n", gOptrStr[opType].str, *((double *)colDataGetData(column, 0)), exceptValue);
+ printf("op:%s,1result:%f,except:%f\n", operatorTypeStr(opType), *((double *)colDataGetData(column, 0)), exceptValue);
ASSERT_TRUE(fabs(*((double *)colDataGetData(column, 0)) - exceptValue) < 0.0001);
}else if(opType == OP_TYPE_BIT_AND || opType == OP_TYPE_BIT_OR){
- printf("op:%s,2result:%" PRId64 ",except:%f\n", gOptrStr[opType].str, *((int64_t *)colDataGetData(column, 0)), exceptValue);
+ printf("op:%s,2result:%" PRId64 ",except:%f\n", operatorTypeStr(opType), *((int64_t *)colDataGetData(column, 0)), exceptValue);
ASSERT_EQ(*((int64_t *)colDataGetData(column, 0)), exceptValue);
}else if(opType == OP_TYPE_GREATER_THAN || opType == OP_TYPE_GREATER_EQUAL || opType == OP_TYPE_LOWER_THAN ||
opType == OP_TYPE_LOWER_EQUAL || opType == OP_TYPE_EQUAL || opType == OP_TYPE_NOT_EQUAL ||
opType == OP_TYPE_IS_NULL || opType == OP_TYPE_IS_NOT_NULL || opType == OP_TYPE_IS_TRUE ||
opType == OP_TYPE_LIKE || opType == OP_TYPE_NOT_LIKE || opType == OP_TYPE_MATCH || opType == OP_TYPE_NMATCH){
- printf("op:%s,3result:%d,except:%f\n", gOptrStr[opType].str, *((bool *)colDataGetData(column, 0)), exceptValue);
+ printf("op:%s,3result:%d,except:%f\n", operatorTypeStr(opType), *((bool *)colDataGetData(column, 0)), exceptValue);
ASSERT_EQ(*((bool *)colDataGetData(column, 0)), exceptValue);
}
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index 8c9003a9b2..ecd9daf1bc 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -20,7 +20,7 @@
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
-
+// clang-format off
int32_t schValidateRspMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) {
int32_t lastMsgType = pTask->lastMsgType;
int32_t taskStatus = SCH_GET_TASK_STATUS(pTask);
@@ -402,7 +402,7 @@ int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) {
qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId,
code);
if (pMsg) {
- taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pData);
}
return TSDB_CODE_SUCCESS;
}
@@ -415,7 +415,7 @@ int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code) {
if (head->isHbParam) {
taosMemoryFree(pMsg->pData);
-
+
SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param;
SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL};
SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans));
@@ -1104,7 +1104,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
#if 1
SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
- schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
+ code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
msg = NULL;
SCH_ERR_JRET(code);
@@ -1114,7 +1114,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
#else
if (TDMT_VND_SUBMIT != msgType) {
SSchTrans trans = {.pTrans = pJob->conn.pTrans, .pHandle = SCH_GET_TASK_HANDLE(pTask)};
- schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
+ code = schAsyncSendMsg(pJob, pTask, &trans, addr, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL));
msg = NULL;
SCH_ERR_JRET(code);
@@ -1136,3 +1136,4 @@ _return:
taosMemoryFreeClear(msg);
SCH_RET(code);
}
+// clang-format on
diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h
index 3776cb261f..6e30eeaa86 100644
--- a/source/libs/stream/inc/streamInc.h
+++ b/source/libs/stream/inc/streamInc.h
@@ -32,7 +32,6 @@ typedef struct {
static SStreamGlobalEnv streamEnv;
-int32_t streamExec(SStreamTask* pTask);
int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch);
int32_t streamDispatch(SStreamTask* pTask);
diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c
index 6da7d4fd59..d6e87c2736 100644
--- a/source/libs/stream/src/stream.c
+++ b/source/libs/stream/src/stream.c
@@ -185,7 +185,9 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
tFreeStreamDispatchReq(pReq);
if (exec) {
- streamTryExec(pTask);
+ if (streamTryExec(pTask) < 0) {
+ return -1;
+ }
if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
streamDispatch(pTask);
@@ -221,7 +223,9 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp) {
}
int32_t streamProcessRunReq(SStreamTask* pTask) {
- streamTryExec(pTask);
+ if (streamTryExec(pTask) < 0) {
+ return -1;
+ }
if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
streamDispatch(pTask);
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index b74e838628..5ff700546c 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -15,6 +15,7 @@
#include "executor.h"
#include "tstream.h"
+#include "ttimer.h"
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) {
SStreamMeta* pMeta = taosMemoryCalloc(1, sizeof(SStreamMeta));
@@ -81,7 +82,7 @@ void streamMetaClose(SStreamMeta* pMeta) {
taosMemoryFree(pMeta);
}
-int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLen) {
+int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen) {
SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
if (pTask == NULL) {
return -1;
@@ -99,16 +100,19 @@ int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, char* msg, int32_t msgLe
goto FAIL;
}
- taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*));
+ if (taosHashPut(pMeta->pTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)) < 0) {
+ goto FAIL;
+ }
if (tdbTbUpsert(pMeta->pTaskDb, &pTask->taskId, sizeof(int32_t), msg, msgLen, &pMeta->txn) < 0) {
+ taosHashRemove(pMeta->pTasks, &pTask->taskId, sizeof(int32_t));
ASSERT(0);
- return -1;
+ goto FAIL;
}
return 0;
FAIL:
- if (pTask) taosMemoryFree(pTask);
+ if (pTask) tFreeSStreamTask(pTask);
return -1;
}
@@ -158,11 +162,28 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) {
SStreamTask* pTask = *ppTask;
taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t));
atomic_store_8(&pTask->taskStatus, TASK_STATUS__DROPPING);
+
+ if (tdbTbDelete(pMeta->pTaskDb, &taskId, sizeof(int32_t), &pMeta->txn) < 0) {
+ /*return -1;*/
+ }
+
+ if (pTask->triggerParam != 0) {
+ taosTmrStop(pTask->timer);
+ }
+
+ while (1) {
+ int8_t schedStatus =
+ atomic_val_compare_exchange_8(&pTask->schedStatus, TASK_SCHED_STATUS__INACTIVE, TASK_SCHED_STATUS__DROPPING);
+ if (schedStatus == TASK_SCHED_STATUS__INACTIVE) {
+ tFreeSStreamTask(pTask);
+ break;
+ } else if (schedStatus == TASK_SCHED_STATUS__DROPPING) {
+ break;
+ }
+ taosMsleep(10);
+ }
}
- if (tdbTbDelete(pMeta->pTaskDb, &taskId, sizeof(int32_t), &pMeta->txn) < 0) {
- /*return -1;*/
- }
return 0;
}
diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c
index 6819e5329f..ac10c82587 100644
--- a/source/libs/stream/src/streamQueue.c
+++ b/source/libs/stream/src/streamQueue.c
@@ -13,7 +13,7 @@
* along with this program. If not, see .
*/
-#include "tstream.h"
+#include "streamInc.h"
SStreamQueue* streamQueueOpen() {
SStreamQueue* pQueue = taosMemoryCalloc(1, sizeof(SStreamQueue));
@@ -36,9 +36,12 @@ void streamQueueClose(SStreamQueue* queue) {
while (1) {
void* qItem = streamQueueNextItem(queue);
if (qItem) {
- taosFreeQitem(qItem);
+ streamFreeQitem(qItem);
} else {
- return;
+ break;
}
}
+ taosFreeQall(queue->qall);
+ taosCloseQueue(queue->queue);
+ taosMemoryFree(queue);
}
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index 638d39e5cc..4009a47c65 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -152,9 +152,18 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
}
void tFreeSStreamTask(SStreamTask* pTask) {
- streamQueueClose(pTask->inputQueue);
- streamQueueClose(pTask->outputQueue);
+ qDebug("free stream task %d", pTask->taskId);
+ if (pTask->inputQueue) streamQueueClose(pTask->inputQueue);
+ if (pTask->outputQueue) streamQueueClose(pTask->outputQueue);
if (pTask->exec.qmsg) taosMemoryFree(pTask->exec.qmsg);
if (pTask->exec.executor) qDestroyTask(pTask->exec.executor);
+ taosArrayDestroyP(pTask->childEpInfo, taosMemoryFree);
+ if (pTask->outputType == TASK_OUTPUT__TABLE) {
+ tDeleteSSchemaWrapper(pTask->tbSink.pSchemaWrapper);
+ taosMemoryFree(pTask->tbSink.pTSchema);
+ }
+ if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+ taosArrayDestroy(pTask->shuffleDispatcher.dbInfo.pVgroupInfos);
+ }
taosMemoryFree(pTask);
}
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index 0b1ce27b77..d053662bd3 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -13,33 +13,31 @@
* along with this program. If not, see .
*/
-#include "tstreamUpdate.h"
-#include "tencode.h"
-#include "ttime.h"
#include "query.h"
+#include "tencode.h"
+#include "tstreamUpdate.h"
+#include "ttime.h"
-#define DEFAULT_FALSE_POSITIVE 0.01
-#define DEFAULT_BUCKET_SIZE 1310720
-#define DEFAULT_MAP_CAPACITY 1310720
-#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 10)
-#define ROWS_PER_MILLISECOND 1
-#define MAX_NUM_SCALABLE_BF 100000
-#define MIN_NUM_SCALABLE_BF 10
-#define DEFAULT_PREADD_BUCKET 1
-#define MAX_INTERVAL MILLISECOND_PER_MINUTE
-#define MIN_INTERVAL (MILLISECOND_PER_SECOND * 10)
-#define DEFAULT_EXPECTED_ENTRIES 10000
+#define DEFAULT_FALSE_POSITIVE 0.01
+#define DEFAULT_BUCKET_SIZE 1310720
+#define DEFAULT_MAP_CAPACITY 1310720
+#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 10)
+#define ROWS_PER_MILLISECOND 1
+#define MAX_NUM_SCALABLE_BF 100000
+#define MIN_NUM_SCALABLE_BF 10
+#define DEFAULT_PREADD_BUCKET 1
+#define MAX_INTERVAL MILLISECOND_PER_MINUTE
+#define MIN_INTERVAL (MILLISECOND_PER_SECOND * 10)
+#define DEFAULT_EXPECTED_ENTRIES 10000
-static int64_t adjustExpEntries(int64_t entries) {
- return TMIN(DEFAULT_EXPECTED_ENTRIES, entries);
-}
+static int64_t adjustExpEntries(int64_t entries) { return TMIN(DEFAULT_EXPECTED_ENTRIES, entries); }
static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
if (pInfo->numSBFs < count) {
count = pInfo->numSBFs;
}
for (uint64_t i = 0; i < count; ++i) {
- int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND);
+ int64_t rows = adjustExpEntries(pInfo->interval * ROWS_PER_MILLISECOND);
SScalableBf *tsSBF = tScalableBfInit(rows, DEFAULT_FALSE_POSITIVE);
taosArrayPush(pInfo->pTsSBFs, &tsSBF);
}
@@ -78,7 +76,7 @@ static int64_t adjustInterval(int64_t interval, int32_t precision) {
static int64_t adjustWatermark(int64_t adjInterval, int64_t originInt, int64_t watermark) {
if (watermark <= adjInterval) {
- watermark = TMAX(originInt/adjInterval, 1) * adjInterval;
+ watermark = TMAX(originInt / adjInterval, 1) * adjInterval;
} else if (watermark > MAX_NUM_SCALABLE_BF * adjInterval) {
watermark = MAX_NUM_SCALABLE_BF * adjInterval;
}/* else if (watermark < MIN_NUM_SCALABLE_BF * adjInterval) {
@@ -158,11 +156,17 @@ static SScalableBf *getSBf(SUpdateInfo *pInfo, TSKEY ts) {
return res;
}
+bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid) {
+ void *pVal = taosHashGet(pInfo->pMap, &tbUid, sizeof(int64_t));
+ if (pVal || taosHashGetSize(pInfo->pMap) >= DEFAULT_MAP_SIZE) return true;
+ return false;
+}
+
bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
- int32_t res = TSDB_CODE_FAILED;
- TSKEY* pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t));
- uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
- TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
+ int32_t res = TSDB_CODE_FAILED;
+ TSKEY *pMapMaxTs = taosHashGet(pInfo->pMap, &tableId, sizeof(uint64_t));
+ uint64_t index = ((uint64_t)tableId) % pInfo->numBuckets;
+ TSKEY maxTs = *(TSKEY *)taosArrayGet(pInfo->pTsBuckets, index);
if (ts < maxTs - pInfo->watermark) {
// this window has been closed.
if (pInfo->pCloseWinSBF) {
@@ -178,42 +182,47 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
}
int32_t size = taosHashGetSize(pInfo->pMap);
- if ( (!pMapMaxTs && size < DEFAULT_MAP_SIZE) || (pMapMaxTs && *pMapMaxTs < ts)) {
+ if ((!pMapMaxTs && size < DEFAULT_MAP_SIZE) || (pMapMaxTs && *pMapMaxTs < ts)) {
taosHashPut(pInfo->pMap, &tableId, sizeof(uint64_t), &ts, sizeof(TSKEY));
return false;
}
- if ( !pMapMaxTs && maxTs < ts ) {
+ if (!pMapMaxTs && maxTs < ts) {
taosArraySet(pInfo->pTsBuckets, index, &ts);
return false;
}
if (ts < pInfo->minTS) {
- qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64 , tableId, maxTs, *pMapMaxTs, ts);
+ qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
return true;
} else if (res == TSDB_CODE_SUCCESS) {
return false;
}
- qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64 , tableId, maxTs, *pMapMaxTs, ts);
+ qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
// check from tsdb api
return true;
}
-void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version) {
- qDebug("===stream===groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version);
+void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version) {
+ qDebug("===stream===groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId,
+ pWin->skey, pWin->ekey, version);
pInfo->scanWindow = *pWin;
pInfo->scanGroupId = groupId;
pInfo->maxVersion = version;
}
-bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version) {
+bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version) {
if (!pInfo) {
return false;
}
- qDebug("===stream===check groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version);
- if (pInfo->scanGroupId == groupId && pInfo->scanWindow.skey <= pWin->skey &&
- pWin->ekey <= pInfo->scanWindow.ekey && version <= pInfo->maxVersion ) {
- qDebug("===stream===ignore groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version);
+ qDebug("===stream===check groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId,
+ pWin->skey, pWin->ekey, version);
+ if (pInfo->scanGroupId == groupId && pInfo->scanWindow.skey <= pWin->skey && pWin->ekey <= pInfo->scanWindow.ekey &&
+ version <= pInfo->maxVersion) {
+ qDebug("===stream===ignore groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64, groupId,
+ pWin->skey, pWin->ekey, version);
return true;
}
return false;
@@ -261,7 +270,7 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo)
int32_t size = taosArrayGetSize(pInfo->pTsBuckets);
if (tEncodeI32(&encoder, size) < 0) return -1;
for (int32_t i = 0; i < size; i++) {
- TSKEY* pTs = (TSKEY*)taosArrayGet(pInfo->pTsBuckets, i);
+ TSKEY *pTs = (TSKEY *)taosArrayGet(pInfo->pTsBuckets, i);
if (tEncodeI64(&encoder, *pTs) < 0) return -1;
}
@@ -270,7 +279,7 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo)
int32_t sBfSize = taosArrayGetSize(pInfo->pTsSBFs);
if (tEncodeI32(&encoder, sBfSize) < 0) return -1;
for (int32_t i = 0; i < sBfSize; i++) {
- SScalableBf* pSBf = taosArrayGetP(pInfo->pTsSBFs, i);
+ SScalableBf *pSBf = taosArrayGetP(pInfo->pTsSBFs, i);
if (tScalableBfEncode(pSBf, &encoder) < 0) return -1;
}
@@ -278,17 +287,17 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo)
if (tEncodeI64(&encoder, pInfo->interval) < 0) return -1;
if (tEncodeI64(&encoder, pInfo->watermark) < 0) return -1;
if (tEncodeI64(&encoder, pInfo->minTS) < 0) return -1;
-
+
if (tScalableBfEncode(pInfo->pCloseWinSBF, &encoder) < 0) return -1;
int32_t mapSize = taosHashGetSize(pInfo->pMap);
if (tEncodeI32(&encoder, mapSize) < 0) return -1;
- void* pIte = NULL;
+ void *pIte = NULL;
size_t keyLen = 0;
while ((pIte = taosHashIterate(pInfo->pMap, pIte)) != NULL) {
- void* key = taosHashGetKey(pIte, &keyLen);
- if (tEncodeU64(&encoder, *(uint64_t*)key) < 0) return -1;
- if (tEncodeI64(&encoder, *(TSKEY*)pIte) < 0) return -1;
+ void *key = taosHashGetKey(pIte, &keyLen);
+ if (tEncodeU64(&encoder, *(uint64_t *)key) < 0) return -1;
+ if (tEncodeI64(&encoder, *(TSKEY *)pIte) < 0) return -1;
}
if (tEncodeI64(&encoder, pInfo->scanWindow.skey) < 0) return -1;
@@ -311,7 +320,7 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) {
int32_t size = 0;
if (tDecodeI32(&decoder, &size) < 0) return -1;
- pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY));
+ pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY));
TSKEY ts = INT64_MIN;
for (int32_t i = 0; i < size; i++) {
if (tDecodeI64(&decoder, &ts) < 0) return -1;
@@ -324,7 +333,7 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) {
if (tDecodeI32(&decoder, &sBfSize) < 0) return -1;
pInfo->pTsSBFs = taosArrayInit(sBfSize, sizeof(void *));
for (int32_t i = 0; i < sBfSize; i++) {
- SScalableBf* pSBf = tScalableBfDecode(&decoder);
+ SScalableBf *pSBf = tScalableBfDecode(&decoder);
if (!pSBf) return -1;
taosArrayPush(pInfo->pTsSBFs, &pSBf);
}
@@ -337,11 +346,11 @@ int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) {
int32_t mapSize = 0;
if (tDecodeI32(&decoder, &mapSize) < 0) return -1;
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT);
pInfo->pMap = taosHashInit(mapSize, hashFn, true, HASH_NO_LOCK);
uint64_t uid = 0;
ts = INT64_MIN;
- for(int32_t i = 0; i < mapSize; i++) {
+ for (int32_t i = 0; i < mapSize; i++) {
if (tDecodeU64(&decoder, &uid) < 0) return -1;
if (tDecodeI64(&decoder, &ts) < 0) return -1;
taosHashPut(pInfo->pMap, &uid, sizeof(uint64_t), &ts, sizeof(TSKEY));
diff --git a/source/libs/sync/inc/syncIndexMgr.h b/source/libs/sync/inc/syncIndexMgr.h
index 1f60a9d57e..fb85b89419 100644
--- a/source/libs/sync/inc/syncIndexMgr.h
+++ b/source/libs/sync/inc/syncIndexMgr.h
@@ -29,8 +29,12 @@ extern "C" {
// SIndexMgr -----------------------------
typedef struct SSyncIndexMgr {
SRaftId (*replicas)[TSDB_MAX_REPLICA];
- SyncIndex index[TSDB_MAX_REPLICA];
- SyncTerm privateTerm[TSDB_MAX_REPLICA]; // for advanced function
+ SyncIndex index[TSDB_MAX_REPLICA];
+ SyncTerm privateTerm[TSDB_MAX_REPLICA]; // for advanced function
+
+ int64_t startTimeArr[TSDB_MAX_REPLICA];
+ int64_t recvTimeArr[TSDB_MAX_REPLICA];
+
int32_t replicaNum;
SSyncNode *pSyncNode;
} SSyncIndexMgr;
@@ -41,8 +45,13 @@ void syncIndexMgrDestroy(SSyncIndexMgr *pSyncIndexMgr);
void syncIndexMgrClear(SSyncIndexMgr *pSyncIndexMgr);
void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, SyncIndex index);
SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId);
-cJSON * syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr);
-char * syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr);
+cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr);
+char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr);
+
+void syncIndexMgrSetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t startTime);
+int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId);
+void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime);
+int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId);
// void syncIndexMgrSetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, SyncTerm term);
// SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId);
diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h
index 3e247e5d79..0afc373f2d 100644
--- a/source/libs/sync/inc/syncInt.h
+++ b/source/libs/sync/inc/syncInt.h
@@ -237,7 +237,7 @@ void syncNodeVoteForSelf(SSyncNode* pSyncNode);
bool syncNodeHasSnapshot(SSyncNode* pSyncNode);
void syncNodeMaybeUpdateCommitBySnapshot(SSyncNode* pSyncNode);
-SyncIndex syncNodeGetLastIndex(SSyncNode* pSyncNode);
+SyncIndex syncNodeGetLastIndex(const SSyncNode* pSyncNode);
SyncTerm syncNodeGetLastTerm(SSyncNode* pSyncNode);
int32_t syncNodeGetLastIndexTerm(SSyncNode* pSyncNode, SyncIndex* pLastIndex, SyncTerm* pLastTerm);
@@ -269,6 +269,8 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode);
int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader);
int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry);
+int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode);
+
// trace log
void syncLogSendRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s);
void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, const char* s);
diff --git a/source/libs/sync/inc/syncReplication.h b/source/libs/sync/inc/syncReplication.h
index 21821be6c7..edce124ee5 100644
--- a/source/libs/sync/inc/syncReplication.h
+++ b/source/libs/sync/inc/syncReplication.h
@@ -55,6 +55,8 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode);
int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode);
+int32_t syncNodeAppendEntriesOnePeer(SSyncNode* pSyncNode, SRaftId* pDestId, SyncIndex nextIndex);
+
int32_t syncNodeReplicate(SSyncNode* pSyncNode, bool isTimer);
int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg);
int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntriesBatch* pMsg);
diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c
index 4f93d8197d..e000ba8bf8 100644
--- a/source/libs/sync/src/syncAppendEntries.c
+++ b/source/libs/sync/src/syncAppendEntries.c
@@ -148,6 +148,7 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
pReply->term = ths->pRaftStore->currentTerm;
pReply->success = false;
pReply->matchIndex = SYNC_INDEX_INVALID;
+ pReply->startTime = ths->startTime;
// msg event log
syncLogSendAppendEntriesReply(ths, pReply, "");
@@ -290,6 +291,8 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
pReply->matchIndex = pMsg->prevLogIndex;
}
+ pReply->startTime = ths->startTime;
+
// msg event log
syncLogSendAppendEntriesReply(ths, pReply, "");
@@ -603,6 +606,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
pReply->success = true;
pReply->matchIndex = matchIndex;
+ pReply->startTime = ths->startTime;
// msg event log
syncLogSendAppendEntriesReply(ths, pReply, "");
@@ -651,6 +655,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
pReply->success = false;
pReply->matchIndex = ths->commitIndex;
+ pReply->startTime = ths->startTime;
// msg event log
syncLogSendAppendEntriesReply(ths, pReply, "");
@@ -729,6 +734,7 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
pReply->success = true;
pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + pMsg->dataCount : pMsg->prevLogIndex;
+ pReply->startTime = ths->startTime;
// msg event log
syncLogSendAppendEntriesReply(ths, pReply, "");
@@ -874,6 +880,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
pReply->success = true;
pReply->matchIndex = matchIndex;
+ pReply->startTime = ths->startTime;
// msg event log
syncLogSendAppendEntriesReply(ths, pReply, "");
@@ -919,6 +926,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
pReply->success = false;
pReply->matchIndex = SYNC_INDEX_INVALID;
+ pReply->startTime = ths->startTime;
// msg event log
syncLogSendAppendEntriesReply(ths, pReply, "");
@@ -984,6 +992,7 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
pReply->success = true;
pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + 1 : pMsg->prevLogIndex;
+ pReply->startTime = ths->startTime;
// msg event log
syncLogSendAppendEntriesReply(ths, pReply, "");
diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c
index 4928c54bd7..9253ed0129 100644
--- a/source/libs/sync/src/syncAppendEntriesReply.c
+++ b/source/libs/sync/src/syncAppendEntriesReply.c
@@ -64,6 +64,10 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
+ // update time
+ syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime);
+ syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs());
+
SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
@@ -170,6 +174,10 @@ int32_t syncNodeOnAppendEntriesReplySnapshot2Cb(SSyncNode* ths, SyncAppendEntrie
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
+ // update time
+ syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime);
+ syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs());
+
SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
@@ -330,6 +338,10 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries
ASSERT(pMsg->term == ths->pRaftStore->currentTerm);
+ // update time
+ syncIndexMgrSetStartTime(ths->pNextIndex, &(pMsg->srcId), pMsg->startTime);
+ syncIndexMgrSetRecvTime(ths->pNextIndex, &(pMsg->srcId), taosGetTimestampMs());
+
SyncIndex beforeNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId));
SyncIndex beforeMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId));
diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c
index 3a94ed9713..1e68fe346c 100644
--- a/source/libs/sync/src/syncCommit.c
+++ b/source/libs/sync/src/syncCommit.c
@@ -133,6 +133,87 @@ bool syncAgreeIndex(SSyncNode* pSyncNode, SRaftId* pRaftId, SyncIndex index) {
return false;
}
+static inline int64_t syncNodeAbs64(int64_t a, int64_t b) {
+ ASSERT(a >= 0);
+ ASSERT(b >= 0);
+
+ int64_t c = a > b ? a - b : b - a;
+ return c;
+}
+
+int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) {
+ int32_t quorum = 1; // self
+
+ int64_t timeNow = taosGetTimestampMs();
+ for (int i = 0; i < pSyncNode->peersNum; ++i) {
+ int64_t peerStartTime = syncIndexMgrGetStartTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]);
+ int64_t peerRecvTime = syncIndexMgrGetRecvTime(pSyncNode->pNextIndex, &(pSyncNode->peersId)[i]);
+ SyncIndex peerMatchIndex = syncIndexMgrGetIndex(pSyncNode->pMatchIndex, &(pSyncNode->peersId)[i]);
+
+ int64_t recvTimeDiff = TABS(peerRecvTime - timeNow);
+ int64_t startTimeDiff = TABS(peerStartTime - pSyncNode->startTime);
+ int64_t logDiff = TABS(peerMatchIndex - syncNodeGetLastIndex(pSyncNode));
+
+ /*
+ int64_t recvTimeDiff = syncNodeAbs64(peerRecvTime, timeNow);
+ int64_t startTimeDiff = syncNodeAbs64(peerStartTime, pSyncNode->startTime);
+ int64_t logDiff = syncNodeAbs64(peerMatchIndex, syncNodeGetLastIndex(pSyncNode));
+ */
+
+ int32_t addQuorum = 0;
+
+ if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) {
+ if (startTimeDiff < SYNC_MAX_START_TIME_RANGE_MS) {
+ addQuorum = 1;
+ } else {
+ if (logDiff < SYNC_ADD_QUORUM_COUNT) {
+ addQuorum = 1;
+ } else {
+ addQuorum = 0;
+ }
+ }
+ } else {
+ addQuorum = 0;
+ }
+
+ /*
+ if (recvTimeDiff < SYNC_MAX_RECV_TIME_RANGE_MS) {
+ addQuorum = 1;
+ } else {
+ addQuorum = 0;
+ }
+
+ if (startTimeDiff > SYNC_MAX_START_TIME_RANGE_MS) {
+ addQuorum = 0;
+ }
+ */
+
+ quorum += addQuorum;
+ }
+
+ ASSERT(quorum <= pSyncNode->replicaNum);
+
+ if (quorum < pSyncNode->quorum) {
+ quorum = pSyncNode->quorum;
+ }
+
+ return quorum;
+}
+
+bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) {
+ int agreeCount = 0;
+ for (int i = 0; i < pSyncNode->replicaNum; ++i) {
+ if (syncAgreeIndex(pSyncNode, &(pSyncNode->replicasId[i]), index)) {
+ ++agreeCount;
+ }
+ if (agreeCount >= syncNodeDynamicQuorum(pSyncNode)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) {
int agreeCount = 0;
for (int i = 0; i < pSyncNode->replicaNum; ++i) {
@@ -145,3 +226,4 @@ bool syncAgree(SSyncNode* pSyncNode, SyncIndex index) {
}
return false;
}
+*/
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index 8c820fcd9c..07c4fa8429 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -47,6 +47,13 @@ void syncIndexMgrDestroy(SSyncIndexMgr *pSyncIndexMgr) {
void syncIndexMgrClear(SSyncIndexMgr *pSyncIndexMgr) {
memset(pSyncIndexMgr->index, 0, sizeof(pSyncIndexMgr->index));
memset(pSyncIndexMgr->privateTerm, 0, sizeof(pSyncIndexMgr->privateTerm));
+
+ // int64_t timeNow = taosGetMonotonicMs();
+ for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
+ pSyncIndexMgr->startTimeArr[i] = 0;
+ pSyncIndexMgr->recvTimeArr[i] = 0;
+ }
+
/*
for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
pSyncIndexMgr->index[i] = 0;
@@ -68,7 +75,8 @@ void syncIndexMgrSetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId,
char host[128];
uint16_t port;
syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port);
- sError("vgId:%d, index mgr set for %s:%d, index:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port, index);
+ sError("vgId:%d, index mgr set for %s:%d, index:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port,
+ index);
}
SyncIndex syncIndexMgrGetIndex(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) {
@@ -125,11 +133,65 @@ cJSON *syncIndexMgr2Json(SSyncIndexMgr *pSyncIndexMgr) {
char *syncIndexMgr2Str(SSyncIndexMgr *pSyncIndexMgr) {
cJSON *pJson = syncIndexMgr2Json(pSyncIndexMgr);
- char * serialized = cJSON_Print(pJson);
+ char *serialized = cJSON_Print(pJson);
cJSON_Delete(pJson);
return serialized;
}
+void syncIndexMgrSetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t startTime) {
+ for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
+ if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) {
+ (pSyncIndexMgr->startTimeArr)[i] = startTime;
+ return;
+ }
+ }
+
+ // maybe config change
+ // ASSERT(0);
+ char host[128];
+ uint16_t port;
+ syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port);
+ sError("vgId:%d, index mgr set for %s:%d, start-time:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port,
+ startTime);
+}
+
+int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) {
+ for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
+ if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) {
+ int64_t startTime = (pSyncIndexMgr->startTimeArr)[i];
+ return startTime;
+ }
+ }
+ ASSERT(0);
+}
+
+void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime) {
+ for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
+ if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) {
+ (pSyncIndexMgr->recvTimeArr)[i] = recvTime;
+ return;
+ }
+ }
+
+ // maybe config change
+ // ASSERT(0);
+ char host[128];
+ uint16_t port;
+ syncUtilU642Addr(pRaftId->addr, host, sizeof(host), &port);
+ sError("vgId:%d, index mgr set for %s:%d, recv-time:%" PRId64 " error", pSyncIndexMgr->pSyncNode->vgId, host, port,
+ recvTime);
+}
+
+int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId) {
+ for (int i = 0; i < pSyncIndexMgr->replicaNum; ++i) {
+ if (syncUtilSameId(&((*(pSyncIndexMgr->replicas))[i]), pRaftId)) {
+ int64_t recvTime = (pSyncIndexMgr->recvTimeArr)[i];
+ return recvTime;
+ }
+ }
+ ASSERT(0);
+}
+
// for debug -------------------
void syncIndexMgrPrint(SSyncIndexMgr *pObj) {
char *serialized = syncIndexMgr2Str(pObj);
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index c7784cd62e..3fe600ecbb 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -392,6 +392,29 @@ bool syncIsReady(int64_t rid) {
return b;
}
+bool syncIsReadyForRead(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return false;
+ }
+ ASSERT(rid == pSyncNode->rid);
+
+ // TODO: last not noop?
+ SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode);
+ bool b = (pSyncNode->state == TAOS_SYNC_STATE_LEADER) && (pSyncNode->commitIndex >= lastIndex - SYNC_MAX_READ_RANGE);
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+
+ // if false, set error code
+ if (false == b) {
+ if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
+ terrno = TSDB_CODE_SYN_NOT_LEADER;
+ } else {
+ terrno = TSDB_CODE_APP_NOT_READY;
+ }
+ }
+ return b;
+}
+
bool syncIsRestoreFinish(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
@@ -519,6 +542,30 @@ SyncTerm syncGetMyTerm(int64_t rid) {
return term;
}
+SyncIndex syncGetLastIndex(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return SYNC_INDEX_INVALID;
+ }
+ ASSERT(rid == pSyncNode->rid);
+ SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode);
+
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+ return lastIndex;
+}
+
+SyncIndex syncGetCommitIndex(int64_t rid) {
+ SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
+ if (pSyncNode == NULL) {
+ return SYNC_INDEX_INVALID;
+ }
+ ASSERT(rid == pSyncNode->rid);
+ SyncIndex cmtIndex = pSyncNode->commitIndex;
+
+ taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+ return cmtIndex;
+}
+
SyncGroupId syncGetVgId(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
@@ -828,6 +875,15 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
pSyncNode->changing = true;
}
+ // not restored, vnode enable
+ if (!pSyncNode->restoreFinish && pSyncNode->vgId != 1) {
+ ret = -1;
+ terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY;
+ sError("vgId:%d, failed to sync propose since not ready, type:%s, last:%ld, cmt:%ld", pSyncNode->vgId,
+ TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex);
+ goto _END;
+ }
+
SRespStub stub;
stub.createTime = taosGetTimestampMs();
stub.rpcMsg = *pMsg;
@@ -1626,13 +1682,13 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
", sby:%d, "
"stgy:%d, bch:%d, "
"r-num:%d, "
- "lcfg:%" PRId64 ", chging:%d, rsto:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s",
+ "lcfg:%" PRId64 ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s",
pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm,
pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize,
pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing,
- pSyncNode->restoreFinish, pSyncNode->electTimerLogicClockUser, pSyncNode->heartbeatTimerLogicClockUser,
- printStr);
+ pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), pSyncNode->electTimerLogicClockUser,
+ pSyncNode->heartbeatTimerLogicClockUser, printStr);
} else {
snprintf(logBuf, sizeof(logBuf), "%s", str);
}
@@ -1650,12 +1706,13 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
", sby:%d, "
"stgy:%d, bch:%d, "
"r-num:%d, "
- "lcfg:%" PRId64 ", chging:%d, rsto:%d, %s",
+ "lcfg:%" PRId64 ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s",
pSyncNode->vgId, syncUtilState2String(pSyncNode->state), str, pSyncNode->pRaftStore->currentTerm,
pSyncNode->commitIndex, logBeginIndex, logLastIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
pSyncNode->pRaftCfg->isStandBy, pSyncNode->pRaftCfg->snapshotStrategy, pSyncNode->pRaftCfg->batchSize,
pSyncNode->replicaNum, pSyncNode->pRaftCfg->lastConfigIndex, pSyncNode->changing,
- pSyncNode->restoreFinish, printStr);
+ pSyncNode->restoreFinish, syncNodeDynamicQuorum(pSyncNode), pSyncNode->electTimerLogicClockUser,
+ pSyncNode->heartbeatTimerLogicClockUser, printStr);
} else {
snprintf(s, len, "%s", str);
}
@@ -2227,7 +2284,7 @@ bool syncNodeHasSnapshot(SSyncNode* pSyncNode) {
// return max(logLastIndex, snapshotLastIndex)
// if no snapshot and log, return -1
-SyncIndex syncNodeGetLastIndex(SSyncNode* pSyncNode) {
+SyncIndex syncNodeGetLastIndex(const SSyncNode* pSyncNode) {
SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0, .lastConfigIndex = -1};
if (pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
@@ -2716,11 +2773,27 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p
return 0;
}
- if (ths->vgId > 1) {
- syncNodeEventLog(ths, "I am vnode, can not do leader transfer");
+ if (pEntry->term < ths->pRaftStore->currentTerm) {
+ char logBuf[128];
+ snprintf(logBuf, sizeof(logBuf), "little term:%lu, can not do leader transfer", pEntry->term);
+ syncNodeEventLog(ths, logBuf);
return 0;
}
+ if (pEntry->index < syncNodeGetLastIndex(ths)) {
+ char logBuf[128];
+ snprintf(logBuf, sizeof(logBuf), "little index:%ld, can not do leader transfer", pEntry->index);
+ syncNodeEventLog(ths, logBuf);
+ return 0;
+ }
+
+ /*
+ if (ths->vgId > 1) {
+ syncNodeEventLog(ths, "I am vnode, can not do leader transfer");
+ return 0;
+ }
+ */
+
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "do leader transfer, index:%ld", pEntry->index);
diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c
index 13adaf055c..b42aba560f 100644
--- a/source/libs/sync/src/syncMessage.c
+++ b/source/libs/sync/src/syncMessage.c
@@ -1947,6 +1947,8 @@ cJSON* syncAppendEntriesReply2Json(const SyncAppendEntriesReply* pMsg) {
cJSON_AddNumberToObject(pRoot, "success", pMsg->success);
snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->matchIndex);
cJSON_AddStringToObject(pRoot, "matchIndex", u64buf);
+ snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->startTime);
+ cJSON_AddStringToObject(pRoot, "startTime", u64buf);
}
cJSON* pJson = cJSON_CreateObject();
diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c
index 24f75de5d3..886f7ad199 100644
--- a/source/libs/sync/src/syncReplication.c
+++ b/source/libs/sync/src/syncReplication.c
@@ -116,6 +116,120 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode) {
return ret;
}
+int32_t syncNodeAppendEntriesOnePeer(SSyncNode* pSyncNode, SRaftId* pDestId, SyncIndex nextIndex) {
+ int32_t ret = 0;
+
+ // pre index, pre term
+ SyncIndex preLogIndex = syncNodeGetPreIndex(pSyncNode, nextIndex);
+ SyncTerm preLogTerm = syncNodeGetPreTerm(pSyncNode, nextIndex);
+ if (preLogTerm == SYNC_TERM_INVALID) {
+ SyncIndex newNextIndex = syncNodeGetLastIndex(pSyncNode) + 1;
+ // SyncIndex newNextIndex = nextIndex + 1;
+
+ syncIndexMgrSetIndex(pSyncNode->pNextIndex, pDestId, newNextIndex);
+ syncIndexMgrSetIndex(pSyncNode->pMatchIndex, pDestId, SYNC_INDEX_INVALID);
+ sError("vgId:%d, sync get pre term error, nextIndex:%" PRId64 ", update next-index:%" PRId64
+ ", match-index:%d, raftid:%" PRId64,
+ pSyncNode->vgId, nextIndex, newNextIndex, SYNC_INDEX_INVALID, pDestId->addr);
+ return -1;
+ }
+
+ // entry pointer array
+ SSyncRaftEntry* entryPArr[SYNC_MAX_BATCH_SIZE];
+ memset(entryPArr, 0, sizeof(entryPArr));
+
+ // get entry batch
+ int32_t getCount = 0;
+ SyncIndex getEntryIndex = nextIndex;
+ for (int32_t i = 0; i < pSyncNode->pRaftCfg->batchSize; ++i) {
+ SSyncRaftEntry* pEntry = NULL;
+ int32_t code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, getEntryIndex, &pEntry);
+ if (code == 0) {
+ ASSERT(pEntry != NULL);
+ entryPArr[i] = pEntry;
+ getCount++;
+ getEntryIndex++;
+
+ } else {
+ break;
+ }
+ }
+
+ // event log
+ do {
+ char logBuf[128];
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port);
+ snprintf(logBuf, sizeof(logBuf), "build batch:%d for %s:%d", getCount, host, port);
+ syncNodeEventLog(pSyncNode, logBuf);
+ } while (0);
+
+ // build msg
+ SyncAppendEntriesBatch* pMsg = syncAppendEntriesBatchBuild(entryPArr, getCount, pSyncNode->vgId);
+ ASSERT(pMsg != NULL);
+
+ // free entries
+ for (int32_t i = 0; i < pSyncNode->pRaftCfg->batchSize; ++i) {
+ SSyncRaftEntry* pEntry = entryPArr[i];
+ if (pEntry != NULL) {
+ syncEntryDestory(pEntry);
+ entryPArr[i] = NULL;
+ }
+ }
+
+ // prepare msg
+ pMsg->srcId = pSyncNode->myRaftId;
+ pMsg->destId = *pDestId;
+ pMsg->term = pSyncNode->pRaftStore->currentTerm;
+ pMsg->prevLogIndex = preLogIndex;
+ pMsg->prevLogTerm = preLogTerm;
+ pMsg->commitIndex = pSyncNode->commitIndex;
+ pMsg->privateTerm = 0;
+ pMsg->dataCount = getCount;
+
+ // send msg
+ syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg);
+
+ // speed up
+ if (pMsg->dataCount > 0 && pSyncNode->commitIndex - pMsg->prevLogIndex > SYNC_SLOW_DOWN_RANGE) {
+ ret = 1;
+
+#if 0
+ do {
+ char logBuf[128];
+ char host[64];
+ uint16_t port;
+ syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port);
+ snprintf(logBuf, sizeof(logBuf), "maybe speed up for %s:%d, pre-index:%ld", host, port, pMsg->prevLogIndex);
+ syncNodeEventLog(pSyncNode, logBuf);
+ } while (0);
+#endif
+ }
+
+ syncAppendEntriesBatchDestroy(pMsg);
+
+ return ret;
+}
+
+int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
+ if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
+ return -1;
+ }
+
+ int32_t ret = 0;
+ for (int i = 0; i < pSyncNode->peersNum; ++i) {
+ SRaftId* pDestId = &(pSyncNode->peersId[i]);
+
+ // next index
+ SyncIndex nextIndex = syncIndexMgrGetIndex(pSyncNode->pNextIndex, pDestId);
+ ret = syncNodeAppendEntriesOnePeer(pSyncNode, pDestId, nextIndex);
+ }
+
+ return ret;
+}
+
+#if 0
int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
return -1;
@@ -221,6 +335,7 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) {
return ret;
}
+#endif
int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) {
ASSERT(pSyncNode->state == TAOS_SYNC_STATE_LEADER);
diff --git a/source/libs/sync/test/syncAppendEntriesReplyTest.cpp b/source/libs/sync/test/syncAppendEntriesReplyTest.cpp
index d41e99a3cd..72d3fd5ef3 100644
--- a/source/libs/sync/test/syncAppendEntriesReplyTest.cpp
+++ b/source/libs/sync/test/syncAppendEntriesReplyTest.cpp
@@ -24,6 +24,7 @@ SyncAppendEntriesReply *createMsg() {
pMsg->matchIndex = 77;
pMsg->term = 33;
pMsg->privateTerm = 44;
+ pMsg->startTime = taosGetTimestampMs();
return pMsg;
}
@@ -89,6 +90,8 @@ void test5() {
}
int main() {
+ gRaftDetailLog = true;
+
tsAsyncLog = 0;
sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
logTest();
diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c
index 22229ea0e8..ab9b21dc3f 100644
--- a/source/libs/tdb/src/db/tdbPCache.c
+++ b/source/libs/tdb/src/db/tdbPCache.c
@@ -199,10 +199,20 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn)
if (pPageH) {
// copy the page content
memcpy(&(pPage->pgid), pPgid, sizeof(*pPgid));
+
+ for (int nLoops = 0;;) {
+ if (pPageH->pPager) break;
+ if (++nLoops > 1000) {
+ sched_yield();
+ nLoops = 0;
+ }
+ }
+
pPage->pLruNext = NULL;
pPage->pPager = pPageH->pPager;
memcpy(pPage->pData, pPageH->pData, pPage->pageSize);
+ tdbDebug("pcache/pPageH: %p %d %p %p", pPageH, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize, pPage);
tdbPageInit(pPage, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize);
pPage->kLen = pPageH->kLen;
pPage->vLen = pPageH->vLen;
diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h
index 117455f722..c6f3066be7 100644
--- a/source/libs/transport/inc/transComm.h
+++ b/source/libs/transport/inc/transComm.h
@@ -98,6 +98,11 @@ typedef void* queue[2];
#define TRANS_RETRY_INTERVAL 15 // retry interval (ms)
#define TRANS_CONN_TIMEOUT 3 // connect timeout (s)
#define TRANS_READ_TIMEOUT 3000 // read timeout (ms)
+#define TRANS_PACKET_LIMIT 1024 * 1024 * 512
+
+#define TRANS_MAGIC_NUM 0x5f375a86
+
+#define TRANS_NOVALID_PACKET(src) ((src) != TRANS_MAGIC_NUM ? 1 : 0)
#define TRANS_PACKET_LIMIT 1024 * 1024 * 512
@@ -301,7 +306,7 @@ int transSendResponse(const STransMsg* msg);
int transRegisterMsg(const STransMsg* msg);
int transSetDefaultAddr(void* shandle, const char* ip, const char* fqdn);
-int transGetSockDebugInfo(struct sockaddr* sockname, char* dst);
+int transSockInfo2Str(struct sockaddr* sockname, char* dst);
int64_t transAllocHandle();
diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c
index 62277a7569..7cfb188ac9 100644
--- a/source/libs/transport/src/thttp.c
+++ b/source/libs/transport/src/thttp.c
@@ -14,15 +14,25 @@
*/
#define _DEFAULT_SOURCE
-#ifdef USE_UV
-#include
-#endif
// clang-format off
+#include
#include "zlib.h"
#include "thttp.h"
#include "taoserror.h"
#include "tlog.h"
+
+#define HTTP_RECV_BUF_SIZE 1024
+typedef struct SHttpClient {
+ uv_connect_t conn;
+ uv_tcp_t tcp;
+ uv_write_t req;
+ uv_buf_t* wbuf;
+ char *rbuf;
+ char* addr;
+ uint16_t port;
+} SHttpClient;
+
static int32_t taosBuildHttpHeader(const char* server, int32_t contLen, char* pHead, int32_t headLen,
EHttpCompFlag flag) {
if (flag == HTTP_FLAT) {
@@ -45,7 +55,7 @@ static int32_t taosBuildHttpHeader(const char* server, int32_t contLen, char* pH
}
}
-int32_t taosCompressHttpRport(char* pSrc, int32_t srcLen) {
+static int32_t taosCompressHttpRport(char* pSrc, int32_t srcLen) {
int32_t code = -1;
int32_t destLen = srcLen;
void* pDest = taosMemoryMalloc(destLen);
@@ -114,84 +124,69 @@ _OVER:
return code;
}
-#ifdef USE_UV
-static void clientConnCb(uv_connect_t* req, int32_t status) {
- if (status < 0) {
+static void destroyHttpClient(SHttpClient* cli) {
+ taosMemoryFree(cli->wbuf);
+ taosMemoryFree(cli->rbuf);
+ taosMemoryFree(cli->addr);
+ taosMemoryFree(cli);
+
+}
+static void clientCloseCb(uv_handle_t* handle) {
+ SHttpClient* cli = handle->data;
+ destroyHttpClient(cli);
+}
+static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
+ SHttpClient* cli = handle->data;
+ buf->base = cli->rbuf;
+ buf->len = HTTP_RECV_BUF_SIZE;
+}
+static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) {
+ SHttpClient* cli = handle->data;
+ if (nread < 0) {
+ uError("http-report read error:%s", uv_err_name(nread));
+ } else {
+ uInfo("http-report succ to read %d bytes, just ignore it", nread);
+ }
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
+}
+static void clientSentCb(uv_write_t* req, int32_t status) {
+ SHttpClient* cli = req->data;
+ if (status != 0) {
terrno = TAOS_SYSTEM_ERROR(status);
- uError("connection error %s", uv_strerror(status));
- uv_close((uv_handle_t*)req->handle, NULL);
+ uError("http-report failed to send data %s", uv_strerror(status));
+ } else {
+ uInfo("http-report succ to send data");
+ }
+ uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb);
+}
+static void clientConnCb(uv_connect_t* req, int32_t status) {
+ SHttpClient* cli = req->data;
+ if (status != 0) {
+ terrno = TAOS_SYSTEM_ERROR(status);
+ uError("http-report failed to conn to server, reason:%s, dst:%s:%d", uv_strerror(status), cli->addr, cli->port);
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
return;
}
- uv_buf_t* wb = req->data;
- assert(wb != NULL);
- uv_write_t write_req;
- uv_write(&write_req, req->handle, wb, 2, NULL);
- uv_close((uv_handle_t*)req->handle, NULL);
+ uv_write(&cli->req, (uv_stream_t*)&cli->tcp, cli->wbuf, 2, clientSentCb);
}
-int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) {
- uint32_t ipv4 = taosGetIpv4FromFqdn(server);
- if (ipv4 == 0xffffffff) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- uError("failed to get http server:%s ip since %s", server, terrstr());
- return -1;
- }
-
- char ipv4Buf[128] = {0};
- tinet_ntoa(ipv4Buf, ipv4);
-
- struct sockaddr_in dest = {0};
- uv_ip4_addr(ipv4Buf, port, &dest);
-
- uv_tcp_t socket_tcp = {0};
- uv_loop_t* loop = uv_default_loop();
- uv_tcp_init(loop, &socket_tcp);
- uv_connect_t* connect = (uv_connect_t*)taosMemoryMalloc(sizeof(uv_connect_t));
-
- if (flag == HTTP_GZIP) {
- int32_t dstLen = taosCompressHttpRport(pCont, contLen);
- if (dstLen > 0) {
- contLen = dstLen;
- } else {
- flag = HTTP_FLAT;
- }
- }
-
- char header[1024] = {0};
- int32_t headLen = taosBuildHttpHeader(server, contLen, header, sizeof(header), flag);
-
- uv_buf_t wb[2];
- wb[0] = uv_buf_init((char*)header, headLen);
- wb[1] = uv_buf_init((char*)pCont, contLen);
-
- connect->data = wb;
- terrno = 0;
- uv_tcp_connect(connect, &socket_tcp, (const struct sockaddr*)&dest, clientConnCb);
- uv_run(loop, UV_RUN_DEFAULT);
- uv_loop_close(loop);
- taosMemoryFree(connect);
- return terrno;
-}
-
-#else
-int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) {
- int32_t code = -1;
- TdSocketPtr pSocket = NULL;
-
+static int32_t taosBuildDstAddr(const char* server, uint16_t port, struct sockaddr_in* dest) {
uint32_t ip = taosGetIpv4FromFqdn(server);
if (ip == 0xffffffff) {
terrno = TAOS_SYSTEM_ERROR(errno);
- uError("failed to get http server:%s ip since %s", server, terrstr());
- goto SEND_OVER;
+ uError("http-report failed to get http server:%s ip since %s", server, terrstr());
+ return -1;
}
-
- pSocket = taosOpenTcpClientSocket(ip, port, 0);
- if (pSocket == NULL) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- uError("failed to create http socket to %s:%u since %s", server, port, terrstr());
- goto SEND_OVER;
+ char buf[128] = {0};
+ tinet_ntoa(buf, ip);
+ uv_ip4_addr(buf, port, dest);
+ return 0;
+}
+int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag) {
+ struct sockaddr_in dest = {0};
+ if (taosBuildDstAddr(server, port, &dest) < 0) {
+ return -1;
}
-
if (flag == HTTP_GZIP) {
int32_t dstLen = taosCompressHttpRport(pCont, contLen);
if (dstLen > 0) {
@@ -200,37 +195,38 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
flag = HTTP_FLAT;
}
}
+ terrno = 0;
- char header[1024] = {0};
+ char header[2048] = {0};
int32_t headLen = taosBuildHttpHeader(server, contLen, header, sizeof(header), flag);
- if (taosWriteMsg(pSocket, header, headLen) < 0) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- uError("failed to send http header to %s:%u since %s", server, port, terrstr());
- goto SEND_OVER;
+
+ uv_buf_t* wb = taosMemoryCalloc(2, sizeof(uv_buf_t));
+ wb[0] = uv_buf_init((char*)header, headLen); // stack var
+ wb[1] = uv_buf_init((char*)pCont, contLen); // heap var
+
+ SHttpClient* cli = taosMemoryCalloc(1, sizeof(SHttpClient));
+ cli->conn.data = cli;
+ cli->tcp.data = cli;
+ cli->req.data = cli;
+ cli->wbuf = wb;
+ cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
+ cli->addr = tstrdup(server);
+ cli->port = port;
+
+ uv_loop_t* loop = uv_default_loop();
+ uv_tcp_init(loop, &cli->tcp);
+ // set up timeout to avoid stuck;
+ int32_t fd = taosCreateSocketWithTimeout(5);
+ uv_tcp_open((uv_tcp_t*)&cli->tcp, fd);
+
+ int32_t ret = uv_tcp_connect(&cli->conn, &cli->tcp, (const struct sockaddr*)&dest, clientConnCb);
+ if (ret != 0) {
+ uError("http-report failed to connect to server, reason:%s, dst:%s:%d", uv_strerror(ret), cli->addr, cli->port);
+ destroyHttpClient(cli);
}
- if (taosWriteMsg(pSocket, (void*)pCont, contLen) < 0) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- uError("failed to send http content to %s:%u since %s", server, port, terrstr());
- goto SEND_OVER;
- }
-
- // read something to avoid nginx error 499
- if (taosWriteMsg(pSocket, header, 10) < 0) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- uError("failed to receive response from %s:%u since %s", server, port, terrstr());
- goto SEND_OVER;
- }
-
- code = 0;
-
-SEND_OVER:
- if (pSocket != NULL) {
- taosCloseSocket(&pSocket);
- }
-
- return code;
+ uv_run(loop, UV_RUN_DEFAULT);
+ uv_loop_close(loop);
+ return terrno;
}
-
// clang-format on
-#endif
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 5428b8acf6..7052b0b915 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -103,14 +103,6 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port);
static void addConnToPool(void* pool, SCliConn* conn);
static void doCloseIdleConn(void* param);
-static int sockDebugInfo(struct sockaddr* sockname, char* dst) {
- struct sockaddr_in addr = *(struct sockaddr_in*)sockname;
-
- char buf[16] = {0};
- int r = uv_ip4_name(&addr, (char*)buf, sizeof(buf));
- sprintf(dst, "%s:%d", buf, ntohs(addr.sin_port));
- return r;
-}
// register timer for read
static void cliReadTimeoutCb(uv_timer_t* handle);
// register timer in each thread to clear expire conn
@@ -121,7 +113,7 @@ static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_
static void cliRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
// callback after write data to socket
static void cliSendCb(uv_write_t* req, int status);
-// callback after conn to server
+// callback after conn to server
static void cliConnCb(uv_connect_t* req, int status);
static void cliAsyncCb(uv_async_t* handle);
static void cliIdleCb(uv_idle_t* handle);
@@ -348,7 +340,6 @@ void cliHandleResp(SCliConn* conn) {
SCliMsg* pMsg = NULL;
STransConnCtx* pCtx = NULL;
-
if (CONN_NO_PERSIST_BY_APP(conn)) {
pMsg = transQueuePop(&conn->cliMsgs);
@@ -369,7 +360,7 @@ void cliHandleResp(SCliConn* conn) {
transMsg.info.ahandle);
}
} else {
- pCtx = pMsg ? pMsg->ctx : NULL;
+ pCtx = pMsg->ctx;
transMsg.info.ahandle = pCtx ? pCtx->ahandle : NULL;
tDebug("%s conn %p get ahandle %p, persist: 1", CONN_GET_INST_LABEL(conn), conn, transMsg.info.ahandle);
}
@@ -381,7 +372,6 @@ void cliHandleResp(SCliConn* conn) {
}
STraceId* trace = &transMsg.info.traceId;
-
tGDebug("%s conn %p %s received from %s, local info:%s, len:%d, code str:%s", CONN_GET_INST_LABEL(conn), conn,
TMSG_INFO(pHead->msgType), conn->dst, conn->src, transMsg.contLen, tstrerror(transMsg.code));
@@ -822,11 +812,11 @@ void cliConnCb(uv_connect_t* req, int status) {
int addrlen = sizeof(peername);
uv_tcp_getpeername((uv_tcp_t*)pConn->stream, &peername, &addrlen);
- transGetSockDebugInfo(&peername, pConn->dst);
+ transSockInfo2Str(&peername, pConn->dst);
addrlen = sizeof(sockname);
uv_tcp_getsockname((uv_tcp_t*)pConn->stream, &sockname, &addrlen);
- transGetSockDebugInfo(&sockname, pConn->src);
+ transSockInfo2Str(&sockname, pConn->src);
tTrace("%s conn %p connect to server successfully", CONN_GET_INST_LABEL(pConn), pConn);
assert(pConn->stream == req->handle);
@@ -1414,7 +1404,7 @@ int transReleaseCliHandle(void* handle) {
}
STransMsg tmsg = {.info.handle = handle};
- // TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64());
+ TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64());
SCliMsg* cmsg = taosMemoryCalloc(1, sizeof(SCliMsg));
cmsg->msg = tmsg;
@@ -1442,7 +1432,7 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran
if (pThrd == NULL && valid == false) {
transFreeMsg(pReq->pCont);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
- return -1;
+ return TSDB_CODE_RPC_BROKEN_LINK;
}
TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64());
@@ -1487,7 +1477,7 @@ int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMs
if (pThrd == NULL && valid == false) {
transFreeMsg(pReq->pCont);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
- return -1;
+ return TSDB_CODE_RPC_BROKEN_LINK;
}
tsem_t* sem = taosMemoryCalloc(1, sizeof(tsem_t));
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index c50d0d3e5c..a4d679b281 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -77,7 +77,7 @@ void transFreeMsg(void* msg) {
}
taosMemoryFree((char*)msg - sizeof(STransMsgHead));
}
-int transGetSockDebugInfo(struct sockaddr* sockname, char* dst) {
+int transSockInfo2Str(struct sockaddr* sockname, char* dst) {
struct sockaddr_in addr = *(struct sockaddr_in*)sockname;
char buf[20] = {0};
@@ -115,7 +115,8 @@ int transClearBuffer(SConnBuffer* buf) {
int transDumpFromBuffer(SConnBuffer* connBuf, char** buf) {
static const int HEADSIZE = sizeof(STransMsgHead);
- SConnBuffer* p = connBuf;
+
+ SConnBuffer* p = connBuf;
if (p->left != 0) {
return -1;
}
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 68b911f553..447db76136 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -125,7 +125,7 @@ static void uvWorkAfterTask(uv_work_t* req, int status);
static void uvWalkCb(uv_handle_t* handle, void* arg);
static void uvFreeCb(uv_handle_t* handle);
-static void uvStartSendRespInternal(SSvrMsg* smsg);
+static void uvStartSendRespImpl(SSvrMsg* smsg);
static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
static void uvStartSendResp(SSvrMsg* msg);
@@ -265,26 +265,25 @@ static bool uvHandleReq(SSvrConn* pConn) {
}
void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
- // opt
- SSvrConn* conn = cli->data;
+ SSvrConn* conn = cli->data;
+ STrans* pTransInst = conn->pTransInst;
+
SConnBuffer* pBuf = &conn->readBuf;
- STrans* pTransInst = conn->pTransInst;
if (nread > 0) {
pBuf->len += nread;
tTrace("%s conn %p total read:%d, current read:%d", transLabel(pTransInst), conn, pBuf->len, (int)nread);
if (pBuf->len <= TRANS_PACKET_LIMIT) {
while (transReadComplete(pBuf)) {
tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn);
- if (pBuf->invalid) {
- tTrace("%s conn %p alread read invalid packet", transLabel(pTransInst), conn);
+ if (true == pBuf->invalid || false == uvHandleReq(conn)) {
+ tError("%s conn %p read invalid packet", transLabel(pTransInst), conn);
destroyConn(conn, true);
return;
- } else {
- if (false == uvHandleReq(conn)) break;
}
}
return;
} else {
+ tError("%s conn %p read invalid packet, exceed limit", transLabel(pTransInst), conn);
destroyConn(conn, true);
return;
}
@@ -344,10 +343,10 @@ void uvOnSendCb(uv_write_t* req, int status) {
msg = (SSvrMsg*)transQueueGet(&conn->srvMsgs, 0);
if (msg != NULL) {
- uvStartSendRespInternal(msg);
+ uvStartSendRespImpl(msg);
}
} else {
- uvStartSendRespInternal(msg);
+ uvStartSendRespImpl(msg);
}
}
}
@@ -412,7 +411,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
wb->len = len;
}
-static void uvStartSendRespInternal(SSvrMsg* smsg) {
+static void uvStartSendRespImpl(SSvrMsg* smsg) {
SSvrConn* pConn = smsg->pConn;
if (pConn->broken) {
return;
@@ -442,7 +441,7 @@ static void uvStartSendResp(SSvrMsg* smsg) {
if (!transQueuePush(&pConn->srvMsgs, smsg)) {
return;
}
- uvStartSendRespInternal(smsg);
+ uvStartSendRespImpl(smsg);
return;
}
@@ -533,6 +532,35 @@ static void uvShutDownCb(uv_shutdown_t* req, int status) {
uv_close((uv_handle_t*)req->handle, uvDestroyConn);
taosMemoryFree(req);
}
+static bool uvRecvReleaseReq(SSvrConn* pConn, STransMsgHead* pHead) {
+ if ((pHead)->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
+ reallocConnRef(pConn);
+ tTrace("conn %p received release request", pConn);
+
+ STraceId traceId = pHead->traceId;
+ pConn->status = ConnRelease;
+ transClearBuffer(&pConn->readBuf);
+ transFreeMsg(transContFromHead((char*)pHead));
+
+ STransMsg tmsg = {.code = 0, .info.handle = (void*)pConn, .info.traceId = traceId, .info.ahandle = (void*)0x9527};
+ SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg));
+ srvMsg->msg = tmsg;
+ srvMsg->type = Release;
+ srvMsg->pConn = pConn;
+ if (!transQueuePush(&pConn->srvMsgs, srvMsg)) {
+ return true;
+ }
+ if (pConn->regArg.init) {
+ tTrace("conn %p release, notify server app", pConn);
+ STrans* pTransInst = pConn->pTransInst;
+ (*pTransInst->cfp)(pTransInst->parent, &(pConn->regArg.msg), NULL);
+ memset(&pConn->regArg, 0, sizeof(pConn->regArg));
+ }
+ uvStartSendRespImpl(srvMsg);
+ return true;
+ }
+ return false;
+}
static void uvPrepareCb(uv_prepare_t* handle) {
// prepare callback
SWorkThrd* pThrd = handle->data;
@@ -580,36 +608,6 @@ static void uvPrepareCb(uv_prepare_t* handle) {
}
}
-static bool uvRecvReleaseReq(SSvrConn* pConn, STransMsgHead* pHead) {
- if ((pHead)->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
- reallocConnRef(pConn);
- tTrace("conn %p received release request", pConn);
-
- STraceId traceId = pHead->traceId;
- pConn->status = ConnRelease;
- transClearBuffer(&pConn->readBuf);
- transFreeMsg(transContFromHead((char*)pHead));
-
- STransMsg tmsg = {.code = 0, .info.handle = (void*)pConn, .info.traceId = traceId, .info.ahandle = (void*)0x9527};
- SSvrMsg* srvMsg = taosMemoryCalloc(1, sizeof(SSvrMsg));
- srvMsg->msg = tmsg;
- srvMsg->type = Release;
- srvMsg->pConn = pConn;
- if (!transQueuePush(&pConn->srvMsgs, srvMsg)) {
- return true;
- }
- if (pConn->regArg.init) {
- tTrace("conn %p release, notify server app", pConn);
- STrans* pTransInst = pConn->pTransInst;
- (*pTransInst->cfp)(pTransInst->parent, &(pConn->regArg.msg), NULL);
- memset(&pConn->regArg, 0, sizeof(pConn->regArg));
- }
- uvStartSendRespInternal(srvMsg);
- return true;
- }
- return false;
-}
-
static void uvWorkDoTask(uv_work_t* req) {
// doing time-consumeing task
// only auth conn currently, add more func later
@@ -715,7 +713,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
transUnrefSrvHandle(pConn);
return;
}
- transGetSockDebugInfo(&peername, pConn->dst);
+ transSockInfo2Str(&peername, pConn->dst);
addrlen = sizeof(sockname);
if (0 != uv_tcp_getsockname(pConn->pTcp, (struct sockaddr*)&sockname, &addrlen)) {
@@ -723,7 +721,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
transUnrefSrvHandle(pConn);
return;
}
- transGetSockDebugInfo(&sockname, pConn->src);
+ transSockInfo2Str(&sockname, pConn->src);
struct sockaddr_in addr = *(struct sockaddr_in*)&sockname;
pConn->clientIp = addr.sin_addr.s_addr;
@@ -1011,7 +1009,7 @@ void uvHandleRelease(SSvrMsg* msg, SWorkThrd* thrd) {
if (!transQueuePush(&conn->srvMsgs, msg)) {
return;
}
- uvStartSendRespInternal(msg);
+ uvStartSendRespImpl(msg);
return;
} else if (conn->status == ConnRelease || conn->status == ConnNormal) {
tDebug("%s conn %p already released, ignore release-msg", transLabel(thrd->pTransInst), conn);
diff --git a/source/os/src/osRand.c b/source/os/src/osRand.c
index 461a72e962..bd2bfa486e 100644
--- a/source/os/src/osRand.c
+++ b/source/os/src/osRand.c
@@ -37,9 +37,13 @@ uint32_t taosRandR(uint32_t *pSeed) {
uint32_t taosSafeRand(void) {
#ifdef WINDOWS
- uint32_t seed;
+ uint32_t seed = taosRand();
HCRYPTPROV hCryptProv;
- if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 0)) return seed;
+ if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, 0)) {
+ if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_NEWKEYSET)) {
+ return seed;
+ }
+ }
if (hCryptProv != NULL) {
if (!CryptGenRandom(hCryptProv, 4, &seed)) return seed;
}
diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c
index 3c4a0a20bd..454739348e 100644
--- a/source/util/src/tarray.c
+++ b/source/util/src/tarray.c
@@ -386,6 +386,7 @@ void* taosArrayDestroy(SArray* pArray) {
}
void taosArrayDestroyP(SArray* pArray, FDelete fp) {
+ if(!pArray) return;
for (int32_t i = 0; i < pArray->size; i++) {
fp(*(void**)TARRAY_GET_ELEM(pArray, i));
}
diff --git a/source/util/src/tconfig.c b/source/util/src/tconfig.c
index fdb397561d..2a28ec66d2 100644
--- a/source/util/src/tconfig.c
+++ b/source/util/src/tconfig.c
@@ -33,6 +33,8 @@ int32_t cfgLoadFromEnvCmd(SConfig *pConfig, const char **envCmd);
int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url);
int32_t cfgSetItem(SConfig *pConfig, const char *name, const char *value, ECfgSrcType stype);
+extern char **environ;
+
SConfig *cfgInit() {
SConfig *pCfg = taosMemoryCalloc(1, sizeof(SConfig));
if (pCfg == NULL) {
@@ -627,24 +629,17 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) {
}
int32_t cfgLoadFromEnvVar(SConfig *pConfig) {
- char *line = NULL, *name, *value, *value2, *value3;
+ char line[1024], *name, *value, *value2, *value3;
int32_t olen, vlen, vlen2, vlen3;
int32_t code = 0;
- ssize_t _bytes = 0;
- TdCmdPtr pCmd = taosOpenCmd("set");
- if (pCmd == NULL) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- return -1;
- }
- while (!taosEOFCmd(pCmd)) {
+ char **pEnv = environ;
+ line[1023] = 0;
+ while(*pEnv != NULL) {
name = value = value2 = value3 = NULL;
olen = vlen = vlen2 = vlen3 = 0;
- _bytes = taosGetLineCmd(pCmd, &line);
- if (_bytes < 0) {
- break;
- }
- if(line[_bytes - 1] == '\n') line[_bytes - 1] = 0;
+ strncpy(line, *pEnv, sizeof(line)-1);
+ pEnv++;
taosEnvToCfg(line, line);
paGetToken(line, &name, &olen);
@@ -671,9 +666,6 @@ int32_t cfgLoadFromEnvVar(SConfig *pConfig) {
}
}
- taosCloseCmd(&pCmd);
- if (line != NULL) taosMemoryFreeClear(line);
-
uInfo("load from env variables cfg success");
return 0;
}
@@ -1040,34 +1032,25 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl
index++;
}
- char *line = NULL;
- ssize_t _bytes = 0;
- TdCmdPtr pCmd = taosOpenCmd("set");
- if (pCmd != NULL) {
- while (!taosEOFCmd(pCmd)) {
- _bytes = taosGetLineCmd(pCmd, &line);
- if (_bytes < 0) {
- break;
- }
- if(line[_bytes - 1] == '\n') line[_bytes - 1] = 0;
- if (strncmp(line, "TAOS_APOLLO_URL", 14) == 0) {
- char *p = strchr(line, '=');
- if (p != NULL) {
+ char line[1024];
+ char **pEnv = environ;
+ line[1023] = 0;
+ while(*pEnv != NULL) {
+ strncpy(line, *pEnv, sizeof(line)-1);
+ pEnv++;
+ if (strncmp(line, "TAOS_APOLLO_URL", 14) == 0) {
+ char *p = strchr(line, '=');
+ if (p != NULL) {
+ p++;
+ if (*p == '\'') {
p++;
- if (*p == '\'') {
- p++;
- p[strlen(p)-1] = '\0';
- }
- memcpy(apolloUrl, p, TMIN(strlen(p)+1,PATH_MAX));
- uInfo("get apollo url from env variables success, apolloUrl=%s",apolloUrl);
- taosCloseCmd(&pCmd);
- if (line != NULL) taosMemoryFreeClear(line);
- return 0;
+ p[strlen(p)-1] = '\0';
}
+ memcpy(apolloUrl, p, TMIN(strlen(p)+1,PATH_MAX));
+ uInfo("get apollo url from env variables success, apolloUrl=%s",apolloUrl);
+ return 0;
}
}
- taosCloseCmd(&pCmd);
- if (line != NULL) taosMemoryFreeClear(line);
}
const char *filepath = ".env";
@@ -1083,10 +1066,11 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl
return 0;
}
}
+ int64_t _bytes;
TdFilePtr pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_STREAM);
if (pFile != NULL) {
while (!taosEOFFile(pFile)) {
- _bytes = taosGetLineFile(pFile, &line);
+ _bytes = taosGetsFile(pFile, sizeof(line) - 1, line);
if (_bytes <= 0) {
break;
}
@@ -1101,14 +1085,12 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl
}
memcpy(apolloUrl, p, TMIN(strlen(p)+1,PATH_MAX));
taosCloseFile(&pFile);
- if (line != NULL) taosMemoryFreeClear(line);
uInfo("get apollo url from env file success");
return 0;
}
}
}
taosCloseFile(&pFile);
- if (line != NULL) taosMemoryFreeClear(line);
}
uInfo("fail get apollo url from cmd env file");
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 6e3067d44e..662a3f0c88 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -293,6 +293,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED, "Consumer group being
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_NOT_EXIST, "Stream not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_OPTION, "Invalid stream option")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_MUST_BE_DELETED, "Stream must be dropped first")
// mnode-sma
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SMA_ALREADY_EXIST, "SMA already exists")
@@ -616,6 +617,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FILE_CORRUPTED, "Rsma file corrupted
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REMOVE_EXISTS, "Rsma remove exists")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP, "Rsma fetch msg is messed up")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_EMPTY_INFO, "Rsma info is empty")
+TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_SCHEMA, "Rsma invalid schema")
//index
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
@@ -624,6 +626,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file"
//tmq
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch")
+TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed")
#ifdef TAOS_ERROR_C
};
diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c
index 50beba8a9b..eb70002680 100644
--- a/source/util/src/tqueue.c
+++ b/source/util/src/tqueue.c
@@ -298,7 +298,8 @@ int32_t taosGetQitem(STaosQall *qall, void **ppItem) {
return num;
}
-void taosResetQitems(STaosQall *qall) { qall->current = qall->start; }
+void taosResetQitems(STaosQall *qall) { qall->current = qall->start; }
+int32_t taosQallItemSize(STaosQall *qall) { return qall->numOfItems; }
STaosQset *taosOpenQset() {
STaosQset *qset = taosMemoryCalloc(sizeof(STaosQset), 1);
diff --git a/source/util/src/trbtree.c b/source/util/src/trbtree.c
new file mode 100644
index 0000000000..0970485dad
--- /dev/null
+++ b/source/util/src/trbtree.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "os.h"
+
+typedef int32_t (*tRBTreeCmprFn)(void *, void *);
+
+typedef struct SRBTree SRBTree;
+typedef struct SRBTreeNode SRBTreeNode;
+typedef struct SRBTreeIter SRBTreeIter;
+
+struct SRBTreeNode {
+ enum { RED, BLACK } color;
+ SRBTreeNode *parent;
+ SRBTreeNode *left;
+ SRBTreeNode *right;
+ uint8_t payload[];
+};
+
+struct SRBTree {
+ tRBTreeCmprFn cmprFn;
+ SRBTreeNode *root;
+};
+
+struct SRBTreeIter {
+ SRBTree *pTree;
+};
+
+#define RBTREE_NODE_COLOR(N) ((N) ? (N)->color : BLACK)
+
+// APIs ================================================
+static void tRBTreeRotateLeft(SRBTree *pTree, SRBTreeNode *pNode) {
+ SRBTreeNode *right = pNode->right;
+
+ pNode->right = right->left;
+ if (pNode->right) {
+ pNode->right->parent = pNode;
+ }
+
+ right->parent = pNode->parent;
+ if (pNode->parent == NULL) {
+ pTree->root = right;
+ } else if (pNode == pNode->parent->left) {
+ pNode->parent->left = right;
+ } else {
+ pNode->parent->right = right;
+ }
+
+ right->left = pNode;
+ pNode->parent = right;
+}
+
+static void tRBTreeRotateRight(SRBTree *pTree, SRBTreeNode *pNode) {
+ SRBTreeNode *left = pNode->left;
+
+ pNode->left = left->right;
+ if (pNode->left) {
+ pNode->left->parent = pNode;
+ }
+
+ left->parent = pNode->parent;
+ if (pNode->parent == NULL) {
+ pTree->root = left;
+ } else if (pNode == pNode->parent->left) {
+ pNode->parent->left = left;
+ } else {
+ pNode->parent->right = left;
+ }
+
+ left->right = pNode;
+ pNode->parent = left;
+}
+
+#define tRBTreeCreate(compare) \
+ (SRBTree) { .cmprFn = (compare), .root = NULL }
+
+SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *pNew) {
+ pNew->left = NULL;
+ pNew->right = NULL;
+ pNew->color = RED;
+
+ // insert
+ if (pTree->root == NULL) {
+ pNew->parent = NULL;
+ pTree->root = pNew;
+ } else {
+ SRBTreeNode *pNode = pTree->root;
+ while (true) {
+ ASSERT(pNode);
+
+ int32_t c = pTree->cmprFn(pNew->payload, pNode->payload);
+ if (c < 0) {
+ if (pNode->left) {
+ pNode = pNode->left;
+ } else {
+ pNew->parent = pNode;
+ pNode->left = pNew;
+ break;
+ }
+ } else if (c > 0) {
+ if (pNode->right) {
+ pNode = pNode->right;
+ } else {
+ pNew->parent = pNode;
+ pNode->right = pNew;
+ break;
+ }
+ } else {
+ return NULL;
+ }
+ }
+ }
+
+ // fix
+ SRBTreeNode *pNode = pNew;
+ while (pNode->parent && pNode->parent->color == RED) {
+ SRBTreeNode *p = pNode->parent;
+ SRBTreeNode *g = p->parent;
+
+ if (p == g->left) {
+ SRBTreeNode *u = g->right;
+
+ if (RBTREE_NODE_COLOR(u) == RED) {
+ p->color = BLACK;
+ u->color = BLACK;
+ g->color = RED;
+ pNode = g;
+ } else {
+ if (pNode == p->right) {
+ pNode = p;
+ tRBTreeRotateLeft(pTree, pNode);
+ }
+ pNode->parent->color = BLACK;
+ pNode->parent->parent->color = RED;
+ tRBTreeRotateRight(pTree, pNode->parent->parent);
+ }
+ } else {
+ SRBTreeNode *u = g->left;
+
+ if (RBTREE_NODE_COLOR(u) == RED) {
+ p->color = BLACK;
+ u->color = BLACK;
+ g->color = RED;
+ } else {
+ if (pNode == p->left) {
+ pNode = p;
+ tRBTreeRotateRight(pTree, pNode);
+ }
+ pNode->parent->color = BLACK;
+ pNode->parent->parent->color = RED;
+ tRBTreeRotateLeft(pTree, pNode->parent->parent);
+ }
+ }
+ }
+
+ pTree->root->color = BLACK;
+ return pNew;
+}
+
+SRBTreeNode *tRBTreeDrop(SRBTree *pTree, void *pKey) {
+ SRBTreeNode *pNode = pTree->root;
+
+ while (pNode) {
+ int32_t c = pTree->cmprFn(pKey, pNode->payload);
+
+ if (c < 0) {
+ pNode = pNode->left;
+ } else if (c > 0) {
+ pNode = pNode->right;
+ } else {
+ break;
+ }
+ }
+
+ if (pNode) {
+ // TODO
+ }
+
+ return pNode;
+}
+
+SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey) {
+ SRBTreeNode *pNode = pTree->root;
+
+ while (pNode) {
+ int32_t c = pTree->cmprFn(pKey, pNode->payload);
+
+ if (c < 0) {
+ pNode = pNode->left;
+ } else if (c > 0) {
+ pNode = pNode->right;
+ } else {
+ break;
+ }
+ }
+
+ return pNode;
+}
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index 203541f14a..600c64b8e6 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -1120,7 +1120,7 @@ class Database:
@classmethod
def setupLastTick(cls):
# start time will be auto generated , start at 10 years ago local time
- local_time = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16]
+ local_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16]
local_epoch_time = [int(i) for i in local_time.split("-")]
#local_epoch_time will be such as : [2022, 7, 18]
diff --git a/tests/pytest/crash_gen/shared/misc.py b/tests/pytest/crash_gen/shared/misc.py
index fd73f97fcb..6a8a59a027 100644
--- a/tests/pytest/crash_gen/shared/misc.py
+++ b/tests/pytest/crash_gen/shared/misc.py
@@ -46,7 +46,7 @@ class Logging:
@classmethod
def _get_datetime(cls):
- return datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1]
+ return datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1]
@classmethod
def getLogger(cls):
diff --git a/tests/pytest/util/taosadapter.py b/tests/pytest/util/taosadapter.py
index 614eb95d6b..0307675dfb 100644
--- a/tests/pytest/util/taosadapter.py
+++ b/tests/pytest/util/taosadapter.py
@@ -238,19 +238,23 @@ class TAdapter:
if self.running != 0:
psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
+ # psCmd = f"pgrep {toBeKilled}"
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True)
while(processID):
- killCmd = f"kill {signal} {processID} > /dev/null 2>&1"
+ killCmd = f"pkill {signal} {processID} > /dev/null 2>&1"
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
if not platform.system().lower() == 'windows':
- for port in range(6030, 6041):
- fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
- os.system(fuserCmd)
+ port = 6041
+ fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
+ os.system(fuserCmd)
+ # for port in range(6030, 6041):
+ # fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
+ # os.system(fuserCmd)
self.running = 0
tdLog.debug(f"taosadapter is stopped by kill {signal}")
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index fda5e5cb6e..97295d75e0 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -237,8 +237,8 @@
./test.sh -f tsim/stream/distributeInterval0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
./test.sh -f tsim/stream/distributeSession0.sim
-#./test.sh -f tsim/stream/session0.sim
-#./test.sh -f tsim/stream/session1.sim
+./test.sh -f tsim/stream/session0.sim
+./test.sh -f tsim/stream/session1.sim
./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/triggerInterval0.sim
./test.sh -f tsim/stream/triggerSession0.sim
diff --git a/tests/script/tsim/alter/cached_schema_after_alter.sim b/tests/script/tsim/alter/cached_schema_after_alter.sim
index bd2b1d272c..30b879b612 100644
--- a/tests/script/tsim/alter/cached_schema_after_alter.sim
+++ b/tests/script/tsim/alter/cached_schema_after_alter.sim
@@ -14,7 +14,7 @@ print ========== cached_schema_after_alter.sim
sql drop database $db -x step1
step1:
-sql create database $db
+sql create database $db
print ====== create tables
sql use $db
@@ -32,10 +32,10 @@ if $rows != 1 then
endi
if $data01 != 1 then
return -1
-endi
+endi
if $data02 != 1 then
return -1
-endi
+endi
sql select * from $tb2
if $rows != 1 then
@@ -43,10 +43,10 @@ if $rows != 1 then
endi
if $data01 != 1 then
return -1
-endi
+endi
if $data02 != 1 then
return -1
-endi
+endi
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -61,10 +61,10 @@ if $rows != 1 then
endi
if $data01 != 1 then
return -1
-endi
+endi
if $data02 != 1 then
return -1
-endi
+endi
sql select * from $tb2
print select * from $tb2 ==> $data00 $data01 $data02
@@ -73,10 +73,10 @@ if $rows != 1 then
endi
if $data01 != 1 then
return -1
-endi
+endi
if $data02 != 1 then
return -1
-endi
+endi
$ts = $ts0 + $delta
sql insert into $tb2 values ( $ts , 2, 2)
@@ -86,16 +86,16 @@ if $rows != 2 then
endi
if $data01 != 1 then
return -1
-endi
+endi
if $data02 != 1 then
return -1
-endi
+endi
if $data11 != 2 then
return -1
-endi
+endi
if $data12 != 2 then
return -1
-endi
+endi
sql select * from $tb2 order by ts asc
if $rows != 2 then
@@ -103,15 +103,15 @@ if $rows != 2 then
endi
if $data01 != 1 then
return -1
-endi
+endi
if $data02 != 1 then
return -1
-endi
+endi
if $data11 != 2 then
return -1
-endi
+endi
if $data12 != 2 then
return -1
-endi
+endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/alter/dnode.sim b/tests/script/tsim/alter/dnode.sim
index d773c1f8a9..be3c385d45 100644
--- a/tests/script/tsim/alter/dnode.sim
+++ b/tests/script/tsim/alter/dnode.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql alter dnode 1 'resetlog'
sql alter dnode 1 'monitor' '1'
sql alter dnode 1 'monitor' '0'
@@ -65,4 +65,4 @@ sql alter dnode 1 balance "vnode:2-dnode:1" -x step4
step4:
print ======= over
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/alter/table.sim b/tests/script/tsim/alter/table.sim
index 48ab7ddab0..dccfc7f5d6 100644
--- a/tests/script/tsim/alter/table.sim
+++ b/tests/script/tsim/alter/table.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d1
sql use d1
sql create table tb (ts timestamp, a int)
diff --git a/tests/script/tsim/bnode/basic1.sim b/tests/script/tsim/bnode/basic1.sim
index 003d0ceb3d..0a20001636 100644
--- a/tests/script/tsim/bnode/basic1.sim
+++ b/tests/script/tsim/bnode/basic1.sim
@@ -7,24 +7,24 @@ sql connect
print =============== select * from information_schema.ins_dnodes
sql select * from information_schema.ins_dnodes;
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != 1 then
+if $data00 != 1 then
return -1
endi
sql select * from information_schema.ins_mnodes;
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != 1 then
+if $data00 != 1 then
return -1
endi
-if $data02 != leader then
+if $data02 != leader then
return -1
endi
@@ -33,62 +33,62 @@ sql create dnode $hostname port 7200
sleep 2000
sql select * from information_schema.ins_dnodes;
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
-if $data00 != 1 then
+if $data00 != 1 then
return -1
endi
-if $data10 != 2 then
+if $data10 != 2 then
return -1
endi
print $data02
-if $data02 != 0 then
+if $data02 != 0 then
return -1
endi
-if $data12 != 0 then
+if $data12 != 0 then
return -1
endi
-if $data04 != ready then
+if $data04 != ready then
return -1
endi
-if $data14 != ready then
+if $data14 != ready then
return -1
endi
sql select * from information_schema.ins_mnodes;
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != 1 then
+if $data00 != 1 then
return -1
endi
-if $data02 != leader then
+if $data02 != leader then
return -1
endi
#print =============== create drop bnode 1
#sql create bnode on dnode 1
#sql show bnodes
-#if $rows != 1 then
+#if $rows != 1 then
# return -1
#endi
-#if $data00 != 1 then
+#if $data00 != 1 then
# return -1
#endi
#sql_error create bnode on dnode 1
#
#sql drop bnode on dnode 1
#sql show bnodes
-#if $rows != 0 then
+#if $rows != 0 then
# return -1
#endi
#sql_error drop bnode on dnode 1
@@ -96,17 +96,17 @@ endi
#print =============== create drop bnode 2
#sql create bnode on dnode 2
#sql show bnodes
-#if $rows != 1 then
+#if $rows != 1 then
# return -1
#endi
-#if $data00 != 2 then
+#if $data00 != 2 then
# return -1
#endi
#sql_error create bnode on dnode 2
#
#sql drop bnode on dnode 2
#sql show bnodes
-#if $rows != 0 then
+#if $rows != 0 then
# return -1
#endi
#sql_error drop bnode on dnode 2
@@ -115,7 +115,7 @@ endi
#sql create bnode on dnode 1
#sql create bnode on dnode 2
#sql show bnodes
-#if $rows != 2 then
+#if $rows != 2 then
# return -1
#endi
@@ -127,7 +127,7 @@ endi
#
#sleep 2000
#sql show bnodes
-#if $rows != 2 then
+#if $rows != 2 then
# return -1
#endi
diff --git a/tests/script/tsim/compress/commitlog.sim b/tests/script/tsim/compress/commitlog.sim
index bc9c231a9e..38899b95ba 100644
--- a/tests/script/tsim/compress/commitlog.sim
+++ b/tests/script/tsim/compress/commitlog.sim
@@ -25,7 +25,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -46,7 +46,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -67,7 +67,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -83,7 +83,7 @@ $tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -93,18 +93,18 @@ $tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
endi
-
+
$i = 2
$db = $dbPrefix . $i
$tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/compress/compress.sim b/tests/script/tsim/compress/compress.sim
index 766f97450c..4752f1ba50 100644
--- a/tests/script/tsim/compress/compress.sim
+++ b/tests/script/tsim/compress/compress.sim
@@ -25,7 +25,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -47,7 +47,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -69,7 +69,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -85,7 +85,7 @@ $tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -95,18 +95,18 @@ $tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
endi
-
+
$i = 2
$db = $dbPrefix . $i
$tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/compress/compress2.sim b/tests/script/tsim/compress/compress2.sim
index 87e50cce5b..c55b74f246 100644
--- a/tests/script/tsim/compress/compress2.sim
+++ b/tests/script/tsim/compress/compress2.sim
@@ -26,7 +26,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -48,7 +48,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -70,7 +70,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -86,7 +86,7 @@ $tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -96,18 +96,18 @@ $tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
endi
-
+
$i = 2
$db = $dbPrefix . $i
$tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/compress/uncompress.sim b/tests/script/tsim/compress/uncompress.sim
index ccd5db4b0c..f48fc6da23 100644
--- a/tests/script/tsim/compress/uncompress.sim
+++ b/tests/script/tsim/compress/uncompress.sim
@@ -26,7 +26,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -48,7 +48,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -70,7 +70,7 @@ while $count < $N
endw
sql select * from $tb
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -85,7 +85,7 @@ $tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
endi
@@ -95,18 +95,18 @@ $tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
endi
-
+
$i = 2
$db = $dbPrefix . $i
$tb = $tbPrefix . $i
sql use $db
sql select * from $tb
print select * from $tb ==> $rows points
-if $rows != $N then
+if $rows != $N then
return -1
-endi
-
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/alter_option.sim b/tests/script/tsim/db/alter_option.sim
index d81671eebd..3d260235f2 100644
--- a/tests/script/tsim/db/alter_option.sim
+++ b/tests/script/tsim/db/alter_option.sim
@@ -186,13 +186,13 @@ sql_error alter database db replica 0
#sql alter database db replica 1
#sql select * from information_schema.ins_databases
#print replica: $data4_db
-#if $data4_db != 1 then
+#if $data4_db != 1 then
# return -1
#endi
#sql alter database db replica 3
#sql select * from information_schema.ins_databases
#print replica: $data4_db
-#if $data4_db != 3 then
+#if $data4_db != 3 then
# return -1
#endi
@@ -200,13 +200,13 @@ sql_error alter database db replica 0
#sql alter database db quorum 2
#sql select * from information_schema.ins_databases
#print quorum $data5_db
-#if $data5_db != 2 then
+#if $data5_db != 2 then
# return -1
#endi
#sql alter database db quorum 1
#sql select * from information_schema.ins_databases
#print quorum $data5_db
-#if $data5_db != 1 then
+#if $data5_db != 1 then
# return -1
#endi
@@ -233,7 +233,7 @@ endi
#sql alter database db keep 1000,2000
#sql select * from information_schema.ins_databases
#print keep $data7_db
-#if $data7_db != 500,500,500 then
+#if $data7_db != 500,500,500 then
# return -1
#endi
@@ -263,13 +263,13 @@ sql_error alter database db keep -1
#sql alter database db blocks 3
#sql select * from information_schema.ins_databases
#print blocks $data9_db
-#if $data9_db != 3 then
+#if $data9_db != 3 then
# return -1
#endi
#sql alter database db blocks 11
#sql select * from information_schema.ins_databases
#print blocks $data9_db
-#if $data9_db != 11 then
+#if $data9_db != 11 then
# return -1
#endi
@@ -300,13 +300,13 @@ print ============== step wal_level
sql alter database db wal_level 1
sql select * from information_schema.ins_databases
print wal_level $data20_db
-if $data20_db != 1 then
+if $data20_db != 1 then
return -1
endi
sql alter database db wal_level 2
sql select * from information_schema.ins_databases
print wal_level $data20_db
-if $data20_db != 2 then
+if $data20_db != 2 then
return -1
endi
@@ -319,19 +319,19 @@ print ============== modify wal_fsync_period
sql alter database db wal_fsync_period 2000
sql select * from information_schema.ins_databases
print wal_fsync_period $data21_db
-if $data21_db != 2000 then
+if $data21_db != 2000 then
return -1
endi
sql alter database db wal_fsync_period 500
sql select * from information_schema.ins_databases
print wal_fsync_period $data21_db
-if $data21_db != 500 then
+if $data21_db != 500 then
return -1
endi
sql alter database db wal_fsync_period 0
sql select * from information_schema.ins_databases
print wal_fsync_period $data21_db
-if $data21_db != 0 then
+if $data21_db != 0 then
return -1
endi
sql_error alter database db wal_fsync_period 180001
@@ -351,31 +351,31 @@ print ============== modify cachelast [0, 1, 2, 3]
sql alter database db cachemodel 'last_value'
sql select * from information_schema.ins_databases
print cachelast $data18_db
-if $data18_db != last_value then
+if $data18_db != last_value then
return -1
endi
sql alter database db cachemodel 'last_row'
sql select * from information_schema.ins_databases
print cachelast $data18_db
-if $data18_db != last_row then
+if $data18_db != last_row then
return -1
endi
sql alter database db cachemodel 'none'
sql select * from information_schema.ins_databases
print cachelast $data18_db
-if $data18_db != none then
+if $data18_db != none then
return -1
endi
sql alter database db cachemodel 'last_value'
sql select * from information_schema.ins_databases
print cachelast $data18_db
-if $data18_db != last_value then
+if $data18_db != last_value then
return -1
endi
sql alter database db cachemodel 'both'
sql select * from information_schema.ins_databases
print cachelast $data18_db
-if $data18_db != both then
+if $data18_db != both then
return -1
endi
diff --git a/tests/script/tsim/db/alter_replica_13.sim b/tests/script/tsim/db/alter_replica_13.sim
index d232c9bcd3..1d06d3abb9 100644
--- a/tests/script/tsim/db/alter_replica_13.sim
+++ b/tests/script/tsim/db/alter_replica_13.sim
@@ -36,10 +36,10 @@ endi
print =============== step2: create database
sql create database db vgroups 1
sql select * from information_schema.ins_databases
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
-if $data(db)[4] != 1 then
+if $data(db)[4] != 1 then
return -1
endi
@@ -82,7 +82,7 @@ step3:
return -1
endi
sql select * from information_schema.ins_dnodes
-print ===> rows: $rows
+print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
@@ -115,7 +115,7 @@ step4:
return -1
endi
sql show db.vgroups
-print ===> rows: $rows
+print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
if $data[0][4] != leader then
goto step4
@@ -137,4 +137,4 @@ endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/alter_replica_31.sim b/tests/script/tsim/db/alter_replica_31.sim
index 17ab040520..4ab6783d07 100644
--- a/tests/script/tsim/db/alter_replica_31.sim
+++ b/tests/script/tsim/db/alter_replica_31.sim
@@ -23,7 +23,7 @@ step1:
return -1
endi
sql select * from information_schema.ins_dnodes
-print ===> rows: $rows
+print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
@@ -47,10 +47,10 @@ endi
print =============== step2: create database
sql create database db vgroups 1 replica 3
sql select * from information_schema.ins_databases
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
-if $data(db)[4] != 3 then
+if $data(db)[4] != 3 then
return -1
endi
@@ -139,7 +139,7 @@ step3:
return -1
endi
sql show db.vgroups
-print ===> rows: $rows
+print ===> rows: $rows
if $rows != 1 then
goto step3
endi
@@ -165,4 +165,4 @@ endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
-system sh/exec.sh -n dnode4 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode4 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/back_insert.sim b/tests/script/tsim/db/back_insert.sim
index e2bdb3a64b..b3f8207293 100644
--- a/tests/script/tsim/db/back_insert.sim
+++ b/tests/script/tsim/db/back_insert.sim
@@ -2,8 +2,8 @@ sql connect
$x = 1
begin:
sql reset query cache
- sleep 100
- sql insert into db.tb values(now, $x ) -x begin
- #print ===> insert successed $x
- $x = $x + 1
-goto begin
\ No newline at end of file
+ sleep 100
+ sql insert into db.tb values(now, $x ) -x begin
+ #print ===> insert successed $x
+ $x = $x + 1
+goto begin
diff --git a/tests/script/tsim/db/basic1.sim b/tests/script/tsim/db/basic1.sim
index 679440590f..69eeb9347b 100644
--- a/tests/script/tsim/db/basic1.sim
+++ b/tests/script/tsim/db/basic1.sim
@@ -25,15 +25,15 @@ endi
print =============== show vgroups1
sql use d1
sql show vgroups
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
-if $data00 != 2 then
+if $data00 != 2 then
return -1
endi
-if $data10 != 3 then
+if $data10 != 3 then
return -1
endi
@@ -59,11 +59,11 @@ if $rows != 2 then
return -1
endi
-if $data00 != 4 then
+if $data00 != 4 then
return -1
endi
-if $data10 != 5 then
+if $data10 != 5 then
return -1
endi
@@ -73,15 +73,15 @@ if $rows != 3 then
return -1
endi
-if $data00 != 6 then
+if $data00 != 6 then
return -1
endi
-if $data10 != 7 then
+if $data10 != 7 then
return -1
endi
-if $data20 != 8 then
+if $data20 != 8 then
return -1
endi
@@ -91,19 +91,19 @@ if $rows != 4 then
return -1
endi
-if $data00 != 9 then
+if $data00 != 9 then
return -1
endi
-if $data10 != 10 then
+if $data10 != 10 then
return -1
endi
-if $data20 != 11 then
+if $data20 != 11 then
return -1
endi
-if $data30 != 12 then
+if $data30 != 12 then
return -1
endi
diff --git a/tests/script/tsim/db/basic2.sim b/tests/script/tsim/db/basic2.sim
index 114adf98e6..b7ac0b5edd 100644
--- a/tests/script/tsim/db/basic2.sim
+++ b/tests/script/tsim/db/basic2.sim
@@ -3,6 +3,21 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
+print =============== conflict stb
+sql create database db vgroups 1;
+sql use db;
+sql create table stb (ts timestamp, i int) tags (j int);
+sql_error create table stb using stb tags (1);
+sql_error create table stb (ts timestamp, i int);
+
+sql create table ctb (ts timestamp, i int);
+sql_error create table ctb (ts timestamp, i int) tags (j int);
+
+sql create table ntb (ts timestamp, i int);
+sql_error create table ntb (ts timestamp, i int) tags (j int);
+
+sql drop database db
+
print =============== create database d1
sql create database d1
sql use d1
@@ -12,7 +27,7 @@ sql create table t3 (ts timestamp, i int);
sql create table t4 (ts timestamp, i int);
sql select * from information_schema.ins_databases
-print rows: $rows
+print rows: $rows
print $data00 $data01 $data02 $data03
print $data10 $data11 $data12 $data13
if $rows != 3 then
@@ -32,7 +47,7 @@ endi
#endi
sql show tables
-if $rows != 4 then
+if $rows != 4 then
return -1
endi
@@ -49,8 +64,8 @@ if $rows != 4 then
endi
sql show tables
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/basic3.sim b/tests/script/tsim/db/basic3.sim
index 30faec0494..db355213db 100644
--- a/tests/script/tsim/db/basic3.sim
+++ b/tests/script/tsim/db/basic3.sim
@@ -23,12 +23,12 @@ if $data22 != 2 then
return -1
endi
-#if $data03 != 4 then
+#if $data03 != 4 then
# return -1
#endi
sql show d1.tables
-if $rows != 4 then
+if $rows != 4 then
return -1
endi
@@ -44,8 +44,8 @@ if $rows != 4 then
endi
sql show d2.tables
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/basic4.sim b/tests/script/tsim/db/basic4.sim
index f407c6352d..7a5e0ec764 100644
--- a/tests/script/tsim/db/basic4.sim
+++ b/tests/script/tsim/db/basic4.sim
@@ -11,109 +11,109 @@ sql create table d1.t3 (ts timestamp, i int);
sql create table d1.t4 (ts timestamp, i int);
sql select * from information_schema.ins_databases
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
-if $data20 != d1 then
+if $data20 != d1 then
return -1
endi
-if $data22 != 1 then
+if $data22 != 1 then
return -1
endi
-if $data24 != 1 then
+if $data24 != 1 then
return -1
endi
sql show d1.tables
-if $rows != 4 then
+if $rows != 4 then
return -1
endi
sql show d1.vgroups
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != 2 then
+if $data00 != 2 then
return -1
endi
-if $data01 != d1 then
+if $data01 != d1 then
return -1
endi
-print =============== drop table
+print =============== drop table
sql drop table d1.t1
sql select * from information_schema.ins_databases
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
-if $data20 != d1 then
+if $data20 != d1 then
return -1
endi
-if $data22 != 1 then
+if $data22 != 1 then
return -1
endi
-if $data24 != 1 then
+if $data24 != 1 then
return -1
endi
sql show d1.tables
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
sql show d1.vgroups
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != 2 then
+if $data00 != 2 then
return -1
endi
-if $data01 != d1 then
+if $data01 != d1 then
return -1
endi
-print =============== drop all table
+print =============== drop all table
sql drop table d1.t2
sql drop table d1.t3
sql drop table d1.t4
sql select * from information_schema.ins_databases
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
-if $data20 != d1 then
+if $data20 != d1 then
return -1
endi
-if $data22 != 1 then
+if $data22 != 1 then
return -1
endi
-if $data24 != 1 then
+if $data24 != 1 then
return -1
endi
sql show d1.tables
-if $rows != 0 then
+if $rows != 0 then
return -1
endi
sql show d1.vgroups
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != 2 then
+if $data00 != 2 then
return -1
endi
-if $data01 != d1 then
+if $data01 != d1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/basic5.sim b/tests/script/tsim/db/basic5.sim
index 9b809c35f0..933fb8cf4b 100644
--- a/tests/script/tsim/db/basic5.sim
+++ b/tests/script/tsim/db/basic5.sim
@@ -13,13 +13,13 @@ sql create table tb1 using st1 tags(1);
sql insert into tb1 values (now, 1);
sql show stables
-if $rows != 1 then
+if $rows != 1 then
print $rows
return -1
endi
sql show tables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
@@ -35,12 +35,12 @@ sql use db1;
sql create stable st1 (ts timestamp, f1 int) tags(t1 int)
sql show stables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
sql show tables
-if $rows != 0 then
+if $rows != 0 then
return -1
endi
diff --git a/tests/script/tsim/db/basic6.sim b/tests/script/tsim/db/basic6.sim
index 2377a65ac0..5043574787 100644
--- a/tests/script/tsim/db/basic6.sim
+++ b/tests/script/tsim/db/basic6.sim
@@ -14,7 +14,7 @@ $st = $stPrefix . $i
$tb = $tbPrefix . $i
print =============== step1
-# quorum presicion
+# quorum presicion
sql create database $db vgroups 8 replica 1 duration 2 keep 10 minrows 80 maxrows 10000 wal_level 2 wal_fsync_period 1000 comp 0 cachemodel 'last_value' precision 'us'
sql select * from information_schema.ins_databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
@@ -46,7 +46,7 @@ endi
#if $data29 != 12 then
# return -1
#endi
-
+
print =============== step2
sql_error create database $db
sql create database if not exists $db
@@ -60,7 +60,7 @@ sql drop database $db
sql select * from information_schema.ins_databases
if $rows != 2 then
return -1
-endi
+endi
print =============== step4
sql_error drop database $db
@@ -102,22 +102,22 @@ while $i < 5
sql create table $tb using $st tags(1)
sql show stables
- if $rows != 1 then
+ if $rows != 1 then
return -1
endi
print $data00 $data01 $data02 $data03
- if $data00 != $st then
+ if $data00 != $st then
return -1
endi
sql show tables
- if $rows != 1 then
+ if $rows != 1 then
return -1
endi
print $data00 $data01 $data02 $data03
- if $data00 != $tb then
+ if $data00 != $tb then
return -1
endi
@@ -127,8 +127,8 @@ endw
print =============== step7
$i = 0
while $i < 5
- $db = $dbPrefix . $i
- sql drop database $db
+ $db = $dbPrefix . $i
+ sql drop database $db
$i = $i + 1
endw
@@ -143,20 +143,20 @@ sql create table $st (ts timestamp, i int) tags (j int)
sql create table $tb using $st tags(1)
sql show stables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != $st then
+if $data00 != $st then
return -1
endi
sql show tables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != $tb then
+if $data00 != $tb then
return -1
endi
@@ -168,12 +168,12 @@ sql create database $db
sql use $db
sql show stables
-if $rows != 0 then
+if $rows != 0 then
return -1
endi
sql show tables
-if $rows != 0 then
+if $rows != 0 then
return -1
endi
@@ -182,20 +182,20 @@ sql create table $st (ts timestamp, i int) tags (j int)
sql create table $tb using $st tags(1)
sql show stables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != $st then
+if $data00 != $st then
return -1
endi
sql show tables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != $tb then
+if $data00 != $tb then
return -1
endi
@@ -207,12 +207,12 @@ sql create database $db
sql use $db
sql show stables
-if $rows != 0 then
+if $rows != 0 then
return -1
endi
sql show tables
-if $rows != 0 then
+if $rows != 0 then
return -1
endi
@@ -221,20 +221,20 @@ sql create table $st (ts timestamp, i int) tags (j int)
sql create table $tb using $st tags(1)
sql show stables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != $st then
+if $data00 != $st then
return -1
endi
sql show tables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != $tb then
+if $data00 != $tb then
return -1
endi
@@ -245,12 +245,12 @@ sql insert into $tb values (now+4a, 3)
sql insert into $tb values (now+5a, 4)
sql select * from $tb
-if $rows != 5 then
+if $rows != 5 then
return -1
endi
sql select * from $st
-if $rows != 5 then
+if $rows != 5 then
return -1
endi
@@ -262,12 +262,12 @@ sql create database $db
sql use $db
sql show stables
-if $rows != 0 then
+if $rows != 0 then
return -1
endi
sql show tables
-if $rows != 0 then
+if $rows != 0 then
return -1
endi
@@ -276,20 +276,20 @@ sql create table $st (ts timestamp, i int) tags (j int)
sql create table $tb using $st tags(1)
sql show stables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != $st then
+if $data00 != $st then
return -1
endi
sql show tables
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != $tb then
+if $data00 != $tb then
return -1
endi
@@ -300,12 +300,12 @@ sql insert into $tb values (now+4a, 3)
sql insert into $tb values (now+5a, 4)
sql select * from $tb
-if $rows != 5 then
+if $rows != 5 then
return -1
endi
sql select * from $st
-if $rows != 5 then
+if $rows != 5 then
return -1
endi
diff --git a/tests/script/tsim/db/commit.sim b/tests/script/tsim/db/commit.sim
index 731f2aa256..2233245034 100644
--- a/tests/script/tsim/db/commit.sim
+++ b/tests/script/tsim/db/commit.sim
@@ -39,9 +39,9 @@ sql create table tb (ts timestamp, i int)
$x = 1
while $x < 41
$time = $x . m
- sql insert into tb values (now + $time , $x )
+ sql insert into tb values (now + $time , $x )
$x = $x + 1
-endw
+endw
sql select * from tb order by ts desc
print ===> rows $rows
@@ -71,7 +71,7 @@ if $data01 != 40 then
return -1
endi
-$oldnum = $rows
+$oldnum = $rows
$num = $rows + 2
print ======== step3 import old data
@@ -120,4 +120,4 @@ if $data01 != 40 then
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/delete_reuse1.sim b/tests/script/tsim/db/delete_reuse1.sim
index 680fe6b2ed..9dcb3c6ac1 100644
--- a/tests/script/tsim/db/delete_reuse1.sim
+++ b/tests/script/tsim/db/delete_reuse1.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d1 replica 1 vgroups 1
sql create database d2 replica 1 vgroups 1
sql create database d3 replica 1 vgroups 1
@@ -47,7 +47,7 @@ step2:
print ========= step3
sql reset query cache
-sleep 50
+sleep 50
sql create database d1 replica 1
sql create table d1.t1 (ts timestamp, i int)
@@ -65,20 +65,20 @@ while $x < 20
sql insert into d1.t1 values(now, -1) -x step4
return -1
step4:
-
+
sql create database d1 replica 1
sql reset query cache
- sleep 50
+ sleep 50
sql create table d1.t1 (ts timestamp, i int)
sql insert into d1.t1 values(now, $x )
sql select * from d1.t1
if $rows != 1 then
return -1
endi
-
- $x = $x + 1
-
- print ===> loop times: $x
-endw
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+ $x = $x + 1
+
+ print ===> loop times: $x
+endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/delete_reuse2.sim b/tests/script/tsim/db/delete_reuse2.sim
index d181b6b780..4480b60b1b 100644
--- a/tests/script/tsim/db/delete_reuse2.sim
+++ b/tests/script/tsim/db/delete_reuse2.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d1 replica 1
sql create database d2 replica 1
sql create database d3 replica 1
@@ -48,7 +48,7 @@ step2:
print ========= step3
sql create database db1 replica 1
sql reset query cache
-sleep 50
+sleep 50
sql create table db1.tb1 (ts timestamp, i int)
sql insert into db1.tb1 values(now, 2)
@@ -61,7 +61,7 @@ print ========= step4
$x = 1
while $x < 20
- $db = db . $x
+ $db = db . $x
$tb = tb . $x
sql use $db
sql drop database $db
@@ -69,14 +69,14 @@ while $x < 20
sql insert into $tb values(now, -1) -x step4
return -1
step4:
-
- $x = $x + 1
- $db = db . $x
+
+ $x = $x + 1
+ $db = db . $x
$tb = tb . $x
-
+
sql reset query cache
- sleep 50
-
+ sleep 50
+
sql create database $db replica 1
sql use $db
sql create table $tb (ts timestamp, i int)
@@ -85,8 +85,8 @@ while $x < 20
if $rows != 1 then
return -1
endi
-
- print ===> loop times: $x
-endw
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+ print ===> loop times: $x
+endw
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/delete_reusevnode.sim b/tests/script/tsim/db/delete_reusevnode.sim
index d194f82d08..7af5c9d39d 100644
--- a/tests/script/tsim/db/delete_reusevnode.sim
+++ b/tests/script/tsim/db/delete_reusevnode.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
$tbPrefix = t
$i = 0
@@ -21,13 +21,13 @@ while $i < 30
print times $i
$i = $i + 1
-endw
+endw
print ======== step2
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
-endi
+endi
system sh/stop_dnodes.sh
@@ -94,4 +94,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/delete_reusevnode2.sim b/tests/script/tsim/db/delete_reusevnode2.sim
index 754a6d695b..91473e5ee1 100644
--- a/tests/script/tsim/db/delete_reusevnode2.sim
+++ b/tests/script/tsim/db/delete_reusevnode2.sim
@@ -62,4 +62,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/delete_writing1.sim b/tests/script/tsim/db/delete_writing1.sim
index 279f8dece8..6fec09989d 100644
--- a/tests/script/tsim/db/delete_writing1.sim
+++ b/tests/script/tsim/db/delete_writing1.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-sql create database db
+sql create database db
sql create table db.tb (ts timestamp, i int)
sql insert into db.tb values(now, 1)
@@ -11,18 +11,18 @@ print ======== start back
run_back tsim/db/back_insert.sim
sleep 1000
-print ======== step1
-$x = 1
+print ======== step1
+$x = 1
while $x < 10
print drop database times $x
sql drop database if exists db
- sql create database db
+ sql create database db
sql create table db.tb (ts timestamp, i int)
sleep 1000
-
+
$x = $x + 1
endw
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/delete_writing2.sim b/tests/script/tsim/db/delete_writing2.sim
index 8eab126ae8..ad156f30eb 100644
--- a/tests/script/tsim/db/delete_writing2.sim
+++ b/tests/script/tsim/db/delete_writing2.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-sql create database db
+sql create database db
sql create table db.tb (ts timestamp, i int)
sql insert into db.tb values(now, 1)
@@ -11,11 +11,11 @@ sql create database db2
sql create table db2.tb2 (ts timestamp, i int)
sql insert into db2.tb2 values(now, 1)
-sql create database db3
+sql create database db3
sql create table db3.tb3 (ts timestamp, i int)
sql insert into db3.tb3 values(now, 1)
-sql create database db4
+sql create database db4
sql create table db4.tb4 (ts timestamp, i int)
sql insert into db4.tb4 values(now, 1)
@@ -23,19 +23,19 @@ print ======== start back
run_back tsim/db/back_insert.sim
sleep 1000
-print ======== step1
-$x = 1
+print ======== step1
+$x = 1
while $x < 10
print drop database times $x
sql drop database if exists db
- sql create database db
+ sql create database db
sql create table db.tb (ts timestamp, i int)
sleep 1000
-
+
$x = $x + 1
endw
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/dropdnodes.sim b/tests/script/tsim/db/dropdnodes.sim
index 8a46d5f9ce..20b4a136df 100644
--- a/tests/script/tsim/db/dropdnodes.sim
+++ b/tests/script/tsim/db/dropdnodes.sim
@@ -12,7 +12,7 @@ system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 2000
-sql connect
+sql connect
sql create dnode $hostname2
sleep 2000
@@ -61,13 +61,13 @@ sql show tables
print $rows
if $rows != 16 then
return -1
-endi
+endi
sql select * from mt
print $rows
if $rows != 16 then
return -1
-endi
+endi
print ========== step3
@@ -82,26 +82,26 @@ sql show tables
print $rows
if $rows != 8 then
return -1
-endi
+endi
sql select * from mt
print $rows
if $rows != 8 then
return -1
-endi
+endi
sql select * from db.t5
if $rows != 1 then
return -1
-endi
+endi
sql select * from db.t13
if $rows != 1 then
return -1
-endi
+endi
sql_error select * from db.t1
sql_error select * from db.t9
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/keep.sim b/tests/script/tsim/db/keep.sim
index e146a666d0..f0653c4801 100644
--- a/tests/script/tsim/db/keep.sim
+++ b/tests/script/tsim/db/keep.sim
@@ -14,7 +14,7 @@ while $x < 41
sql insert into tb values (now - $time , $x ) -x step2
step2:
$x = $x + 1
-endw
+endw
sql select * from tb
print ===> rows $rows last $data01
@@ -42,10 +42,10 @@ sql alter database keepdb keep 60
sql flush database keepdb
sql select * from information_schema.ins_databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
-if $data22 != 2 then
+if $data22 != 2 then
return -1
endi
-if $data27 != 86400m,86400m,86400m then
+if $data27 != 86400m,86400m,86400m then
return -1
endi
@@ -56,7 +56,7 @@ while $x < 81
sql insert into tb values (now - $time , $x ) -x step4
step4:
$x = $x + 1
-endw
+endw
sql select * from tb
print ===> rows $rows last $data01
@@ -83,10 +83,10 @@ endi
print ======== step6 alter db
sql alter database keepdb keep 30
sql select * from information_schema.ins_databases
-if $data22 != 2 then
+if $data22 != 2 then
return -1
endi
-if $data27 != 43200m,43200m,43200m then
+if $data27 != 43200m,43200m,43200m then
return -1
endi
@@ -110,7 +110,7 @@ while $x < 121
sql insert into tb values (now - $time , $x ) -x step8
step8:
$x = $x + 1
-endw
+endw
sql select * from tb
print ===> rows $rows last $data01
@@ -137,4 +137,4 @@ error3:
print ======= test success
system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/len.sim b/tests/script/tsim/db/len.sim
index ae475ddf47..047dafd5f8 100644
--- a/tests/script/tsim/db/len.sim
+++ b/tests/script/tsim/db/len.sim
@@ -11,33 +11,33 @@ sql create database -x step1
step1:
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
print =============== step2
sql create database a
sql select * from information_schema.ins_databases
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
sql drop database a
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
print =============== step3
sql create database a12345678
sql select * from information_schema.ins_databases
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
sql drop database a12345678
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
@@ -46,15 +46,15 @@ sql create database a012345678901201234567890120123456789012a0123456789012012345
return -1
step4:
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
print =============== step5
-sql create database a;1
+sql create database a;1
sql drop database a
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
@@ -64,7 +64,7 @@ sql create database a'1 -x step6
step6:
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
@@ -73,7 +73,7 @@ sql create database (a) -x step7
return -1
step7:
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
@@ -82,8 +82,8 @@ sql create database a.1 -x step8
return -1
step8:
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/repeat.sim b/tests/script/tsim/db/repeat.sim
index 98d66244f5..b0627659d0 100644
--- a/tests/script/tsim/db/repeat.sim
+++ b/tests/script/tsim/db/repeat.sim
@@ -56,4 +56,4 @@ sql drop database d10
sql drop database d11
sql drop database d12
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/db/show_create_db.sim b/tests/script/tsim/db/show_create_db.sim
index 45007d01d6..3a51fedbff 100644
--- a/tests/script/tsim/db/show_create_db.sim
+++ b/tests/script/tsim/db/show_create_db.sim
@@ -7,7 +7,7 @@ print =============== step2
sql create database db
sql show create database db
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
@@ -15,13 +15,13 @@ print =============== step3
sql use db
sql show create database db
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-if $data00 != db then
+if $data00 != db then
return -1
-endi
+endi
sql drop database db
diff --git a/tests/script/tsim/db/show_create_table.sim b/tests/script/tsim/db/show_create_table.sim
index 44fa09577e..0aeee42d21 100644
--- a/tests/script/tsim/db/show_create_table.sim
+++ b/tests/script/tsim/db/show_create_table.sim
@@ -11,14 +11,14 @@ sql create table t0 using meters tags(1,'ch')
sql create table normalTbl(ts timestamp, zone binary(8))
sql use db
-sql show create table meters
-if $rows != 1 then
+sql show create table meters
+if $rows != 1 then
return -1
endi
print ===============check sub table
sql show create table t0
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
if $data00 == 't0' then
@@ -27,8 +27,8 @@ endi
print ===============check normal table
-sql show create table normalTbl
-if $rows != 1 then
+sql show create table normalTbl
+if $rows != 1 then
return -1
endi
@@ -37,8 +37,8 @@ if $data00 == 'normalTbl' then
endi
print ===============check super table
-sql show create table meters
-if $rows != 1 then
+sql show create table meters
+if $rows != 1 then
return -1
endi
@@ -49,7 +49,7 @@ endi
print ===============check sub table with prefix
sql show create table db.t0
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
@@ -58,8 +58,8 @@ if $data00 == 't0' then
endi
print ===============check normal table with prefix
-sql show create table db.normalTbl
-if $rows != 1 then
+sql show create table db.normalTbl
+if $rows != 1 then
return -1
endi
@@ -69,8 +69,8 @@ endi
print ===============check super table with prefix
-sql show create table db.meters
-if $rows != 1 then
+sql show create table db.meters
+if $rows != 1 then
return -1
endi
diff --git a/tests/script/tsim/db/tables.sim b/tests/script/tsim/db/tables.sim
index cdee504753..273a1fd45d 100644
--- a/tests/script/tsim/db/tables.sim
+++ b/tests/script/tsim/db/tables.sim
@@ -8,7 +8,7 @@ sql create database db
sql select * from information_schema.ins_databases
print $rows $data07
-if $rows != 3 then
+if $rows != 3 then
return -1
endi
@@ -125,4 +125,4 @@ if $data01 != 4 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/columnValue_unsign.sim b/tests/script/tsim/parser/columnValue_unsign.sim
index 758814bc2b..85ff490bf4 100644
--- a/tests/script/tsim/parser/columnValue_unsign.sim
+++ b/tests/script/tsim/parser/columnValue_unsign.sim
@@ -76,17 +76,16 @@ if $data03 != NULL then
return -1
endi
-sql insert into mt_unsigned_1 values(now, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-sql insert into mt_unsigned_1 values(now+1s, 1, 2, 3, 4, 5, 6, 7, 8, 9);
-
-sql_error insert into mt_unsigned_1 values(now, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-sql_error insert into mt_unsigned_1 values(now, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-sql_error insert into mt_unsigned_1 values(now, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL);
-sql_error insert into mt_unsigned_1 values(now, NULL, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL);
-sql insert into mt_unsigned_1 values(now, 255, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-sql insert into mt_unsigned_1 values(now, NULL, 65535, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-sql insert into mt_unsigned_1 values(now, NULL, NULL, 4294967295, NULL, NULL, NULL, NULL, NULL, NULL);
-sql insert into mt_unsigned_1 values(now, NULL, NULL, NULL, 18446744073709551615, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now+1s, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now+2s, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+sql_error insert into mt_unsigned_1 values(now+3s, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+sql_error insert into mt_unsigned_1 values(now+4s, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+sql_error insert into mt_unsigned_1 values(now+5s, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL, NULL);
+sql_error insert into mt_unsigned_1 values(now+6s, NULL, NULL, NULL, -1, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now+7s, 255, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now+8s, NULL, 65535, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now+9s, NULL, NULL, 4294967295, NULL, NULL, NULL, NULL, NULL, NULL);
+sql insert into mt_unsigned_1 values(now+10s, NULL, NULL, NULL, 18446744073709551615, NULL, NULL, NULL, NULL, NULL);
sql select count(a),count(b),count(c),count(d), count(e) from mt_unsigned_1
if $rows != 1 then
diff --git a/tests/script/tsim/parser/fill_stb.sim b/tests/script/tsim/parser/fill_stb.sim
index 51ae6f4b41..656b1ac94e 100644
--- a/tests/script/tsim/parser/fill_stb.sim
+++ b/tests/script/tsim/parser/fill_stb.sim
@@ -136,7 +136,8 @@ if $data74 != -4.00000 then
endi
## fill(value) + group by
-sql select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8) group by t1
+print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
+sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4, -5, -6, -7, -8)
$val = $rowNum * 2
print $rowNum, $val
@@ -148,18 +149,13 @@ if $rows != 190 then
print expect 190, actual:$rows
return -1
endi
-if $data06 != 0 then
- return -1
-endi
if $data11 != -1 then
return -1
endi
-#if $data16 != 0 then
-# return -1
-#endi
# number of fill values is smaller than number of selected columns
-sql select max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
+print select _wstart, max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
+sql select _wstart, max(c1), max(c2), max(c3) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
if $data11 != 6 then
return -1
endi
@@ -174,11 +170,11 @@ endi
sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
# fill_char_values_to_arithmetic_fields
-sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
-sql select sum(c1), avg(c2), min(c3), max(c4) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99)
+sql select _wstart, sum(c1), avg(c2), min(c3), max(c4) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99)
$val = $rowNum * 2
$val = $val - 1
if $rows != $val then
@@ -196,11 +192,14 @@ sql select * from $stb
if $data09 != nchar0 then
return -1
endi
-sql select max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1) group by t1
-if $rows != 0 then
- return -1
-endi
-sql select min(c1), max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1)
+
+print select max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1)
+sql select max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1)
+#if $rows != 0 then
+# return -1
+#endi
+
+sql select _wstart, min(c1), max(c4) from $stb where t1 > 4 and ts >= $ts0 and ts <= $tsu interval(5m) fill(value, -1)
$val = $rowNum * 2
$val = $val - 1
if $rows != $val then
@@ -223,11 +222,12 @@ if $data12 != -1.000000000 then
endi
# fill_into_nonarithmetic_fieds
-sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
+print select _wstart, first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
+sql select _wstart, first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
#if $data11 != 20000000 then
-if $data11 != 1 then
- return -1
-endi
+#if $data11 != 1 then
+# return -1
+#endi
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
@@ -235,16 +235,15 @@ sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <=
sql select first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24
-sql_error select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
+sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
-
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
-sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
+sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
$val = $rowNum * 2
$val = $val - 1
if $rows != $val then
@@ -253,11 +252,11 @@ endi
if $data01 != $rowNum then
return -1
endi
-if $data11 != 20 then
- return -1
-endi
+#if $data11 != 20 then
+# return -1
+#endi
-sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 2e1);
+sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 2e1);
if $rows != $val then
return -1
endi
@@ -268,43 +267,44 @@ if $data11 != 20 then
return -1
endi
-sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '20');
+sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '20');
if $rows != $val then
return -1
endi
if $data01 != $rowNum then
return -1
endi
-if $data11 != 20 then
- return -1
-endi
+#if $data11 != 20 then
+# return -1
+#endi
## linear fill
-sql select max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear) group by t1
+sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(linear)
$val = $rowNum * 2
$val = $val - 1
$val = $val * $tbNum
if $rows != $val then
return -1
endi
-if $data08 != 0 then
- return -1
-endi
-if $data15 != NULL then
- return -1
-endi
-if $data16 != NULL then
- return -1
-endi
-if $data17 != NULL then
- return -1
-endi
-if $data18 != 0 then
- return -1
-endi
+#if $data08 != 0 then
+# return -1
+#endi
+#if $data15 != NULL then
+# return -1
+#endi
+#if $data16 != NULL then
+# return -1
+#endi
+#if $data17 != NULL then
+# return -1
+#endi
+#if $data18 != 0 then
+# return -1
+#endi
## [TBASE-365]
-sql select max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 interval(5m) fill(linear) group by t1
+sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(linear)
+print select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(linear)
if $rows != 95 then
return -1
endi
@@ -332,14 +332,8 @@ endi
if $data17 != NULL then
return -1
endi
-if $data08 != 5 then
- return -1
-endi
-if $data18 != 5 then
- return -1
-endi
-sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear)
+sql select _wstart, max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(linear)
$val = $rowNum * 2
$val = $val - 1
if $rows != $val then
@@ -359,7 +353,8 @@ endi
## previous fill
print fill(prev)
-sql select max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 interval(5m) fill(prev) group by t1 limit 5
+print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(prev) limit 5
+sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(prev) limit 5
if $rows != 25 then
return -1
endi
@@ -372,69 +367,43 @@ endi
if $data04 != NULL then
return -1
endi
-if $data09 != 5 then
- return -1
-endi
if $data12 != NULL then
return -1
endi
-if $data19 != 5 then
- return -1
-endi
if $data18 != nchar0 then
return -1
endi
-if $data59 != 6 then
- return -1
-endi
-if $data69 != 6 then
- return -1
-endi
## NULL fill
print fill(NULL)
-sql select max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 interval(5m) fill(value, NULL) group by t1 limit 5
+print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5
+sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5
if $rows != 25 then
return -1
endi
if $data01 != 0 then
return -1
endi
-if $data02 != NULL then
- return -1
-endi
-if $data04 != NULL then
+if $data02 != 0 then
return -1
endi
if $data06 != 1 then
return -1
endi
-if $data09 != 5 then
+if $data11 != 0 then
return -1
endi
-if $data11 != NULL then
- return -1
-endi
-if $data12 != NULL then
- return -1
-endi
-if $data19 != 5 then
+if $data12 != 0 then
return -1
endi
if $data18 != NULL then
return -1
endi
-if $data59 != 6 then
- return -1
-endi
-if $data69 != 6 then
- return -1
-endi
print =============== clear
sql drop database $db
sql select * from information_schema.ins_databases
-if $rows != 0 then
+if $rows != 2 then
return -1
endi
diff --git a/tests/script/tsim/parser/function.sim b/tests/script/tsim/parser/function.sim
index 0219a84c64..0002a5d095 100644
--- a/tests/script/tsim/parser/function.sim
+++ b/tests/script/tsim/parser/function.sim
@@ -70,6 +70,7 @@ if $data00 != @15-08-18 00:00:00.000@ then
return -1
endi
if $data01 != 2.068333156 then
+ print expect 2.068333156, actual: $data01
return -1
endi
if $data02 != 2.063999891 then
@@ -128,6 +129,7 @@ if $data03 != 2 then
return -1
endi
if $data11 != 2.077099980 then
+ print expect 2.077099980, actual: $data11
return -1
endi
if $data12 != 2.077000022 then
diff --git a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim
index f53cd45d48..faff48b61c 100644
--- a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim
+++ b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim
@@ -35,6 +35,7 @@ sleep 7000
print =============== select * from retention level 2 from memory
sql select * from ct1;
print $data00 $data01 $data02
+print $data10 $data11 $data12
if $rows > 2 then
print retention level 2 file rows $rows > 2
return -1
@@ -51,6 +52,7 @@ endi
print =============== select * from retention level 1 from memory
sql select * from ct1 where ts > now-8d;
print $data00 $data01 $data02
+print $data10 $data11 $data12
if $rows > 2 then
print retention level 1 file rows $rows > 2
return -1
@@ -89,6 +91,7 @@ system sh/exec.sh -n dnode1 -s start
print =============== select * from retention level 2 from file
sql select * from ct1;
print $data00 $data01 $data02
+print $data10 $data11 $data12
if $rows > 2 then
print retention level 2 file rows $rows > 2
return -1
@@ -104,6 +107,7 @@ endi
print =============== select * from retention level 1 from file
sql select * from ct1 where ts > now-8d;
print $data00 $data01 $data02
+print $data10 $data11 $data12
if $rows > 2 then
print retention level 1 file rows $rows > 2
return -1
@@ -141,6 +145,7 @@ sleep 7000
print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery
sql select * from ct1;
print $data00 $data01 $data02
+print $data10 $data11 $data12
if $rows > 2 then
print retention level 2 file/mem rows $rows > 2
return -1
@@ -163,6 +168,7 @@ endi
print =============== select * from retention level 1 from file and memory after rsma qtaskinfo recovery
sql select * from ct1 where ts > now-8d;
print $data00 $data01 $data02
+print $data10 $data11 $data12
if $rows > 2 then
print retention level 1 file/mem rows $rows > 2
return -1
diff --git a/tests/script/tsim/stream/basic0.sim b/tests/script/tsim/stream/basic0.sim
index 9a5fb8012f..6d05f69dcf 100644
--- a/tests/script/tsim/stream/basic0.sim
+++ b/tests/script/tsim/stream/basic0.sim
@@ -1,7 +1,7 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/exec.sh -n dnode1 -s start
-sleep 50
+system sh/cfg.sh -n dnode1 -c debugflag -v 131
+system sh/exec.sh -n dnode1 -s start -v
sql connect
print =============== create database
@@ -137,4 +137,17 @@ if $data13 != 789 then
return -1
endi
+_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
+print =============== check
+$null=
+
+system_content sh/checkValgrind.sh -n dnode1
+print cmd return result ----> [ $system_content ]
+if $system_content > 0 then
+ return -1
+endi
+
+if $system_content == $null then
+ return -1
+endi
diff --git a/tests/script/tsim/stream/session0.sim b/tests/script/tsim/stream/session0.sim
index fee8c98cce..3e0af354d8 100644
--- a/tests/script/tsim/stream/session0.sim
+++ b/tests/script/tsim/stream/session0.sim
@@ -83,22 +83,22 @@ if $data11 != 3 then
goto loop0
endi
-if $data12 != NULL then
+if $data12 != 10 then
print ======data12=$data12
goto loop0
endi
-if $data13 != NULL then
+if $data13 != 10 then
print ======data13=$data13
goto loop0
endi
-if $data14 != NULL then
+if $data14 != 1.100000000 then
print ======data14=$data14
return -1
endi
-if $data15 != NULL then
+if $data15 != 0.000000000 then
print ======data15=$data15
return -1
endi
@@ -141,38 +141,38 @@ if $data01 != 7 then
goto loop1
endi
-if $data02 != NULL then
+if $data02 != 18 then
print =====data02=$data02
goto loop1
endi
-if $data03 != NULL then
+if $data03 != 4 then
print =====data03=$data03
goto loop1
endi
-if $data04 != NULL then
- print ======$data04
+if $data04 != 1.000000000 then
+ print ======data04=$data04
return -1
endi
-if $data05 != NULL then
- print ======$data05
+if $data05 != 1.154700538 then
+ print ======data05=$data05
return -1
endi
if $data06 != 4 then
- print ======$data06
+ print ======data06=$data06
return -1
endi
if $data07 != 1.000000000 then
- print ======$data07
+ print ======data07=$data07
return -1
endi
if $data08 != 13 then
- print ======$data08
+ print ======data08=$data08
return -1
endi
diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py
index 8f61da221d..c31d8d2547 100644
--- a/tests/system-test/1-insert/time_range_wise.py
+++ b/tests/system-test/1-insert/time_range_wise.py
@@ -293,7 +293,7 @@ class TDTestCase:
dbname = tdSql.getData(0,0)
tdSql.query("select * from information_schema.ins_databases")
for index , value in enumerate(tdSql.cursor.description):
- if value[0] == "retention":
+ if value[0] == "retentions":
r_index = index
break
for row in tdSql.queryResult:
diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py
index 9348a8ca8f..934ba9e161 100644
--- a/tests/system-test/2-query/interp.py
+++ b/tests/system-test/2-query/interp.py
@@ -551,6 +551,29 @@ class TDTestCase:
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
+ tdLog.printNoPrefix("==========step9:test error cases")
+
+ tdSql.error(f"select interp(c0) from {dbname}.{tbname}")
+ tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05')")
+ tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d)")
+ tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') fill(null)")
+ tdSql.error(f"select interp(c0) from {dbname}.{tbname} every(1s) fill(null)")
+ tdSql.error(f"select interp(c0) from {dbname}.{tbname} where ts >= '2020-02-10 00:00:05' and ts <= '2020-02-15 00:00:05' every(1s) fill(null)")
+
+ # input can only be numerical types
+ tdSql.error(f"select interp(ts) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ tdSql.error(f"select interp(c6) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ tdSql.error(f"select interp(c7) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ tdSql.error(f"select interp(c8) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+
+ # input can only be columns
+ tdSql.error(f"select interp(1) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ tdSql.error(f"select interp(1.5) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ tdSql.error(f"select interp(true) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ tdSql.error(f"select interp(false) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py
index a64e7695c3..408f4b3749 100644
--- a/tests/system-test/2-query/irate.py
+++ b/tests/system-test/2-query/irate.py
@@ -69,7 +69,7 @@ class TDTestCase:
comput_irate_value = origin_result[1][0]*1000/( origin_result[1][-1] - origin_result[0][-1])
else:
comput_irate_value = (origin_result[1][0] - origin_result[0][0])*1000/( origin_result[1][-1] - origin_result[0][-1])
- if abs(comput_irate_value - irate_value) <= 0.0000001:
+ if abs(comput_irate_value - irate_value) <= 0.001: # set as 0.001 avoid floating point precision calculation errors
tdLog.info(" irate work as expected , sql is %s "% irate_sql)
else:
tdLog.exit(" irate work not as expected , sql is %s "% irate_sql)
diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py
index d9d7ef2300..856d764747 100644
--- a/tests/system-test/2-query/json_tag.py
+++ b/tests/system-test/2-query/json_tag.py
@@ -1,25 +1,8 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, db_test.stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
# -*- coding: utf-8 -*-
-import imp
-import sys
-import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
-import json
-import os
-
class TDTestCase:
def caseDescription(self):
@@ -31,35 +14,33 @@ class TDTestCase:
return
def init(self, conn, logSql):
- self.testcasePath = os.path.split(__file__)[0]
- self.testcaseFilename = os.path.split(__file__)[-1]
- # os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def run(self):
# tdSql.prepare()
- tdSql.execute('drop database if exists db')
- tdSql.execute('create database db vgroups 1')
- tdSql.execute('use db')
+ dbname = "db"
+ tdSql.execute(f'drop database if exists {dbname}')
+ tdSql.execute(f'create database {dbname} vgroups 1')
+ tdSql.execute(f'use {dbname}')
print("============== STEP 1 ===== prepare data & validate json string")
- tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)")
- tdSql.error("create table if not exists jsons1(ts timestamp, data json) tags(tagint int)")
- tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
- tdSql.execute("insert into jsons1_1 using jsons1 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')")
- tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060628000, 2, true, 'json2', 'sss')")
- tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"tag1\":false,\"tag2\":\"beijing\"}') values (1591060668000, 3, false, 'json3', 'efwe')")
- tdSql.execute("insert into jsons1_4 using jsons1 tags('{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}') values (1591060728000, 4, true, 'json4', '323sd')")
- tdSql.execute("insert into jsons1_5 using jsons1 tags('{\"tag1\":1.232, \"tag2\":null}') values(1591060928000, 1, false, '你就会', 'ewe')")
- tdSql.execute("insert into jsons1_6 using jsons1 tags('{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}') values(1591061628000, 11, false, '你就会','')")
- tdSql.execute("insert into jsons1_7 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')")
+ tdSql.error(f"create table if not exists {dbname}.jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)")
+ tdSql.error(f"create table if not exists {dbname}.jsons1(ts timestamp, data json) tags(tagint int)")
+ tdSql.execute(f"create table if not exists {dbname}.jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
+ tdSql.execute(f"insert into {dbname}.jsons1_1 using {dbname}.jsons1 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')")
+ tdSql.execute(f"insert into {dbname}.jsons1_2 using {dbname}.jsons1 tags('{{\"tag1\":5,\"tag2\":\"beijing\"}}') values (1591060628000, 2, true, 'json2', 'sss')")
+ tdSql.execute(f"insert into {dbname}.jsons1_3 using {dbname}.jsons1 tags('{{\"tag1\":false,\"tag2\":\"beijing\"}}') values (1591060668000, 3, false, 'json3', 'efwe')")
+ tdSql.execute(f"insert into {dbname}.jsons1_4 using {dbname}.jsons1 tags('{{\"tag1\":null,\"tag2\":\"shanghai\",\"tag3\":\"hello\"}}') values (1591060728000, 4, true, 'json4', '323sd')")
+ tdSql.execute(f"insert into {dbname}.jsons1_5 using {dbname}.jsons1 tags('{{\"tag1\":1.232, \"tag2\":null}}') values(1591060928000, 1, false, '你就会', 'ewe')")
+ tdSql.execute(f"insert into {dbname}.jsons1_6 using {dbname}.jsons1 tags('{{\"tag1\":11,\"tag2\":\"\",\"tag2\":null}}') values(1591061628000, 11, false, '你就会','')")
+ tdSql.execute(f"insert into {dbname}.jsons1_7 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}}') values(1591062628000, 2, NULL, '你就会', 'dws')")
# test duplicate key using the first one. elimate empty key
- tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')")
- tdSql.query("select jtag from jsons1_8")
- tdSql.checkRows(0);
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_8 using {dbname}.jsons1 tags('{{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}}')")
+ tdSql.query(f"select jtag from {dbname}.jsons1_8")
+ tdSql.checkRows(0)
- tdSql.query("select ts,jtag from jsons1 order by ts limit 2,3")
+ tdSql.query(f"select ts,jtag from {dbname}.jsons1 order by ts limit 2,3")
tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
tdSql.checkData(0, 1, '{"tag1":5,"tag2":"beijing"}')
tdSql.checkData(1, 0, '2020-06-02 09:17:48.000')
@@ -67,7 +48,7 @@ class TDTestCase:
tdSql.checkData(2, 0, '2020-06-02 09:18:48.000')
tdSql.checkData(2, 1, '{"tag1":null,"tag2":"shanghai","tag3":"hello"}')
- tdSql.query("select ts,jtag->'tag1' from jsons1 order by ts limit 2,3")
+ tdSql.query(f"select ts,jtag->'tag1' from {dbname}.jsons1 order by ts limit 2,3")
tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
tdSql.checkData(0, 1, '5.000000000')
tdSql.checkData(1, 0, '2020-06-02 09:17:48.000')
@@ -76,163 +57,163 @@ class TDTestCase:
tdSql.checkData(2, 1, 'null')
# test empty json string, save as jtag is NULL
- tdSql.execute("insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')")
- tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('')")
- tdSql.execute("CREATE TABLE if not exists jsons1_11 using jsons1 tags(' ')")
- tdSql.execute("CREATE TABLE if not exists jsons1_12 using jsons1 tags('{}')")
- tdSql.execute("CREATE TABLE if not exists jsons1_13 using jsons1 tags('null')")
+ tdSql.execute(f"insert into {dbname}.jsons1_9 using {dbname}.jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_10 using {dbname}.jsons1 tags('')")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_11 using {dbname}.jsons1 tags(' ')")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_12 using {dbname}.jsons1 tags('{{}}')")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_13 using {dbname}.jsons1 tags('null')")
# test invalidate json
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('\"efwewf\"')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('3333')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags(76)")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags(hell)")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('33.33')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('false')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('\"efwewf\"')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('3333')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags(76)")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags(hell)")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('33.33')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('false')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('[1,true]')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{222}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"fe\"}}')")
# test invalidate json key, key must can be printed assic char
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"。loc\":\"fff\"}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"\t\":\"fff\"}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"试试\":\"fff\"}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":[1,true]}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":{{}}}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"。loc\":\"fff\"}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"\t\":\"fff\"}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"试试\":\"fff\"}}')")
# test invalidate json value, value number can not be inf,nan TD-12166
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":1.8e308}')")
- tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":-1.8e308}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"k\":1.8e308}}')")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"k\":-1.8e308}}')")
#test length limit
char1= ''.join(['abcd']*64)
char3= ''.join(['abcd']*1021)
print(len(char3)) # 4084
- tdSql.error("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s1\":5}')" % char1) # len(key)=257
- tdSql.execute("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s\":5}')" % char1) # len(key)=256
- tdSql.error("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSSS\":\"%s\"}')" % char3) # len(object)=4096
- tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSS\":\"%s\"}')" % char3) # len(object)=4095
- tdSql.execute("drop table if exists jsons1_15")
- tdSql.execute("drop table if exists jsons1_16")
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"%s1\":5}}')" % char1) # len(key)=257
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"%s\":5}}')" % char1) # len(key)=256
+ tdSql.error(f"create TABLE if not exists {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"TSSSS\":\"%s\"}}')" % char3) # len(object)=4096
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"TSSS\":\"%s\"}}')" % char3) # len(object)=4095
+ tdSql.execute(f"drop table if exists {dbname}.jsons1_15")
+ tdSql.execute(f"drop table if exists {dbname}.jsons1_16")
print("============== STEP 2 ===== alter table json tag")
- tdSql.error("ALTER STABLE jsons1 add tag tag2 nchar(20)")
- tdSql.error("ALTER STABLE jsons1 drop tag jtag")
- tdSql.error("ALTER TABLE jsons1 MODIFY TAG jtag nchar(128)")
+ tdSql.error(f"ALTER stable {dbname}.jsons1 add tag tag2 nchar(20)")
+ tdSql.error(f"ALTER stable {dbname}.jsons1 drop tag jtag")
+ tdSql.error(f"ALTER table {dbname}.jsons1 MODIFY TAG jtag nchar(128)")
- tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'")
- tdSql.query("select jtag from jsons1_1")
+ tdSql.execute(f"ALTER table {dbname}.jsons1_1 SET TAG jtag='{{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}}'")
+ tdSql.query(f"select jtag from {dbname}.jsons1_1")
tdSql.checkData(0, 0, '{"tag1":"femail","tag2":35,"tag3":true}')
- tdSql.execute("ALTER TABLE jsons1 rename TAG jtag jtag_new")
- tdSql.execute("ALTER TABLE jsons1 rename TAG jtag_new jtag")
+ tdSql.execute(f"ALTER table {dbname}.jsons1 rename TAG jtag jtag_new")
+ tdSql.execute(f"ALTER table {dbname}.jsons1 rename TAG jtag_new jtag")
- tdSql.execute("create table st(ts timestamp, i int) tags(t int)")
- tdSql.error("ALTER STABLE st add tag jtag json")
- tdSql.error("ALTER STABLE st add column jtag json")
+ tdSql.execute(f"create table {dbname}.st(ts timestamp, i int) tags(t int)")
+ tdSql.error(f"ALTER stable {dbname}.st add tag jtag json")
+ tdSql.error(f"ALTER stable {dbname}.st add column jtag json")
print("============== STEP 3 ===== query table")
# test error syntax
- tdSql.error("select * from jsons1 where jtag->tag1='beijing'")
- tdSql.error("select -> from jsons1")
- tdSql.error("select * from jsons1 where contains")
- tdSql.error("select * from jsons1 where jtag->")
- tdSql.error("select jtag->location from jsons1")
- tdSql.error("select jtag contains location from jsons1")
- tdSql.error("select * from jsons1 where jtag contains location")
- tdSql.query("select * from jsons1 where jtag contains''")
- tdSql.error("select * from jsons1 where jtag contains 'location'='beijing'")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag->tag1='beijing'")
+ tdSql.error(f"select -> from {dbname}.jsons1")
+ tdSql.error(f"select * from {dbname}.jsons1 where contains")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag->")
+ tdSql.error(f"select jtag->location from {dbname}.jsons1")
+ tdSql.error(f"select jtag contains location from {dbname}.jsons1")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag contains location")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag contains''")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag contains 'location'='beijing'")
# test function error
- tdSql.error("select avg(jtag->'tag1') from jsons1")
- tdSql.error("select avg(jtag) from jsons1")
- tdSql.error("select min(jtag->'tag1') from jsons1")
- tdSql.error("select min(jtag) from jsons1")
- tdSql.error("select ceil(jtag->'tag1') from jsons1")
- tdSql.error("select ceil(jtag) from jsons1")
+ tdSql.error(f"select avg(jtag->'tag1') from {dbname}.jsons1")
+ tdSql.error(f"select avg(jtag) from {dbname}.jsons1")
+ tdSql.error(f"select min(jtag->'tag1') from {dbname}.jsons1")
+ tdSql.error(f"select min(jtag) from {dbname}.jsons1")
+ tdSql.error(f"select ceil(jtag->'tag1') from {dbname}.jsons1")
+ tdSql.error(f"select ceil(jtag) from {dbname}.jsons1")
#test scalar operation
- tdSql.query("select jtag contains 'tag1',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag contains 'tag1',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1' like 'fe%',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1' like 'fe%',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1' not like 'fe%',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1' not like 'fe%',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1' match 'fe',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1' match 'fe',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1' nmatch 'fe',jtag->'tag1' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1' nmatch 'fe',jtag->'tag1' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
- tdSql.query("select jtag->'tag1',jtag->'tag1'>='a' from jsons1 order by jtag->'tag1'")
+ tdSql.query(f"select jtag->'tag1',jtag->'tag1'>='a' from {dbname}.jsons1 order by jtag->'tag1'")
tdSql.checkRows(9)
# test select normal column
- tdSql.query("select dataint from jsons1 order by dataint")
+ tdSql.query(f"select dataint from {dbname}.jsons1 order by dataint")
tdSql.checkRows(9)
tdSql.checkData(1, 0, 1)
# test select json tag
- tdSql.query("select * from jsons1")
+ tdSql.query(f"select * from {dbname}.jsons1")
tdSql.checkRows(9)
- tdSql.query("select jtag from jsons1")
+ tdSql.query(f"select jtag from {dbname}.jsons1")
tdSql.checkRows(9)
- tdSql.query("select * from jsons1 where jtag is null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag is null")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag is not null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag is not null")
tdSql.checkRows(8)
# test jtag is NULL
- tdSql.query("select jtag from jsons1_9")
+ tdSql.query(f"select jtag from {dbname}.jsons1_9")
tdSql.checkData(0, 0, None)
# test select json tag->'key', value is string
- tdSql.query("select jtag->'tag1' from jsons1_1")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1")
tdSql.checkData(0, 0, '"femail"')
- tdSql.query("select jtag->'tag2' from jsons1_6")
+ tdSql.query(f"select jtag->'tag2' from {dbname}.jsons1_6")
tdSql.checkData(0, 0, '""')
# test select json tag->'key', value is int
- tdSql.query("select jtag->'tag2' from jsons1_1")
+ tdSql.query(f"select jtag->'tag2' from {dbname}.jsons1_1")
tdSql.checkData(0, 0, "35.000000000")
# test select json tag->'key', value is bool
- tdSql.query("select jtag->'tag3' from jsons1_1")
+ tdSql.query(f"select jtag->'tag3' from {dbname}.jsons1_1")
tdSql.checkData(0, 0, "true")
# test select json tag->'key', value is null
- tdSql.query("select jtag->'tag1' from jsons1_4")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_4")
tdSql.checkData(0, 0, "null")
# test select json tag->'key', value is double
- tdSql.query("select jtag->'tag1' from jsons1_5")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_5")
tdSql.checkData(0, 0, "1.232000000")
# test select json tag->'key', key is not exist
- tdSql.query("select jtag->'tag10' from jsons1_4")
+ tdSql.query(f"select jtag->'tag10' from {dbname}.jsons1_4")
tdSql.checkData(0, 0, None)
- tdSql.query("select jtag->'tag1' from jsons1")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1")
tdSql.checkRows(9)
# test header name
- res = tdSql.getColNameList("select jtag->'tag1' from jsons1")
+ res = tdSql.getColNameList(f"select jtag->'tag1' from {dbname}.jsons1")
cname_list = []
cname_list.append("jtag->'tag1'")
tdSql.checkColNameList(res, cname_list)
# test where with json tag
- tdSql.query("select * from jsons1_1 where jtag is not null")
- tdSql.query("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'")
- tdSql.error("select * from jsons1 where jtag->'tag1'={}")
+ tdSql.query(f"select * from {dbname}.jsons1_1 where jtag is not null")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag='{{\"tag1\":11,\"tag2\":\"\"}}'")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag->'tag1'={{}}")
# test json error
- tdSql.error("select jtag + 1 from jsons1")
- tdSql.error("select jtag > 1 from jsons1")
- tdSql.error("select jtag like \"1\" from jsons1")
- tdSql.error("select jtag in (\"1\") from jsons1")
- #tdSql.error("select jtag from jsons1 where jtag > 1")
- #tdSql.error("select jtag from jsons1 where jtag like 'fsss'")
- #tdSql.error("select jtag from jsons1 where jtag in (1)")
+ tdSql.error(f"select jtag + 1 from {dbname}.jsons1")
+ tdSql.error(f"select jtag > 1 from {dbname}.jsons1")
+ tdSql.error(f"select jtag like \"1\" from {dbname}.jsons1")
+ tdSql.error(f"select jtag in (\"1\") from {dbname}.jsons1")
+ #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag > 1")
+ #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag like 'fsss'")
+ #tdSql.error(f"select jtag from {dbname}.jsons1 where jtag in (1)")
# where json value is string
- tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'='beijing'")
tdSql.checkRows(2)
- tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing' order by dataint")
+ tdSql.query(f"select dataint,tbname,jtag->'tag1',jtag from {dbname}.jsons1 where jtag->'tag2'='beijing' order by dataint")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 2)
tdSql.checkData(0, 1, 'jsons1_2')
@@ -243,180 +224,180 @@ class TDTestCase:
tdSql.checkData(1, 2, 'false')
- tdSql.query("select * from jsons1 where jtag->'tag1'='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='beijing'")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'='收到货'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='收到货'")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag2'>'beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'>'beijing'")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag2'>='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'>='beijing'")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'<'beijing'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'<='beijing'")
tdSql.checkRows(4)
- tdSql.query("select * from jsons1 where jtag->'tag2'!='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'!='beijing'")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag2'=''")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2'=''")
tdSql.checkRows(2)
# where json value is int
- tdSql.query("select * from jsons1 where jtag->'tag1'=5")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=5")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 2)
- tdSql.query("select * from jsons1 where jtag->'tag1'=10")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=10")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'<54")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<54")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'<=11")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<=11")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'>4")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>4")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'>=5")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>=5")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=5")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=5")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=55")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=55")
tdSql.checkRows(3)
# where json value is double
- tdSql.query("select * from jsons1 where jtag->'tag1'=1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=1.232")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1'<1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<1.232")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'<=1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'<=1.232")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1'>1.23")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>1.23")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'>=1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>=1.232")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=1.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=1.232")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=3.232")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=3.232")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'/0=3")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'/0=3")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'/5=1")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'/5=1")
tdSql.checkRows(1)
# where json value is bool
- tdSql.query("select * from jsons1 where jtag->'tag1'=true")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=true")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'=false")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1'!=false")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'!=false")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'>false")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'>false")
tdSql.checkRows(0)
# where json value is null
- tdSql.query("select * from jsons1 where jtag->'tag1'=null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=null")
tdSql.checkRows(0)
# where json key is null
- tdSql.query("select * from jsons1 where jtag->'tag_no_exist'=3")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag_no_exist'=3")
tdSql.checkRows(0)
# where json value is not exist
- tdSql.query("select * from jsons1 where jtag->'tag1' is null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' is null")
tdSql.checkData(0, 0, 'jsons1_9')
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag4' is null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag4' is null")
tdSql.checkRows(9)
- tdSql.query("select * from jsons1 where jtag->'tag3' is not null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag3' is not null")
tdSql.checkRows(3)
# test contains
- tdSql.query("select * from jsons1 where jtag contains 'tag1'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag1'")
tdSql.checkRows(8)
- tdSql.query("select * from jsons1 where jtag contains 'tag3'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag3'")
tdSql.checkRows(4)
- tdSql.query("select * from jsons1 where jtag contains 'tag_no_exist'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag contains 'tag_no_exist'")
tdSql.checkRows(0)
# test json tag in where condition with and/or
- tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=false and jtag->'tag2'='shanghai'")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'=13 or jtag->'tag2'>35")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' is not null and jtag contains 'tag3'")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'")
tdSql.checkRows(2)
# test with between and
- tdSql.query("select * from jsons1 where jtag->'tag1' between 1 and 30")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 1 and 30")
tdSql.checkRows(3)
- tdSql.query("select * from jsons1 where jtag->'tag1' between 'femail' and 'beijing'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 'femail' and 'beijing'")
tdSql.checkRows(2)
# test with tbname/normal column
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1'")
+ tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'")
+ tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3")
+ tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=3")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23")
+ tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23")
tdSql.checkRows(1)
# test where condition like
- tdSql.query("select * from jsons1 where jtag->'tag2' like 'bei%'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2' like 'bei%'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null")
tdSql.checkRows(2)
# test where condition in no support in
- tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')")
+ tdSql.error(f"select * from {dbname}.jsons1 where jtag->'tag1' in ('beijing')")
# test where condition match/nmath
- tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match 'ma'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma$'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match 'ma$'")
tdSql.checkRows(0)
- tdSql.query("select * from jsons1 where jtag->'tag2' match 'jing$'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag2' match 'jing$'")
tdSql.checkRows(2)
- tdSql.query("select * from jsons1 where jtag->'tag1' match '收到'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' match '收到'")
tdSql.checkRows(1)
- tdSql.query("select * from jsons1 where jtag->'tag1' nmatch 'ma'")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' nmatch 'ma'")
tdSql.checkRows(1)
# test distinct
- tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')")
- tdSql.query("select distinct jtag->'tag1' from jsons1")
+ tdSql.execute(f"insert into {dbname}.jsons1_14 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}}') values(1591062628000, 2, NULL, '你就会', 'dws')")
+ tdSql.query(f"select distinct jtag->'tag1' from {dbname}.jsons1")
tdSql.checkRows(8)
- tdSql.error("select distinct jtag from jsons1")
+ tdSql.error(f"select distinct jtag from {dbname}.jsons1")
#test dumplicate key with normal colomn
- tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")")
- tdSql.query("select * from jsons1 where jtag->'datastr' match '是' and datastr match 'js'")
+ tdSql.execute(f"insert into {dbname}.jsons1_15 using {dbname}.jsons1 tags('{{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}}') values(1591060828000, 4, false, 'jjsf', \"你就会\")")
+ tdSql.query(f"select * from {dbname}.jsons1 where jtag->'datastr' match '是' and datastr match 'js'")
tdSql.checkRows(1)
- tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_15'")
+ tdSql.query(f"select tbname,jtag->'tbname' from {dbname}.jsons1 where jtag->'tbname'='tt' and tbname='jsons1_15'")
tdSql.checkRows(1)
# test join
- tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
- tdSql.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')")
- tdSql.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')")
+ tdSql.execute(f"create table if not exists {dbname}.jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
+ tdSql.execute(f"insert into {dbname}.jsons2_1 using {dbname}.jsons2 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 2, false, 'json2', '你是2')")
+ tdSql.execute(f"insert into {dbname}.jsons2_2 using {dbname}.jsons2 tags('{{\"tag1\":5,\"tag2\":null}}') values (1591060628000, 2, true, 'json2', 'sss')")
- tdSql.execute("create table if not exists jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
- tdSql.execute("insert into jsons3_1 using jsons3 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 3, false, 'json3', '你是3')")
- tdSql.execute("insert into jsons3_2 using jsons3 tags('{\"tag1\":5,\"tag2\":\"beijing\"}') values (1591060638000, 2, true, 'json3', 'sss')")
- tdSql.query("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'")
+ tdSql.execute(f"create table if not exists {dbname}.jsons3(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
+ tdSql.execute(f"insert into {dbname}.jsons3_1 using {dbname}.jsons3 tags('{{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}}') values(1591060618000, 3, false, 'json3', '你是3')")
+ tdSql.execute(f"insert into {dbname}.jsons3_2 using {dbname}.jsons3 tags('{{\"tag1\":5,\"tag2\":\"beijing\"}}') values (1591060638000, 2, true, 'json3', 'sss')")
+ tdSql.query(f"select 'sss',33,a.jtag->'tag3' from {dbname}.jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'")
tdSql.checkData(0, 0, "sss")
tdSql.checkData(0, 2, "true")
- res = tdSql.getColNameList("select 'sss',33,a.jtag->'tag3' from jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'")
+ res = tdSql.getColNameList(f"select 'sss',33,a.jtag->'tag3' from {dbname}.jsons2 a,jsons3 b where a.ts=b.ts and a.jtag->'tag1'=b.jtag->'tag1'")
cname_list = []
cname_list.append("'sss'")
cname_list.append("33")
@@ -424,19 +405,19 @@ class TDTestCase:
tdSql.checkColNameList(res, cname_list)
#
# test group by & order by json tag
- tdSql.query("select ts,jtag->'tag1' from jsons1 partition by jtag->'tag1' order by jtag->'tag1' desc")
+ tdSql.query(f"select ts,jtag->'tag1' from {dbname}.jsons1 partition by jtag->'tag1' order by jtag->'tag1' desc")
tdSql.checkRows(11)
tdSql.checkData(0, 1, '"femail"')
tdSql.checkData(2, 1, '"收到货"')
tdSql.checkData(7, 1, "false")
- tdSql.error("select count(*) from jsons1 group by jtag")
- tdSql.error("select count(*) from jsons1 partition by jtag")
- tdSql.error("select count(*) from jsons1 group by jtag order by jtag")
- tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag2'")
- tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag")
- tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 partition by jtag")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag order by jtag")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag2'")
+ tdSql.error(f"select count(*) from {dbname}.jsons1 group by jtag->'tag1' order by jtag")
+ tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1' desc")
tdSql.checkRows(8)
tdSql.checkData(0, 0, 2)
tdSql.checkData(0, 1, '"femail"')
@@ -447,7 +428,7 @@ class TDTestCase:
tdSql.checkData(5, 0, 1)
tdSql.checkData(5, 1, "false")
- tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc")
+ tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1' asc")
tdSql.checkRows(8)
tdSql.checkData(0, 1, None)
tdSql.checkData(2, 0, 1)
@@ -458,7 +439,7 @@ class TDTestCase:
tdSql.checkData(7, 1, '"femail"')
# test stddev with group by json tag
- tdSql.query("select stddev(dataint),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
+ tdSql.query(f"select stddev(dataint),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1'")
tdSql.checkRows(8)
tdSql.checkData(0, 1, None)
tdSql.checkData(4, 0, 0)
@@ -466,222 +447,222 @@ class TDTestCase:
tdSql.checkData(7, 0, 11)
tdSql.checkData(7, 1, '"femail"')
- res = tdSql.getColNameList("select stddev(dataint),jsons1.jtag->'tag1' from jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'")
+ res = tdSql.getColNameList(f"select stddev(dataint),jsons1.jtag->'tag1' from {dbname}.jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'")
cname_list = []
cname_list.append("stddev(dataint)")
cname_list.append("jsons1.jtag->'tag1'")
tdSql.checkColNameList(res, cname_list)
# test top/bottom with group by json tag
- tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
+ tdSql.query(f"select top(dataint,2),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' order by jtag->'tag1'")
tdSql.checkRows(11)
tdSql.checkData(0, 1, None)
# test having
- tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1")
+ tdSql.query(f"select count(*),jtag->'tag1' from {dbname}.jsons1 group by jtag->'tag1' having count(*) > 1")
tdSql.checkRows(3)
# subquery with json tag
- tdSql.query("select * from (select jtag, dataint from jsons1) order by dataint")
+ tdSql.query(f"select * from (select jtag, dataint from {dbname}.jsons1) order by dataint")
tdSql.checkRows(11)
tdSql.checkData(1, 1, 1)
tdSql.checkData(5, 0, '{"tag1":false,"tag2":"beijing"}')
- tdSql.error("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)")
- tdSql.error("select t->'tag1' from (select jtag->'tag1' as t, dataint from jsons1)")
- tdSql.error("select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)")
+ tdSql.error(f"select jtag->'tag1' from (select jtag->'tag1', dataint from {dbname}.jsons1)")
+ tdSql.error(f"select t->'tag1' from (select jtag->'tag1' as t, dataint from {dbname}.jsons1)")
+ tdSql.error(f"select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from {dbname}.jsons1 order by ts)")
# union all
- tdSql.query("select jtag->'tag1' from jsons1 union all select jtag->'tag2' from jsons2")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1 union all select jtag->'tag2' from {dbname}.jsons2")
tdSql.checkRows(13)
- tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag2' from jsons2_1")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1 union all select jtag->'tag2' from {dbname}.jsons2_1")
tdSql.checkRows(3)
- tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag1' from jsons2_1")
+ tdSql.query(f"select jtag->'tag1' from {dbname}.jsons1_1 union all select jtag->'tag1' from {dbname}.jsons2_1")
tdSql.checkRows(3)
- tdSql.query("select dataint,jtag->'tag1',tbname from jsons1 union all select dataint,jtag->'tag1',tbname from jsons2")
+ tdSql.query(f"select dataint,jtag->'tag1',tbname from {dbname}.jsons1 union all select dataint,jtag->'tag1',tbname from {dbname}.jsons2")
tdSql.checkRows(13)
- tdSql.query("select dataint,jtag,tbname from jsons1 union all select dataint,jtag,tbname from jsons2")
+ tdSql.query(f"select dataint,jtag,tbname from {dbname}.jsons1 union all select dataint,jtag,tbname from {dbname}.jsons2")
tdSql.checkRows(13)
#show create table
- tdSql.query("show create table jsons1")
+ tdSql.query(f"show create table {dbname}.jsons1")
tdSql.checkData(0, 1, 'CREATE STABLE `jsons1` (`ts` TIMESTAMP, `dataint` INT, `databool` BOOL, `datastr` NCHAR(50), `datastrbin` VARCHAR(150)) TAGS (`jtag` JSON)')
#test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares
- tdSql.query("select count(*) from jsons1 where jtag is not null")
+ tdSql.query(f"select count(*) from {dbname}.jsons1 where jtag is not null")
tdSql.checkData(0, 0, 10)
- tdSql.query("select avg(dataint) from jsons1 where jtag is not null")
+ tdSql.query(f"select avg(dataint) from {dbname}.jsons1 where jtag is not null")
tdSql.checkData(0, 0, 5.3)
- # tdSql.query("select twa(dataint) from jsons1 where jtag is not null")
+ # tdSql.query(f"select twa(dataint) from {dbname}.jsons1 where jtag is not null")
# tdSql.checkData(0, 0, 28.386363636363637)
- # tdSql.query("select irate(dataint) from jsons1 where jtag is not null")
+ # tdSql.query(f"select irate(dataint) from {dbname}.jsons1 where jtag is not null")
- tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null")
+ tdSql.query(f"select sum(dataint) from {dbname}.jsons1 where jtag->'tag1' is not null")
tdSql.checkData(0, 0, 45)
- tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select stddev(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 4.496912521)
- tdSql.query("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null")
+ tdSql.query(f"select LEASTSQUARES(dataint, 1, 1) from {dbname}.jsons1 where jtag is not null")
#test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp
- tdSql.query("select min(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select min(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 1)
- tdSql.query("select max(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select max(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 11)
- tdSql.query("select first(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select first(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 2)
- tdSql.query("select last(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select last(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 11)
- tdSql.query("select top(dataint,100) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select top(dataint,100) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
- tdSql.query("select bottom(dataint,100) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select bottom(dataint,100) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
- #tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1")
- tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1")
+ #tdSql.query(f"select percentile(dataint,20) from {dbname}.jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select apercentile(dataint, 50) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 1.5)
- # tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1")
- # tdSql.query("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1")
+ # tdSql.query(f"select last_row(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
+ # tdSql.query(f"select interp(dataint) from {dbname}.jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1")
#test calculation function:diff/derivative/spread/ceil/floor/round/
- tdSql.query("select diff(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select diff(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(2)
# tdSql.checkData(0, 0, -1)
# tdSql.checkData(1, 0, 10)
- tdSql.query("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select derivative(dataint, 10m, 0) from {dbname}.jsons1 where jtag->'tag1'>1")
# tdSql.checkData(0, 0, -2)
- tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select spread(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 10)
- tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select ceil(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
- tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select floor(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
- tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1")
+ tdSql.query(f"select round(dataint) from {dbname}.jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
#math function
- tdSql.query("select sin(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select sin(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select cos(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select cos(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select tan(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select tan(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select asin(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select asin(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select acos(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select acos(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select atan(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select atan(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select ceil(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select floor(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select floor(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select round(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select round(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select abs(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select abs(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select pow(dataint,5) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select pow(dataint,5) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select log(dataint,10) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select log(dataint,10) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select sqrt(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select sqrt(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select HISTOGRAM(dataint,'user_input','[1, 33, 555, 7777]',1) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select HISTOGRAM(dataint,'user_input','[1, 33, 555, 7777]',1) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select csum(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select csum(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select mavg(dataint,1) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select mavg(dataint,1) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select statecount(dataint,'GE',10) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select statecount(dataint,'GE',10) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select stateduration(dataint,'GE',0) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select stateduration(dataint,'GE',0) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select sample(dataint,3) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select sample(dataint,3) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select HYPERLOGLOG(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select HYPERLOGLOG(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
- tdSql.query("select twa(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select twa(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
# function not ready
- tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select tail(dataint,1) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
- tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select unique(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select mode(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
- tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select irate(dataint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
#str function
- tdSql.query("select upper(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select upper(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select ltrim(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select ltrim(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select lower(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select lower(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select rtrim(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select rtrim(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select LENGTH(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select CHAR_LENGTH(dataStr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select CHAR_LENGTH(dataStr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select SUBSTR(dataStr,5) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select SUBSTR(dataStr,5) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select CONCAT(dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select CONCAT(dataStr,dataStrBin) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select CONCAT_ws('adad!@!@%$^$%$^$%^a',dataStr,dataStrBin) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select CONCAT_ws('adad!@!@%$^$%$^$%^a',dataStr,dataStrBin) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select CAST(dataStr as bigint) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select CAST(dataStr as bigint) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
#time function
- tdSql.query("select now() from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select now() from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select today() from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select today() from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TIMEZONE() from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TIMEZONE() from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TO_ISO8601(ts) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TO_ISO8601(ts) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TO_UNIXTIMESTAMP(datastr) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TO_UNIXTIMESTAMP(datastr) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TIMETRUNCATE(ts,1s) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TIMETRUNCATE(ts,1s) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TIMEDIFF(ts,_c0) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TIMEDIFF(ts,_c0) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select TIMEDIFF(ts,1u) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select TIMEDIFF(ts,1u) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(3)
- tdSql.query("select ELAPSED(ts,1h) from jsons1 where jtag->'tag1'>1;")
+ tdSql.query(f"select ELAPSED(ts,1h) from {dbname}.jsons1 where jtag->'tag1'>1;")
tdSql.checkRows(1)
# to_json()
- tdSql.query("select to_json('{\"abc\":123}') from jsons1_1")
+ tdSql.query(f"select to_json('{{\"abc\":123}}') from {dbname}.jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, '{"abc":123}')
tdSql.checkData(1, 0, '{"abc":123}')
- tdSql.query("select to_json('null') from jsons1_1")
+ tdSql.query(f"select to_json('null') from {dbname}.jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null')
tdSql.checkData(1, 0, 'null')
- tdSql.query("select to_json('{\"key\"}') from jsons1_1")
+ tdSql.query(f"select to_json('{{\"key\"}}') from {dbname}.jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null')
tdSql.checkData(1, 0, 'null')
#test TD-12077
- tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')")
- tdSql.query("select jtag->'tag3' from jsons1_16")
+ tdSql.execute(f"insert into {dbname}.jsons1_16 using {dbname}.jsons1 tags('{{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}}') values(1591062628000, 2, NULL, '你就会', 'dws')")
+ tdSql.query(f"select jtag->'tag3' from {dbname}.jsons1_16")
tdSql.checkData(0, 0, '-2.111000000')
# test TD-12452
- tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag=NULL")
- tdSql.query("select jtag from jsons1_1")
+ tdSql.execute(f"ALTER table {dbname}.jsons1_1 SET TAG jtag=NULL")
+ tdSql.query(f"select jtag from {dbname}.jsons1_1")
tdSql.checkData(0, 0, None)
- tdSql.execute("CREATE TABLE if not exists jsons1_20 using jsons1 tags(NULL)")
- tdSql.query("select jtag from jsons1_20")
+ tdSql.execute(f"create TABLE if not exists {dbname}.jsons1_20 using {dbname}.jsons1 tags(NULL)")
+ tdSql.query(f"select jtag from {dbname}.jsons1_20")
tdSql.checkRows(0)
- tdSql.execute("insert into jsons1_21 using jsons1 tags(NULL) values(1591061628000, 11, false, '你就会','')")
- tdSql.query("select jtag from jsons1_21")
+ tdSql.execute(f"insert into {dbname}.jsons1_21 using {dbname}.jsons1 tags(NULL) values(1591061628000, 11, false, '你就会','')")
+ tdSql.query(f"select jtag from {dbname}.jsons1_21")
tdSql.checkData(0, 0, None)
#
# #test TD-12389
@@ -691,23 +672,23 @@ class TDTestCase:
tdSql.checkData(5, 2, 4095)
#
# #test TD-13918
- tdSql.execute("drop table if exists jsons_13918_1")
- tdSql.execute("drop table if exists jsons_13918_2")
- tdSql.execute("drop table if exists jsons_13918_3")
- tdSql.execute("drop table if exists jsons_13918_4")
- tdSql.execute("drop table if exists jsons_stb")
- tdSql.execute("create table jsons_stb (ts timestamp, dataInt int) tags (jtag json)")
- tdSql.error("create table jsons_13918_1 using jsons_stb tags ('nullx')")
- tdSql.error("create table jsons_13918_2 using jsons_stb tags (nullx)")
- tdSql.error("insert into jsons_13918_3 using jsons_stb tags('NULLx') values(1591061628001, 11)")
- tdSql.error("insert into jsons_13918_4 using jsons_stb tags(NULLx) values(1591061628002, 11)")
- tdSql.execute("create table jsons_13918_1 using jsons_stb tags ('null')")
- tdSql.execute("create table jsons_13918_2 using jsons_stb tags (null)")
- tdSql.execute("insert into jsons_13918_1 values(1591061628003, 11)")
- tdSql.execute("insert into jsons_13918_2 values(1591061628004, 11)")
- tdSql.execute("insert into jsons_13918_3 using jsons_stb tags('NULL') values(1591061628005, 11)")
- tdSql.execute("insert into jsons_13918_4 using jsons_stb tags(\"NULL\") values(1591061628006, 11)")
- tdSql.query("select * from jsons_stb")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_13918_1")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_13918_2")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_13918_3")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_13918_4")
+ tdSql.execute(f"drop table if exists {dbname}.jsons_stb")
+ tdSql.execute(f"create table {dbname}.jsons_stb (ts timestamp, dataInt int) tags (jtag json)")
+ tdSql.error(f"create table {dbname}.jsons_13918_1 using {dbname}.jsons_stb tags ('nullx')")
+ tdSql.error(f"create table {dbname}.jsons_13918_2 using {dbname}.jsons_stb tags (nullx)")
+ tdSql.error(f"insert into {dbname}.jsons_13918_3 using {dbname}.jsons_stb tags('NULLx') values(1591061628001, 11)")
+ tdSql.error(f"insert into {dbname}.jsons_13918_4 using {dbname}.jsons_stb tags(NULLx) values(1591061628002, 11)")
+ tdSql.execute(f"create table {dbname}.jsons_13918_1 using {dbname}.jsons_stb tags ('null')")
+ tdSql.execute(f"create table {dbname}.jsons_13918_2 using {dbname}.jsons_stb tags (null)")
+ tdSql.execute(f"insert into {dbname}.jsons_13918_1 values(1591061628003, 11)")
+ tdSql.execute(f"insert into {dbname}.jsons_13918_2 values(1591061628004, 11)")
+ tdSql.execute(f"insert into {dbname}.jsons_13918_3 using {dbname}.jsons_stb tags('NULL') values(1591061628005, 11)")
+ tdSql.execute(f"insert into {dbname}.jsons_13918_4 using {dbname}.jsons_stb tags(\"NULL\") values(1591061628006, 11)")
+ tdSql.query(f"select * from {dbname}.jsons_stb")
tdSql.checkRows(4)
def stop(self):
@@ -717,4 +698,3 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
-
diff --git a/tests/system-test/2-query/json_tag_large_tables.py b/tests/system-test/2-query/json_tag_large_tables.py
index 5d7df6ceb8..9164c108f9 100644
--- a/tests/system-test/2-query/json_tag_large_tables.py
+++ b/tests/system-test/2-query/json_tag_large_tables.py
@@ -35,7 +35,7 @@ class TDTestCase:
self.testcaseFilename = os.path.split(__file__)[-1]
# os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def run(self):
# tdSql.prepare()
@@ -47,24 +47,24 @@ class TDTestCase:
i = 0
# add 100000 table
tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
- while i <= 10 0000:
+ while i <= 100000:
sql = """insert into jsons1_{%d} using jsons1 tags('{"tag1":{%d}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')"""%(i, i)
tdSql.execute(sql)
i = i + 1
- // do query
- i = 0
- while i <= 10 0000:
+ # do query
+ i = 0
+ while i <= 100000:
sql = """select count(*) from jsons1 where jtag->'tag1' = %d"""%(i)
tdSql.query(sql)
if 1 != tdSql.getRows():
print("err: %s"%(sql))
-
- while i <= 10000000
+
+ while i <= 10000000:
sql = """insert into jsons1_{%d} using jsons1 tags('{"tag1":{%d}}') values(1591060618000, 1, false, 'json1', '你是') (1591060608000, 23, true, '等等', 'json')"""%(i, i)
tdSql.execute(sql)
i = i + 1
-
+
i = 0
# drop super table
tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
@@ -76,7 +76,7 @@ class TDTestCase:
tdSql.execute('drop stable jsons1')
- # drop database
+ # drop database
i = 0
tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
while i <= 100000:
@@ -84,10 +84,10 @@ class TDTestCase:
tdSql.execute(sql)
i = i + 1
tdSql.execute('drop database db')
-
+
# test duplicate key using the first one. elimate empty key
- #tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')") tdSql.query("select jtag from jsons1_8") tdSql.checkRows(0);
+ #tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('{\"tag1\":null, \"tag1\":true, \"tag1\":45, \"1tag$\":2, \" \":90, \"\":32}')") tdSql.query("select jtag from jsons1_8") tdSql.checkRows(0);
#tdSql.query("select ts,jtag from jsons1 order by ts limit 2,3")
#tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
#tdSql.checkData(0, 1, '{"tag1":5,"tag2":"beijing"}')
@@ -704,4 +704,3 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
-
diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py
index 105dc883c7..f65744a0b7 100644
--- a/tests/system-test/2-query/last_row.py
+++ b/tests/system-test/2-query/last_row.py
@@ -10,29 +10,26 @@ import random
class TDTestCase:
- updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
- "jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
- "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":0}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
self.time_step = 1000
- def insert_datas_and_check_abs(self ,tbnums , rownums , time_step ,cache_value ):
- tdSql.execute("drop database if exists test ")
- tdLog.info(" prepare datas for auto check abs function ")
+ def insert_datas_and_check_abs(self, tbnums, rownums, time_step, cache_value, dbname="test"):
+ tdSql.execute(f"drop database if exists {dbname} ")
+ tdLog.info("prepare datas for auto check abs function ")
- tdSql.execute(f" create database test cachemodel {cache_value} ")
- tdSql.execute(" use test ")
- tdSql.execute(" create stable test.stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
+ tdSql.execute(f"create database {dbname} cachemodel {cache_value} ")
+ tdSql.execute(f"use {dbname} ")
+ tdSql.execute(f"create stable {dbname}.stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)")
for tbnum in range(tbnums):
- tbname = "test.sub_tb_%d"%tbnum
- tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum))
+ tbname = f"{dbname}.sub_tb_{tbnum}"
+ tdSql.execute(f"create table {tbname} using {dbname}.stb tags({tbnum}) ")
ts = self.ts
for row in range(rownums):
@@ -49,66 +46,65 @@ class TDTestCase:
c10 = ts
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
- tdSql.execute("use test")
tbnames = ["stb", "sub_tb_1"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
- tdSql.query("desc {}".format(tbname))
+ tdSql.query(f"desc {dbname}.{tbname}")
coltypes = tdSql.queryResult
for coltype in coltypes:
colname = coltype[0]
- abs_sql = "select abs({}) from {} order by tbname ".format(colname, 'test.'+tbname)
- origin_sql = "select {} from {} order by tbname".format(colname, 'test.'+tbname)
+ abs_sql = f"select abs({colname}) from {dbname}.{tbname} order by tbname "
+ origin_sql = f"select {colname} from {dbname}.{tbname} order by tbname"
if coltype[1] in support_types:
self.check_result_auto(origin_sql , abs_sql)
- def prepare_datas(self ,cache_value):
- tdSql.execute("drop database if exists db ")
- create_db_sql = f"create database if not exists db keep 3650 duration 1000 cachemodel {cache_value}"
+ def prepare_datas(self ,cache_value, dbname="db"):
+ tdSql.execute(f"drop database if exists {dbname} ")
+ create_db_sql = f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel {cache_value}"
tdSql.execute(create_db_sql)
- tdSql.execute("use db")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- '''create table db.stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table db.t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table db.ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into db.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into db.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- "insert into db.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
- "insert into db.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into db.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into db.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
- "insert into db.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into db.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- "insert into db.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into db.t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -124,53 +120,53 @@ class TDTestCase:
'''
)
- def prepare_tag_datas(self,cache_value):
+ def prepare_tag_datas(self,cache_value, dbname="testdb"):
- tdSql.execute("drop database if exists testdb ")
+ tdSql.execute(f"drop database if exists {dbname} ")
# prepare datas
- tdSql.execute(f"create database if not exists testdb keep 3650 duration 1000 cachemodel {cache_value}")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel {cache_value}")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"use {dbname} ")
- tdSql.execute(f" create stable testdb.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp , uc1 int unsigned,\
+ tdSql.execute(f"create stable {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp , uc1 int unsigned,\
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags( t1 int , t2 bigint , t3 smallint , t4 tinyint , t5 float , t6 double , t7 bool , t8 binary(36)\
, t9 nchar(36) , t10 int unsigned , t11 bigint unsigned ,t12 smallint unsigned , t13 tinyint unsigned ,t14 timestamp ) ")
tdSql.execute(
- '''
- create table testdb.t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(
- f'create table testdb.ct{i+1} using stb1 tags ( {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" ,{111*i}, {1*i},{1*i},{1*i},now())')
+ f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" ,{111*i}, {1*i},{1*i},{1*i},now())')
for i in range(9):
tdSql.execute(
- f"insert into testdb.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i} )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i} )"
)
tdSql.execute(
- f"insert into testdb.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i})"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a ,{111*i},{1111*i},{i},{i})"
)
tdSql.execute(
- "insert into testdb.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a ,0,0,0,0)")
+ f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a ,0,0,0,0)")
tdSql.execute(
- "insert into testdb.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a , 999 , 9999 , 9 , 9)")
+ f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a , 999 , 9999 , 9 , 9)")
tdSql.execute(
- "insert into testdb.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a , 999 , 99999 , 9 , 9)")
+ f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a , 999 , 99999 , 9 , 9)")
tdSql.execute(
- "insert into testdb.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a ,999 , 99999 , 9 , 9)")
+ f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a ,999 , 99999 , 9 , 9)")
tdSql.execute(
- "insert into testdb.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ")
+ f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ")
tdSql.execute(
- "insert into testdb.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ")
+ f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL) ")
tdSql.execute(
- "insert into testdb.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL ) ")
+ f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into testdb.t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -217,118 +213,116 @@ class TDTestCase:
tdLog.info(
"abs value check pass , it work as expected ,sql is \"%s\" " % abs_query)
- def test_errors(self):
- tdSql.execute("use testdb")
-
+ def test_errors(self, dbname="testdb"):
# bug need fix
- tdSql.error("select last_row(c1 ,NULL) from testdb.t1")
+ tdSql.error(f"select last_row(c1 ,NULL) from {dbname}.t1")
error_sql_lists = [
- "select last_row from testdb.t1",
- "select last_row(-+--+c1) from testdb.t1",
- "select last_row(123--123)==1 from testdb.t1",
- "select last_row(c1) as 'd1' from testdb.t1",
- #"select last_row(c1 ,NULL) from testdb.t1",
- "select last_row(,) from testdb.t1;",
- "select last_row(abs(c1) ab from testdb.t1)",
- "select last_row(c1) as int from testdb.t1",
- "select last_row from testdb.stb1",
- "select last_row(123--123)==1 from testdb.stb1",
- "select last_row(c1) as 'd1' from testdb.stb1",
- #"select last_row(c1 ,NULL) from testdb.stb1",
- "select last_row(,) from testdb.stb1;",
- "select last_row(abs(c1) ab from testdb.stb1)",
- "select last_row(c1) as int from testdb.stb1"
+ f"select last_row from {dbname}.t1",
+ f"select last_row(-+--+c1) from {dbname}.t1",
+ f"select last_row(123--123)==1 from {dbname}.t1",
+ f"select last_row(c1) as 'd1' from {dbname}.t1",
+ #f"select last_row(c1 ,NULL) from {dbname}.t1",
+ f"select last_row(,) from {dbname}.t1;",
+ f"select last_row(abs(c1) ab from {dbname}.t1)",
+ f"select last_row(c1) as int from {dbname}.t1",
+ f"select last_row from {dbname}.stb1",
+ f"select last_row(123--123)==1 from {dbname}.stb1",
+ f"select last_row(c1) as 'd1' from {dbname}.stb1",
+ #f"select last_row(c1 ,NULL) from {dbname}.stb1",
+ f"select last_row(,) from {dbname}.stb1;",
+ f"select last_row(abs(c1) ab from {dbname}.stb1)",
+ f"select last_row(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
- tdSql.execute("use testdb")
+ def support_types(self, dbname="testdb"):
+ tdSql.execute(f"use {dbname}")
tbnames = ["stb1", "t1", "ct1", "ct2"]
for tbname in tbnames:
- tdSql.query("desc {}".format(tbname))
+ tdSql.query(f"desc {dbname}.{tbname}")
coltypes = tdSql.queryResult
for coltype in coltypes:
colname = coltype[0]
col_note = coltype[-1]
if col_note != "TAG":
- abs_sql = "select last_row({}) from {}".format(colname, "testdb."+tbname)
+ abs_sql = f"select last_row({colname}) from {dbname}.{tbname}"
tdSql.query(abs_sql)
- def basic_abs_function(self):
+ def basic_abs_function(self, dbname="testdb"):
# basic query
- tdSql.query("select c1 from testdb.ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from testdb.t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from testdb.stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select last_row(c1) from testdb.ct3")
+ tdSql.query(f"select last_row(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c2) from testdb.ct3")
+ tdSql.query(f"select last_row(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c3) from testdb.ct3")
+ tdSql.query(f"select last_row(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c4) from testdb.ct3")
+ tdSql.query(f"select last_row(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c5) from testdb.ct3")
+ tdSql.query(f"select last_row(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select last_row(c6) from testdb.ct3")
+ tdSql.query(f"select last_row(c6) from {dbname}.ct3")
# used for regular table
# bug need fix
- tdSql.query("select last_row(c1) from testdb.t1")
+ tdSql.query(f"select last_row(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
- tdSql.query("select last_row(c1) from testdb.ct4")
+ tdSql.query(f"select last_row(c1) from {dbname}.ct4")
tdSql.checkData(0, 0, None)
- tdSql.query("select last_row(c1) from testdb.stb1")
+ tdSql.query(f"select last_row(c1) from {dbname}.stb1")
tdSql.checkData(0, 0, None)
-
- # support regular query about last ,first ,last_row
- tdSql.error("select last_row(c1,NULL) from testdb.t1")
- tdSql.error("select last_row(NULL) from testdb.t1")
- tdSql.error("select last(NULL) from testdb.t1")
- tdSql.error("select first(NULL) from testdb.t1")
- tdSql.query("select last_row(c1,123) from testdb.t1")
+ # support regular query about last ,first ,last_row
+ tdSql.error(f"select last_row(c1,NULL) from {dbname}.t1")
+ tdSql.error(f"select last_row(NULL) from {dbname}.t1")
+ tdSql.error(f"select last(NULL) from {dbname}.t1")
+ tdSql.error(f"select first(NULL) from {dbname}.t1")
+
+ tdSql.query(f"select last_row(c1,123) from {dbname}.t1")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,123)
- tdSql.query("select last_row(123) from testdb.t1")
+ tdSql.query(f"select last_row(123) from {dbname}.t1")
tdSql.checkData(0,0,123)
- tdSql.error("select last(c1,NULL) from testdb.t1")
+ tdSql.error(f"select last(c1,NULL) from {dbname}.t1")
- tdSql.query("select last(c1,123) from testdb.t1")
+ tdSql.query(f"select last(c1,123) from {dbname}.t1")
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,123)
- tdSql.error("select first(c1,NULL) from testdb.t1")
+ tdSql.error(f"select first(c1,NULL) from {dbname}.t1")
- tdSql.query("select first(c1,123) from testdb.t1")
+ tdSql.query(f"select first(c1,123) from {dbname}.t1")
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,123)
- tdSql.error("select last_row(c1,c2,c3,NULL,c4) from testdb.t1")
+ tdSql.error(f"select last_row(c1,c2,c3,NULL,c4) from {dbname}.t1")
- tdSql.query("select last_row(c1,c2,c3,123,c4) from testdb.t1")
+ tdSql.query(f"select last_row(c1,c2,c3,123,c4) from {dbname}.t1")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,None)
tdSql.checkData(0,3,123)
tdSql.checkData(0,4,None)
-
- tdSql.error("select last_row(c1,c2,c3,NULL,c4,t1,t2) from testdb.ct1")
- tdSql.query("select last_row(c1,c2,c3,123,c4,t1,t2) from testdb.ct1")
+ tdSql.error(f"select last_row(c1,c2,c3,NULL,c4,t1,t2) from {dbname}.ct1")
+
+ tdSql.query(f"select last_row(c1,c2,c3,123,c4,t1,t2) from {dbname}.ct1")
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,-99999)
tdSql.checkData(0,2,-999)
@@ -338,13 +332,13 @@ class TDTestCase:
tdSql.checkData(0,5,0)
# # bug need fix
- tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.t1")
+ tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
# # bug need fix
- tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.ct1")
+ tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.ct1")
tdSql.checkData(0, 0, 9)
tdSql.checkData(0, 1, -99999)
tdSql.checkData(0, 2, -999)
@@ -352,7 +346,7 @@ class TDTestCase:
tdSql.checkData(0, 4,-9.99000)
# bug need fix
- tdSql.query("select last_row(c1), c2, c3 , c4, c5 from testdb.stb1 where tbname='ct1'")
+ tdSql.query(f"select last_row(c1), c2, c3 , c4, c5 from {dbname}.stb1 where tbname='ct1'")
tdSql.checkData(0, 0, 9)
tdSql.checkData(0, 1, -99999)
tdSql.checkData(0, 2, -999)
@@ -360,124 +354,124 @@ class TDTestCase:
tdSql.checkData(0, 4,-9.99000)
# bug fix
- tdSql.query("select last_row(abs(c1)) from testdb.ct1")
+ tdSql.query(f"select last_row(abs(c1)) from {dbname}.ct1")
tdSql.checkData(0,0,9)
# # bug fix
- tdSql.query("select last_row(c1+1) from testdb.ct1")
- tdSql.query("select last_row(c1+1) from testdb.stb1")
- tdSql.query("select last_row(c1+1) from testdb.t1")
+ tdSql.query(f"select last_row(c1+1) from {dbname}.ct1")
+ tdSql.query(f"select last_row(c1+1) from {dbname}.stb1")
+ tdSql.query(f"select last_row(c1+1) from {dbname}.t1")
# used for stable table
- tdSql.query("select last_row(c1 ,c2 ,c3) ,last_row(c4) from testdb.ct1")
+ tdSql.query(f"select last_row(c1 ,c2 ,c3) ,last_row(c4) from {dbname}.ct1")
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,-99999)
tdSql.checkData(0,2,-999)
tdSql.checkData(0,3,None)
# bug need fix
- tdSql.query("select last_row(c1 ,c2 ,c3) from testdb.stb1 ")
+ tdSql.query(f"select last_row(c1 ,c2 ,c3) from {dbname}.stb1 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
tdSql.checkData(0,2,None)
- tdSql.query('select last_row(c1) from testdb.t1 where ts <"2022-12-31 01:01:36.000"')
+ tdSql.query(f'select last_row(c1) from {dbname}.t1 where ts <"2022-12-31 01:01:36.000"')
tdSql.checkData(0,0,8)
# bug need fix
- tdSql.query("select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from testdb.stb1 where c4 is not null")
+ tdSql.query(f"select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from {dbname}.stb1 where c4 is not null")
tdSql.checkData(0,0,16.000000000)
tdSql.checkData(0,1,-101.000000000)
- tdSql.query("select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from testdb.ct1 where c4<0")
+ tdSql.query(f"select abs(last_row(c1)-2)+max(c1),ceil(last_row(c4)-2) from {dbname}.ct1 where c4<0")
tdSql.checkData(0,0,16.000000000)
tdSql.checkData(0,1,-101.000000000)
- tdSql.query("select last_row(ceil(c1+2)+floor(c1)-10) from testdb.stb1")
+ tdSql.query(f"select last_row(ceil(c1+2)+floor(c1)-10) from {dbname}.stb1")
tdSql.checkData(0,0,None)
- tdSql.query("select last_row(ceil(c1+2)+floor(c1)-10) from testdb.ct1")
+ tdSql.query(f"select last_row(ceil(c1+2)+floor(c1)-10) from {dbname}.ct1")
tdSql.checkData(0,0,10.000000000)
# filter for last_row
# bug need fix for all function
- tdSql.query("select last_row(ts ,c1 ) from testdb.ct4 where t1 = 1 ")
+ tdSql.query(f"select last_row(ts ,c1 ) from {dbname}.ct4 where t1 = 1 ")
tdSql.checkRows(0)
- tdSql.query("select count(c1) from testdb.ct4 where t1 = 1 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 where t1 = 1 ")
tdSql.checkRows(0)
- tdSql.query("select last_row(c1) ,last(c1) from testdb.stb1 where c1 is null")
+ tdSql.query(f"select last_row(c1) ,last(c1) from {dbname}.stb1 where c1 is null")
tdSql.checkRows(1)
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,None)
- tdSql.query("select last_row(c1) ,count(*) from testdb.stb1 where c1 is null")
+ tdSql.query(f"select last_row(c1) ,count(*) from {dbname}.stb1 where c1 is null")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,3)
- tdSql.query("select last_row(c1) ,count(c1) from testdb.stb1 where c1 is null")
+ tdSql.query(f"select last_row(c1) ,count(c1) from {dbname}.stb1 where c1 is null")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,0)
# bug need fix
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1")
tdSql.checkData(0,0,'ct4')
tdSql.checkData(0,1,None)
- tdSql.query(" select tbname ,last_row(c1) from testdb.stb1 partition by tbname order by tbname ")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 partition by tbname order by tbname ")
tdSql.checkData(0,0,'ct1')
tdSql.checkData(0,1,9)
tdSql.checkData(1,0,'ct4')
tdSql.checkData(1,1,None)
- tdSql.query(" select tbname ,last_row(c1) from testdb.stb1 group by tbname order by tbname ")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 group by tbname order by tbname ")
tdSql.checkData(0,0,'ct1')
tdSql.checkData(0,1,9)
tdSql.checkData(1,0,'ct4')
tdSql.checkData(1,1,None)
- tdSql.query(" select t1 ,count(c1) from testdb.stb1 partition by t1 ")
+ tdSql.query(f"select t1 ,count(c1) from {dbname}.stb1 partition by t1 ")
tdSql.checkRows(2)
# filter by tbname
- tdSql.query("select last_row(c1) from testdb.stb1 where tbname = 'ct1' ")
+ tdSql.query(f"select last_row(c1) from {dbname}.stb1 where tbname = 'ct1' ")
tdSql.checkData(0,0,9)
# bug need fix
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1 where tbname = 'ct1' ")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 where tbname = 'ct1' ")
tdSql.checkData(0,1,9)
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1 partition by tbname order by tbname")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 partition by tbname order by tbname")
tdSql.checkData(0, 0, 'ct1')
tdSql.checkData(0, 1, 9)
tdSql.checkData(1, 0, 'ct4')
tdSql.checkData(1, 1, None)
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1 group by tbname order by tbname")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 group by tbname order by tbname")
tdSql.checkData(0, 0, 'ct1')
tdSql.checkData(0, 1, 9)
tdSql.checkData(1, 0, 'ct4')
tdSql.checkData(1, 1, None)
# last_row for only tag
- tdSql.query("select last_row(t1 ,t2 ,t3 , t4 ) from testdb.stb1")
+ tdSql.query(f"select last_row(t1 ,t2 ,t3 , t4 ) from {dbname}.stb1")
tdSql.checkData(0,0,3)
tdSql.checkData(0,1,33333)
tdSql.checkData(0,2,333)
tdSql.checkData(0,3,3)
- tdSql.query("select last_row(abs(floor(t1)) ,t2 ,ceil(abs(t3)) , abs(ceil(t4)) ) from testdb.stb1")
+ tdSql.query(f"select last_row(abs(floor(t1)) ,t2 ,ceil(abs(t3)) , abs(ceil(t4)) ) from {dbname}.stb1")
tdSql.checkData(0,0,3)
tdSql.checkData(0,1,33333)
tdSql.checkData(0,2,333)
tdSql.checkData(0,3,3)
# filter by tag
- tdSql.query("select tbname ,last_row(c1) from testdb.stb1 where t1 =0 ")
+ tdSql.query(f"select tbname ,last_row(c1) from {dbname}.stb1 where t1 =0 ")
tdSql.checkData(0,1,9)
- tdSql.query("select tbname ,last_row(c1) ,t1 from testdb.stb1 partition by t1 order by t1")
+ tdSql.query(f"select tbname ,last_row(c1) ,t1 from {dbname}.stb1 partition by t1 order by t1")
tdSql.checkData(0, 0, 'ct1')
tdSql.checkData(0, 1, 9)
tdSql.checkData(1, 0, 'ct4')
@@ -485,56 +479,55 @@ class TDTestCase:
# filter by col
- tdSql.query("select tbname ,last_row(c1),abs(c1)from testdb.stb1 where c1 =1;")
+ tdSql.query(f"select tbname ,last_row(c1),abs(c1)from {dbname}.stb1 where c1 =1;")
tdSql.checkData(0, 0, 'ct1')
tdSql.checkData(0, 1, 1)
tdSql.checkData(0, 2, 1)
- tdSql.query("select last_row(c1) from testdb.stb1 where abs(ceil(c1))*c1==1")
+ tdSql.query(f"select last_row(c1) from {dbname}.stb1 where abs(ceil(c1))*c1==1")
tdSql.checkData(0,0,1)
# mix with common functions
- tdSql.query("select last_row(*) ,last(*) from testdb.stb1 ")
+ tdSql.query(f"select last_row(*) ,last(*) from {dbname}.stb1 ")
tdSql.checkRows(1)
- tdSql.query("select last_row(*) ,last(*) from testdb.stb1 ")
+ tdSql.query(f"select last_row(*) ,last(*) from {dbname}.stb1 ")
tdSql.checkRows(1)
- tdSql.query("select last_row(c1+abs(c1)) from testdb.stb1 partition by tbname order by tbname")
- tdSql.query("select last(c1), max(c1+abs(c1)),last_row(c1+abs(c1)) from testdb.stb1 partition by tbname order by tbname")
+ tdSql.query(f"select last_row(c1+abs(c1)) from {dbname}.stb1 partition by tbname order by tbname")
+ tdSql.query(f"select last(c1), max(c1+abs(c1)),last_row(c1+abs(c1)) from {dbname}.stb1 partition by tbname order by tbname")
# # bug need fix ,taosd crash
- tdSql.error("select last_row(*) ,last(*) from testdb.stb1 partition by tbname order by last(*)")
- tdSql.error("select last_row(*) ,last(*) from testdb.stb1 partition by tbname order by last_row(*)")
+ tdSql.error(f"select last_row(*) ,last(*) from {dbname}.stb1 partition by tbname order by last(*)")
+ tdSql.error(f"select last_row(*) ,last(*) from {dbname}.stb1 partition by tbname order by last_row(*)")
# mix with agg functions
- tdSql.query("select last(*), last_row(*),last(c1), last_row(c1) from testdb.stb1 ")
- tdSql.query("select last(*), last_row(*),last(c1), last_row(c1) from testdb.ct1 ")
- tdSql.query("select last(*), last_row(*),last(c1+1)*max(c1), last_row(c1+2)/2 from testdb.t1 ")
- tdSql.query("select last_row(*) ,abs(c1/2)+100 from testdb.stb1 where tbname =\"ct1\" ")
- tdSql.query("select c1, last_row(c5) from testdb.ct1 ")
- tdSql.error("select c1, last_row(c5) ,last(c1) from testdb.stb1 ")
+ tdSql.query(f"select last(*), last_row(*),last(c1), last_row(c1) from {dbname}.stb1 ")
+ tdSql.query(f"select last(*), last_row(*),last(c1), last_row(c1) from {dbname}.ct1 ")
+ tdSql.query(f"select last(*), last_row(*),last(c1+1)*max(c1), last_row(c1+2)/2 from {dbname}.t1 ")
+ tdSql.query(f"select last_row(*) ,abs(c1/2)+100 from {dbname}.stb1 where tbname =\"ct1\" ")
+ tdSql.query(f"select c1, last_row(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, last_row(c5) ,last(c1) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select last(c1) , max(c5), count(c5) from testdb.stb1")
- tdSql.query("select last_row(c1) , max(c5), count(c5) from testdb.ct1")
+ tdSql.query(f"select last(c1) , max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select last_row(c1) , max(c5), count(c5) from {dbname}.ct1")
# bug fix for compute
- tdSql.query("select last_row(c1) -0 ,last(c1)-0 ,last(c1)+last_row(c1) from testdb.ct4 ")
+ tdSql.query(f"select last_row(c1) -0 ,last(c1)-0 ,last(c1)+last_row(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,0.000000000)
tdSql.checkData(0,2,None)
- tdSql.query(" select c1, abs(c1) -0 ,last_row(c1-0.1)-0.1 from testdb.ct1")
+ tdSql.query(f"select c1, abs(c1) -0 ,last_row(c1-0.1)-0.1 from {dbname}.ct1")
tdSql.checkData(0,0,9)
tdSql.checkData(0,1,9.000000000)
tdSql.checkData(0,2,8.800000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
+ def abs_func_filter(self, dbname="db"):
tdSql.query(
- "select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,last_row(log(c1,2)-0.5) from db.ct4 where c1>5 ")
+ f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,last_row(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkData(0, 0, 6)
tdSql.checkData(0, 1, 6.000000000)
tdSql.checkData(0, 2, 6.000000000)
@@ -542,19 +535,19 @@ class TDTestCase:
tdSql.checkData(0, 4, 2.084962501)
tdSql.query(
- "select last_row(c1,c2,c1+5) from db.ct4 where c1=5 ")
+ f"select last_row(c1,c2,c1+5) from {dbname}.ct4 where c1=5 ")
tdSql.checkData(0, 0, 5)
tdSql.checkData(0, 1, 55555)
tdSql.checkData(0, 2, 10.000000000)
tdSql.query(
- "select last(c1,c2,c1+5) from db.ct4 where c1=5 ")
+ f"select last(c1,c2,c1+5) from {dbname}.ct4 where c1=5 ")
tdSql.checkData(0, 0, 5)
tdSql.checkData(0, 1, 55555)
tdSql.checkData(0, 2, 10.000000000)
tdSql.query(
- "select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from db.ct4 where c1>log(c1,2) limit 1 ")
+ f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 8)
tdSql.checkData(0, 1, 88888)
@@ -566,166 +559,162 @@ class TDTestCase:
def abs_Arithmetic(self):
pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test cachemodel 'LAST_ROW' ")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} cachemodel 'LAST_ROW' ")
time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create table bound_test.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table bound_test.sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into bound_test.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into bound_test.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into bound_test.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into bound_test.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into bound_test.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
# check basic elem for table per row
tdSql.query(
- "select last(c1) ,last_row(c2), last_row(c3)+1 , last(c4)+1 from bound_test.sub1_bound ")
+ f"select last(c1) ,last_row(c2), last_row(c3)+1 , last(c4)+1 from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, -2147483646)
tdSql.checkData(0, 1, -9223372036854775806)
tdSql.checkData(0, 2, -32765.000000000)
tdSql.checkData(0, 3, -125.000000000)
# check + - * / in functions
tdSql.query(
- "select last_row(c1+1) ,last_row(c2) , last(c3*1) , last(c4/2) from bound_test.sub1_bound ")
-
- def test_tag_compute_for_scalar_function(self):
-
- tdSql.execute("use testdb")
+ f"select last_row(c1+1) ,last_row(c2) , last(c3*1) , last(c4/2) from {dbname}.sub1_bound ")
+ def test_tag_compute_for_scalar_function(self, dbname="testdb"):
# bug need fix
- tdSql.query(" select sum(c1) from testdb.stb1 where t1+10 >1; ")
- tdSql.query("select c1 ,t1 from testdb.stb1 where t1 =0 ")
+ tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1+10 >1; ")
+ tdSql.query(f"select c1 ,t1 from {dbname}.stb1 where t1 =0 ")
tdSql.checkRows(13)
- tdSql.query("select last_row(c1,t1) from testdb.stb1 ")
+ tdSql.query(f"select last_row(c1,t1) from {dbname}.stb1 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,3)
- tdSql.query("select last_row(c1),t1 from testdb.stb1 ")
+ tdSql.query(f"select last_row(c1),t1 from {dbname}.stb1 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,3)
- tdSql.query("select last_row(c1,t1),last(t1) from testdb.stb1 ")
+ tdSql.query(f"select last_row(c1,t1),last(t1) from {dbname}.stb1 ")
tdSql.checkData(0,0,None)
tdSql.checkData(0,1,3)
tdSql.checkData(0,2,3)
- tdSql.query("select last_row(t1) from testdb.stb1 where t1 >0 ")
+ tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 >0 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,3)
- tdSql.query("select last_row(t1) from testdb.stb1 where t1 =3 ")
+ tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 =3 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,3)
- tdSql.query("select last_row(t1) from testdb.stb1 where t1 =2")
+ tdSql.query(f"select last_row(t1) from {dbname}.stb1 where t1 =2")
tdSql.checkRows(0)
# nest query for last_row
- tdSql.query("select last_row(t1) from (select ts , c1 ,t1 from testdb.stb1)")
+ tdSql.query(f"select last_row(t1) from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,0,3)
- tdSql.query("select distinct(c1) ,t1 from testdb.stb1")
+ tdSql.query(f"select distinct(c1) ,t1 from {dbname}.stb1")
tdSql.checkRows(20)
- tdSql.query("select last_row(c1) from (select _rowts , c1 ,t1 from testdb.stb1)")
+ tdSql.query(f"select last_row(c1) from (select _rowts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,0,None)
- tdSql.query("select last_row(c1) from (select ts , c1 ,t1 from testdb.stb1)")
+ tdSql.query(f"select last_row(c1) from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,0,None)
- tdSql.query("select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from testdb.stb1)")
+ tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,1,None)
- tdSql.query("select ts , last_row(c1) ,c1 from (select ts , max(c1) c1 ,t1 from testdb.stb1 where ts >now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts ="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(NULL)')
+ tdSql.query(f'select max(c1) from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(NULL)')
tdSql.checkRows(8)
tdSql.checkData(7,0,None)
- tdSql.query('select last_row(c1) from testdb.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(value ,2 )')
+ tdSql.query(f'select last_row(c1) from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" interval(50d) sliding(30d) fill(value ,2 )')
tdSql.checkRows(8)
tdSql.checkData(7,0,2)
- tdSql.query('select last_row(c1) from testdb.stb1 where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s)')
- tdSql.query('select last_row(c1) from (select ts , c1 from testdb.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" ) interval(10s) sliding(5s)')
+ tdSql.query(f'select last_row(c1) from {dbname}.stb1 where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s)')
+ tdSql.query(f'select last_row(c1) from (select ts , c1 from {dbname}.t1 where ts>="2021-01-01 01:01:06.000" and ts < "2021-07-21 01:01:01.000" ) interval(10s) sliding(5s)')
# join
- tdSql.query("use test")
- tdSql.query("select last(sub_tb_1.c1), last(sub_tb_2.c2) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ db1 = "test"
+ tdSql.query(f"use {db1}")
+ tdSql.query(f"select last(sub_tb_1.c1), last(sub_tb_2.c2) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
tdSql.checkCols(2)
last_row_result = tdSql.queryResult
- tdSql.query("select last_row(sub_tb_1.c1), last_row(sub_tb_2.c2) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last_row(sub_tb_1.c1), last_row(sub_tb_2.c2) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
for ind , row in enumerate(last_row_result):
tdSql.checkData(ind , 0 , row[0])
- tdSql.query("select last(*), last(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last(*), last(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
last_row_result = tdSql.queryResult
- tdSql.query("select last_row(*), last_row(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last_row(*), last_row(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
for ind , row in enumerate(last_row_result):
tdSql.checkData(ind , 0 , row[0])
- tdSql.query("select last(*), last_row(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last(*), last_row(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
for ind , row in enumerate(last_row_result):
tdSql.checkData(ind , 0 , row[0])
- tdSql.query("select last_row(*), last(*) from sub_tb_1, sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
+ tdSql.query(f"select last_row(*), last(*) from {db1}.sub_tb_1 sub_tb_1, {db1}.sub_tb_2 where sub_tb_1.ts=sub_tb_2.ts")
for ind , row in enumerate(last_row_result):
tdSql.checkData(ind , 0 , row[0])
- def support_super_table_test(self):
- tdSql.execute(" use testdb ")
- self.check_result_auto( " select c1 from testdb.stb1 order by ts " , "select abs(c1) from testdb.stb1 order by ts" )
- self.check_result_auto( " select c1 from testdb.stb1 order by tbname " , "select abs(c1) from testdb.stb1 order by tbname" )
- self.check_result_auto( " select c1 from testdb.stb1 where c1 > 0 order by tbname " , "select abs(c1) from testdb.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select c1 from testdb.stb1 where c1 > 0 order by tbname " , "select abs(c1) from testdb.stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="testdb"):
+ self.check_result_auto( f"select c1 from {dbname}.stb1 order by ts " , f"select abs(c1) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select c1 from {dbname}.stb1 order by tbname " , f"select abs(c1) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c1 from testdb.stb1 order by ts " , "select t1, abs(c1) from testdb.stb1 order by ts" )
- self.check_result_auto( " select t2,c1 from testdb.stb1 order by tbname " , "select t2 ,abs(c1) from testdb.stb1 order by tbname" )
- self.check_result_auto( " select t3,c1 from testdb.stb1 where c1 > 0 order by tbname " , "select t3 ,abs(c1) from testdb.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t4,c1 from testdb.stb1 where c1 > 0 order by tbname " , "select t4 , abs(c1) from testdb.stb1 where c1 > 0 order by tbname" )
- pass
+ self.check_result_auto( f"select t1,c1 from {dbname}.stb1 order by ts " , f"select t1, abs(c1) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select t2,c1 from {dbname}.stb1 order by tbname " , f"select t2 ,abs(c1) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select t3,c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select t3 ,abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select t4,c1 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select t4 , abs(c1) from {dbname}.stb1 where c1 > 0 order by tbname" )
def basic_query(self):
diff --git a/tests/system-test/2-query/leastsquares.py b/tests/system-test/2-query/leastsquares.py
index 3bab509942..fe7188a545 100644
--- a/tests/system-test/2-query/leastsquares.py
+++ b/tests/system-test/2-query/leastsquares.py
@@ -26,6 +26,7 @@ TS_TYPE_COL = [ TS_COL, ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
+DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql):
@@ -133,13 +134,13 @@ class TDTestCase:
return f"select leastsquares({select_clause}, {start_val}, {step_val}) from {from_clause} {where_condition} {group_condition}"
@property
- def __tb_list(self):
+ def __tb_list(self, dbname=DBNAME):
return [
- "ct1",
- "ct4",
- "t1",
- "ct2",
- "stb1",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
+ f"{dbname}.nt1",
+ f"{dbname}.ct2",
+ f"{dbname}.stb1",
]
@property
@@ -161,36 +162,37 @@ class TDTestCase:
err_sqls = []
__no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
- select_claus_list = self.__query_condition(tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition(col=select_claus)
- where_claus = self.__where_condition(query_conditon=select_claus)
- having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
- for arg in self.start_step_val:
- if not isinstance(arg,int) or isinstance(arg, bool) :
- err_sqls.extend(
- (
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
- self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
- )
+ tbname = tb.split(".")[-1]
+ select_claus_list = self.__query_condition(tbname)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition(col=select_claus)
+ where_claus = self.__where_condition(query_conditon=select_claus)
+ having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
+ for arg in self.start_step_val:
+ if not isinstance(arg,int) or isinstance(arg, bool) :
+ err_sqls.extend(
+ (
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
)
- elif isinstance(select_claus, str) and any([BOOL_COL in select_claus, BINARY_COL in select_claus, NCHAR_COL in select_claus, TS_COL in select_claus]):
- err_sqls.extend(
- (
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
- self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
- )
+ )
+ elif isinstance(select_claus, str) and any([BOOL_COL in select_claus, BINARY_COL in select_claus, NCHAR_COL in select_claus, TS_COL in select_claus]):
+ err_sqls.extend(
+ (
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, step_val=arg, group_condition=group_claus),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, where_condition=where_claus, group_condition=having_claus),
)
- else:
- current_sqls.extend(
- (
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=0),
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=0, step_val=arg, group_condition=group_claus),
- self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=arg, where_condition=where_claus, group_condition=having_claus),
- )
+ )
+ else:
+ current_sqls.extend(
+ (
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=0),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=0, step_val=arg, group_condition=group_claus),
+ self.__single_sql(select_clause=select_claus, from_clause=tb, start_val=arg, step_val=arg, where_condition=where_claus, group_condition=having_claus),
)
+ )
# return filter(None, sqls)
return list(filter(None, current_sqls)), list(filter(None, err_sqls))
@@ -207,25 +209,25 @@ class TDTestCase:
def __test_current(self):
- # tdSql.query("explain select c1 from ct1")
- # tdSql.query("explain select 1 from ct2")
- # tdSql.query("explain select cast(ceil(c6) as bigint) from ct4 group by c6")
- # tdSql.query("explain select count(c3) from ct4 group by c7 having count(c3) > 0")
- # tdSql.query("explain select ct2.c3 from ct4 join ct2 on ct4.ts=ct2.ts")
+ # tdSql.query("explain select c1 from {dbname}.ct1")
+ # tdSql.query("explain select 1 from {dbname}.ct2")
+ # tdSql.query("explain select cast(ceil(c6) as bigint) from {dbname}.ct4 group by c6")
+ # tdSql.query("explain select count(c3) from {dbname}.ct4 group by c7 having count(c3) > 0")
+ # tdSql.query("explain select ct2.c3 from {dbname}.ct4 join ct2 on ct4.ts=ct2.ts")
# tdSql.query("explain select c1 from stb1 where c1 is not null and c1 in (0, 1, 2) or c1 between 2 and 100 ")
self.leastsquares_check()
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("===step 0: err case, must return err")
- tdSql.error( "select leastsquares(c1) from ct8" )
- tdSql.error( "select leastsquares(c1, 1) from ct1 " )
- tdSql.error( "select leastsquares(c1, null, 1) from ct1 " )
- tdSql.error( "select leastsquares(c1, 1, null) from ct1 " )
- tdSql.error( "select leastsquares(null, 1, 1) from ct1 " )
- tdSql.error( '''select leastsquares(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
- from ct1
+ tdSql.error( f"select leastsquares(c1) from {dbname}.ct8" )
+ tdSql.error( f"select leastsquares(c1, 1) from {dbname}.ct1 " )
+ tdSql.error( f"select leastsquares(c1, null, 1) from {dbname}.ct1 " )
+ tdSql.error( f"select leastsquares(c1, 1, null) from {dbname}.ct1 " )
+ tdSql.error( f"select leastsquares(null, 1, 1) from {dbname}.ct1 " )
+ tdSql.error( f'''select leastsquares(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
+ from {dbname}.ct1
where ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null
group by ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10']
having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
@@ -234,16 +236,16 @@ class TDTestCase:
self.__test_error()
self.__test_current()
- def __create_tb(self):
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.nt1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -253,30 +255,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
- { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -292,7 +293,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -308,13 +309,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.nt1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.nt1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -332,7 +333,7 @@ class TDTestCase:
def run(self):
- tdSql.prepare()
+ tdSql.prepare(DBNAME)
tdLog.printNoPrefix("==========step1:create table")
self.__create_tb()
@@ -344,10 +345,9 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute(f"flush database {DBNAME}")
- tdSql.execute("use db")
+ tdSql.execute(f"use {DBNAME}")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/length.py b/tests/system-test/2-query/length.py
index ed604c41ae..1761572245 100644
--- a/tests/system-test/2-query/length.py
+++ b/tests/system-test/2-query/length.py
@@ -19,6 +19,7 @@ TS_COL = "c10"
UN_CHAR_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
TS_TYPE_COL = [TS_COL]
+DBNAME = "db"
class TDTestCase:
@@ -102,16 +103,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname=DBNAME):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1", f"{dbname}.stb1"]
for tb in tbname:
self.__length_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.nt1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__length_err_check(tb):
@@ -124,17 +125,16 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.nt1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -144,29 +144,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -182,7 +182,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -198,13 +198,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.nt1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.nt1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -233,8 +233,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/log.py b/tests/system-test/2-query/log.py
index b8e0aaf52e..e304284bf9 100644
--- a/tests/system-test/2-query/log.py
+++ b/tests/system-test/2-query/log.py
@@ -10,48 +10,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -185,68 +183,68 @@ class TDTestCase:
else:
tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select log from t1",
- # "select log(-+--+c1 ,2) from t1",
- # "select +-log(c1,2) from t1",
- # "select ++-log(c1,2) from t1",
- # "select ++--log(c1,2) from t1",
- # "select - -log(c1,2)*0 from t1",
- # "select log(tbname+1,2) from t1 ",
- "select log(123--123,2)==1 from t1",
- "select log(c1,2) as 'd1' from t1",
- "select log(c1 ,c2 ,2) from t1",
- "select log(c1 ,NULL ,2) from t1",
- "select log(, 2) from t1;",
- "select log(log(c1, 2) ab from t1)",
- "select log(c1 ,2 ) as int from t1",
- "select log from stb1",
- # "select log(-+--+c1) from stb1",
- # "select +-log(c1) from stb1",
- # "select ++-log(c1) from stb1",
- # "select ++--log(c1) from stb1",
- # "select - -log(c1)*0 from stb1",
- # "select log(tbname+1) from stb1 ",
- "select log(123--123 ,2)==1 from stb1",
- "select log(c1 ,2) as 'd1' from stb1",
- "select log(c1 ,c2 ,2 ) from stb1",
- "select log(c1 ,NULL,2) from stb1",
- "select log(,) from stb1;",
- "select log(log(c1 , 2) ab from stb1)",
- "select log(c1 , 2) as int from stb1"
+ f"select log from {dbname}.t1",
+ # f"select log(-+--+c1 ,2) from {dbname}.t1",
+ # f"select +-log(c1,2) from {dbname}.t1",
+ # f"select ++-log(c1,2) from {dbname}.t1",
+ # f"select ++--log(c1,2) from {dbname}.t1",
+ # f"select - -log(c1,2)*0 from {dbname}.t1",
+ # f"select log(tbname+1,2) from {dbname}.t1 ",
+ f"select log(123--123,2)==1 from {dbname}.t1",
+ f"select log(c1,2) as 'd1' from {dbname}.t1",
+ f"select log(c1 ,c2 ,2) from {dbname}.t1",
+ f"select log(c1 ,NULL ,2) from {dbname}.t1",
+ f"select log(, 2) from {dbname}.t1;",
+ f"select log(log(c1, 2) ab from {dbname}.t1)",
+ f"select log(c1 ,2 ) as int from {dbname}.t1",
+ f"select log from {dbname}.stb1",
+ # f"select log(-+--+c1) from {dbname}.stb1",
+ # f"select +-log(c1) from {dbname}.stb1",
+ # f"select ++-log(c1) from {dbname}.stb1",
+ # f"select ++--log(c1) from {dbname}.stb1",
+ # f"select - -log(c1)*0 from {dbname}.stb1",
+ # f"select log(tbname+1) from {dbname}.stb1 ",
+ f"select log(123--123 ,2)==1 from {dbname}.stb1",
+ f"select log(c1 ,2) as 'd1' from {dbname}.stb1",
+ f"select log(c1 ,c2 ,2 ) from {dbname}.stb1",
+ f"select log(c1 ,NULL,2) from {dbname}.stb1",
+ f"select log(,) from {dbname}.stb1;",
+ f"select log(log(c1 , 2) ab from {dbname}.stb1)",
+ f"select log(c1 , 2) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select log(ts ,2 ) from t1" ,
- "select log(c7,c2 ) from t1",
- "select log(c8,c1 ) from t1",
- "select log(c9,c2 ) from t1",
- "select log(ts,c7 ) from ct1" ,
- "select log(c7,c9 ) from ct1",
- "select log(c8,c2 ) from ct1",
- "select log(c9,c1 ) from ct1",
- "select log(ts,2 ) from ct3" ,
- "select log(c7,2 ) from ct3",
- "select log(c8,2 ) from ct3",
- "select log(c9,2 ) from ct3",
- "select log(ts,2 ) from ct4" ,
- "select log(c7,2 ) from ct4",
- "select log(c8,2 ) from ct4",
- "select log(c9,2 ) from ct4",
- "select log(ts,2 ) from stb1" ,
- "select log(c7,2 ) from stb1",
- "select log(c8,2 ) from stb1",
- "select log(c9,2 ) from stb1" ,
+ f"select log(ts ,2 ) from {dbname}.t1" ,
+ f"select log(c7,c2 ) from {dbname}.t1",
+ f"select log(c8,c1 ) from {dbname}.t1",
+ f"select log(c9,c2 ) from {dbname}.t1",
+ f"select log(ts,c7 ) from {dbname}.ct1" ,
+ f"select log(c7,c9 ) from {dbname}.ct1",
+ f"select log(c8,c2 ) from {dbname}.ct1",
+ f"select log(c9,c1 ) from {dbname}.ct1",
+ f"select log(ts,2 ) from {dbname}.ct3" ,
+ f"select log(c7,2 ) from {dbname}.ct3",
+ f"select log(c8,2 ) from {dbname}.ct3",
+ f"select log(c9,2 ) from {dbname}.ct3",
+ f"select log(ts,2 ) from {dbname}.ct4" ,
+ f"select log(c7,2 ) from {dbname}.ct4",
+ f"select log(c8,2 ) from {dbname}.ct4",
+ f"select log(c9,2 ) from {dbname}.ct4",
+ f"select log(ts,2 ) from {dbname}.stb1" ,
+ f"select log(c7,2 ) from {dbname}.stb1",
+ f"select log(c8,2 ) from {dbname}.stb1",
+ f"select log(c9,2 ) from {dbname}.stb1" ,
- "select log(ts,2 ) from stbbb1" ,
- "select log(c7,2 ) from stbbb1",
+ f"select log(ts,2 ) from {dbname}.stbbb1" ,
+ f"select log(c7,2 ) from {dbname}.stbbb1",
- "select log(ts,2 ) from tbname",
- "select log(c9,2 ) from tbname"
+ f"select log(ts,2 ) from {dbname}.tbname",
+ f"select log(c9,2 ) from {dbname}.tbname"
]
@@ -255,98 +253,88 @@ class TDTestCase:
type_sql_lists = [
- "select log(c1,2 ) from t1",
- "select log(c2,2 ) from t1",
- "select log(c3,2 ) from t1",
- "select log(c4,2 ) from t1",
- "select log(c5,2 ) from t1",
- "select log(c6,2 ) from t1",
+ f"select log(c1,2 ) from {dbname}.t1",
+ f"select log(c2,2 ) from {dbname}.t1",
+ f"select log(c3,2 ) from {dbname}.t1",
+ f"select log(c4,2 ) from {dbname}.t1",
+ f"select log(c5,2 ) from {dbname}.t1",
+ f"select log(c6,2 ) from {dbname}.t1",
- "select log(c1,2 ) from ct1",
- "select log(c2,2 ) from ct1",
- "select log(c3,2 ) from ct1",
- "select log(c4,2 ) from ct1",
- "select log(c5,2 ) from ct1",
- "select log(c6,2 ) from ct1",
+ f"select log(c1,2 ) from {dbname}.ct1",
+ f"select log(c2,2 ) from {dbname}.ct1",
+ f"select log(c3,2 ) from {dbname}.ct1",
+ f"select log(c4,2 ) from {dbname}.ct1",
+ f"select log(c5,2 ) from {dbname}.ct1",
+ f"select log(c6,2 ) from {dbname}.ct1",
- "select log(c1,2 ) from ct3",
- "select log(c2,2 ) from ct3",
- "select log(c3,2 ) from ct3",
- "select log(c4,2 ) from ct3",
- "select log(c5,2 ) from ct3",
- "select log(c6,2 ) from ct3",
+ f"select log(c1,2 ) from {dbname}.ct3",
+ f"select log(c2,2 ) from {dbname}.ct3",
+ f"select log(c3,2 ) from {dbname}.ct3",
+ f"select log(c4,2 ) from {dbname}.ct3",
+ f"select log(c5,2 ) from {dbname}.ct3",
+ f"select log(c6,2 ) from {dbname}.ct3",
- "select log(c1,2 ) from stb1",
- "select log(c2,2 ) from stb1",
- "select log(c3,2 ) from stb1",
- "select log(c4,2 ) from stb1",
- "select log(c5,2 ) from stb1",
- "select log(c6,2 ) from stb1",
+ f"select log(c1,2 ) from {dbname}.stb1",
+ f"select log(c2,2 ) from {dbname}.stb1",
+ f"select log(c3,2 ) from {dbname}.stb1",
+ f"select log(c4,2 ) from {dbname}.stb1",
+ f"select log(c5,2 ) from {dbname}.stb1",
+ f"select log(c6,2 ) from {dbname}.stb1",
- "select log(c6,2) as alisb from stb1",
- "select log(c6,2) alisb from stb1",
+ f"select log(c6,2) as alisb from {dbname}.stb1",
+ f"select log(c6,2) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_log_function(self):
+ def basic_log_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select log(c1 ,2) from ct3")
+ tdSql.query(f"select log(c1 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c2 ,2) from ct3")
+ tdSql.query(f"select log(c2 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c3 ,2) from ct3")
+ tdSql.query(f"select log(c3 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c4 ,2) from ct3")
+ tdSql.query(f"select log(c4 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c5 ,2) from ct3")
+ tdSql.query(f"select log(c5 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select log(c6 ,2) from ct3")
+ tdSql.query(f"select log(c6 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select log(c1 ,2) from t1")
+ tdSql.query(f"select log(c1 ,2) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.000000000)
tdSql.checkData(3 , 0, 1.584962501)
tdSql.checkData(5 , 0, None)
- tdSql.query("select log(c1) from t1")
+ tdSql.query(f"select log(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.000000000)
tdSql.checkData(2 , 0, 0.693147181)
tdSql.checkData(3 , 0, 1.098612289)
tdSql.checkData(4 , 0, 1.386294361)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
- tdSql.checkData(1, 4, 1.11000)
- tdSql.checkData(3, 3, 33)
- tdSql.checkData(5, 4, None)
-
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
- tdSql.checkData(1, 5, 1.11000)
- tdSql.checkData(3, 4, 33)
- tdSql.checkData(5, 5, None)
-
- self.check_result_auto_log( "select c1, c2, c3 , c4, c5 from t1", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) from t1")
- self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from t1")
- self.check_result_auto_log1( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from t1")
- self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 from t1", "select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from t1")
+ self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1")
+ self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1")
+ self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1")
+ self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from {dbname}.t1")
# used for sub table
- tdSql.query("select c1 ,log(c1 ,3) from ct1")
+ tdSql.query(f"select c1 ,log(c1 ,3) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.892789261)
tdSql.checkData(1 , 1, 1.771243749)
tdSql.checkData(3 , 1, 1.464973521)
@@ -354,19 +342,19 @@ class TDTestCase:
# test bug fix for log(c1,c2)
- tdSql.query("select c1, c2 ,log(c1,c2) from ct1")
+ tdSql.query(f"select c1, c2 ,log(c1,c2) from {dbname}.ct1")
tdSql.checkData(0 , 2, 0.182485070)
tdSql.checkData(1 , 2, 0.172791608)
tdSql.checkData(2 , 2, 0.161311499)
tdSql.checkData(3 , 2, 0.147315235)
tdSql.checkData(4 , 2, None)
- self.check_result_auto_log( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) from ct1")
- self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from ct1")
- self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 from ct1", "select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) from ct1")
+ self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1")
+ self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1")
+ self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) from {dbname}.ct1")
# nest query for log functions
- tdSql.query("select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from ct1;")
+ tdSql.query(f"select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 8)
tdSql.checkData(0 , 1 , 1.892789261)
tdSql.checkData(0 , 2 , 0.580779541)
@@ -384,36 +372,36 @@ class TDTestCase:
# # used for stable table
- tdSql.query("select log(c1, 2) from stb1")
+ tdSql.query(f"select log(c1, 2) from {dbname}.stb1")
tdSql.checkRows(25)
# used for not exists table
- tdSql.error("select log(c1, 2) from stbbb1")
- tdSql.error("select log(c1, 2) from tbname")
- tdSql.error("select log(c1, 2) from ct5")
+ tdSql.error(f"select log(c1, 2) from {dbname}.stbbb1")
+ tdSql.error(f"select log(c1, 2) from {dbname}tbname")
+ tdSql.error(f"select log(c1, 2) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, log(c1 ,2) from ct1")
+ tdSql.query(f"select c1, log(c1 ,2) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,3.000000000)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,None)
- tdSql.query("select c1, log(c1,2) from ct4")
+ tdSql.query(f"select c1, log(c1,2) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,2.321928095)
tdSql.checkData(5 , 0 ,None)
tdSql.checkData(5 , 1 ,None)
- tdSql.query("select c1, log(c1 ,2 ) from ct4 ")
+ tdSql.query(f"select c1, log(c1 ,2 ) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,2.321928095)
# mix with common functions
- tdSql.query("select c1, log(c1 ,2),c5, log(c5 ,2) from ct4 ")
+ tdSql.query(f"select c1, log(c1 ,2),c5, log(c5 ,2) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -424,34 +412,34 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,6.66000)
tdSql.checkData(3 , 3 ,2.735522144)
- tdSql.query("select c1, log(c1,1),c5, floor(c5 ) from stb1 ")
+ tdSql.query(f"select c1, log(c1,1),c5, floor(c5 ) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, log(c1 ,2),c5, count(c5) from stb1 ")
- tdSql.error("select c1, log(c1 ,2),c5, count(c5) from ct1 ")
- tdSql.error("select log(c1 ,2), count(c5) from stb1 ")
- tdSql.error("select log(c1 ,2), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, log(c1 ,2),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, log(c1 ,2),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select log(c1 ,2), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select log(c1 ,2), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# # bug fix for compute
- tdSql.query("select c1, log(c1 ,2) -0 ,log(c1-4 ,2)-0 from ct4 ")
+ tdSql.query(f"select c1, log(c1 ,2) -0 ,log(c1-4 ,2)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -459,7 +447,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 3.000000000)
tdSql.checkData(1, 2, 2.000000000)
- tdSql.query(" select c1, log(c1 ,2) -0 ,log(c1-0.1 ,2)-0.1 from ct4")
+ tdSql.query(f"select c1, log(c1 ,2) -0 ,log(c1-0.1 ,2)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -467,88 +455,87 @@ class TDTestCase:
tdSql.checkData(1, 1, 3.000000000)
tdSql.checkData(1, 2, 2.881852653)
- tdSql.query("select c1, log(c1, -10), c2, log(c2, -10), c3, log(c3, -10) from ct1")
+ tdSql.query(f"select c1, log(c1, -10), c2, log(c2, -10), c3, log(c3, -10) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, log(c1, 100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, 0.112886248)
tdSql.checkData(1, 1, 0.105637255)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, 0.069468461)
tdSql.checkData(1, 1, 0.065007542)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, log(c1, 10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, 0.036123599)
tdSql.checkData(1, 1, 0.033803922)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, 0.026561470)
tdSql.checkData(1, 1, 0.024855825)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, 0.022577250)
tdSql.checkData(1, 1, 0.021127451)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, log(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def log_base_test(self):
+ def log_base_test(self, dbname="db"):
# base is an regular number ,int or double
- tdSql.query("select c1, log(c1, 2) from ct1")
+ tdSql.query(f"select c1, log(c1, 2) from {dbname}.ct1")
tdSql.checkData(0, 1,3.000000000)
- tdSql.query("select c1, log(c1, 2.0) from ct1")
+ tdSql.query(f"select c1, log(c1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 3.000000000)
- tdSql.query("select c1, log(1, 2.0) from ct1")
+ tdSql.query(f"select c1, log(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.000000000)
tdSql.checkRows(13)
# # bug for compute in functions
- # tdSql.query("select c1, abs(1/0) from ct1")
+ # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
- tdSql.query("select c1, log(1, 2.0) from ct1")
+ tdSql.query(f"select c1, log(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.000000000)
tdSql.checkRows(13)
# two cols start log(x,y)
- tdSql.query("select c1,c2, log(c1,c2) from ct1")
+ tdSql.query(f"select c1,c2, log(c1,c2) from {dbname}.ct1")
tdSql.checkData(0, 2, 0.182485070)
tdSql.checkData(1, 2, 0.172791608)
tdSql.checkData(4, 2, None)
- tdSql.query("select c1,c2, log(c2,c1) from ct1")
+ tdSql.query(f"select c1,c2, log(c2,c1) from {dbname}.ct1")
tdSql.checkData(0, 2, 5.479900349)
tdSql.checkData(1, 2, 5.787318105)
tdSql.checkData(4, 2, None)
- tdSql.query("select c1, log(2.0 , c1) from ct1")
+ tdSql.query(f"select c1, log(2.0 , c1) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.333333333)
tdSql.checkData(1, 1, 0.356207187)
tdSql.checkData(4, 1, None)
- tdSql.query("select c1, log(2.0 , ceil(abs(c1))) from ct1")
+ tdSql.query(f"select c1, log(2.0 , ceil(abs(c1))) from {dbname}.ct1")
tdSql.checkData(0, 1, 0.333333333)
tdSql.checkData(1, 1, 0.356207187)
tdSql.checkData(4, 1, None)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -556,7 +543,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -564,15 +551,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
- tdSql.checkRows(1)
- tdSql.checkData(0,0,5)
- tdSql.checkData(0,1,5.000000000)
- tdSql.checkData(0,2,5.000000000)
- tdSql.checkData(0,3,4.900000000)
- tdSql.checkData(0,4,2.000000000)
-
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>log(c1,2) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -581,46 +560,43 @@ class TDTestCase:
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,3.000000000)
- def log_Arithmetic(self):
- pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
time.sleep(3)
- tdSql.execute("use bound_test")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_log( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from sub1_bound")
- self.check_result_auto_log2( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from sub1_bound")
- self.check_result_auto_log__10( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from sub1_bound")
+ self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
+ self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from {dbname}.sub1_bound")
- self.check_result_auto_log2( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from sub1_bound")
- self.check_result_auto_log( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from sub1_bound")
+ self.check_result_auto_log2( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound")
- self.check_result_auto_log2("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select log(abs(c1) ,2) from sub1_bound" )
+ self.check_result_auto_log2(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from sub1_bound ")
+ tdSql.query(f"select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.log(2147483647,2))
tdSql.checkData(0,1,math.log(9223372036854775807 ,2))
tdSql.checkData(0,2,math.log(32767,2))
@@ -641,7 +617,7 @@ class TDTestCase:
tdSql.checkData(3,5,math.log(169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000,2))
# check basic elem for table per row
- tdSql.query("select log(abs(c1)) ,log(abs(c2)) , log(abs(c3)) , log(abs(c4)), log(abs(c5)), log(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select log(abs(c1)) ,log(abs(c2)) , log(abs(c3)) , log(abs(c4)), log(abs(c5)), log(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.log(2147483647))
tdSql.checkData(0,1,math.log(9223372036854775807))
tdSql.checkData(0,2,math.log(32767))
@@ -661,28 +637,25 @@ class TDTestCase:
tdSql.checkData(3,4,math.log(339999995214436424907732413799364296704.00000))
tdSql.checkData(3,5,math.log(169999999999999993883079578865998174333346074304075874502773119193537729178160565864330091787584707988572262467983188919169916105593357174268369962062473635296474636515660464935663040684957844303524367815028553272712298986386310828644513212353921123253311675499856875650512437415429217994623324794855339589632.000000000))
-
-
# check + - * / in functions
- tdSql.query("select log(abs(c1+1) ,2) ,log(abs(c2),2) , log(abs(c3*1),2) , log(abs(c4/2),2), log(abs(c5) ,2)/2, log(abs(c6) ,2) from sub1_bound ")
+ tdSql.query(f"select log(abs(c1+1) ,2) ,log(abs(c2),2) , log(abs(c3*1),2) , log(abs(c4/2),2), log(abs(c5) ,2)/2, log(abs(c6) ,2) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.log(2147483648.000000000,2))
tdSql.checkData(0,1,math.log(9223372036854775807,2))
tdSql.checkData(0,2,math.log(32767.000000000,2))
tdSql.checkData(0,3,math.log(63.500000000,2))
tdSql.checkData(0,4,63.999401166)
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_log2( " select c5 from stb1 order by ts " , "select log(c5,2) from stb1 order by ts" )
- self.check_result_auto_log2( " select c5 from stb1 order by tbname " , "select log(c5,2) from stb1 order by tbname" )
- self.check_result_auto_log2( " select c5 from stb1 where c1 > 0 order by tbname " , "select log(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( " select c5 from stb1 where c1 > 0 order by tbname " , "select log(c5,2) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
- self.check_result_auto_log2( " select t1,c5 from stb1 order by ts " , "select log(t1,2), log(c5,2) from stb1 order by ts" )
- self.check_result_auto_log2( " select t1,c5 from stb1 order by tbname " , "select log(t1,2) ,log(c5,2) from stb1 order by tbname" )
- self.check_result_auto_log2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select log(t1,2) ,log(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select log(t1,2) , log(c5,2) from stb1 where c1 > 0 order by tbname" )
- pass
+ self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
+ self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py
index d7dc5e6143..0995dfc6ff 100644
--- a/tests/system-test/2-query/mavg.py
+++ b/tests/system-test/2-query/mavg.py
@@ -25,13 +25,13 @@ from util.cases import *
from util.sql import *
from util.dnodes import *
-
+dbname = 'db'
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
- def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+ def mavg_query_form(self, sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{dbname}.t1", condition=""):
'''
mavg function:
@@ -50,7 +50,7 @@ class TDTestCase:
return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}"
- def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+ def checkmavg(self,sel="select", func="mavg(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{dbname}.t1", condition=""):
# print(self.mavg_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
# table_expr=table_expr, condition=condition))
line = sys._getframe().f_back.f_lineno
@@ -62,7 +62,7 @@ class TDTestCase:
table_expr=table_expr, condition=condition
))
- sql = "select * from t1"
+ sql = f"select * from {dbname}.t1"
collist = tdSql.getColNameList(sql)
if not isinstance(col, str):
@@ -326,9 +326,9 @@ class TDTestCase:
self.checkmavg(**case6)
# # case7~8: nested query
- # case7 = {"table_expr": "(select c1 from stb1)"}
+ # case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"}
# self.checkmavg(**case7)
- # case8 = {"table_expr": "(select mavg(c1, 1) c1 from stb1 group by tbname)"}
+ # case8 = {"table_expr": f"(select mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"}
# self.checkmavg(**case8)
# case9~10: mix with tbname/ts/tag/col
@@ -362,7 +362,7 @@ class TDTestCase:
self.checkmavg(**case17)
# # case18~19: with group by
# case19 = {
- # "table_expr": "stb1",
+ # "table_expr": f"{dbname}.stb1",
# "condition": "partition by tbname"
# }
# self.checkmavg(**case19)
@@ -371,14 +371,14 @@ class TDTestCase:
# case20 = {"condition": "order by ts"}
# self.checkmavg(**case20)
#case21 = {
- # "table_expr": "stb1",
+ # "table_expr": f"{dbname}.stb1",
# "condition": "group by tbname order by tbname"
#}
#self.checkmavg(**case21)
# # case22: with union
# case22 = {
- # "condition": "union all select mavg( c1 , 1 ) from t2"
+ # "condition": f"union all select mavg( c1 , 1 ) from {dbname}.t2"
# }
# self.checkmavg(**case22)
@@ -486,32 +486,33 @@ class TDTestCase:
#tdSql.query(" select mavg( c1 , 1 ) + 2 from t1 ")
err41 = {"alias": "+ avg(c1)"}
self.checkmavg(**err41) # mix with arithmetic 2
- err42 = {"alias": ", c1"}
- self.checkmavg(**err42) # mix with other col
- # err43 = {"table_expr": "stb1"}
+ # err42 = {"alias": ", c1"}
+ # self.checkmavg(**err42) # mix with other col
+ # err43 = {"table_expr": f"{dbname}.stb1"}
# self.checkmavg(**err43) # select stb directly
- err44 = {
- "col": "stb1.c1",
- "table_expr": "stb1, stb2",
- "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
- }
- self.checkmavg(**err44) # stb join
+ # err44 = {
+ # "col": "stb1.c1",
+ # "table_expr": "stb1, stb2",
+ # "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+ # }
+ # self.checkmavg(**err44) # stb join
+ tdSql.query("select mavg( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;")
err45 = {
"condition": "where ts>0 and ts < now interval(1h) fill(next)"
}
self.checkmavg(**err45) # interval
err46 = {
- "table_expr": "t1",
+ "table_expr": f"{dbname}.t1",
"condition": "group by c6"
}
self.checkmavg(**err46) # group by normal col
err47 = {
- "table_expr": "stb1",
+ "table_expr": f"{dbname}.stb1",
"condition": "group by tbname slimit 1 "
}
# self.checkmavg(**err47) # with slimit
err48 = {
- "table_expr": "stb1",
+ "table_expr": f"{dbname}.stb1",
"condition": "group by tbname slimit 1 soffset 1"
}
# self.checkmavg(**err48) # with soffset
@@ -554,8 +555,8 @@ class TDTestCase:
err67 = {"k": 0.999999}
self.checkmavg(**err67) # k: left out of [1, 1000]
err68 = {
- "table_expr": "stb1",
- "condition": "group by tbname order by tbname" # order by tbname not supported
+ "table_expr": f"{dbname}.stb1",
+ "condition": f"group by tbname order by tbname" # order by tbname not supported
}
self.checkmavg(**err68)
@@ -565,42 +566,42 @@ class TDTestCase:
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into {dbname}.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into {dbname}.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
- f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
pass
def mavg_test_table(self,tbnum: int) -> None :
- tdSql.execute("drop database if exists db")
- tdSql.execute("create database if not exists db keep 3650")
- tdSql.execute("use db")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650")
+ tdSql.execute(f"use {dbname}")
tdSql.execute(
- "create stable db.stb1 (\
+ f"create stable {dbname}.stb1 (\
ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
) \
tags(st1 int)"
)
tdSql.execute(
- "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(st2 int)"
)
for i in range(tbnum):
- tdSql.execute(f"create table t{i} using stb1 tags({i})")
- tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+ tdSql.execute(f"create table {dbname}.t{i} using {dbname}.stb1 tags({i})")
+ tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
pass
@@ -617,25 +618,25 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5})")
self.mavg_current_query()
self.mavg_error_query()
tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
# self.mavg_test_table(tbnum)
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
+ # tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
# f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
+ # tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
# f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
# self.mavg_current_query()
# self.mavg_error_query()
tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):")
# self.mavg_test_table(tbnum)
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
+ # tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
# f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
+ # tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
# f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
# self.mavg_current_query()
# self.mavg_error_query()
@@ -649,9 +650,9 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
self.mavg_current_query()
self.mavg_error_query()
@@ -664,67 +665,64 @@ class TDTestCase:
tdDnodes.start(index)
self.mavg_current_query()
self.mavg_error_query()
- tdSql.query("select mavg(1,1) from t1")
+ tdSql.query(f"select mavg(1,1) from {dbname}.t1")
tdSql.checkRows(7)
tdSql.checkData(0,0,1.000000000)
tdSql.checkData(1,0,1.000000000)
tdSql.checkData(5,0,1.000000000)
- tdSql.query("select mavg(abs(c1),1) from t1")
+ tdSql.query(f"select mavg(abs(c1),1) from {dbname}.t1")
tdSql.checkRows(4)
def mavg_support_stable(self):
- tdSql.query(" select mavg(1,3) from stb1 ")
+ tdSql.query(f" select mavg(1,3) from {dbname}.stb1 ")
tdSql.checkRows(68)
tdSql.checkData(0,0,1.000000000)
- tdSql.query("select mavg(c1,3) from stb1 partition by tbname ")
+ tdSql.query(f"select mavg(c1,3) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(20)
- # tdSql.query("select mavg(st1,3) from stb1 partition by tbname")
- # tdSql.checkRows(38)
- tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
+ tdSql.query(f"select mavg(st1,3) from {dbname}.stb1 partition by tbname")
+ tdSql.checkRows(50)
+ tdSql.query(f"select mavg(st1+c1,3) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
- tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
+ tdSql.query(f"select mavg(st1+c1,3) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
- tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
+ tdSql.query(f"select mavg(st1+c1,3) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
- # # bug need fix
- # tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname slimit 1 ")
- # tdSql.checkRows(2)
- # tdSql.error("select mavg(st1+c1,3) from stb1 partition by tbname limit 1 ")
# bug need fix
- tdSql.query("select mavg(st1+c1,3) from stb1 partition by tbname")
+ tdSql.query(f"select mavg(st1+c1,3) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(20)
# bug need fix
- # tdSql.query("select tbname , mavg(c1,3) from stb1 partition by tbname")
- # tdSql.checkRows(38)
- # tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname")
- # tdSql.checkRows(38)
- # tdSql.query("select tbname , mavg(st1,3) from stb1 partition by tbname slimit 1")
- # tdSql.checkRows(2)
+ tdSql.query(f"select tbname , mavg(c1,3) from {dbname}.stb1 partition by tbname")
+ tdSql.checkRows(20)
+ tdSql.query(f"select tbname , mavg(st1,3) from {dbname}.stb1 partition by tbname")
+ tdSql.checkRows(50)
+ tdSql.query(f"select tbname , mavg(st1,3) from {dbname}.stb1 partition by tbname slimit 1")
+ tdSql.checkRows(5)
# partition by tags
- # tdSql.query("select st1 , mavg(c1,3) from stb1 partition by st1")
- # tdSql.checkRows(38)
- # tdSql.query("select mavg(c1,3) from stb1 partition by st1")
- # tdSql.checkRows(38)
- # tdSql.query("select st1 , mavg(c1,3) from stb1 partition by st1 slimit 1")
- # tdSql.checkRows(2)
- # tdSql.query("select mavg(c1,3) from stb1 partition by st1 slimit 1")
- # tdSql.checkRows(2)
+ tdSql.query(f"select st1 , mavg(c1,3) from {dbname}.stb1 partition by st1")
+ tdSql.checkRows(20)
+ tdSql.query(f"select mavg(c1,3) from {dbname}.stb1 partition by st1")
+ tdSql.checkRows(20)
+ tdSql.query(f"select st1 , mavg(c1,3) from {dbname}.stb1 partition by st1 slimit 1")
+ tdSql.checkRows(2)
+ tdSql.query(f"select mavg(c1,3) from {dbname}.stb1 partition by st1 slimit 1")
+ tdSql.checkRows(2)
# partition by col
- # tdSql.query("select c1 , mavg(c1,3) from stb1 partition by c1")
- # tdSql.checkRows(38)
- # tdSql.query("select mavg(c1 ,3) from stb1 partition by c1")
- # tdSql.checkRows(38)
- # tdSql.query("select c1 , mavg(c1,3) from stb1 partition by st1 slimit 1")
- # tdSql.checkRows(2)
- # tdSql.query("select diff(c1) from stb1 partition by st1 slimit 1")
- # tdSql.checkRows(2)
+ tdSql.query(f"select c1 , mavg(c1,3) from {dbname}.stb1 partition by c1")
+ tdSql.checkRows(0)
+ tdSql.query(f"select c1 , mavg(c1,1) from {dbname}.stb1 partition by c1")
+ tdSql.checkRows(40)
+ tdSql.query(f"select c1, c2, c3, c4, mavg(c1,3) from {dbname}.stb1 partition by tbname ")
+ tdSql.checkRows(20)
+ tdSql.query(f"select c1, c2, c3, c4, mavg(123,3) from {dbname}.stb1 partition by tbname ")
+ tdSql.checkRows(50)
+
def run(self):
import traceback
diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py
index 46d2062341..45be0ef8ab 100644
--- a/tests/system-test/2-query/sample.py
+++ b/tests/system-test/2-query/sample.py
@@ -873,7 +873,7 @@ class TDTestCase:
# bug need fix
tdSql.query("select c1 ,t1, sample(c1,2) from db.stb1 partition by c1 ")
tdSql.query("select sample(c1,2) from db.stb1 partition by c1 ")
- # tdSql.query("select c1 ,ind, sample(c1,2) from sample_db.st partition by c1 ")
+ tdSql.query("select c1 ,ind, sample(c1,2) from sample_db.st partition by c1 ")
def run(self):
import traceback
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index e9331de6bf..1e958bdb29 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -110,15 +110,20 @@ python3 ./test.py -f 2-query/histogram.py
python3 ./test.py -f 2-query/histogram.py -R
python3 ./test.py -f 2-query/hyperloglog.py
python3 ./test.py -f 2-query/hyperloglog.py -R
-python3 ./test.py -f 2-query/irate.py
-# python3 ./test.py -f 2-query/irate.py -R
-python3 ./test.py -f 2-query/join.py
-python3 ./test.py -f 2-query/join.py -R
-
python3 ./test.py -f 2-query/interp.py
python3 ./test.py -f 2-query/interp.py -R
-
-
+python3 ./test.py -f 2-query/irate.py
+python3 ./test.py -f 2-query/irate.py -R
+python3 ./test.py -f 2-query/join.py
+python3 ./test.py -f 2-query/join.py -R
+python3 ./test.py -f 2-query/last_row.py
+python3 ./test.py -f 2-query/last_row.py -R
+python3 ./test.py -f 2-query/last.py
+python3 ./test.py -f 2-query/last.py -R
+python3 ./test.py -f 2-query/leastsquares.py
+python3 ./test.py -f 2-query/leastsquares.py -R
+python3 ./test.py -f 2-query/length.py
+python3 ./test.py -f 2-query/length.py -R
python3 ./test.py -f 1-insert/update_data.py
@@ -127,7 +132,6 @@ python3 ./test.py -f 1-insert/delete_data.py
python3 ./test.py -f 2-query/varchar.py
python3 ./test.py -f 2-query/ltrim.py
python3 ./test.py -f 2-query/rtrim.py
-python3 ./test.py -f 2-query/length.py
python3 ./test.py -f 2-query/upper.py
python3 ./test.py -f 2-query/lower.py
python3 ./test.py -f 2-query/join2.py
@@ -136,7 +140,6 @@ python3 ./test.py -f 2-query/union.py
python3 ./test.py -f 2-query/union1.py
python3 ./test.py -f 2-query/concat2.py
python3 ./test.py -f 2-query/spread.py
-python3 ./test.py -f 2-query/leastsquares.py
python3 ./test.py -f 2-query/timezone.py
@@ -144,7 +147,6 @@ python3 ./test.py -f 2-query/Now.py
python3 ./test.py -f 2-query/Today.py
python3 ./test.py -f 2-query/max.py
python3 ./test.py -f 2-query/min.py
-python3 ./test.py -f 2-query/last.py
python3 ./test.py -f 2-query/To_iso8601.py
python3 ./test.py -f 2-query/To_unixtimestamp.py
python3 ./test.py -f 2-query/timetruncate.py
@@ -167,7 +169,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/csum.py
-#python3 ./test.py -f 2-query/mavg.py
+python3 ./test.py -f 2-query/mavg.py
python3 ./test.py -f 2-query/sample.py
python3 ./test.py -f 2-query/function_diff.py
python3 ./test.py -f 2-query/unique.py
@@ -178,7 +180,6 @@ python3 ./test.py -f 2-query/ttl_comment.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/queryQnode.py
python3 ./test.py -f 2-query/max_partition.py
-python3 ./test.py -f 2-query/last_row.py
python3 ./test.py -f 2-query/tsbsQuery.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
@@ -357,7 +358,7 @@ python3 ./test.py -f 2-query/interp.py -Q 2
python3 ./test.py -f 2-query/avg.py -Q 2
# python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/csum.py -Q 2
-#python3 ./test.py -f 2-query/mavg.py -Q 2
+python3 ./test.py -f 2-query/mavg.py -Q 2
python3 ./test.py -f 2-query/sample.py -Q 2
python3 ./test.py -f 2-query/function_diff.py -Q 2
python3 ./test.py -f 2-query/unique.py -Q 2
@@ -444,7 +445,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
# python3 ./test.py -f 2-query/avg.py -Q 3
# python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/csum.py -Q 3
-#python3 ./test.py -f 2-query/mavg.py -Q 3
+python3 ./test.py -f 2-query/mavg.py -Q 3
python3 ./test.py -f 2-query/sample.py -Q 3
python3 ./test.py -f 2-query/function_diff.py -Q 3
python3 ./test.py -f 2-query/unique.py -Q 3
diff --git a/tests/system-test/simpletest.bat b/tests/system-test/simpletest.bat
index 656828aa1e..cc4ae17955 100644
--- a/tests/system-test/simpletest.bat
+++ b/tests/system-test/simpletest.bat
@@ -4,9 +4,9 @@ python3 .\test.py -f 0-others\taosShellError.py
python3 .\test.py -f 0-others\taosShellNetChk.py
python3 .\test.py -f 0-others\telemetry.py
python3 .\test.py -f 0-others\taosdMonitor.py
-python3 .\test.py -f 0-others\udfTest.py
-python3 .\test.py -f 0-others\udf_create.py
-python3 .\test.py -f 0-others\udf_restart_taosd.py
+@REM python3 .\test.py -f 0-others\udfTest.py
+@REM python3 .\test.py -f 0-others\udf_create.py
+@REM python3 .\test.py -f 0-others\udf_restart_taosd.py
@REM python3 .\test.py -f 0-others\cachelast.py
@REM python3 .\test.py -f 0-others\user_control.py
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index 5dc6139410..2f482e4277 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -194,7 +194,7 @@ if __name__ == "__main__":
processID = subprocess.check_output(psCmd, shell=True)
for port in range(6030, 6041):
- usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port
+ usePortPID = "lsof -i tcp:%d | grep LISTEN | awk '{print $2}'" % port
processID = subprocess.check_output(usePortPID, shell=True)
if processID:
@@ -206,11 +206,13 @@ if __name__ == "__main__":
time.sleep(2)
if restful:
- toBeKilled = "taosadapter"
+ toBeKilled = "taosadapt"
- killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
+ # killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled
+ killCmd = f"pkill {toBeKilled}"
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
+ # psCmd = f"pgrep {toBeKilled}"
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
@@ -218,15 +220,15 @@ if __name__ == "__main__":
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
- for port in range(6030, 6041):
- usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port
- processID = subprocess.check_output(usePortPID, shell=True)
+ port = 6041
+ usePortPID = f"lsof -i tcp:{port} | grep LISTEN | awk '{{print $2}}'"
+ processID = subprocess.check_output(usePortPID, shell=True)
- if processID:
- killCmd = "kill -TERM %s" % processID
- os.system(killCmd)
- fuserCmd = "fuser -k -n tcp %d" % port
- os.system(fuserCmd)
+ if processID:
+ killCmd = f"kill -TERM {processID}"
+ os.system(killCmd)
+ fuserCmd = f"fuser -k -n tcp {port}"
+ os.system(fuserCmd)
tdLog.info('stop taosadapter')
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 5751c347e3..5eec174618 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -99,11 +99,20 @@ ELSE ()
MESSAGE("CURRENT SOURCE DIR ${CMAKE_CURRENT_SOURCE_DIR}")
IF (TD_LINUX)
include(ExternalProject)
+ set(_upx_prefix "$ENV{HOME}/.taos/externals/upx")
+ ExternalProject_Add(upx
+ PREFIX "${_upx_prefix}"
+ URL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz
+ CONFIGURE_COMMAND cmake -E true
+ BUILD_COMMAND cmake -E true
+ INSTALL_COMMAND cmake -E true
+ )
+
ExternalProject_Add(taosadapter
PREFIX "taosadapter"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
BUILD_ALWAYS off
- DEPENDS taos
+ DEPENDS taos upx
BUILD_IN_SOURCE 1
CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
PATCH_COMMAND
@@ -112,13 +121,14 @@ ELSE ()
COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
- COMMAND wget -nc https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -O $ENV{HOME}/upx.tar.xz && tar -xvJf $ENV{HOME}/upx.tar.xz -C $ENV{HOME}/ --strip-components 1 > /dev/null && $ENV{HOME}/upx taosadapter || :
+ COMMAND ${_upx_prefix}/src/upx/upx taosadapter
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
)
+ unset(_upx_prefix)
ELSEIF (TD_DARWIN)
include(ExternalProject)
ExternalProject_Add(taosadapter
@@ -140,7 +150,42 @@ ELSE ()
COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
COMMAND cmake -E copy taosadapter-debug ${CMAKE_BINARY_DIR}/build/bin
)
+# unset(_upx_prefix)
+ ELSEIF (TD_WINDOWS)
+ include(ExternalProject)
+ set(_upx_prefix "${CMAKE_BINARY_DIR}/.taos/externals/upx")
+ ExternalProject_Add(upx
+ PREFIX "${_upx_prefix}"
+ URL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-win32.zip
+ CONFIGURE_COMMAND cmake -E true
+ BUILD_COMMAND cmake -E true
+ INSTALL_COMMAND cmake -E true
+ )
+
+ ExternalProject_Add(taosadapter
+ PREFIX "taosadapter"
+ SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
+ BUILD_ALWAYS off
+ DEPENDS taos
+ BUILD_IN_SOURCE 1
+ CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
+ PATCH_COMMAND
+ COMMAND git clean -f -d
+ BUILD_COMMAND
+ COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client
+ COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib
+ COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ INSTALL_COMMAND
+ COMMAND ${_upx_prefix}/src/upx/upx taosadapter.exe
+ COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin
+ COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
+ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
+ COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
+ COMMAND cmake -E copy taosadapter-debug.exe ${CMAKE_BINARY_DIR}/build/bin
+ )
+ unset(_upx_prefix)
ELSE ()
- MESSAGE("${Yellow} Windows system still use original embedded httpd ${ColourReset}")
+ MESSAGE("${Yellow} taosAdapter Not supported yet ${ColourReset}")
ENDIF ()
ENDIF ()