Merge branch 'main' of https://github.com/taosdata/TDengine into main

This commit is contained in: commit 1bdaf1c633

@@ -201,6 +201,7 @@ def pre_test_win(){
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git clean -fxd
git reset --hard
git remote prune origin
git fetch
@@ -60,7 +60,7 @@ sudo apt-get install -y gcc cmake build-essential git libssl-dev
为了在 Ubuntu/Debian 系统上编译 [taos-tools](https://github.com/taosdata/taos-tools) 需要安装如下软件:

```bash
-sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
+sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
```

### CentOS 7.9
@@ -85,7 +85,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel

```
-sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
+sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

#### CentOS 8/Rocky Linux
@@ -94,7 +94,7 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco
sudo yum install -y epel-release
sudo yum install -y dnf-plugins-core
sudo yum config-manager --set-enabled powertools
-sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
+sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

注意:由于 snappy 缺乏 pkg-config 支持(参考 [链接](https://github.com/google/snappy/pull/86)),会导致 cmake 提示无法发现 libsnappy,实际上工作正常。
@@ -31,7 +31,7 @@ TDengine is an open source, high-performance, cloud native [time-series database

- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.

-- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
+- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 19.9k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.

For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/). The easiest way to experience TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
@@ -62,7 +62,7 @@ sudo apt-get install -y gcc cmake build-essential git libssl-dev
To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debian, the following packages need to be installed.

```bash
-sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config
+sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
```

### CentOS 7.9
@@ -85,7 +85,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
#### CentOS 7.9

```
-sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
+sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

#### CentOS 8/Rocky Linux
@@ -94,7 +94,7 @@ sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgco
sudo yum install -y epel-release
sudo yum install -y dnf-plugins-core
sudo yum config-manager --set-enabled powertools
-sudo yum install -y zlib-devel xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libstdc++-static openssl-devel
+sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
```

Note: Since snappy lacks pkg-config support (refer to [link](https://github.com/google/snappy/pull/86)), cmake reports that libsnappy cannot be found, but snappy still works correctly.
@ -123,15 +123,37 @@ ELSE ()
|
|||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
|
||||
ENDIF ()
|
||||
|
||||
MESSAGE("System processor ID: ${CMAKE_SYSTEM_PROCESSOR}")
|
||||
IF (TD_INTEL_64 OR TD_INTEL_32)
|
||||
ADD_DEFINITIONS("-msse4.2")
|
||||
IF("${FMA_SUPPORT}" MATCHES "true")
|
||||
MESSAGE(STATUS "turn fma function support on")
|
||||
ADD_DEFINITIONS("-mfma")
|
||||
ELSE ()
|
||||
MESSAGE(STATUS "turn fma function support off")
|
||||
INCLUDE(CheckCCompilerFlag)
|
||||
IF (("${CMAKE_C_COMPILER_ID}" MATCHES "Clang") OR ("${CMAKE_C_COMPILER_ID}" MATCHES "AppleClang"))
|
||||
SET(COMPILER_SUPPORT_SSE42 true)
|
||||
MESSAGE(STATUS "Always enable sse4.2 for Clang/AppleClang")
|
||||
ELSE()
|
||||
CHECK_C_COMPILER_FLAG("-msse4.2" COMPILER_SUPPORT_SSE42)
|
||||
ENDIF()
|
||||
|
||||
CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
|
||||
CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX)
|
||||
CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
|
||||
|
||||
IF (COMPILER_SUPPORT_SSE42)
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
|
||||
ENDIF()
|
||||
IF (COMPILER_SUPPORT_FMA)
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
|
||||
ENDIF()
|
||||
|
||||
IF ("${SIMD_SUPPORT}" MATCHES "true")
|
||||
IF (COMPILER_SUPPORT_AVX)
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
|
||||
ENDIF()
|
||||
ENDIF ()
|
||||
IF (COMPILER_SUPPORT_AVX2)
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx2")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2")
|
||||
ENDIF()
|
||||
MESSAGE(STATUS "SIMD instructions (AVX/AVX2) is ACTIVATED")
|
||||
ENDIF()
|
||||
|
||||
ENDIF ()
|
||||
|
|
|
@ -1,20 +1,17 @@
|
|||
cmake_minimum_required(VERSION 3.0)
|
||||
|
||||
MESSAGE("Current system is ${CMAKE_SYSTEM_NAME}")
|
||||
|
||||
# init
|
||||
SET(TD_LINUX FALSE)
|
||||
SET(TD_WINDOWS FALSE)
|
||||
SET(TD_DARWIN FALSE)
|
||||
|
||||
MESSAGE("Compiler ID: ${CMAKE_CXX_COMPILER_ID}")
|
||||
if(CMAKE_COMPILER_IS_GNUCXX MATCHES 1)
|
||||
set(CXX_COMPILER_IS_GNU TRUE)
|
||||
else()
|
||||
set(CXX_COMPILER_IS_GNU FALSE)
|
||||
endif()
|
||||
|
||||
MESSAGE("Current system name is ${CMAKE_SYSTEM_NAME}.")
|
||||
MESSAGE("Current system: ${CMAKE_SYSTEM_NAME}")
|
||||
|
||||
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
|
||||
|
||||
|
@ -26,6 +23,8 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
|
|||
set(CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS "${CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS} -undefined dynamic_lookup")
|
||||
ENDIF ()
|
||||
|
||||
MESSAGE("Current system processor: ${CMAKE_SYSTEM_PROCESSOR}")
|
||||
|
||||
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
|
||||
|
||||
SET(TD_LINUX TRUE)
|
||||
|
@ -44,7 +43,6 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
|
|||
SET(OSTYPE "macOS")
|
||||
ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
|
||||
|
||||
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
|
||||
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
|
||||
MESSAGE("Current system arch is arm64")
|
||||
SET(TD_DARWIN_64 TRUE)
|
||||
|
@ -80,24 +78,24 @@ ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
|
|||
ENDIF()
|
||||
|
||||
IF ("${CPUTYPE}" STREQUAL "")
|
||||
MESSAGE(STATUS "The current platform " ${CMAKE_SYSTEM_PROCESSOR} " is detected")
|
||||
|
||||
IF (CMAKE_SYSTEM_PROCESSOR MATCHES "(amd64)|(AMD64)")
|
||||
MESSAGE(STATUS "The current platform is amd64")
|
||||
MESSAGE(STATUS "Current platform is amd64")
|
||||
SET(PLATFORM_ARCH_STR "amd64")
|
||||
SET(TD_INTEL_64 TRUE)
|
||||
ADD_DEFINITIONS("-D_TD_X86_")
|
||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)")
|
||||
MESSAGE(STATUS "The current platform is x86")
|
||||
MESSAGE(STATUS "Current platform is x86")
|
||||
SET(PLATFORM_ARCH_STR "i386")
|
||||
SET(TD_INTEL_32 TRUE)
|
||||
ADD_DEFINITIONS("-D_TD_X86_")
|
||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "armv7l")
|
||||
MESSAGE(STATUS "The current platform is aarch32")
|
||||
MESSAGE(STATUS "Current platform is aarch32")
|
||||
SET(PLATFORM_ARCH_STR "arm")
|
||||
SET(TD_ARM_32 TRUE)
|
||||
ADD_DEFINITIONS("-D_TD_ARM_")
|
||||
ADD_DEFINITIONS("-D_TD_ARM_32")
|
||||
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64)|(arm64)")
|
||||
MESSAGE(STATUS "The current platform is aarch64")
|
||||
MESSAGE(STATUS "Current platform is aarch64")
|
||||
SET(PLATFORM_ARCH_STR "arm64")
|
||||
SET(TD_ARM_64 TRUE)
|
||||
ADD_DEFINITIONS("-D_TD_ARM_")
|
||||
|
@ -149,7 +147,7 @@ ELSE ()
|
|||
ENDIF ()
|
||||
ENDIF ()
|
||||
|
||||
MESSAGE(STATUS "platform arch:" ${PLATFORM_ARCH_STR})
|
||||
MESSAGE(STATUS "Platform arch:" ${PLATFORM_ARCH_STR})
|
||||
|
||||
MESSAGE("C Compiler ID: ${CMAKE_C_COMPILER_ID}")
|
||||
MESSAGE("CXX Compiler ID: ${CMAKE_CXX_COMPILER_ID}")
|
||||
MESSAGE("C Compiler: ${CMAKE_C_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_C_COMPILER_VERSION})")
|
||||
MESSAGE("CXX Compiler: ${CMAKE_CXX_COMPILER} (${CMAKE_C_COMPILER_ID}, ${CMAKE_CXX_COMPILER_VERSION})")
|
||||
|
|
|
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
  SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
-  SET(TD_VER_NUMBER "3.0.1.7")
+  SET(TD_VER_NUMBER "3.0.2.0")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)

@@ -26,7 +26,7 @@ ELSEIF (HAVE_GIT)
    SET(TD_VER_GIT "no git commit id")
  ENDIF ()
ELSE ()
-  message(STATUS "no git cmd")
+  message(STATUS "no git found")
  SET(TD_VER_GIT "no git commit id")
ENDIF ()

@@ -70,9 +70,9 @@ MESSAGE(STATUS "compatible: " ${TD_VER_COMPATIBLE})
MESSAGE(STATUS "commit id: " ${TD_VER_GIT})
MESSAGE(STATUS "build date: " ${TD_VER_DATE})
MESSAGE(STATUS "build type: " ${CMAKE_BUILD_TYPE})
MESSAGE(STATUS "type: " ${TD_VER_VERTYPE})
MESSAGE(STATUS "cpu: " ${TD_VER_CPUTYPE})
MESSAGE(STATUS "os: " ${TD_VER_OSTYPE})
MESSAGE(STATUS "type: " ${TD_VER_VERTYPE})
MESSAGE(STATUS "cpu: " ${TD_VER_CPUTYPE})
MESSAGE(STATUS "os: " ${TD_VER_OSTYPE})
MESSAGE(STATUS "============= compile version parameter information end ============= ")

STRING(REPLACE "." "_" TD_LIB_VER_NUMBER ${TD_VER_NUMBER})
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
  GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
-  GIT_TAG ff7de07
+  GIT_TAG 566540d
  SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
  BINARY_DIR ""
  #BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
  GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-  GIT_TAG fab042d
+  GIT_TAG 261fcca
  SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
  BINARY_DIR ""
  #BUILD_IN_SOURCE TRUE
@@ -0,0 +1,46 @@
---
title: Write from Kafka
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import PyKafka from "./_py_kafka.mdx";

## About Kafka

Apache Kafka is an open-source distributed event streaming platform, used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications. For the key concepts of Kafka, please refer to the [Kafka documentation](https://kafka.apache.org/documentation/#gettingStarted).

### Kafka topics

Messages in Kafka are organized by topics. A topic may have one or more partitions. Kafka topics can be managed with the `kafka-topics` tool.

Create a topic named `kafka-events`:

```
bin/kafka-topics.sh --create --topic kafka-events --bootstrap-server localhost:9092
```

Alter the `kafka-events` topic to set its number of partitions to 3:

```
bin/kafka-topics.sh --alter --topic kafka-events --partitions 3 --bootstrap-server=localhost:9092
```

Show all topics and partitions in Kafka:

```
bin/kafka-topics.sh --bootstrap-server=localhost:9092 --describe
```

## Insert into TDengine

We can write data into TDengine via SQL or schemaless ingestion. For more information, please refer to [Insert Using SQL](/develop/insert-data/sql-writing/), [High Performance Writing](/develop/insert-data/high-volume/), or [Schemaless Writing](/reference/schemaless/).
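For illustration only, a minimal SQL insert of the kind a Kafka consumer might issue; the database, table, and values below are assumptions for this sketch, not part of this commit:

```sql
-- assumes a database `power` and a subtable d1001 created from the meters supertable
INSERT INTO power.d1001 VALUES ('2018-10-03 14:38:05.000', 10.3, 219, 0.31);
```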
## Examples

<Tabs defaultValue="Python" groupId="lang">
  <TabItem label="Python" value="Python">
    <PyKafka />
  </TabItem>
</Tabs>
@@ -0,0 +1,60 @@
### Python Kafka client

For Python Kafka clients, please refer to the [Kafka client list](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python). In this document, we use [kafka-python](http://github.com/dpkp/kafka-python).

### Consume from Kafka

The simplest way to consume messages from Kafka is to read them one by one, as shown below:

```
from kafka import KafkaConsumer
consumer = KafkaConsumer('my_favorite_topic')
for msg in consumer:
    print(msg)
```

For higher performance, we can consume messages from Kafka in batches:

```
from kafka import KafkaConsumer
consumer = KafkaConsumer('my_favorite_topic')
while True:
    msgs = consumer.poll(timeout_ms=500, max_records=1000)
    if msgs:
        print(msgs)
```

### Multi-threading

For even higher performance, the data read from Kafka can be processed in multiple threads, for example with Python's ThreadPoolExecutor:

```
from concurrent.futures import ThreadPoolExecutor, Future
pool = ThreadPoolExecutor(max_workers=10)
pool.submit(...)
```
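For illustration, a hedged sketch of how the batch poll loop and the thread pool might be combined; `write_batch_to_tdengine` and the other names below are assumptions for this sketch, not part of the official example:

```py
from concurrent.futures import ThreadPoolExecutor
from kafka import KafkaConsumer

def write_batch_to_tdengine(records):
    # placeholder: build an INSERT statement from the Kafka records and execute it via the taos client
    pass

consumer = KafkaConsumer('my_favorite_topic')
pool = ThreadPoolExecutor(max_workers=10)
while True:
    msgs = consumer.poll(timeout_ms=500, max_records=1000)
    for records in msgs.values():
        # one task per partition batch; each task writes its records to TDengine
        pool.submit(write_batch_to_tdengine, records)
```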
### Multi-process

For still more throughput, multiprocessing is sometimes used. In this case, the number of Kafka consumers should not be greater than the number of partitions of the Kafka topic. For example:

```
from multiprocessing import Process

ps = []
for i in range(5):
    # pass the bound method itself; calling consume() here would run it in the parent process
    p = Process(target=Consumer().consume)
    p.start()
    ps.append(p)

for p in ps:
    p.join()
```

In addition to Python's built-in multithreading and multiprocessing libraries, the third-party library gunicorn can also be used.

### Examples

```py
{{#include docs/examples/python/kafka_example.py}}
```
@@ -532,7 +532,12 @@ TIMEDIFF(expr1, expr2 [, time_unit])
#### TIMETRUNCATE

```sql
-TIMETRUNCATE(expr, time_unit)
+TIMETRUNCATE(expr, time_unit [, ignore_timezone])
+
+ignore_timezone: {
+    0
+  | 1
+}
```

**Description**: Truncate the input timestamp with unit specified by `time_unit`

@@ -548,7 +553,10 @@ TIMETRUNCATE(expr, time_unit)
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
- The precision of the returned timestamp is the same as the precision set for the current database in use.
- If the input data is not formatted as a timestamp, the returned value is NULL.
- If `1d` is used as `time_unit` to truncate the timestamp, the `ignore_timezone` option can be set to indicate whether the returned result is affected by the client time zone.
  For example, if the client time zone is set to UTC+0800, TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) returns '2020-01-01 08:00:00',
  while TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) returns '2020-01-01 00:00:00'.
  If the `ignore_timezone` option is omitted, it defaults to 1.
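As a hedged illustration of the behavior described above, the two examples from the text written as runnable queries (results assume the client time zone is UTC+0800):

```sql
SELECT TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0);  -- 2020-01-01 08:00:00
SELECT TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1);  -- 2020-01-01 00:00:00
```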
#### TIMEZONE
@@ -10,7 +10,7 @@ Because stream processing is built in to TDengine, you are no longer reliant on
## Create a Stream

```sql
-CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery
+CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
stream_options: {
 TRIGGER    [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
 WATERMARK   time

@@ -30,6 +30,8 @@ subquery: SELECT [DISTINCT] select_list

Session windows, state windows, and sliding windows are supported. When you configure a session or state window for a supertable, you must use PARTITION BY TBNAME.
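As a hedged illustration of that rule, a session-window stream on a supertable (the stream name, target table, and tolerance value below are made-up for this sketch, not from this commit):

```sql
-- a session-window stream over the meters supertable; PARTITION BY TBNAME is required here
CREATE STREAM power_sessions INTO power_session_stats AS
  SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY TBNAME SESSION(ts, 10s);
```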
The SUBTABLE clause defines the naming rules for the automatically created subtables; see the "Partitions of Stream" section below for details.

```sql
window_clause: {
    SESSION(ts_col, tol_val)
@@ -47,6 +49,47 @@ CREATE STREAM avg_vol_s INTO avg_vol AS
SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
```

## Partitions of Stream

A stream can process data in multiple partitions. Partitioning rules are defined by the PARTITION BY clause of the stream. Each partition has its own timeline and windows, is processed separately, and is written into a different subtable of the target supertable.

If a stream is created without a PARTITION BY clause, all data is written into one subtable.

If a stream is created with a PARTITION BY clause but without a SUBTABLE clause, each partition's subtable is given a randomly generated name.

If a stream is created with both a PARTITION BY clause and a SUBTABLE clause, the name of each partition's subtable is computed from the SUBTABLE clause. For example:

```sql
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
```

In the PARTITION clause, 'tbname' (the name of each subtable of the source supertable) is given the alias 'tname', and 'tname' is then used in the SUBTABLE clause. Here each auto-created subtable is named by concatenating 'new-' with the source subtable name. Other expressions are also allowed in the SUBTABLE clause, but the output type must be varchar.
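For illustration, a hedged variant that names subtables from a tag instead of the source table name; the tag `location`, the stream name, and the target table are assumptions for this sketch:

```sql
-- one subtable per location value; the SUBTABLE expression must evaluate to varchar
CREATE STREAM avg_vol_loc INTO avg_vol_by_loc SUBTABLE(CONCAT('vol-', loc)) AS
  SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY location loc INTERVAL(1m);
```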
If the generated name exceeds the TDengine table-name limit (192 characters), it is truncated. If the generated name is already used by another table, creation of the new subtable, and writing to it, will fail.

## Filling history data

Normally a stream does not process data that was already written, or is being written, into the source table while the stream is being created. Adding the FILL_HISTORY 1 option when creating the stream allows it to also process data written before and during stream creation. For example:

```sql
create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 interval(10s)
```

By combining the fill_history option with a WHERE clause, a stream can process only data from a specific time range, for example only data after a past point in time (here, 2020-01-30):

```sql
create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 where ts > '2020-01-30' interval(10s)
```

As another example, the stream can process only data within a time range whose end may lie in the future:

```sql
create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 where ts > '2020-01-30' and ts < '2023-01-01' interval(10s)
```

If a stream is completely outdated and you no longer want it to monitor or process data, it can be dropped manually; its output data is still kept.

## Delete a Stream

```sql
@@ -59,10 +59,10 @@ The different database framework specifications for various programming language
| -------------------------------------- | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- |
| **Connection Management**              | Support       | Support       | Support       | Support       | Support       | Support       |
| **Regular Query**                      | Support       | Support       | Support       | Support       | Support       | Support       |
-| **Parameter Binding**                 | Not supported | Not supported | Not supported | Support       | Not supported | Support       |
-| **Subscription (TMQ)**                | Not supported | Not supported | Not supported | Not supported | Not supported | Support       |
+| **Parameter Binding**                 | Not supported | Not supported | Support       | Support       | Not supported | Support       |
+| **Subscription (TMQ)**                | Not supported | Not supported | Support       | Not supported | Not supported | Support       |
| **Schemaless**                         | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
-| **Bulk Pulling (based on WebSocket)**  | Support       | Support       | Not supported | Support       | Not supported | Support       |
+| **Bulk Pulling (based on WebSocket)**  | Support       | Support       | Support       | Support       | Support       | Support       |
| **DataFrame**                          | Not supported | Support       | Not supported | Not supported | Not supported | Not supported |

:::warning
@@ -76,7 +76,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:

```bash
-GF_VERSION=3.2.2
+GF_VERSION=3.2.7
# from GitHub
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
# from Grafana
@@ -76,7 +76,7 @@ Development: false
### Install from source code

```
-git clone https://github.com:taosdata/kafka-connect-tdengine.git
+git clone https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
mvn clean package
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
@@ -38,15 +38,9 @@ Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/

## Data Connection Setup

-### Download TDengine plug-in to grafana plug-in directory
+### Install Grafana Plugin and Configure Data Source

-```bash
-1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
-2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
-3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
-4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
-5. sudo systemctl restart grafana-server.service
-```
+Please refer to [Install Grafana Plugin and Configure Data Source](/third-party/grafana/#install-grafana-plugin-and-configure-data-source)

### Modify /etc/telegraf/telegraf.conf
@@ -41,15 +41,9 @@ Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/

## Data Connection Setup

-### Copy the TDengine plugin to the grafana plugin directory
+### Install Grafana Plugin and Configure Data Source

-```bash
-1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
-2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
-3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
-4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
-5. sudo systemctl restart grafana-server.service
-```
+Please refer to [Install Grafana Plugin and Configure Data Source](/third-party/grafana/#install-grafana-plugin-and-configure-data-source)

### Configure collectd
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w

import Release from "/components/ReleaseV3";

+## 3.0.1.8
+
+<Release type="tdengine" version="3.0.1.8" />
+
## 3.0.1.7

<Release type="tdengine" version="3.0.1.7" />

@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat

import Release from "/components/ReleaseV3";

+## 2.3.0
+
+<Release type="tools" version="2.3.0" />
+
## 2.2.9

<Release type="tools" version="2.2.9" />
@@ -5,22 +5,24 @@ namespace Examples
{
    public class WSConnExample
    {
-        static void Main(string[] args)
+        static int Main(string[] args)
        {
            string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
            IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);

            if (wsConn == IntPtr.Zero)
            {
-                throw new Exception("get WS connection failed");
+                Console.WriteLine("get WS connection failed");
+                return -1;
            }
            else
            {
                Console.WriteLine("Establish connect success.");
-                // close connection.
-                LibTaosWS.WSClose(wsConn);
            }

+            // close connection.
+            LibTaosWS.WSClose(wsConn);
+            return 0;
        }
    }
}
@ -5,7 +5,7 @@ namespace Examples
|
|||
{
|
||||
public class WSInsertExample
|
||||
{
|
||||
static void Main(string[] args)
|
||||
static int Main(string[] args)
|
||||
{
|
||||
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
|
||||
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
|
||||
|
@ -13,7 +13,8 @@ namespace Examples
|
|||
// Assert if connection is validate
|
||||
if (wsConn == IntPtr.Zero)
|
||||
{
|
||||
throw new Exception("get WS connection failed");
|
||||
Console.WriteLine("get WS connection failed");
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -36,6 +37,8 @@ namespace Examples
|
|||
|
||||
// close connection.
|
||||
LibTaosWS.WSClose(wsConn);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ValidInsert(string desc, IntPtr wsRes)
|
||||
|
@ -43,7 +46,7 @@ namespace Examples
|
|||
int code = LibTaosWS.WSErrorNo(wsRes);
|
||||
if (code != 0)
|
||||
{
|
||||
throw new Exception($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
|
||||
Console.WriteLine($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -55,4 +58,4 @@ namespace Examples
|
|||
}
|
||||
// Establish connect success.
|
||||
// create table success affect 0 rows, cost 3717542 nanoseconds
|
||||
// insert data success affect 8 rows, cost 2613637 nanoseconds
|
||||
// insert data success affect 8 rows, cost 2613637 nanoseconds
|
||||
|
|
|
@ -7,13 +7,14 @@ namespace Examples
|
|||
{
|
||||
public class WSQueryExample
|
||||
{
|
||||
static void Main(string[] args)
|
||||
static int Main(string[] args)
|
||||
{
|
||||
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
|
||||
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
|
||||
if (wsConn == IntPtr.Zero)
|
||||
{
|
||||
throw new Exception("get WS connection failed");
|
||||
Console.WriteLine("get WS connection failed");
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -28,7 +29,9 @@ namespace Examples
|
|||
int code = LibTaosWS.WSErrorNo(wsRes);
|
||||
if (code != 0)
|
||||
{
|
||||
throw new Exception($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
|
||||
Console.WriteLine($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
|
||||
LibTaosWS.WSFreeResult(wsRes);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// get meta data
|
||||
|
@ -58,6 +61,8 @@ namespace Examples
|
|||
|
||||
// close connection.
|
||||
LibTaosWS.WSClose(wsConn);
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -71,4 +76,4 @@ namespace Examples
|
|||
// 1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco | 2 |
|
||||
// 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco | 2 |
|
||||
// 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 |
|
||||
// 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 |
|
||||
// 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 |
|
||||
|
|
|
@ -7,7 +7,7 @@ namespace Examples
|
|||
{
|
||||
public class WSStmtExample
|
||||
{
|
||||
static void Main(string[] args)
|
||||
static int Main(string[] args)
|
||||
{
|
||||
const string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
|
||||
const string table = "meters";
|
||||
|
@ -21,7 +21,8 @@ namespace Examples
|
|||
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
|
||||
if (wsConn == IntPtr.Zero)
|
||||
{
|
||||
throw new Exception($"get WS connection failed");
|
||||
Console.WriteLine($"get WS connection failed");
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -66,18 +67,20 @@ namespace Examples
|
|||
}
|
||||
else
|
||||
{
|
||||
throw new Exception("Init STMT failed...");
|
||||
Console.WriteLine("Init STMT failed...");
|
||||
}
|
||||
|
||||
// close connection.
|
||||
LibTaosWS.WSClose(wsConn);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ValidStmtStep(int code, IntPtr wsStmt, string desc)
|
||||
{
|
||||
if (code != 0)
|
||||
{
|
||||
throw new Exception($"{desc} failed,reason: {LibTaosWS.WSErrorStr(wsStmt)}, code: {code}");
|
||||
Console.WriteLine($"{desc} failed,reason: {LibTaosWS.WSErrorStr(wsStmt)}, code: {code}");
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -92,4 +95,4 @@ namespace Examples
|
|||
// WSStmtBindParamBatch success...
|
||||
// WSStmtAddBatch success...
|
||||
// WSStmtExecute success...
|
||||
// WS STMT insert 5 rows...
|
||||
// WS STMT insert 5 rows...
|
||||
|
|
|
@ -0,0 +1,192 @@
|
|||
#! encoding = utf-8
|
||||
import json
|
||||
import time
|
||||
from json import JSONDecodeError
|
||||
from typing import Callable
|
||||
import logging
|
||||
from concurrent.futures import ThreadPoolExecutor, Future
|
||||
|
||||
import taos
|
||||
from kafka import KafkaConsumer
|
||||
from kafka.consumer.fetcher import ConsumerRecord
|
||||
|
||||
|
||||
class Consumer(object):
|
||||
DEFAULT_CONFIGS = {
|
||||
'kafka_brokers': 'localhost:9092',
|
||||
'kafka_topic': 'python_kafka',
|
||||
'kafka_group_id': 'taos',
|
||||
'taos_host': 'localhost',
|
||||
'taos_user': 'root',
|
||||
'taos_password': 'taosdata',
|
||||
'taos_database': 'power',
|
||||
'taos_port': 6030,
|
||||
'timezone': None,
|
||||
'clean_after_testing': False,
|
||||
'bath_consume': True,
|
||||
'batch_size': 1000,
|
||||
'async_model': True,
|
||||
'workers': 10
|
||||
}
|
||||
|
||||
LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanDiego', 'California.SanJose',
|
||||
'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale',
|
||||
'California.SantaClara', 'California.Cupertino']
|
||||
|
||||
CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1'
|
||||
USE_DATABASE_SQL = 'use {}'
|
||||
DROP_TABLE_SQL = 'drop table if exists meters'
|
||||
DROP_DATABASE_SQL = 'drop database if exists {}'
|
||||
CREATE_STABLE_SQL = 'create stable meters (ts timestamp, current float, voltage int, phase float) ' \
|
||||
'tags (location binary(64), groupId int)'
|
||||
CREATE_TABLE_SQL = 'create table if not exists {} using meters tags (\'{}\', {})'
|
||||
INSERT_SQL_HEADER = "insert into "
|
||||
INSERT_PART_SQL = 'power.{} values (\'{}\', {}, {}, {})'
|
||||
|
||||
def __init__(self, **configs):
|
||||
self.config: dict = self.DEFAULT_CONFIGS
|
||||
self.config.update(configs)
|
||||
self.consumer = KafkaConsumer(
|
||||
self.config.get('kafka_topic'), # topic
|
||||
bootstrap_servers=self.config.get('kafka_brokers'),
|
||||
group_id=self.config.get('kafka_group_id'),
|
||||
)
|
||||
self.taos = taos.connect(
|
||||
host=self.config.get('taos_host'),
|
||||
user=self.config.get('taos_user'),
|
||||
password=self.config.get('taos_password'),
|
||||
port=self.config.get('taos_port'),
|
||||
timezone=self.config.get('timezone'),
|
||||
)
|
||||
if self.config.get('async_model'):
|
||||
self.pool = ThreadPoolExecutor(max_workers=self.config.get('workers'))
|
||||
self.tasks: list[Future] = []
|
||||
# tags and table mapping # key: {location}_{groupId} value:
|
||||
self.tag_table_mapping = {}
|
||||
i = 0
|
||||
for location in self.LOCATIONS:
|
||||
for j in range(1, 11):
|
||||
table_name = 'd{}'.format(i)
|
||||
self._cache_table(location=location, group_id=j, table_name=table_name)
|
||||
i += 1
|
||||
|
||||
def init_env(self):
|
||||
# create database and table
|
||||
self.taos.execute(self.DROP_DATABASE_SQL.format(self.config.get('taos_database')))
|
||||
self.taos.execute(self.CREATE_DATABASE_SQL.format(self.config.get('taos_database')))
|
||||
self.taos.execute(self.USE_DATABASE_SQL.format(self.config.get('taos_database')))
|
||||
self.taos.execute(self.DROP_TABLE_SQL)
|
||||
self.taos.execute(self.CREATE_STABLE_SQL)
|
||||
for tags, table_name in self.tag_table_mapping.items():
|
||||
location, group_id = _get_location_and_group(tags)
|
||||
self.taos.execute(self.CREATE_TABLE_SQL.format(table_name, location, group_id))
|
||||
|
||||
def consume(self):
|
||||
logging.warning('## start consumer topic-[%s]', self.config.get('kafka_topic'))
|
||||
try:
|
||||
if self.config.get('bath_consume'):
|
||||
self._run_batch(self._to_taos_batch)
|
||||
else:
|
||||
self._run(self._to_taos)
|
||||
except KeyboardInterrupt:
|
||||
logging.warning("## caught keyboard interrupt, stopping")
|
||||
finally:
|
||||
self.stop()
|
||||
|
||||
def stop(self):
|
||||
# close consumer
|
||||
if self.consumer is not None:
|
||||
self.consumer.commit()
|
||||
self.consumer.close()
|
||||
|
||||
# multi thread
|
||||
if self.config.get('async_model'):
|
||||
for task in self.tasks:
|
||||
while not task.done():
|
||||
pass
|
||||
if self.pool is not None:
|
||||
self.pool.shutdown()
|
||||
|
||||
# clean data
|
||||
if self.config.get('clean_after_testing'):
|
||||
self.taos.execute(self.DROP_TABLE_SQL)
|
||||
self.taos.execute(self.DROP_DATABASE_SQL.format(self.config.get('taos_database')))
|
||||
# close taos
|
||||
if self.taos is not None:
|
||||
self.taos.close()
|
||||
|
||||
def _run(self, f: Callable[[ConsumerRecord], bool]):
|
||||
for message in self.consumer:
|
||||
if self.config.get('async_model'):
|
||||
self.pool.submit(f(message))
|
||||
else:
|
||||
f(message)
|
||||
|
||||
def _run_batch(self, f: Callable[[list[list[ConsumerRecord]]], None]):
|
||||
while True:
|
||||
messages = self.consumer.poll(timeout_ms=500, max_records=self.config.get('batch_size'))
|
||||
if messages:
|
||||
if self.config.get('async_model'):
|
||||
self.pool.submit(f, messages.values())
|
||||
else:
|
||||
f(list(messages.values()))
|
||||
if not messages:
|
||||
time.sleep(0.1)
|
||||
|
||||
def _to_taos(self, message: ConsumerRecord) -> bool:
|
||||
sql = self.INSERT_SQL_HEADER + self._build_sql(message.value)
|
||||
if len(sql) == 0: # decode error, skip
|
||||
return True
|
||||
logging.info('## insert sql %s', sql)
|
||||
return self.taos.execute(sql=sql) == 1
|
||||
|
||||
def _to_taos_batch(self, messages: list[list[ConsumerRecord]]):
|
||||
sql = self._build_sql_batch(messages=messages)
|
||||
if len(sql) == 0: # decode error, skip
|
||||
return
|
||||
self.taos.execute(sql=sql)
|
||||
|
||||
def _build_sql(self, msg_value: str) -> str:
|
||||
try:
|
||||
data = json.loads(msg_value)
|
||||
except JSONDecodeError as e:
|
||||
logging.error('## decode message [%s] error ', msg_value, e)
|
||||
return ''
|
||||
location = data.get('location')
|
||||
group_id = data.get('groupId')
|
||||
ts = data.get('ts')
|
||||
current = data.get('current')
|
||||
voltage = data.get('voltage')
|
||||
phase = data.get('phase')
|
||||
|
||||
table_name = self._get_table_name(location=location, group_id=group_id)
|
||||
return self.INSERT_PART_SQL.format(table_name, ts, current, voltage, phase)
|
||||
|
||||
def _build_sql_batch(self, messages: list[list[ConsumerRecord]]) -> str:
|
||||
sql_list = []
|
||||
for partition_messages in messages:
|
||||
for message in partition_messages:
|
||||
sql_list.append(self._build_sql(message.value))
|
||||
|
||||
return self.INSERT_SQL_HEADER + ' '.join(sql_list)
|
||||
|
||||
def _cache_table(self, location: str, group_id: int, table_name: str):
|
||||
self.tag_table_mapping[_tag_table_mapping_key(location=location, group_id=group_id)] = table_name
|
||||
|
||||
def _get_table_name(self, location: str, group_id: int) -> str:
|
||||
return self.tag_table_mapping.get(_tag_table_mapping_key(location=location, group_id=group_id))
|
||||
|
||||
|
||||
def _tag_table_mapping_key(location: str, group_id: int):
|
||||
return '{}_{}'.format(location, group_id)
|
||||
|
||||
|
||||
def _get_location_and_group(key: str) -> (str, int):
|
||||
fields = key.split('_')
|
||||
return fields[0], fields[1]
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
consumer = Consumer(async_model=True)
|
||||
consumer.init_env()
|
||||
consumer.consume()
|
|
@@ -69,7 +69,7 @@ TDengine 的主要功能如下:

- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:通过超级表、存储计算分离、分区分片、预计算和其它技术,TDengine 能够高效地浏览、格式化和访问数据。

-- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 19k,且拥有一个活跃的开发者社区。
+- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 20k,且拥有一个活跃的开发者社区。

采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
@ -0,0 +1,47 @@
|
|||
---
|
||||
title: 从 Kafka 写入
|
||||
---
|
||||
|
||||
import Tabs from "@theme/Tabs";
|
||||
import TabItem from "@theme/TabItem";
|
||||
import PyKafka from "./_py_kafka.mdx";
|
||||
|
||||
## Kafka 介绍
|
||||
|
||||
Apache Kafka 是开源的分布式消息分发平台,被广泛应用于高性能数据管道、流式数据分析、数据集成和事件驱动类型的应用程序。Kafka 包含 Producer、Consumer 和 Topic,其中 Producer 是向 Kafka 发送消息的进程,Consumer 是从 Kafka 消费消息的进程。Kafka 相关概念可以参考[官方文档](https://kafka.apache.org/documentation/#gettingStarted)。
|
||||
|
||||
|
||||
### kafka topic
|
||||
|
||||
Kafka 的消息按 topic 组织,每个 topic 会有一到多个 partition。可以通过 kafka 的 `kafka-topics` 管理 topic。
|
||||
|
||||
创建名为 `kafka-events` 的topic:
|
||||
|
||||
```
|
||||
bin/kafka-topics.sh --create --topic kafka-events --bootstrap-server localhost:9092
|
||||
```
|
||||
|
||||
修改 `kafka-events` 的 partition 数量为 3:
|
||||
|
||||
```
|
||||
bin/kafka-topics.sh --alter --topic kafka-events --partitions 3 --bootstrap-server=localhost:9092
|
||||
```
|
||||
|
||||
展示所有的 topic 和 partition:
|
||||
|
||||
```
|
||||
bin/kafka-topics.sh --bootstrap-server=localhost:9092 --describe
|
||||
```
|
||||
|
||||
## 写入 TDengine
|
||||
|
||||
TDengine 支持 Sql 方式和 Schemaless 方式的数据写入,Sql 方式数据写入可以参考 [TDengine SQL 写入](/develop/insert-data/sql-writing/) 和 [TDengine 高效写入](/develop/insert-data/high-volume/)。Schemaless 方式数据写入可以参考 [TDengine Schemaless 写入](/reference/schemaless/) 文档。
|
||||
|
||||
## 示例代码
|
||||
|
||||
<Tabs defaultValue="Python" groupId="lang">
|
||||
<TabItem label="Python" value="Python">
|
||||
<PyKafka />
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
### python Kafka 客户端
|
||||
|
||||
Kafka 的 python 客户端可以参考文档 [kafka client](https://cwiki.apache.org/confluence/display/KAFKA/Clients#Clients-Python)。推荐使用 [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python) 和 [kafka-python](http://github.com/dpkp/kafka-python)。以下示例以 [kafka-python](http://github.com/dpkp/kafka-python) 为例。
|
||||
|
||||
### 从 Kafka 消费数据
|
||||
|
||||
Kafka 客户端采用 pull 的方式从 Kafka 消费数据,可以采用单条消费的方式或批量消费的方式读取数据。使用 [kafka-python](http://github.com/dpkp/kafka-python) 客户端单条消费数据的示例如下:
|
||||
|
||||
```
|
||||
from kafka import KafkaConsumer
|
||||
consumer = KafkaConsumer('my_favorite_topic')
|
||||
for msg in consumer:
|
||||
print (msg)
|
||||
```
|
||||
|
||||
单条消费的方式在数据流量大的情况下往往存在性能瓶颈,导致 Kafka 消息积压,更推荐使用批量消费的方式消费数据。使用 [kafka-python](http://github.com/dpkp/kafka-python) 客户端批量消费数据的示例如下:
|
||||
|
||||
```
|
||||
from kafka import KafkaConsumer
|
||||
consumer = KafkaConsumer('my_favorite_topic')
|
||||
while True:
|
||||
msgs = consumer.poll(timeout_ms=500, max_records=1000)
|
||||
if msgs:
|
||||
print (msgs)
|
||||
```
|
||||
|
||||
### Python 多线程
|
||||
|
||||
为了提高数据写入效率,通常采用多线程的方式写入数据,可以使用 python 线程池 ThreadPoolExecutor 实现多线程。示例代码如下:
|
||||
|
||||
```
|
||||
from concurrent.futures import ThreadPoolExecutor, Future
|
||||
pool = ThreadPoolExecutor(max_workers=10)
|
||||
pool.submit(...)
|
||||
```
|
||||
|
||||
### Python 多进程
|
||||
|
||||
单个python进程不能充分发挥多核 CPU 的性能,有时候我们会选择多进程的方式。在多进程的情况下,需要注意,Kafka Consumer 的数量应该小于等于 Kafka Topic Partition 数量。Python 多进程示例代码如下:
|
||||
|
||||
```
|
||||
from multiprocessing import Process
|
||||
|
||||
ps = []
|
||||
for i in range(5):
|
||||
p = Process(target=Consumer().consume())
|
||||
p.start()
|
||||
ps.append(p)
|
||||
|
||||
for p in ps:
|
||||
p.join()
|
||||
```
|
||||
|
||||
除了 Python 内置的多线程和多进程方式,还可以通过第三方库 gunicorn 实现并发。
|
||||
|
||||
### 完整示例
|
||||
|
||||
```py
|
||||
{{#include docs/examples/python/kafka_example.py}}
|
||||
```
|
|
@ -59,10 +59,10 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
|||
| ------------------------------ | -------- | ---------- | -------- | -------- | ----------- | -------- |
|
||||
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **参数绑定** | 暂不支持 | 暂不支持 | 暂不支持 | 支持 | 暂不支持 | 支持 |
|
||||
| **数据订阅(TMQ)** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 支持 |
|
||||
| **参数绑定** | 暂不支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 |
|
||||
| **数据订阅(TMQ)** | 暂不支持 | 暂不支持 | 支持 | 暂不支持 | 暂不支持 | 支持 |
|
||||
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
|
||||
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 暂不支持 | 支持 | 暂不支持 | 支持 |
|
||||
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||
|
||||
:::warning
|
||||
|
|
|
@ -533,7 +533,12 @@ TIMEDIFF(expr1, expr2 [, time_unit])
|
|||
#### TIMETRUNCATE
|
||||
|
||||
```sql
|
||||
TIMETRUNCATE(expr, time_unit)
|
||||
TIMETRUNCATE(expr, time_unit [, ignore_timezone])
|
||||
|
||||
ignore_timezone: {
|
||||
0
|
||||
| 1
|
||||
}
|
||||
```
|
||||
|
||||
**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。
|
||||
|
@ -549,6 +554,11 @@ TIMETRUNCATE(expr, time_unit)
|
|||
1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
|
||||
- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
|
||||
- 输入包含不符合时间日期格式的字符串则返回 NULL。
|
||||
- 当使用 1d 作为时间单位对时间戳进行截断时, 可通过设置 ignore_timezone 参数指定返回结果的显示是否忽略客户端时区的影响。
|
||||
例如客户端所配置时区为 UTC+0800, 则 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 0) 返回结果为 '2020-01-01 08:00:00'。
|
||||
而使用 TIMETRUNCATE('2020-01-01 23:00:00', 1d, 1) 设置忽略时区时,返回结果为 '2020-01-01 00:00:00'
|
||||
ignore_timezone 如果忽略的话,则默认值为 1 。
|
||||
|
||||
|
||||
|
||||
#### TIMEZONE
|
||||
|
@ -1085,7 +1095,7 @@ ignore_negative: {
|
|||
|
||||
```sql
|
||||
DIFF(expr [, ignore_negative])
|
||||
|
||||
|
||||
ignore_negative: {
|
||||
0
|
||||
| 1
|
||||
|
@ -1249,4 +1259,4 @@ SELECT SERVER_VERSION();
|
|||
SELECT SERVER_STATUS();
|
||||
```
|
||||
|
||||
**说明**:返回服务端当前的状态。
|
||||
**说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。
|
||||
|
|
|
@ -8,7 +8,7 @@ description: 流式计算的相关 SQL 的详细语法
|
|||
## 创建流式计算
|
||||
|
||||
```sql
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name AS subquery
|
||||
CREATE STREAM [IF NOT EXISTS] stream_name [stream_options] INTO stb_name SUBTABLE(expression) AS subquery
|
||||
stream_options: {
|
||||
TRIGGER [AT_ONCE | WINDOW_CLOSE | MAX_DELAY time]
|
||||
WATERMARK time
|
||||
|
@ -28,6 +28,9 @@ subquery: SELECT select_list
|
|||
|
||||
支持会话窗口、状态窗口与滑动窗口,其中,会话窗口与状态窗口搭配超级表时必须与partition by tbname一起使用
|
||||
|
||||
|
||||
subtable 子句定义了流式计算中创建的子表的命名规则,详见 流式计算的 partition 部分。
|
||||
|
||||
```sql
|
||||
window_clause: {
|
||||
SESSION(ts_col, tol_val)
|
||||
|
@ -49,11 +52,43 @@ SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(
|
|||
|
||||
## 流式计算的 partition
|
||||
|
||||
可以使用 PARTITION BY TBNAME 或 PARTITION BY tag,对一个流进行多分区的计算,每个分区的时间线与时间窗口是独立的,会各自聚合,并写入到目的表中的不同子表。
|
||||
可以使用 PARTITION BY TBNAME,tag,普通列或者表达式,对一个流进行多分区的计算,每个分区的时间线与时间窗口是独立的,会各自聚合,并写入到目的表中的不同子表。
|
||||
|
||||
不带 PARTITION BY 选项时,所有的数据将写入到一张子表。
|
||||
不带 PARTITION BY 子句时,所有的数据将写入到一张子表。
|
||||
|
||||
流式计算创建的超级表有唯一的 tag 列 groupId,每个 partition 会被分配唯一 groupId。与 schemaless 写入一致,我们通过 MD5 计算子表名,并自动创建它。
|
||||
在创建流时不使用 SUBTABLE 子句时,流式计算创建的超级表有唯一的 tag 列 groupId,每个 partition 会被分配唯一 groupId。与 schemaless 写入一致,我们通过 MD5 计算子表名,并自动创建它。
|
||||
|
||||
若创建流的语句中包含 SUBTABLE 子句,用户可以为每个 partition 对应的子表生成自定义的表名,例如:
|
||||
|
||||
```sql
|
||||
CREATE STREAM avg_vol_s INTO avg_vol SUBTABLE(CONCAT('new-', tname)) AS SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname tname INTERVAL(1m);
|
||||
```
|
||||
|
||||
PARTITION 子句中,为 tbname 定义了一个别名 tname, 在PARTITION 子句中的别名可以用于 SUBTABLE 子句中的表达式计算,在上述示例中,流新创建的子表将以前缀 'new-' 连接原表名作为表名。
|
||||
|
||||
注意,子表名的长度若超过 TDengine 的限制,将被截断。若要生成的子表名已经存在于另一超级表,由于 TDengine 的子表名是唯一的,因此对应新子表的创建以及数据的写入将会失败。
|
||||
|
||||
## 流式计算读取历史数据
|
||||
|
||||
正常情况下,流式计算不会处理创建前已经写入源表中的数据,若要处理已经写入的数据,可以在创建流时设置 fill_history 1 选项,这样创建的流式计算会自动处理创建前、创建中、创建后写入的数据。例如:
|
||||
|
||||
```sql
|
||||
create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 interval(10s)
|
||||
```
|
||||
|
||||
结合 fill_history 1 选项,可以实现只处理特定历史时间范围的数据,例如:只处理某历史时刻(2020年1月30日)之后的数据
|
||||
|
||||
```sql
|
||||
create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 where ts > '2020-01-30' interval(10s)
|
||||
```
|
||||
|
||||
再如,仅处理某时间段内的数据,结束时间可以是未来时间
|
||||
|
||||
```sql
|
||||
create stream if not exists s1 fill_history 1 into st1 as select count(*) from t1 where ts > '2020-01-30' and ts < '2023-01-01' interval(10s)
|
||||
```
|
||||
|
||||
如果该流任务已经彻底过期,并且您不再想让它检测或处理数据,您可以手动删除它,被计算出的数据仍会被保留。
|
||||
|
||||
## 删除流式计算
|
||||
|
||||
|
@ -72,7 +107,7 @@ SHOW STREAMS;
|
|||
若要展示更详细的信息,可以使用:
|
||||
|
||||
```sql
|
||||
SELECT * from performance_schema.`perf_streams`;
|
||||
SELECT * from information_schema.`ins_streams`;
|
||||
```
|
||||
|
||||
## 流式计算的触发模式
|
||||
|
|
|
@ -77,7 +77,7 @@ sudo -u grafana grafana-cli plugins install tdengine-datasource
|
|||
或者从 [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) 或 [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) 下载 .zip 文件到本地并解压到 Grafana 插件目录。命令行下载示例如下:
|
||||
|
||||
```bash
|
||||
GF_VERSION=3.2.2
|
||||
GF_VERSION=3.2.7
|
||||
# from GitHub
|
||||
wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
|
||||
# from Grafana
|
||||
|
|
|
@ -79,7 +79,7 @@ Development: false
|
|||
### 从源码安装
|
||||
|
||||
```
|
||||
git clone https://github.com:taosdata/kafka-connect-tdengine.git
|
||||
git clone https://github.com/taosdata/kafka-connect-tdengine.git
|
||||
cd kafka-connect-tdengine
|
||||
mvn clean package
|
||||
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
|
||||
|
|
|
@ -39,15 +39,9 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
|
|||
|
||||
## 数据链路设置
|
||||
|
||||
### 下载 TDengine 插件到 Grafana 插件目录
|
||||
### 安装 Grafana Plugin 并配置数据源
|
||||
|
||||
```bash
|
||||
1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
|
||||
2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
|
||||
3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
|
||||
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
|
||||
5. sudo systemctl restart grafana-server.service
|
||||
```
|
||||
请参考[安装 Grafana Plugin 并配置数据源](/third-party/grafana/#%E5%AE%89%E8%A3%85-grafana-plugin-%E5%B9%B6%E9%85%8D%E7%BD%AE%E6%95%B0%E6%8D%AE%E6%BA%90)。
|
||||
|
||||
### 修改 /etc/telegraf/telegraf.conf
|
||||
|
||||
|
|
|
@ -41,15 +41,9 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
|
|||
|
||||
## 数据链路设置
|
||||
|
||||
### 复制 TDengine 插件到 grafana 插件目录
|
||||
### 安装 Grafana Plugin 并配置数据源
|
||||
|
||||
```bash
|
||||
1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.3/tdengine-datasource-3.1.3.zip
|
||||
2. sudo unzip tdengine-datasource-3.1.3.zip -d /var/lib/grafana/plugins/
|
||||
3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
|
||||
4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
|
||||
5. sudo systemctl restart grafana-server.service
|
||||
```
|
||||
请参考[安装 Grafana Plugin 并配置数据源](/third-party/grafana/#%E5%AE%89%E8%A3%85-grafana-plugin-%E5%B9%B6%E9%85%8D%E7%BD%AE%E6%95%B0%E6%8D%AE%E6%BA%90)。
|
||||
|
||||
### 配置 collectd
|
||||
|
||||
|
|
|
@ -201,3 +201,45 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
|
|||
OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。
|
||||
|
||||
TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 建库时的vgroups参数影响,每个 VNode 占用的内存大小受 buffer参数 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。
|
||||
|
||||
### 19. 在macOS上遇到Too many open files怎么办?
|
||||
|
||||
taosd日志文件报错Too many open file,是由于taosd打开文件数超过系统设置的上限所致。
|
||||
解决方案如下:
|
||||
1. 新建文件 /Library/LaunchDaemons/limit.maxfiles.plist,写入以下内容(以下示例将limit和maxfiles改为10万,可按需修改):
|
||||
```
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
|
||||
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>Label</key>
|
||||
<string>limit.maxfiles</string>
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>launchctl</string>
|
||||
<string>limit</string>
|
||||
<string>maxfiles</string>
|
||||
<string>100000</string>
|
||||
<string>100000</string>
|
||||
</array>
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
<key>ServiceIPC</key>
|
||||
<false/>
|
||||
</dict>
|
||||
</plist>
|
||||
```
|
||||
2. 修改文件权限
|
||||
```
|
||||
sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist
|
||||
sudo chmod 644 /Library/LaunchDaemons/limit.maxfiles.plist
|
||||
```
|
||||
3. 加载 plist 文件 (或重启系统后生效。launchd在启动时会自动加载该目录的 plist)
|
||||
```
|
||||
sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist
|
||||
```
|
||||
4.确认更改后的限制
|
||||
```
|
||||
launchctl limit maxfiles
|
||||
```
|
||||
|
|
|
@ -10,6 +10,11 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 3.0.1.8
|
||||
|
||||
<Release type="tdengine" version="3.0.1.8" />
|
||||
|
||||
|
||||
## 3.0.1.7
|
||||
|
||||
<Release type="tdengine" version="3.0.1.7" />
|
||||
|
|
|
@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下:
|
|||
|
||||
import Release from "/components/ReleaseV3";
|
||||
|
||||
## 2.3.0
|
||||
|
||||
<Release type="tools" version="2.3.0" />
|
||||
|
||||
## 2.2.9
|
||||
|
||||
<Release type="tools" version="2.2.9" />
|
||||
|
|
|
@@ -88,7 +88,7 @@
<dependency>
    <groupId>org.apache.logging.log4j</groupId>
    <artifactId>log4j-core</artifactId>
-    <version>2.17.1</version>
+    <version>2.17.2</version>
</dependency>
<!-- junit -->
<dependency>
@ -2,12 +2,10 @@
|
|||
cd tests/examples/JDBC/taosdemo
|
||||
mvn clean package -Dmaven.test.skip=true
|
||||
# 先建表,再插入的
|
||||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||
# 不建表,直接插入的
|
||||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host [hostname] -database [database] -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||
java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -database <db name> -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100
|
||||
```
|
||||
|
||||
需求:
|
||||
1. 可以读lowa的配置文件
|
||||
2. 支持JDBC-JNI和JDBC-restful
|
||||
3. 读取配置文件,持续执行查询
|
||||
如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path
|
||||
请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/local/lib 来指定寻找共享库的路径。
|
||||
|
|
|
@@ -3,19 +3,13 @@ PROJECT(TDengine)
IF (TD_LINUX)
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
AUX_SOURCE_DIRECTORY(. SRC)
# ADD_EXECUTABLE(demo apitest.c)
#TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread )
#ADD_EXECUTABLE(sml schemaless.c)
#TARGET_LINK_LIBRARIES(sml taos_static trpc tutil pthread )
#ADD_EXECUTABLE(subscribe subscribe.c)
#TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread )
#ADD_EXECUTABLE(epoll epoll.c)
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)

add_executable(tmq "")
add_executable(stream_demo "")
add_executable(demoapi "")
add_executable(api_reqid "")
add_executable(schemaless "")
add_executable(prepare "")
add_executable(demo "")
add_executable(asyncdemo "")

target_sources(tmq
PRIVATE
@@ -27,16 +21,25 @@ IF (TD_LINUX)
"stream_demo.c"
)

target_sources(demoapi
target_sources(schemaless
PRIVATE
"demoapi.c"
"schemaless.c"
)

target_sources(api_reqid
target_sources(prepare
PRIVATE
"api_with_reqid_test.c"
"prepare.c"
)

target_sources(demo
PRIVATE
"demo.c"
)

target_sources(asyncdemo
PRIVATE
"asyncdemo.c"
)

target_link_libraries(tmq
taos_static
@@ -46,46 +49,30 @@ IF (TD_LINUX)
taos_static
)

target_link_libraries(demoapi
target_link_libraries(schemaless
taos_static
)

target_link_libraries(api_reqid
target_link_libraries(prepare
taos_static
)

target_include_directories(tmq
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
target_link_libraries(demo
taos_static
)

target_include_directories(stream_demo
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

target_include_directories(demoapi
PUBLIC "${TD_SOURCE_DIR}/include/client"
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

target_include_directories(api_reqid
PUBLIC "${TD_SOURCE_DIR}/include/client"
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
target_link_libraries(asyncdemo
taos_static
)

SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
SET_TARGET_PROPERTIES(api_reqid PROPERTIES OUTPUT_NAME api_reqid)
SET_TARGET_PROPERTIES(schemaless PROPERTIES OUTPUT_NAME schemaless)
SET_TARGET_PROPERTIES(prepare PROPERTIES OUTPUT_NAME prepare)
SET_TARGET_PROPERTIES(demo PROPERTIES OUTPUT_NAME demo)
SET_TARGET_PROPERTIES(asyncdemo PROPERTIES OUTPUT_NAME asyncdemo)
ENDIF ()
IF (TD_DARWIN)
INCLUDE_DIRECTORIES(. ${TD_SOURCE_DIR}/src/inc ${TD_SOURCE_DIR}/src/client/inc ${TD_SOURCE_DIR}/inc)
AUX_SOURCE_DIRECTORY(. SRC)
#ADD_EXECUTABLE(demo demo.c)
#TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread lua)
#ADD_EXECUTABLE(epoll epoll.c)
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
ENDIF ()
examples/c/apitest.c (1948 lines): file diff suppressed because it is too large
@@ -23,8 +23,8 @@
#include <sys/time.h>
#include <unistd.h>
#include <string.h>

#include "../../../include/client/taos.h"
#include <inttypes.h>
#include "taos.h"

int points = 5;
int numOfTables = 3;
@@ -35,7 +35,7 @@ int64_t st, et;
typedef struct {
int id;
TAOS *taos;
char name[16];
char name[32];
time_t timeStamp;
int value;
int rowsInserted;
@@ -230,7 +230,7 @@ void taos_insert_call_back(void *param, TAOS_RES *tres, int code)
if (tablesInsertProcessed >= numOfTables) {
gettimeofday(&systemTime, NULL);
et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
printf("%lld mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables);
printf("%" PRId64 " mseconds to insert %d data points\n", (et - st) / 1000, points*numOfTables);
}
}

@@ -267,7 +267,7 @@ void taos_retrieve_call_back(void *param, TAOS_RES *tres, int numOfRows)
if (tablesSelectProcessed >= numOfTables) {
gettimeofday(&systemTime, NULL);
et = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
printf("%lld mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables);
printf("%" PRId64 " mseconds to query %d data rows\n", (et - st) / 1000, points * numOfTables);
}

taos_free_result(tres);
@@ -20,7 +20,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "../../../include/client/taos.h" // TAOS header file
#include "taos.h" // TAOS header file

static void queryDB(TAOS *taos, char *command) {
int i;
@ -1,304 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
// how to use to do a pressure-test upon eok
|
||||
// tester: cat /dev/urandom | nc -c <ip> <port>
|
||||
// testee: ./debug/build/bin/epoll -l <port> > /dev/null
|
||||
// compare against: nc -l <port> > /dev/null
|
||||
// monitor and compare : glances
|
||||
|
||||
#ifdef __APPLE__
|
||||
#include "osEok.h"
|
||||
#else // __APPLE__
|
||||
#include <sys/epoll.h>
|
||||
#endif // __APPLE__
|
||||
#include <sys/types.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/socket.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <netinet/in.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <fcntl.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <arpa/inet.h>
|
||||
#include <libgen.h>
|
||||
#include <locale.h>
|
||||
#include <netdb.h>
|
||||
|
||||
#define D(fmt, ...) fprintf(stderr, "%s[%d]%s(): " fmt "\n", basename(__FILE__), __LINE__, __func__, ##__VA_ARGS__)
|
||||
#define A(statement, fmt, ...) do { \
|
||||
if (statement) break; \
|
||||
fprintf(stderr, "%s[%d]%s(): assert [%s] failed: %d[%s]: " fmt "\n", \
|
||||
basename(__FILE__), __LINE__, __func__, \
|
||||
#statement, errno, strerror(errno), \
|
||||
##__VA_ARGS__); \
|
||||
abort(); \
|
||||
} while (0)
|
||||
|
||||
#define E(fmt, ...) do { \
|
||||
fprintf(stderr, "%s[%d]%s(): %d[%s]: " fmt "\n", \
|
||||
basename(__FILE__), __LINE__, __func__, \
|
||||
errno, strerror(errno), \
|
||||
##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#include "os.h"
|
||||
|
||||
typedef struct ep_s ep_t;
|
||||
struct ep_s {
|
||||
int ep;
|
||||
|
||||
pthread_mutex_t lock;
|
||||
int sv[2]; // 0 for read, 1 for write;
|
||||
pthread_t thread;
|
||||
|
||||
volatile unsigned int stopping:1;
|
||||
volatile unsigned int waiting:1;
|
||||
volatile unsigned int wakenup:1;
|
||||
};
|
||||
|
||||
static int ep_dummy = 0;
|
||||
|
||||
static ep_t* ep_create(void);
|
||||
static void ep_destroy(ep_t *ep);
|
||||
static void* routine(void* arg);
|
||||
static int open_listen(unsigned short port);
|
||||
|
||||
typedef struct fde_s fde_t;
|
||||
struct fde_s {
|
||||
int skt;
|
||||
void (*on_event)(ep_t *ep, struct epoll_event *events, fde_t *client);
|
||||
};
|
||||
|
||||
static void listen_event(ep_t *ep, struct epoll_event *ev, fde_t *client);
|
||||
static void null_event(ep_t *ep, struct epoll_event *ev, fde_t *client);
|
||||
|
||||
#define usage(arg0, fmt, ...) do { \
|
||||
if (fmt[0]) { \
|
||||
fprintf(stderr, "" fmt "\n", ##__VA_ARGS__); \
|
||||
} \
|
||||
fprintf(stderr, "usage:\n"); \
|
||||
fprintf(stderr, " %s -l <port> : specify listenning port\n", arg0); \
|
||||
} while (0)
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
char *prg = basename(argv[0]);
|
||||
if (argc==1) {
|
||||
usage(prg, "");
|
||||
return 0;
|
||||
}
|
||||
ep_t* ep = ep_create();
|
||||
A(ep, "failed");
|
||||
for (int i=1; i<argc; ++i) {
|
||||
const char *arg = argv[i];
|
||||
if (0==strcmp(arg, "-l")) {
|
||||
++i;
|
||||
if (i>=argc) {
|
||||
usage(prg, "expecting <port> after -l, but got nothing");
|
||||
return 1; // confirmed potential leakage
|
||||
}
|
||||
arg = argv[i];
|
||||
int port = atoi(arg);
|
||||
int skt = open_listen(port);
|
||||
if (skt==-1) continue;
|
||||
fde_t *client = (fde_t*)calloc(1, sizeof(*client));
|
||||
if (!client) {
|
||||
E("out of memory");
|
||||
close(skt);
|
||||
continue;
|
||||
}
|
||||
client->skt = skt;
|
||||
client->on_event = listen_event;
|
||||
struct epoll_event ev = {0};
|
||||
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
|
||||
ev.data.ptr = client;
|
||||
A(0==epoll_ctl(ep->ep, EPOLL_CTL_ADD, skt, &ev), "");
|
||||
continue;
|
||||
}
|
||||
usage(prg, "unknown argument: [%s]", arg);
|
||||
return 1;
|
||||
}
|
||||
char *line = NULL;
|
||||
size_t linecap = 0;
|
||||
ssize_t linelen;
|
||||
while ((linelen = getline(&line, &linecap, stdin)) > 0) {
|
||||
line[strlen(line)-1] = '\0';
|
||||
if (0==strcmp(line, "exit")) break;
|
||||
if (0==strcmp(line, "quit")) break;
|
||||
if (line==strstr(line, "close")) {
|
||||
int fd = 0;
|
||||
sscanf(line, "close %d", &fd);
|
||||
if (fd<=2) {
|
||||
fprintf(stderr, "fd [%d] invalid\n", fd);
|
||||
continue;
|
||||
}
|
||||
A(0==epoll_ctl(ep->ep, EPOLL_CTL_DEL, fd, NULL), "");
|
||||
continue;
|
||||
}
|
||||
if (strlen(line)==0) continue;
|
||||
fprintf(stderr, "unknown cmd:[%s]\n", line);
|
||||
}
|
||||
ep_destroy(ep);
|
||||
D("");
|
||||
return 0;
|
||||
}
|
||||
|
||||
ep_t* ep_create(void) {
|
||||
ep_t *ep = (ep_t*)calloc(1, sizeof(*ep));
|
||||
A(ep, "out of memory");
|
||||
A(-1!=(ep->ep = epoll_create(1)), "");
|
||||
ep->sv[0] = -1;
|
||||
ep->sv[1] = -1;
|
||||
A(0==socketpair(AF_LOCAL, SOCK_STREAM, 0, ep->sv), "");
|
||||
A(0==pthread_mutex_init(&ep->lock, NULL), "");
|
||||
A(0==pthread_mutex_lock(&ep->lock), "");
|
||||
struct epoll_event ev = {0};
|
||||
ev.events = EPOLLIN;
|
||||
ev.data.ptr = &ep_dummy;
|
||||
A(0==epoll_ctl(ep->ep, EPOLL_CTL_ADD, ep->sv[0], &ev), "");
|
||||
A(0==pthread_create(&ep->thread, NULL, routine, ep), "");
|
||||
A(0==pthread_mutex_unlock(&ep->lock), "");
|
||||
return ep;
|
||||
}
|
||||
|
||||
static void ep_destroy(ep_t *ep) {
|
||||
A(ep, "invalid argument");
|
||||
ep->stopping = 1;
|
||||
A(1==send(ep->sv[1], "1", 1, 0), "");
|
||||
A(0==pthread_join(ep->thread, NULL), "");
|
||||
A(0==pthread_mutex_destroy(&ep->lock), "");
|
||||
A(0==close(ep->sv[0]), "");
|
||||
A(0==close(ep->sv[1]), "");
|
||||
A(0==close(ep->ep), "");
|
||||
free(ep);
|
||||
}
|
||||
|
||||
static void* routine(void* arg) {
|
||||
A(arg, "invalid argument");
|
||||
ep_t *ep = (ep_t*)arg;
|
||||
|
||||
while (!ep->stopping) {
|
||||
struct epoll_event evs[10];
|
||||
memset(evs, 0, sizeof(evs));
|
||||
|
||||
A(0==pthread_mutex_lock(&ep->lock), "");
|
||||
A(ep->waiting==0, "internal logic error");
|
||||
ep->waiting = 1;
|
||||
A(0==pthread_mutex_unlock(&ep->lock), "");
|
||||
|
||||
int r = epoll_wait(ep->ep, evs, sizeof(evs)/sizeof(evs[0]), -1);
|
||||
A(r>0, "indefinite epoll_wait shall not timeout:[%d]", r);
|
||||
|
||||
A(0==pthread_mutex_lock(&ep->lock), "");
|
||||
A(ep->waiting==1, "internal logic error");
|
||||
ep->waiting = 0;
|
||||
A(0==pthread_mutex_unlock(&ep->lock), "");
|
||||
|
||||
for (int i=0; i<r; ++i) {
|
||||
struct epoll_event *ev = evs + i;
|
||||
if (ev->data.ptr == &ep_dummy) {
|
||||
char c = '\0';
|
||||
A(1==recv(ep->sv[0], &c, 1, 0), "internal logic error");
|
||||
A(0==pthread_mutex_lock(&ep->lock), "");
|
||||
ep->wakenup = 0;
|
||||
A(0==pthread_mutex_unlock(&ep->lock), "");
|
||||
continue;
|
||||
}
|
||||
A(ev->data.ptr, "internal logic error");
|
||||
fde_t *client = (fde_t*)ev->data.ptr;
|
||||
client->on_event(ep, ev, client);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int open_listen(unsigned short port) {
|
||||
int r = 0;
|
||||
int skt = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
|
||||
if (skt==-1) {
|
||||
E("socket() failed");
|
||||
return -1;
|
||||
}
|
||||
do {
|
||||
struct sockaddr_in si = {0};
|
||||
si.sin_family = AF_INET;
|
||||
si.sin_addr.s_addr = inet_addr("0.0.0.0");
|
||||
si.sin_port = htons(port);
|
||||
r = bind(skt, (struct sockaddr*)&si, sizeof(si));
|
||||
if (r) {
|
||||
E("bind(%u) failed", port);
|
||||
break;
|
||||
}
|
||||
r = listen(skt, 100);
|
||||
if (r) {
|
||||
E("listen() failed");
|
||||
break;
|
||||
}
|
||||
memset(&si, 0, sizeof(si));
|
||||
socklen_t len = sizeof(si);
|
||||
r = getsockname(skt, (struct sockaddr *)&si, &len);
|
||||
if (r) {
|
||||
E("getsockname() failed");
|
||||
}
|
||||
A(len==sizeof(si), "internal logic error");
|
||||
D("listenning at: %d", ntohs(si.sin_port));
|
||||
return skt;
|
||||
} while (0);
|
||||
close(skt);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void listen_event(ep_t *ep, struct epoll_event *ev, fde_t *client) {
|
||||
A(ev->events & EPOLLIN, "internal logic error");
|
||||
struct sockaddr_in si = {0};
|
||||
socklen_t silen = sizeof(si);
|
||||
int skt = accept(client->skt, (struct sockaddr*)&si, &silen);
|
||||
A(skt!=-1, "internal logic error");
|
||||
fde_t *server = (fde_t*)calloc(1, sizeof(*server));
|
||||
if (!server) {
|
||||
close(skt);
|
||||
return;
|
||||
}
|
||||
server->skt = skt;
|
||||
server->on_event = null_event;
|
||||
struct epoll_event ee = {0};
|
||||
ee.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
|
||||
ee.data.ptr = server;
|
||||
A(0==epoll_ctl(ep->ep, EPOLL_CTL_ADD, skt, &ee), "");
|
||||
}
|
||||
|
||||
static void null_event(ep_t *ep, struct epoll_event *ev, fde_t *client) {
|
||||
if (ev->events & EPOLLIN) {
|
||||
char buf[8192];
|
||||
int n = recv(client->skt, buf, sizeof(buf), 0);
|
||||
A(n>=0 && n<=sizeof(buf), "internal logic error:[%d]", n);
|
||||
A(n==fwrite(buf, 1, n, stdout), "internal logic error");
|
||||
}
|
||||
if (ev->events & (EPOLLERR | EPOLLHUP | EPOLLRDHUP)) {
|
||||
A(0==pthread_mutex_lock(&ep->lock), "");
|
||||
A(0==epoll_ctl(ep->ep, EPOLL_CTL_DEL, client->skt, NULL), "");
|
||||
A(0==pthread_mutex_unlock(&ep->lock), "");
|
||||
close(client->skt);
|
||||
client->skt = -1;
|
||||
client->on_event = NULL;
|
||||
free(client);
|
||||
}
|
||||
}
|
||||
|
|
@@ -7,22 +7,21 @@ LFLAGS = '-Wl,-rpath,/usr/local/taos/driver/' -ltaos -lpthread -lm -lrt
CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion \
-Wno-char-subscripts -D_REENTRANT -Wno-format -D_REENTRANT -DLINUX \
-Wno-unused-function -D_M_X64 -I/usr/local/taos/include -std=gnu99 \
-I../../../deps/cJson/inc
-I/usr/local/include/cjson
all: $(TARGET)

exe:
gcc $(CFLAGS) ./asyncdemo.c -o $(ROOT)asyncdemo $(LFLAGS)
gcc $(CFLAGS) ./demo.c -o $(ROOT)demo $(LFLAGS)
gcc $(CFLAGS) ./prepare.c -o $(ROOT)prepare $(LFLAGS)
gcc $(CFLAGS) ./stream.c -o $(ROOT)stream $(LFLAGS)
gcc $(CFLAGS) ./subscribe.c -o $(ROOT)subscribe $(LFLAGS)
gcc $(CFLAGS) ./apitest.c -o $(ROOT)apitest $(LFLAGS)
gcc $(CFLAGS) ./stream_demo.c -o $(ROOT)stream_demo $(LFLAGS)
gcc $(CFLAGS) ./tmq.c -o $(ROOT)tmq $(LFLAGS)
gcc $(CFLAGS) ./schemaless.c -o $(ROOT)schemaless $(LFLAGS)

clean:
rm $(ROOT)asyncdemo
rm $(ROOT)demo
rm $(ROOT)prepare
rm $(ROOT)batchprepare
rm $(ROOT)stream
rm $(ROOT)subscribe
rm $(ROOT)apitest
rm $(ROOT)stream_demo
rm $(ROOT)tmq
rm $(ROOT)schemaless
@@ -4,7 +4,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "../../../include/client/taos.h"
#include "taos.h"

void taosMsleep(int mseconds);
@ -70,70 +70,89 @@ int main(int argc, char *argv[])
|
|||
char blob[80];
|
||||
} v = {0};
|
||||
|
||||
int32_t boolLen = sizeof(int8_t);
|
||||
int32_t sintLen = sizeof(int16_t);
|
||||
int32_t intLen = sizeof(int32_t);
|
||||
int32_t bintLen = sizeof(int64_t);
|
||||
int32_t floatLen = sizeof(float);
|
||||
int32_t doubleLen = sizeof(double);
|
||||
int32_t binLen = sizeof(v.bin);
|
||||
int32_t ncharLen = 30;
|
||||
|
||||
stmt = taos_stmt_init(taos);
|
||||
TAOS_BIND params[10];
|
||||
TAOS_MULTI_BIND params[10];
|
||||
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
|
||||
params[0].buffer_length = sizeof(v.ts);
|
||||
params[0].buffer = &v.ts;
|
||||
params[0].length = ¶ms[0].buffer_length;
|
||||
params[0].length = &bintLen;
|
||||
params[0].is_null = NULL;
|
||||
params[0].num = 1;
|
||||
|
||||
params[1].buffer_type = TSDB_DATA_TYPE_BOOL;
|
||||
params[1].buffer_length = sizeof(v.b);
|
||||
params[1].buffer = &v.b;
|
||||
params[1].length = ¶ms[1].buffer_length;
|
||||
params[1].length = &boolLen;
|
||||
params[1].is_null = NULL;
|
||||
params[1].num = 1;
|
||||
|
||||
params[2].buffer_type = TSDB_DATA_TYPE_TINYINT;
|
||||
params[2].buffer_length = sizeof(v.v1);
|
||||
params[2].buffer = &v.v1;
|
||||
params[2].length = ¶ms[2].buffer_length;
|
||||
params[2].length = &boolLen;
|
||||
params[2].is_null = NULL;
|
||||
params[2].num = 1;
|
||||
|
||||
params[3].buffer_type = TSDB_DATA_TYPE_SMALLINT;
|
||||
params[3].buffer_length = sizeof(v.v2);
|
||||
params[3].buffer = &v.v2;
|
||||
params[3].length = ¶ms[3].buffer_length;
|
||||
params[3].length = &sintLen;
|
||||
params[3].is_null = NULL;
|
||||
params[3].num = 1;
|
||||
|
||||
params[4].buffer_type = TSDB_DATA_TYPE_INT;
|
||||
params[4].buffer_length = sizeof(v.v4);
|
||||
params[4].buffer = &v.v4;
|
||||
params[4].length = ¶ms[4].buffer_length;
|
||||
params[4].length = &intLen;
|
||||
params[4].is_null = NULL;
|
||||
params[4].num = 1;
|
||||
|
||||
params[5].buffer_type = TSDB_DATA_TYPE_BIGINT;
|
||||
params[5].buffer_length = sizeof(v.v8);
|
||||
params[5].buffer = &v.v8;
|
||||
params[5].length = ¶ms[5].buffer_length;
|
||||
params[5].length = &bintLen;
|
||||
params[5].is_null = NULL;
|
||||
params[5].num = 1;
|
||||
|
||||
params[6].buffer_type = TSDB_DATA_TYPE_FLOAT;
|
||||
params[6].buffer_length = sizeof(v.f4);
|
||||
params[6].buffer = &v.f4;
|
||||
params[6].length = ¶ms[6].buffer_length;
|
||||
params[6].length = &floatLen;
|
||||
params[6].is_null = NULL;
|
||||
params[6].num = 1;
|
||||
|
||||
params[7].buffer_type = TSDB_DATA_TYPE_DOUBLE;
|
||||
params[7].buffer_length = sizeof(v.f8);
|
||||
params[7].buffer = &v.f8;
|
||||
params[7].length = ¶ms[7].buffer_length;
|
||||
params[7].length = &doubleLen;
|
||||
params[7].is_null = NULL;
|
||||
params[7].num = 1;
|
||||
|
||||
params[8].buffer_type = TSDB_DATA_TYPE_BINARY;
|
||||
params[8].buffer_length = sizeof(v.bin);
|
||||
params[8].buffer = v.bin;
|
||||
params[8].length = ¶ms[8].buffer_length;
|
||||
params[8].length = &binLen;
|
||||
params[8].is_null = NULL;
|
||||
params[8].num = 1;
|
||||
|
||||
strcpy(v.blob, "一二三四五六七八九十");
|
||||
params[9].buffer_type = TSDB_DATA_TYPE_NCHAR;
|
||||
params[9].buffer_length = strlen(v.blob);
|
||||
params[9].buffer_length = sizeof(v.blob);
|
||||
params[9].buffer = v.blob;
|
||||
params[9].length = ¶ms[9].buffer_length;
|
||||
params[9].length = &ncharLen;
|
||||
params[9].is_null = NULL;
|
||||
params[9].num = 1;
|
||||
|
||||
int is_null = 1;
|
||||
char is_null = 1;
|
||||
|
||||
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
|
||||
code = taos_stmt_prepare(stmt, sql, 0);
|
||||
|
@ -153,7 +172,7 @@ int main(int argc, char *argv[])
|
|||
v.v8 = (int64_t)(i * 8);
|
||||
v.f4 = (float)(i * 40);
|
||||
v.f8 = (double)(i * 80);
|
||||
for (int j = 0; j < sizeof(v.bin) - 1; ++j) {
|
||||
for (int j = 0; j < sizeof(v.bin); ++j) {
|
||||
v.bin[j] = (char)(i + '0');
|
||||
}
|
||||
|
||||
|
|
|
@ -1,32 +1,17 @@
|
|||
#include "../../../include/client/taos.h"
|
||||
#include "os.h"
|
||||
#include "taoserror.h"
|
||||
|
||||
#include "taos.h"
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/time.h>
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
#include <inttypes.h>
|
||||
#include <string.h>
|
||||
|
||||
|
||||
int numSuperTables = 8;
|
||||
int numChildTables = 4;
|
||||
int numRowsPerChildTable = 2048;
|
||||
|
||||
void shuffle(char**lines, size_t n)
|
||||
{
|
||||
if (n > 1)
|
||||
{
|
||||
size_t i;
|
||||
for (i = 0; i < n - 1; i++)
|
||||
{
|
||||
size_t j = i + taosRand() / (RAND_MAX / (n - i) + 1);
|
||||
char* t = lines[j];
|
||||
lines[j] = lines[i];
|
||||
lines[i] = t;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t getTimeInUs() {
|
||||
struct timeval systemTime;
|
||||
gettimeofday(&systemTime, NULL);
|
||||
|
@ -46,7 +31,7 @@ int main(int argc, char* argv[]) {
|
|||
exit(1);
|
||||
}
|
||||
|
||||
char* info = taos_get_server_info(taos);
|
||||
const char* info = taos_get_server_info(taos);
|
||||
printf("server info: %s\n", info);
|
||||
info = taos_get_client_info(taos);
|
||||
printf("client info: %s\n", info);
|
||||
|
@ -61,9 +46,10 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
time_t ct = time(0);
|
||||
int64_t ts = ct * 1000;
|
||||
char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=254u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms";
|
||||
char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=254u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %" PRId64;
|
||||
|
||||
char** lines = calloc(numSuperTables * numChildTables * numRowsPerChildTable, sizeof(char*));
|
||||
int lineNum = numSuperTables * numChildTables * numRowsPerChildTable;
|
||||
char** lines = calloc((size_t)lineNum, sizeof(char*));
|
||||
int l = 0;
|
||||
for (int i = 0; i < numSuperTables; ++i) {
|
||||
for (int j = 0; j < numChildTables; ++j) {
|
||||
|
@ -75,13 +61,13 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
}
|
||||
}
|
||||
//shuffle(lines, numSuperTables * numChildTables * numRowsPerChildTable);
|
||||
|
||||
printf("%s\n", "begin taos_insert_lines");
|
||||
int64_t begin = getTimeInUs();
|
||||
int32_t code = taos_insert_lines(taos, lines, numSuperTables * numChildTables * numRowsPerChildTable);
|
||||
TAOS_RES *res = taos_schemaless_insert(taos, lines, lineNum, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_MILLI_SECONDS);
|
||||
int64_t end = getTimeInUs();
|
||||
printf("code: %d, %s. time used: %"PRId64"\n", code, tstrerror(code), end-begin);
|
||||
printf("code: %s. time used: %" PRId64 "\n", taos_errstr(res), end-begin);
|
||||
taos_free_result(res);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -108,10 +108,13 @@ int32_t create_stream() {
}

int main(int argc, char* argv[]) {
int code;
if (argc > 1) {
printf("env init\n");
code = init_env();
int code = init_env();
if (code) {
return code;
}

}
create_stream();
}
@ -20,10 +20,7 @@
|
|||
#include <time.h>
|
||||
#include "taos.h"
|
||||
|
||||
static int running = 1;
|
||||
static char dbName[64] = "tmqdb";
|
||||
static char stbName[64] = "stb";
|
||||
static char topicName[64] = "topicname";
|
||||
static int running = 1;
|
||||
|
||||
static int32_t msg_process(TAOS_RES* msg) {
|
||||
char buf[1024];
|
||||
|
@ -43,8 +40,8 @@ static int32_t msg_process(TAOS_RES* msg) {
|
|||
|
||||
TAOS_FIELD* fields = taos_fetch_fields(msg);
|
||||
int32_t numOfFields = taos_field_count(msg);
|
||||
int32_t* length = taos_fetch_lengths(msg);
|
||||
int32_t precision = taos_result_precision(msg);
|
||||
// int32_t* length = taos_fetch_lengths(msg);
|
||||
int32_t precision = taos_result_precision(msg);
|
||||
rows++;
|
||||
taos_print_row(buf, row, fields, numOfFields);
|
||||
printf("precision: %d, row content: %s\n", precision, buf);
|
||||
|
@ -62,10 +59,15 @@ static int32_t init_env() {
|
|||
TAOS_RES* pRes;
|
||||
// drop database if exists
|
||||
printf("create database\n");
|
||||
pRes = taos_query(pConn, "drop topic topicname");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "drop database if exists tmqdb");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in drop tmqdb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
|
@ -73,7 +75,7 @@ static int32_t init_env() {
|
|||
pRes = taos_query(pConn, "create database tmqdb precision 'ns'");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
|
@ -83,7 +85,7 @@ static int32_t init_env() {
|
|||
pConn, "create table tmqdb.stb (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table stb, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
|
@ -92,28 +94,28 @@ static int32_t init_env() {
|
|||
pRes = taos_query(pConn, "create table tmqdb.ctb0 using tmqdb.stb tags(0, 'subtable0')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table tmqdb.ctb1 using tmqdb.stb tags(1, 'subtable1')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table ctb1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table tmqdb.ctb2 using tmqdb.stb tags(2, 'subtable2')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table ctb2, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table tmqdb.ctb3 using tmqdb.stb tags(3, 'subtable3')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table ctb3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
|
@ -122,33 +124,37 @@ static int32_t init_env() {
|
|||
pRes = taos_query(pConn, "insert into tmqdb.ctb0 values(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into tmqdb.ctb1 values(now, 1, 1, 'a1')(now+1s, 11, 11, 'a11')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into tmqdb.ctb2 values(now, 2, 2, 'a1')(now+1s, 22, 22, 'a22')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into tmqdb.ctb3 values(now, 3, 3, 'a1')(now+1s, 33, 33, 'a33')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ctb0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
goto END;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
taos_close(pConn);
|
||||
return 0;
|
||||
|
||||
END:
|
||||
taos_free_result(pRes);
|
||||
taos_close(pConn);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t create_topic() {
|
||||
|
@ -249,7 +255,7 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
tmq_t* tmq = build_consumer();
|
||||
if (NULL == tmq) {
|
||||
fprintf(stderr, "%% build_consumer() fail!\n");
|
||||
fprintf(stderr, "build_consumer() fail!\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -259,7 +265,7 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
|
||||
if ((code = tmq_subscribe(tmq, topic_list))) {
|
||||
fprintf(stderr, "%% Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
|
||||
fprintf(stderr, "Failed to tmq_subscribe(): %s\n", tmq_err2str(code));
|
||||
}
|
||||
tmq_list_destroy(topic_list);
|
||||
|
||||
|
@ -267,9 +273,9 @@ int main(int argc, char* argv[]) {
|
|||
|
||||
code = tmq_consumer_close(tmq);
|
||||
if (code) {
|
||||
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
|
||||
fprintf(stderr, "Failed to close consumer: %s\n", tmq_err2str(code));
|
||||
} else {
|
||||
fprintf(stderr, "%% Consumer closed\n");
|
||||
fprintf(stderr, "Consumer closed\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -59,6 +59,7 @@ typedef enum {
|
|||
TSDB_OPTION_TIMEZONE,
|
||||
TSDB_OPTION_CONFIGDIR,
|
||||
TSDB_OPTION_SHELL_ACTIVITY_TIMER,
|
||||
TSDB_OPTION_USE_ADAPTER,
|
||||
TSDB_MAX_OPTIONS
|
||||
} TSDB_OPTION;
|
||||
|
||||
|
@ -149,7 +150,7 @@ DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char
|
|||
DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port);
|
||||
DLL_EXPORT void taos_close(TAOS *taos);
|
||||
|
||||
const char *taos_data_type(int type);
|
||||
DLL_EXPORT const char *taos_data_type(int type);
|
||||
|
||||
DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos);
|
||||
DLL_EXPORT TAOS_STMT *taos_stmt_init_with_reqid(TAOS *taos, int64_t reqid);
|
||||
|
@ -185,6 +186,7 @@ DLL_EXPORT void taos_kill_query(TAOS *taos);
|
|||
DLL_EXPORT int taos_field_count(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_num_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_affected_rows(TAOS_RES *res);
|
||||
DLL_EXPORT int64_t taos_affected_rows64(TAOS_RES *res);
|
||||
|
||||
DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res);
|
||||
DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
|
||||
|
@ -217,7 +219,10 @@ DLL_EXPORT const void *taos_get_raw_block(TAOS_RES *res);
|
|||
DLL_EXPORT int taos_get_db_route_info(TAOS *taos, const char *db, TAOS_DB_ROUTE_INFO *dbInfo);
|
||||
DLL_EXPORT int taos_get_table_vgId(TAOS *taos, const char *db, const char *table, int *vgId);
|
||||
|
||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
||||
DLL_EXPORT int taos_load_table_info(TAOS *taos, const char *tableNameList);
|
||||
|
||||
/* --------------------------schemaless INTERFACE------------------------------- */
|
||||
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert(TAOS *taos, char *lines[], int numLines, int protocol, int precision);
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol,
|
||||
int precision, int64_t reqid);
|
||||
|
@ -225,6 +230,14 @@ DLL_EXPORT TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len
|
|||
int precision);
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows,
|
||||
int protocol, int precision, int64_t reqid);
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision,
|
||||
int32_t ttl);
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert_ttl_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol,
|
||||
int precision, int32_t ttl, int64_t reqid);
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol,
|
||||
int precision, int32_t ttl);
|
||||
DLL_EXPORT TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows,
|
||||
int protocol, int precision, int32_t ttl, int64_t reqid);
|
||||
|
||||
/* --------------------------TMQ INTERFACE------------------------------- */
|
||||
|
||||
|
@ -297,6 +310,8 @@ DLL_EXPORT tmq_res_t tmq_get_res_type(TAOS_RES *res);
|
|||
DLL_EXPORT int32_t tmq_get_raw(TAOS_RES *res, tmq_raw_data *raw);
|
||||
DLL_EXPORT int32_t tmq_write_raw(TAOS *taos, tmq_raw_data raw);
|
||||
DLL_EXPORT int taos_write_raw_block(TAOS *taos, int numOfRows, char *pData, const char *tbname);
|
||||
DLL_EXPORT int taos_write_raw_block_with_fields(TAOS *taos, int rows, char *pData, const char *tbname,
|
||||
TAOS_FIELD *fields, int numFields);
|
||||
DLL_EXPORT void tmq_free_raw(tmq_raw_data raw);
|
||||
// Returning null means error. Returned result need to be freed by tmq_free_json_meta
|
||||
DLL_EXPORT char *tmq_get_json_meta(TAOS_RES *res);
|
||||
|
|
|
@@ -47,6 +47,7 @@ extern "C" {
#define TSDB_INS_TABLE_TOPICS "ins_topics"
#define TSDB_INS_TABLE_STREAMS "ins_streams"
#define TSDB_INS_TABLE_STREAM_TASKS "ins_stream_tasks"
#define TSDB_INS_TABLE_USER_PRIVILEGES "ins_user_privileges"

#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
@ -174,15 +174,29 @@ typedef struct SColumnDataAgg {
|
|||
} SColumnDataAgg;
|
||||
#pragma pack(pop)
|
||||
|
||||
typedef struct SBlockID {
|
||||
// The uid of table, from which current data block comes. And it is always 0, if current block is the
|
||||
// result of calculation.
|
||||
uint64_t uid;
|
||||
|
||||
// Block id, acquired and assigned from executor, which created according to the hysical planner. Block id is used
|
||||
// to mark the stage of exec task.
|
||||
uint64_t blockId;
|
||||
|
||||
// Generated by group/partition by [value|tags]. Created and assigned by table-scan operator, group-by operator,
|
||||
// and partition by operator.
|
||||
uint64_t groupId;
|
||||
} SBlockID;
|
||||
|
||||
typedef struct SDataBlockInfo {
|
||||
STimeWindow window;
|
||||
int32_t rows; // todo hide this attribute
|
||||
int32_t rowSize;
|
||||
uint64_t uid; // the uid of table, from which current data block comes
|
||||
uint16_t blockId; // block id, generated by physical planner
|
||||
uint64_t groupId;
|
||||
int16_t hasVarCol;
|
||||
int32_t rows; // todo hide this attribute
|
||||
uint32_t capacity;
|
||||
SBlockID id;
|
||||
int16_t hasVarCol;
|
||||
int16_t dataLoad; // denote if the data is loaded or not
|
||||
|
||||
// TODO: optimize and remove following
|
||||
int64_t version; // used for stream, and need serialization
|
||||
int32_t childId; // used for stream, do not serialize
|
||||
|
@ -190,8 +204,8 @@ typedef struct SDataBlockInfo {
|
|||
STimeWindow calWin; // used for stream, do not serialize
|
||||
TSKEY watermark; // used for stream
|
||||
|
||||
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
|
||||
STag* pTag; // used for stream partition
|
||||
char parTbName[TSDB_TABLE_NAME_LEN]; // used for stream partition
|
||||
STag* pTag; // used for stream partition
|
||||
} SDataBlockInfo;
|
||||
|
||||
typedef struct SSDataBlock {
|
||||
|
@ -225,21 +239,22 @@ typedef struct SVarColAttr {
|
|||
// pBlockAgg->numOfNull == info.rows, all data are null
|
||||
// pBlockAgg->numOfNull == 0, no data are null.
|
||||
typedef struct SColumnInfoData {
|
||||
char* pData; // the corresponding block data in memory
|
||||
char* pData; // the corresponding block data in memory
|
||||
union {
|
||||
char* nullbitmap; // bitmap, one bit for each item in the list
|
||||
SVarColAttr varmeta;
|
||||
};
|
||||
SColumnInfo info; // column info
|
||||
bool hasNull; // if current column data has null value.
|
||||
SColumnInfo info; // column info
|
||||
bool hasNull; // if current column data has null value.
|
||||
} SColumnInfoData;
|
||||
|
||||
typedef struct SQueryTableDataCond {
|
||||
uint64_t suid;
|
||||
int32_t order; // desc|asc order to iterate the data block
|
||||
int32_t order; // desc|asc order to iterate the data block
|
||||
int32_t numOfCols;
|
||||
SColumnInfo* colList;
|
||||
int32_t type; // data block load type:
|
||||
int32_t* pSlotList; // the column output destation slot, and it may be null
|
||||
int32_t type; // data block load type:
|
||||
STimeWindow twindows;
|
||||
int64_t startVersion;
|
||||
int64_t endVersion;
|
||||
|
|
|
@ -24,11 +24,6 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct SCorEpSet {
|
||||
int32_t version;
|
||||
SEpSet epSet;
|
||||
} SCorEpSet;
|
||||
|
||||
typedef struct SBlockOrderInfo {
|
||||
bool nullFirst;
|
||||
int32_t order;
|
||||
|
@ -36,14 +31,6 @@ typedef struct SBlockOrderInfo {
|
|||
SColumnInfoData* pColData;
|
||||
} SBlockOrderInfo;
|
||||
|
||||
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp);
|
||||
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port);
|
||||
|
||||
bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2);
|
||||
|
||||
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet);
|
||||
SEpSet getEpSet_s(SCorEpSet* pEpSet);
|
||||
|
||||
#define NBIT (3u)
|
||||
#define BitPos(_n) ((_n) & ((1 << NBIT) - 1))
|
||||
#define BMCharPos(bm_, r_) ((bm_)[(r_) >> NBIT])
|
||||
|
@ -54,9 +41,9 @@ SEpSet getEpSet_s(SCorEpSet* pEpSet);
|
|||
BMCharPos(bm_, r_) |= (1u << (7u - BitPos(r_))); \
|
||||
} while (0)
|
||||
|
||||
#define colDataSetNotNull_f(bm_, r_) \
|
||||
do { \
|
||||
BMCharPos(bm_, r_) &= ~(1u << (7u - BitPos(r_))); \
|
||||
#define colDataClearNull_f(bm_, r_) \
|
||||
do { \
|
||||
BMCharPos(bm_, r_) &= ((char)(~(1u << (7u - BitPos(r_))))); \
|
||||
} while (0)
|
||||
|
||||
#define colDataIsNull_var(pColumnInfoData, row) (pColumnInfoData->varmeta.offset[row] == -1)
|
||||
|
@ -164,9 +151,6 @@ static FORCE_INLINE void colDataAppendNNULL(SColumnInfoData* pColumnInfoData, ui
|
|||
for (int32_t i = start; i < start + nRows; ++i) {
|
||||
colDataSetNull_f(pColumnInfoData->nullbitmap, i);
|
||||
}
|
||||
|
||||
int32_t bytes = pColumnInfoData->info.bytes;
|
||||
memset(pColumnInfoData->pData + start * bytes, 0, nRows * bytes);
|
||||
}
|
||||
|
||||
pColumnInfoData->hasNull = true;
|
||||
|
@ -247,9 +231,11 @@ int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullF
|
|||
|
||||
int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload);
|
||||
int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows);
|
||||
int32_t blockDataEnsureCapacityNoClear(SSDataBlock* pDataBlock, uint32_t numOfRows);
|
||||
|
||||
void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows);
|
||||
void blockDataCleanup(SSDataBlock* pDataBlock);
|
||||
void blockDataEmpty(SSDataBlock* pDataBlock);
|
||||
|
||||
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
|
||||
|
||||
|
|
|
@ -44,6 +44,8 @@ extern int32_t tsCompatibleModel;
|
|||
extern bool tsPrintAuth;
|
||||
extern int64_t tsTickPerMin[3];
|
||||
extern int32_t tsCountAlwaysReturnValue;
|
||||
extern float tsSelectivityRatio;
|
||||
extern int32_t tsTagFilterResCacheSize;
|
||||
|
||||
// queue & threads
|
||||
extern int32_t tsNumOfRpcThreads;
|
||||
|
@ -62,6 +64,11 @@ extern int32_t tsNumOfSnodeStreamThreads;
|
|||
extern int32_t tsNumOfSnodeWriteThreads;
|
||||
extern int64_t tsRpcQueueMemoryAllowed;
|
||||
|
||||
// sync raft
|
||||
extern int32_t tsElectInterval;
|
||||
extern int32_t tsHeartbeatInterval;
|
||||
extern int32_t tsHeartbeatTimeout;
|
||||
|
||||
// monitor
|
||||
extern bool tsEnableMonitor;
|
||||
extern int32_t tsMonitorInterval;
|
||||
|
@ -90,6 +97,11 @@ extern int32_t tsQueryNodeChunkSize;
|
|||
extern bool tsQueryUseNodeAllocator;
|
||||
extern bool tsKeepColumnName;
|
||||
extern bool tsEnableQueryHb;
|
||||
extern int32_t tsRedirectPeriod;
|
||||
extern int32_t tsRedirectFactor;
|
||||
extern int32_t tsRedirectMaxPeriod;
|
||||
extern int32_t tsMaxRetryWaitTime;
|
||||
extern bool tsUseAdapter;
|
||||
|
||||
// client
|
||||
extern int32_t tsMinSlidingTime;
|
||||
|
@ -120,9 +132,10 @@ extern char tsUdfdResFuncs[];
|
|||
extern char tsUdfdLdLibPath[];
|
||||
|
||||
// schemaless
|
||||
extern char tsSmlChildTableName[];
|
||||
extern char tsSmlTagName[];
|
||||
extern bool tsSmlDataFormat;
|
||||
extern char tsSmlChildTableName[];
|
||||
extern char tsSmlTagName[];
|
||||
extern bool tsSmlDataFormat;
|
||||
extern int32_t tsSmlBatchSize;
|
||||
|
||||
// wal
|
||||
extern int64_t tsWalFsyncDataSizeLimit;
|
||||
|
@ -130,6 +143,7 @@ extern int64_t tsWalFsyncDataSizeLimit;
|
|||
// internal
|
||||
extern int32_t tsTransPullupInterval;
|
||||
extern int32_t tsMqRebalanceInterval;
|
||||
extern int32_t tsStreamCheckpointTickInterval;
|
||||
extern int32_t tsTtlUnit;
|
||||
extern int32_t tsTtlPushInterval;
|
||||
extern int32_t tsGrantHBInterval;
|
||||
|
@ -138,7 +152,7 @@ extern int32_t tsUptimeInterval;
|
|||
extern int32_t tsRpcRetryLimit;
|
||||
extern int32_t tsRpcRetryInterval;
|
||||
|
||||
//#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
|
||||
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
|
||||
|
||||
int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd,
|
||||
const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc);
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef TDENGINE_TMISCE_H
|
||||
#define TDENGINE_TMISCE_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include "tmsg.h"
|
||||
|
||||
typedef struct SCorEpSet {
|
||||
int32_t version;
|
||||
SEpSet epSet;
|
||||
} SCorEpSet;
|
||||
|
||||
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp);
|
||||
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port);
|
||||
|
||||
bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2);
|
||||
|
||||
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet);
|
||||
SEpSet getEpSet_s(SCorEpSet* pEpSet);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // TDENGINE_TMISCE_H
|
|
@ -66,6 +66,15 @@ extern int32_t tMsgDict[];
|
|||
|
||||
typedef uint16_t tmsg_t;
|
||||
|
||||
static inline bool vnodeIsMsgBlock(tmsg_t type) {
|
||||
return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
|
||||
(type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM);
|
||||
}
|
||||
|
||||
static inline bool syncUtilUserCommit(tmsg_t msgType) {
|
||||
return msgType != TDMT_SYNC_NOOP && msgType != TDMT_SYNC_LEADER_TRANSFER;
|
||||
}
|
||||
|
||||
/* ------------------------ OTHER DEFINITIONS ------------------------ */
|
||||
// IE type
|
||||
#define TSDB_IE_TYPE_SEC 1
|
||||
|
@ -120,6 +129,7 @@ typedef enum _mgmt_table {
|
|||
TSDB_MGMT_TABLE_VNODES,
|
||||
TSDB_MGMT_TABLE_APPS,
|
||||
TSDB_MGMT_TABLE_STREAM_TASKS,
|
||||
TSDB_MGMT_TABLE_PRIVILEGES,
|
||||
TSDB_MGMT_TABLE_MAX,
|
||||
} EShowType;
|
||||
|
||||
|
@ -141,16 +151,18 @@ typedef enum _mgmt_table {
|
|||
#define TSDB_FILL_PREV 4
|
||||
#define TSDB_FILL_NEXT 5
|
||||
|
||||
#define TSDB_ALTER_USER_PASSWD 0x1
|
||||
#define TSDB_ALTER_USER_SUPERUSER 0x2
|
||||
#define TSDB_ALTER_USER_ADD_READ_DB 0x3
|
||||
#define TSDB_ALTER_USER_REMOVE_READ_DB 0x4
|
||||
#define TSDB_ALTER_USER_ADD_WRITE_DB 0x5
|
||||
#define TSDB_ALTER_USER_REMOVE_WRITE_DB 0x6
|
||||
#define TSDB_ALTER_USER_ADD_ALL_DB 0x7
|
||||
#define TSDB_ALTER_USER_REMOVE_ALL_DB 0x8
|
||||
#define TSDB_ALTER_USER_ENABLE 0x9
|
||||
#define TSDB_ALTER_USER_SYSINFO 0xA
|
||||
#define TSDB_ALTER_USER_PASSWD 0x1
|
||||
#define TSDB_ALTER_USER_SUPERUSER 0x2
|
||||
#define TSDB_ALTER_USER_ADD_READ_DB 0x3
|
||||
#define TSDB_ALTER_USER_REMOVE_READ_DB 0x4
|
||||
#define TSDB_ALTER_USER_ADD_WRITE_DB 0x5
|
||||
#define TSDB_ALTER_USER_REMOVE_WRITE_DB 0x6
|
||||
#define TSDB_ALTER_USER_ADD_ALL_DB 0x7
|
||||
#define TSDB_ALTER_USER_REMOVE_ALL_DB 0x8
|
||||
#define TSDB_ALTER_USER_ENABLE 0x9
|
||||
#define TSDB_ALTER_USER_SYSINFO 0xA
|
||||
#define TSDB_ALTER_USER_ADD_SUBSCRIBE_TOPIC 0xB
|
||||
#define TSDB_ALTER_USER_REMOVE_SUBSCRIBE_TOPIC 0xC
|
||||
|
||||
#define TSDB_ALTER_USER_PRIVILEGES 0x2
|
||||
|
||||
|
@ -497,6 +509,8 @@ typedef struct {
|
|||
char* pComment;
|
||||
char* pAst1;
|
||||
char* pAst2;
|
||||
int64_t deleteMark1;
|
||||
int64_t deleteMark2;
|
||||
} SMCreateStbReq;
|
||||
|
||||
int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);
|
||||
|
@ -620,7 +634,7 @@ typedef struct {
|
|||
int8_t enable;
|
||||
char user[TSDB_USER_LEN];
|
||||
char pass[TSDB_USET_PASSWORD_LEN];
|
||||
char dbname[TSDB_DB_FNAME_LEN];
|
||||
char objname[TSDB_DB_FNAME_LEN]; // db or topic
|
||||
} SAlterUserReq;
|
||||
|
||||
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
|
||||
|
@ -649,34 +663,6 @@ int32_t tSerializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pR
|
|||
int32_t tDeserializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pRsp);
|
||||
void tFreeSGetUserAuthRsp(SGetUserAuthRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
int16_t lowerRelOptr;
|
||||
int16_t upperRelOptr;
|
||||
int16_t filterstr; // denote if current column is char(binary/nchar)
|
||||
|
||||
union {
|
||||
struct {
|
||||
int64_t lowerBndi;
|
||||
int64_t upperBndi;
|
||||
};
|
||||
struct {
|
||||
double lowerBndd;
|
||||
double upperBndd;
|
||||
};
|
||||
struct {
|
||||
int64_t pz;
|
||||
int64_t len;
|
||||
};
|
||||
};
|
||||
} SColumnFilterInfo;
|
||||
|
||||
typedef struct {
|
||||
int16_t numOfFilters;
|
||||
union {
|
||||
int64_t placeholder;
|
||||
SColumnFilterInfo* filterInfo;
|
||||
};
|
||||
} SColumnFilterList;
|
||||
/*
|
||||
* for client side struct, only column id, type, bytes are necessary
|
||||
* But for data in vnode side, we need all the following information.
|
||||
|
@ -687,10 +673,10 @@ typedef struct {
|
|||
int16_t slotId;
|
||||
};
|
||||
|
||||
int8_t type;
|
||||
int32_t bytes;
|
||||
uint8_t precision;
|
||||
uint8_t scale;
|
||||
int32_t bytes;
|
||||
int8_t type;
|
||||
} SColumnInfo;
|
||||
|
||||
typedef struct STimeWindow {
|
||||
|
@ -1087,6 +1073,7 @@ typedef struct {
|
|||
int32_t vgId;
|
||||
int8_t syncState;
|
||||
int8_t syncRestore;
|
||||
int8_t syncCanRead;
|
||||
int64_t cacheUsage;
|
||||
int64_t numOfTables;
|
||||
int64_t numOfTimeSeries;
|
||||
|
@ -1172,6 +1159,13 @@ typedef struct {
|
|||
int32_t tSerializeSMTimerMsg(void* buf, int32_t bufLen, SMTimerReq* pReq);
|
||||
int32_t tDeserializeSMTimerMsg(void* buf, int32_t bufLen, SMTimerReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int64_t tick;
|
||||
} SMStreamTickReq;
|
||||
|
||||
int32_t tSerializeSMStreamTickMsg(void* buf, int32_t bufLen, SMStreamTickReq* pReq);
|
||||
int32_t tDeserializeSMStreamTickMsg(void* buf, int32_t bufLen, SMStreamTickReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int32_t id;
|
||||
uint16_t port; // node sync Port
|
||||
|
@ -1420,8 +1414,8 @@ typedef struct {
|
|||
int8_t streamBlockType;
|
||||
int32_t compLen;
|
||||
int32_t numOfBlocks;
|
||||
int32_t numOfRows;
|
||||
int32_t numOfCols;
|
||||
int64_t numOfRows; // from int32_t change to int64_t
|
||||
int64_t numOfCols;
|
||||
int64_t skey;
|
||||
int64_t ekey;
|
||||
int64_t version; // for stream
|
||||
|
@ -1620,14 +1614,14 @@ typedef struct SSubQueryMsg {
|
|||
int8_t explain;
|
||||
int8_t needFetch;
|
||||
uint32_t sqlLen;
|
||||
char *sql;
|
||||
char* sql;
|
||||
uint32_t msgLen;
|
||||
char *msg;
|
||||
char* msg;
|
||||
} SSubQueryMsg;
|
||||
|
||||
int32_t tSerializeSSubQueryMsg(void *buf, int32_t bufLen, SSubQueryMsg *pReq);
|
||||
int32_t tDeserializeSSubQueryMsg(void *buf, int32_t bufLen, SSubQueryMsg *pReq);
|
||||
void tFreeSSubQueryMsg(SSubQueryMsg *pReq);
|
||||
int32_t tSerializeSSubQueryMsg(void* buf, int32_t bufLen, SSubQueryMsg* pReq);
|
||||
int32_t tDeserializeSSubQueryMsg(void* buf, int32_t bufLen, SSubQueryMsg* pReq);
|
||||
void tFreeSSubQueryMsg(SSubQueryMsg* pReq);
|
||||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
|
@ -1666,9 +1660,8 @@ typedef struct {
|
|||
int32_t execId;
|
||||
} SResFetchReq;
|
||||
|
||||
int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq);
|
||||
int32_t tDeserializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq);
|
||||
|
||||
int32_t tSerializeSResFetchReq(void* buf, int32_t bufLen, SResFetchReq* pReq);
|
||||
int32_t tDeserializeSResFetchReq(void* buf, int32_t bufLen, SResFetchReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
|
@ -1741,12 +1734,11 @@ typedef struct {
|
|||
int32_t execId;
|
||||
} STaskDropReq;
|
||||
|
||||
int32_t tSerializeSTaskDropReq(void *buf, int32_t bufLen, STaskDropReq *pReq);
|
||||
int32_t tDeserializeSTaskDropReq(void *buf, int32_t bufLen, STaskDropReq *pReq);
|
||||
|
||||
int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp);
|
||||
int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp);
|
||||
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
|
||||
|
||||
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
|
||||
int32_t tDeserializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
int32_t code;
|
||||
|
@ -1774,6 +1766,8 @@ typedef struct {
|
|||
int64_t watermark;
|
||||
int32_t numOfTags;
|
||||
SArray* pTags; // array of SField
|
||||
// 3.0.20
|
||||
int64_t checkpointFreq; // ms
|
||||
} SCMCreateStreamReq;
|
||||
|
||||
typedef struct {
|
||||
|
@ -1973,6 +1967,12 @@ typedef struct {
|
|||
SHashObj* rebSubHash; // SHashObj<key, SMqRebSubscribe>
|
||||
} SMqDoRebalanceMsg;
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int64_t checkpointId;
|
||||
char streamName[TSDB_STREAM_FNAME_LEN];
|
||||
} SMStreamDoCheckpointMsg;
|
||||
|
||||
typedef struct {
|
||||
int64_t status;
|
||||
} SMVSubscribeRsp;
|
||||
|
@ -2024,6 +2024,7 @@ typedef struct {
|
|||
typedef struct {
|
||||
int64_t maxdelay[2];
|
||||
int64_t watermark[2];
|
||||
int64_t deleteMark[2];
|
||||
int32_t qmsgLen[2];
|
||||
char* qmsg[2]; // pAst:qmsg:SRetention => trigger aggr task1/2
|
||||
} SRSmaParam;
|
||||
|
@ -2745,6 +2746,7 @@ typedef struct {
|
|||
char* tagsFilter;
|
||||
char* sql;
|
||||
char* ast;
|
||||
int64_t deleteMark;
|
||||
} SMCreateSmaReq;
|
||||
|
||||
int32_t tSerializeSMCreateSmaReq(void* buf, int32_t bufLen, SMCreateSmaReq* pReq);
|
||||
|
@ -2951,9 +2953,8 @@ typedef struct {
|
|||
STqOffsetVal reqOffset;
|
||||
} SMqPollReq;
|
||||
|
||||
int32_t tSerializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq);
|
||||
int32_t tDeserializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq);
|
||||
|
||||
int32_t tSerializeSMqPollReq(void* buf, int32_t bufLen, SMqPollReq* pReq);
|
||||
int32_t tDeserializeSMqPollReq(void* buf, int32_t bufLen, SMqPollReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int32_t vgId;
|
||||
|
@ -3166,7 +3167,8 @@ int32_t tDecodeDeleteRes(SDecoder* pCoder, SDeleteRes* pRes);
|
|||
typedef struct {
|
||||
// int64_t uid;
|
||||
char tbname[TSDB_TABLE_NAME_LEN];
|
||||
int64_t ts;
|
||||
int64_t startTs;
|
||||
int64_t endTs;
|
||||
} SSingleDeleteReq;
|
||||
|
||||
int32_t tEncodeSSingleDeleteReq(SEncoder* pCoder, const SSingleDeleteReq* pReq);
|
||||
|
@ -3188,8 +3190,8 @@ typedef struct {
|
|||
} SBatchMsg;
|
||||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
SArray* pMsgs; //SArray<SBatchMsg>
|
||||
SMsgHead header;
|
||||
SArray* pMsgs; // SArray<SBatchMsg>
|
||||
} SBatchReq;
|
||||
|
||||
typedef struct {
|
||||
|
@ -3201,11 +3203,11 @@ typedef struct {
|
|||
} SBatchRspMsg;
|
||||
|
||||
typedef struct {
|
||||
SArray* pRsps; //SArray<SBatchRspMsg>
|
||||
SArray* pRsps; // SArray<SBatchRspMsg>
|
||||
} SBatchRsp;
|
||||
|
||||
int32_t tSerializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq);
|
||||
int32_t tDeserializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq);
|
||||
int32_t tSerializeSBatchReq(void* buf, int32_t bufLen, SBatchReq* pReq);
|
||||
int32_t tDeserializeSBatchReq(void* buf, int32_t bufLen, SBatchReq* pReq);
|
||||
static FORCE_INLINE void tFreeSBatchReqMsg(void* msg) {
|
||||
if (NULL == msg) {
|
||||
return;
|
||||
|
@ -3214,8 +3216,8 @@ static FORCE_INLINE void tFreeSBatchReqMsg(void* msg) {
|
|||
taosMemoryFree(pMsg->msg);
|
||||
}
|
||||
|
||||
int32_t tSerializeSBatchRsp(void *buf, int32_t bufLen, SBatchRsp *pRsp);
|
||||
int32_t tDeserializeSBatchRsp(void *buf, int32_t bufLen, SBatchRsp *pRsp);
|
||||
int32_t tSerializeSBatchRsp(void* buf, int32_t bufLen, SBatchRsp* pRsp);
|
||||
int32_t tDeserializeSBatchRsp(void* buf, int32_t bufLen, SBatchRsp* pRsp);
|
||||
|
||||
static FORCE_INLINE void tFreeSBatchRspMsg(void* p) {
|
||||
if (NULL == p) {
|
||||
|
@ -3226,11 +3228,10 @@ static FORCE_INLINE void tFreeSBatchRspMsg(void* p) {
|
|||
taosMemoryFree(pRsp->msg);
|
||||
}
|
||||
|
||||
int32_t tSerializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq);
|
||||
int32_t tDeserializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq);
|
||||
int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq);
|
||||
int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq);
|
||||
|
||||
int32_t tSerializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
|
||||
int32_t tDeserializeSMqAskEpReq(void* buf, int32_t bufLen, SMqAskEpReq* pReq);
|
||||
int32_t tSerializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
||||
int32_t tDeserializeSMqHbReq(void* buf, int32_t bufLen, SMqHbReq* pReq);
|
||||
|
||||
#pragma pack(pop)
|
||||
|
||||
|
|
|
@ -43,7 +43,6 @@ typedef int32_t (*PutToQueueFp)(void* pMgmt, EQueueType qtype, SRpcMsg* pMsg);
|
|||
typedef int32_t (*GetQueueSizeFp)(void* pMgmt, int32_t vgId, EQueueType qtype);
|
||||
typedef int32_t (*SendReqFp)(const SEpSet* pEpSet, SRpcMsg* pMsg);
|
||||
typedef void (*SendRspFp)(SRpcMsg* pMsg);
|
||||
typedef void (*SendRedirectRspFp)(SRpcMsg* pMsg, const SEpSet* pNewEpSet);
|
||||
typedef void (*RegisterBrokenLinkArgFp)(SRpcMsg* pMsg);
|
||||
typedef void (*ReleaseHandleFp)(SRpcHandleInfo* pHandle, int8_t type);
|
||||
typedef void (*ReportStartup)(const char* name, const char* desc);
|
||||
|
@ -55,7 +54,6 @@ typedef struct {
|
|||
GetQueueSizeFp qsizeFp;
|
||||
SendReqFp sendReqFp;
|
||||
SendRspFp sendRspFp;
|
||||
SendRedirectRspFp sendRedirectRspFp;
|
||||
RegisterBrokenLinkArgFp registerBrokenLinkArgFp;
|
||||
ReleaseHandleFp releaseHandleFp;
|
||||
ReportStartup reportStartupFp;
|
||||
|
@ -66,7 +64,6 @@ int32_t tmsgPutToQueue(const SMsgCb* msgcb, EQueueType qtype, SRpcMsg* pMsg);
|
|||
int32_t tmsgGetQueueSize(const SMsgCb* msgcb, int32_t vgId, EQueueType qtype);
|
||||
int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg);
|
||||
void tmsgSendRsp(SRpcMsg* pMsg);
|
||||
void tmsgSendRedirectRsp(SRpcMsg* pMsg, const SEpSet* pNewEpSet);
|
||||
void tmsgRegisterBrokenLinkArg(SRpcMsg* pMsg);
|
||||
void tmsgReleaseHandle(SRpcHandleInfo* pHandle, int8_t type);
|
||||
void tmsgReportStartup(const char* name, const char* desc);
|
||||
|
|
|
@@ -172,6 +172,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, "lost-consumer-clear", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)

TD_NEW_MSG_SEG(TDMT_VND_MSG)

@@ -241,8 +243,11 @@ enum {
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DISPATCH, "stream-task-dispatch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_UNUSED1, "stream-unused1", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_RECOVER_FINISH, "vnode-stream-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_RECOVER_FINISH, "stream-recover-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT, "stream-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)

TD_NEW_MSG_SEG(TDMT_MON_MSG)

@@ -280,8 +285,9 @@ enum {

TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_STEP1, "vnode-stream-recover1", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_STEP2, "vnode-stream-recover2", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_NONBLOCKING_STAGE, "vnode-stream-recover1", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_RECOVER_BLOCKING_STAGE, "vnode-stream-recover2", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)

TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG)
@ -58,42 +58,42 @@
|
|||
#define TK_TO 40
|
||||
#define TK_REVOKE 41
|
||||
#define TK_FROM 42
|
||||
#define TK_NK_COMMA 43
|
||||
#define TK_READ 44
|
||||
#define TK_WRITE 45
|
||||
#define TK_NK_DOT 46
|
||||
#define TK_DNODE 47
|
||||
#define TK_PORT 48
|
||||
#define TK_DNODES 49
|
||||
#define TK_NK_IPTOKEN 50
|
||||
#define TK_FORCE 51
|
||||
#define TK_LOCAL 52
|
||||
#define TK_QNODE 53
|
||||
#define TK_BNODE 54
|
||||
#define TK_SNODE 55
|
||||
#define TK_MNODE 56
|
||||
#define TK_DATABASE 57
|
||||
#define TK_USE 58
|
||||
#define TK_FLUSH 59
|
||||
#define TK_TRIM 60
|
||||
#define TK_IF 61
|
||||
#define TK_NOT 62
|
||||
#define TK_EXISTS 63
|
||||
#define TK_BUFFER 64
|
||||
#define TK_CACHEMODEL 65
|
||||
#define TK_CACHESIZE 66
|
||||
#define TK_COMP 67
|
||||
#define TK_DURATION 68
|
||||
#define TK_NK_VARIABLE 69
|
||||
#define TK_MAXROWS 70
|
||||
#define TK_MINROWS 71
|
||||
#define TK_KEEP 72
|
||||
#define TK_PAGES 73
|
||||
#define TK_PAGESIZE 74
|
||||
#define TK_TSDB_PAGESIZE 75
|
||||
#define TK_PRECISION 76
|
||||
#define TK_REPLICA 77
|
||||
#define TK_STRICT 78
|
||||
#define TK_SUBSCRIBE 43
|
||||
#define TK_NK_COMMA 44
|
||||
#define TK_READ 45
|
||||
#define TK_WRITE 46
|
||||
#define TK_NK_DOT 47
|
||||
#define TK_DNODE 48
|
||||
#define TK_PORT 49
|
||||
#define TK_DNODES 50
|
||||
#define TK_NK_IPTOKEN 51
|
||||
#define TK_FORCE 52
|
||||
#define TK_LOCAL 53
|
||||
#define TK_QNODE 54
|
||||
#define TK_BNODE 55
|
||||
#define TK_SNODE 56
|
||||
#define TK_MNODE 57
|
||||
#define TK_DATABASE 58
|
||||
#define TK_USE 59
|
||||
#define TK_FLUSH 60
|
||||
#define TK_TRIM 61
|
||||
#define TK_IF 62
|
||||
#define TK_NOT 63
|
||||
#define TK_EXISTS 64
|
||||
#define TK_BUFFER 65
|
||||
#define TK_CACHEMODEL 66
|
||||
#define TK_CACHESIZE 67
|
||||
#define TK_COMP 68
|
||||
#define TK_DURATION 69
|
||||
#define TK_NK_VARIABLE 70
|
||||
#define TK_MAXROWS 71
|
||||
#define TK_MINROWS 72
|
||||
#define TK_KEEP 73
|
||||
#define TK_PAGES 74
|
||||
#define TK_PAGESIZE 75
|
||||
#define TK_TSDB_PAGESIZE 76
|
||||
#define TK_PRECISION 77
|
||||
#define TK_REPLICA 78
|
||||
#define TK_VGROUPS 79
|
||||
#define TK_SINGLE_STABLE 80
|
||||
#define TK_RETENTIONS 81
|
||||
|
@ -146,194 +146,197 @@
|
|||
#define TK_ROLLUP 128
|
||||
#define TK_TTL 129
|
||||
#define TK_SMA 130
|
||||
#define TK_FIRST 131
|
||||
#define TK_LAST 132
|
||||
#define TK_SHOW 133
|
||||
#define TK_DATABASES 134
|
||||
#define TK_TABLES 135
|
||||
#define TK_STABLES 136
|
||||
#define TK_MNODES 137
|
||||
#define TK_QNODES 138
|
||||
#define TK_FUNCTIONS 139
|
||||
#define TK_INDEXES 140
|
||||
#define TK_ACCOUNTS 141
|
||||
#define TK_APPS 142
|
||||
#define TK_CONNECTIONS 143
|
||||
#define TK_LICENCES 144
|
||||
#define TK_GRANTS 145
|
||||
#define TK_QUERIES 146
|
||||
#define TK_SCORES 147
|
||||
#define TK_TOPICS 148
|
||||
#define TK_VARIABLES 149
|
||||
#define TK_CLUSTER 150
|
||||
#define TK_BNODES 151
|
||||
#define TK_SNODES 152
|
||||
#define TK_TRANSACTIONS 153
|
||||
#define TK_DISTRIBUTED 154
|
||||
#define TK_CONSUMERS 155
|
||||
#define TK_SUBSCRIPTIONS 156
|
||||
#define TK_VNODES 157
|
||||
#define TK_LIKE 158
|
||||
#define TK_TBNAME 159
|
||||
#define TK_QTAGS 160
|
||||
#define TK_AS 161
|
||||
#define TK_INDEX 162
|
||||
#define TK_FUNCTION 163
|
||||
#define TK_INTERVAL 164
|
||||
#define TK_TOPIC 165
|
||||
#define TK_WITH 166
|
||||
#define TK_META 167
|
||||
#define TK_CONSUMER 168
|
||||
#define TK_GROUP 169
|
||||
#define TK_DESC 170
|
||||
#define TK_DESCRIBE 171
|
||||
#define TK_RESET 172
|
||||
#define TK_QUERY 173
|
||||
#define TK_CACHE 174
|
||||
#define TK_EXPLAIN 175
|
||||
#define TK_ANALYZE 176
|
||||
#define TK_VERBOSE 177
|
||||
#define TK_NK_BOOL 178
|
||||
#define TK_RATIO 179
|
||||
#define TK_NK_FLOAT 180
|
||||
#define TK_OUTPUTTYPE 181
|
||||
#define TK_AGGREGATE 182
|
||||
#define TK_BUFSIZE 183
|
||||
#define TK_STREAM 184
|
||||
#define TK_INTO 185
|
||||
#define TK_TRIGGER 186
|
||||
#define TK_AT_ONCE 187
|
||||
#define TK_WINDOW_CLOSE 188
|
||||
#define TK_IGNORE 189
|
||||
#define TK_EXPIRED 190
|
||||
#define TK_FILL_HISTORY 191
|
||||
#define TK_SUBTABLE 192
|
||||
#define TK_KILL 193
|
||||
#define TK_CONNECTION 194
|
||||
#define TK_TRANSACTION 195
|
||||
#define TK_BALANCE 196
|
||||
#define TK_VGROUP 197
|
||||
#define TK_MERGE 198
|
||||
#define TK_REDISTRIBUTE 199
|
||||
#define TK_SPLIT 200
|
||||
#define TK_DELETE 201
|
||||
#define TK_INSERT 202
|
||||
#define TK_NULL 203
|
||||
#define TK_NK_QUESTION 204
|
||||
#define TK_NK_ARROW 205
|
||||
#define TK_ROWTS 206
|
||||
#define TK_QSTART 207
|
||||
#define TK_QEND 208
|
||||
#define TK_QDURATION 209
|
||||
#define TK_WSTART 210
|
||||
#define TK_WEND 211
|
||||
#define TK_WDURATION 212
|
||||
#define TK_IROWTS 213
|
||||
#define TK_CAST 214
|
||||
#define TK_NOW 215
|
||||
#define TK_TODAY 216
|
||||
#define TK_TIMEZONE 217
|
||||
#define TK_CLIENT_VERSION 218
|
||||
#define TK_SERVER_VERSION 219
|
||||
#define TK_SERVER_STATUS 220
|
||||
#define TK_CURRENT_USER 221
|
||||
#define TK_COUNT 222
|
||||
#define TK_LAST_ROW 223
|
||||
#define TK_CASE 224
|
||||
#define TK_END 225
|
||||
#define TK_WHEN 226
|
||||
#define TK_THEN 227
|
||||
#define TK_ELSE 228
|
||||
#define TK_BETWEEN 229
|
||||
#define TK_IS 230
|
||||
#define TK_NK_LT 231
|
||||
#define TK_NK_GT 232
|
||||
#define TK_NK_LE 233
|
||||
#define TK_NK_GE 234
|
||||
#define TK_NK_NE 235
|
||||
#define TK_MATCH 236
|
||||
#define TK_NMATCH 237
|
||||
#define TK_CONTAINS 238
|
||||
#define TK_IN 239
|
||||
#define TK_JOIN 240
|
||||
#define TK_INNER 241
|
||||
#define TK_SELECT 242
|
||||
#define TK_DISTINCT 243
|
||||
#define TK_WHERE 244
|
||||
#define TK_PARTITION 245
|
||||
#define TK_BY 246
|
||||
#define TK_SESSION 247
|
||||
#define TK_STATE_WINDOW 248
|
||||
#define TK_SLIDING 249
|
||||
#define TK_FILL 250
|
||||
#define TK_VALUE 251
|
||||
#define TK_NONE 252
|
||||
#define TK_PREV 253
|
||||
#define TK_LINEAR 254
|
||||
#define TK_NEXT 255
|
||||
#define TK_HAVING 256
|
||||
#define TK_RANGE 257
|
||||
#define TK_EVERY 258
|
||||
#define TK_ORDER 259
|
||||
#define TK_SLIMIT 260
|
||||
#define TK_SOFFSET 261
|
||||
#define TK_LIMIT 262
|
||||
#define TK_OFFSET 263
|
||||
#define TK_ASC 264
|
||||
#define TK_NULLS 265
|
||||
#define TK_ABORT 266
|
||||
#define TK_AFTER 267
|
||||
#define TK_ATTACH 268
|
||||
#define TK_BEFORE 269
|
||||
#define TK_BEGIN 270
|
||||
#define TK_BITAND 271
|
||||
#define TK_BITNOT 272
|
||||
#define TK_BITOR 273
|
||||
#define TK_BLOCKS 274
|
||||
#define TK_CHANGE 275
|
||||
#define TK_COMMA 276
|
||||
#define TK_COMPACT 277
|
||||
#define TK_CONCAT 278
|
||||
#define TK_CONFLICT 279
|
||||
#define TK_COPY 280
|
||||
#define TK_DEFERRED 281
|
||||
#define TK_DELIMITERS 282
|
||||
#define TK_DETACH 283
|
||||
#define TK_DIVIDE 284
|
||||
#define TK_DOT 285
|
||||
#define TK_EACH 286
|
||||
#define TK_FAIL 287
|
||||
#define TK_FILE 288
|
||||
#define TK_FOR 289
|
||||
#define TK_GLOB 290
|
||||
#define TK_ID 291
|
||||
#define TK_IMMEDIATE 292
|
||||
#define TK_IMPORT 293
|
||||
#define TK_INITIALLY 294
|
||||
#define TK_INSTEAD 295
|
||||
#define TK_ISNULL 296
|
||||
#define TK_KEY 297
|
||||
#define TK_MODULES 298
|
||||
#define TK_NK_BITNOT 299
|
||||
#define TK_NK_SEMI 300
|
||||
#define TK_NOTNULL 301
|
||||
#define TK_OF 302
|
||||
#define TK_PLUS 303
|
||||
#define TK_PRIVILEGE 304
|
||||
#define TK_RAISE 305
|
||||
#define TK_REPLACE 306
|
||||
#define TK_RESTRICT 307
|
||||
#define TK_ROW 308
|
||||
#define TK_SEMI 309
|
||||
#define TK_STAR 310
|
||||
#define TK_STATEMENT 311
|
||||
#define TK_STRING 312
|
||||
#define TK_TIMES 313
|
||||
#define TK_UPDATE 314
|
||||
#define TK_VALUES 315
|
||||
#define TK_VARIABLE 316
|
||||
#define TK_VIEW 317
|
||||
#define TK_WAL 318
|
||||
#define TK_DELETE_MARK 131
|
||||
#define TK_FIRST 132
|
||||
#define TK_LAST 133
|
||||
#define TK_SHOW 134
|
||||
#define TK_PRIVILEGES 135
|
||||
#define TK_DATABASES 136
|
||||
#define TK_TABLES 137
|
||||
#define TK_STABLES 138
|
||||
#define TK_MNODES 139
|
||||
#define TK_QNODES 140
|
||||
#define TK_FUNCTIONS 141
|
||||
#define TK_INDEXES 142
|
||||
#define TK_ACCOUNTS 143
|
||||
#define TK_APPS 144
|
||||
#define TK_CONNECTIONS 145
|
||||
#define TK_LICENCES 146
|
||||
#define TK_GRANTS 147
|
||||
#define TK_QUERIES 148
|
||||
#define TK_SCORES 149
|
||||
#define TK_TOPICS 150
|
||||
#define TK_VARIABLES 151
|
||||
#define TK_CLUSTER 152
|
||||
#define TK_BNODES 153
|
||||
#define TK_SNODES 154
|
||||
#define TK_TRANSACTIONS 155
|
||||
#define TK_DISTRIBUTED 156
|
||||
#define TK_CONSUMERS 157
|
||||
#define TK_SUBSCRIPTIONS 158
|
||||
#define TK_VNODES 159
|
||||
#define TK_LIKE 160
|
||||
#define TK_TBNAME 161
|
||||
#define TK_QTAGS 162
|
||||
#define TK_AS 163
|
||||
#define TK_INDEX 164
|
||||
#define TK_FUNCTION 165
|
||||
#define TK_INTERVAL 166
|
||||
#define TK_TOPIC 167
|
||||
#define TK_WITH 168
|
||||
#define TK_META 169
|
||||
#define TK_CONSUMER 170
|
||||
#define TK_GROUP 171
|
||||
#define TK_DESC 172
|
||||
#define TK_DESCRIBE 173
|
||||
#define TK_RESET 174
|
||||
#define TK_QUERY 175
|
||||
#define TK_CACHE 176
|
||||
#define TK_EXPLAIN 177
|
||||
#define TK_ANALYZE 178
|
||||
#define TK_VERBOSE 179
|
||||
#define TK_NK_BOOL 180
|
||||
#define TK_RATIO 181
|
||||
#define TK_NK_FLOAT 182
|
||||
#define TK_OUTPUTTYPE 183
|
||||
#define TK_AGGREGATE 184
|
||||
#define TK_BUFSIZE 185
|
||||
#define TK_STREAM 186
|
||||
#define TK_INTO 187
|
||||
#define TK_TRIGGER 188
|
||||
#define TK_AT_ONCE 189
|
||||
#define TK_WINDOW_CLOSE 190
|
||||
#define TK_IGNORE 191
|
||||
#define TK_EXPIRED 192
|
||||
#define TK_FILL_HISTORY 193
|
||||
#define TK_SUBTABLE 194
|
||||
#define TK_KILL 195
|
||||
#define TK_CONNECTION 196
|
||||
#define TK_TRANSACTION 197
|
||||
#define TK_BALANCE 198
|
||||
#define TK_VGROUP 199
|
||||
#define TK_MERGE 200
|
||||
#define TK_REDISTRIBUTE 201
|
||||
#define TK_SPLIT 202
|
||||
#define TK_DELETE 203
|
||||
#define TK_INSERT 204
|
||||
#define TK_NULL 205
|
||||
#define TK_NK_QUESTION 206
|
||||
#define TK_NK_ARROW 207
|
||||
#define TK_ROWTS 208
|
||||
#define TK_QSTART 209
|
||||
#define TK_QEND 210
|
||||
#define TK_QDURATION 211
|
||||
#define TK_WSTART 212
|
||||
#define TK_WEND 213
|
||||
#define TK_WDURATION 214
|
||||
#define TK_IROWTS 215
|
||||
#define TK_CAST 216
|
||||
#define TK_NOW 217
|
||||
#define TK_TODAY 218
|
||||
#define TK_TIMEZONE 219
|
||||
#define TK_CLIENT_VERSION 220
|
||||
#define TK_SERVER_VERSION 221
|
||||
#define TK_SERVER_STATUS 222
|
||||
#define TK_CURRENT_USER 223
|
||||
#define TK_COUNT 224
|
||||
#define TK_LAST_ROW 225
|
||||
#define TK_CASE 226
|
||||
#define TK_END 227
|
||||
#define TK_WHEN 228
|
||||
#define TK_THEN 229
|
||||
#define TK_ELSE 230
|
||||
#define TK_BETWEEN 231
|
||||
#define TK_IS 232
|
||||
#define TK_NK_LT 233
|
||||
#define TK_NK_GT 234
|
||||
#define TK_NK_LE 235
|
||||
#define TK_NK_GE 236
|
||||
#define TK_NK_NE 237
|
||||
#define TK_MATCH 238
|
||||
#define TK_NMATCH 239
|
||||
#define TK_CONTAINS 240
|
||||
#define TK_IN 241
|
||||
#define TK_JOIN 242
|
||||
#define TK_INNER 243
|
||||
#define TK_SELECT 244
|
||||
#define TK_DISTINCT 245
|
||||
#define TK_WHERE 246
|
||||
#define TK_PARTITION 247
|
||||
#define TK_BY 248
|
||||
#define TK_SESSION 249
|
||||
#define TK_STATE_WINDOW 250
|
||||
#define TK_SLIDING 251
|
||||
#define TK_FILL 252
|
||||
#define TK_VALUE 253
|
||||
#define TK_NONE 254
|
||||
#define TK_PREV 255
|
||||
#define TK_LINEAR 256
|
||||
#define TK_NEXT 257
|
||||
#define TK_HAVING 258
|
||||
#define TK_RANGE 259
|
||||
#define TK_EVERY 260
|
||||
#define TK_ORDER 261
|
||||
#define TK_SLIMIT 262
|
||||
#define TK_SOFFSET 263
|
||||
#define TK_LIMIT 264
|
||||
#define TK_OFFSET 265
|
||||
#define TK_ASC 266
|
||||
#define TK_NULLS 267
|
||||
#define TK_ABORT 268
|
||||
#define TK_AFTER 269
|
||||
#define TK_ATTACH 270
|
||||
#define TK_BEFORE 271
|
||||
#define TK_BEGIN 272
|
||||
#define TK_BITAND 273
|
||||
#define TK_BITNOT 274
|
||||
#define TK_BITOR 275
|
||||
#define TK_BLOCKS 276
|
||||
#define TK_CHANGE 277
|
||||
#define TK_COMMA 278
|
||||
#define TK_COMPACT 279
|
||||
#define TK_CONCAT 280
|
||||
#define TK_CONFLICT 281
|
||||
#define TK_COPY 282
|
||||
#define TK_DEFERRED 283
|
||||
#define TK_DELIMITERS 284
|
||||
#define TK_DETACH 285
|
||||
#define TK_DIVIDE 286
|
||||
#define TK_DOT 287
|
||||
#define TK_EACH 288
|
||||
#define TK_FAIL 289
|
||||
#define TK_FILE 290
|
||||
#define TK_FOR 291
|
||||
#define TK_GLOB 292
|
||||
#define TK_ID 293
|
||||
#define TK_IMMEDIATE 294
|
||||
#define TK_IMPORT 295
|
||||
#define TK_INITIALLY 296
|
||||
#define TK_INSTEAD 297
|
||||
#define TK_ISNULL 298
|
||||
#define TK_KEY 299
|
||||
#define TK_MODULES 300
|
||||
#define TK_NK_BITNOT 301
|
||||
#define TK_NK_SEMI 302
|
||||
#define TK_NOTNULL 303
|
||||
#define TK_OF 304
|
||||
#define TK_PLUS 305
|
||||
#define TK_PRIVILEGE 306
|
||||
#define TK_RAISE 307
|
||||
#define TK_REPLACE 308
|
||||
#define TK_RESTRICT 309
|
||||
#define TK_ROW 310
|
||||
#define TK_SEMI 311
|
||||
#define TK_STAR 312
|
||||
#define TK_STATEMENT 313
|
||||
#define TK_STRICT 314
|
||||
#define TK_STRING 315
|
||||
#define TK_TIMES 316
|
||||
#define TK_UPDATE 317
|
||||
#define TK_VALUES 318
|
||||
#define TK_VARIABLE 319
|
||||
#define TK_VIEW 320
|
||||
#define TK_WAL 321
|
||||
|
||||
#define TK_NK_SPACE 600
|
||||
#define TK_NK_COMMENT 601
|
||||
|
|
|
@@ -278,11 +278,9 @@ typedef struct {
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
#define IS_VALID_INT(_t) ((_t) >= INT32_MIN && (_t) <= INT32_MAX)
#define IS_VALID_BIGINT(_t) ((_t) >= INT64_MIN && (_t) <= INT64_MAX)
#define IS_VALID_UTINYINT(_t) ((_t) >= 0 && (_t) <= UINT8_MAX)
#define IS_VALID_USMALLINT(_t) ((_t) >= 0 && (_t) <= UINT16_MAX)
#define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) <= UINT32_MAX)
#define IS_VALID_UBIGINT(_t) ((_t) >= 0 && (_t) <= UINT64_MAX)
#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)
@@ -211,6 +211,8 @@ int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, const SName* pTableName, STab

int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists);

int32_t catalogGetCachedTableVgMeta(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, STableMeta** pTableMeta);

/**
 * Force refresh DB's local cached vgroup info.
 * @param pCtg (input, got with catalogGetHandle)
@@ -68,7 +68,7 @@ typedef struct SInputData {

typedef struct SOutputData {
int32_t numOfBlocks;
int32_t numOfRows;
int64_t numOfRows; // int32_t changed to int64_t
int32_t numOfCols;
int8_t compressed;
char* pData;
@@ -142,14 +142,17 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
*/

int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bool* hasMore, SLocalFetch* pLocal);

int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);

void qCleanExecTaskBlockBuf(qTaskInfo_t tinfo);

/**
* kill the ongoing query asynchronously
* @param tinfo qhandle
* @return
*/
int32_t qAsyncKillTask(qTaskInfo_t tinfo);
int32_t qAsyncKillTask(qTaskInfo_t tinfo, int32_t rspCode);

/**
* destroy query info structure

@@ -157,13 +160,6 @@ int32_t qAsyncKillTask(qTaskInfo_t tinfo);
*/
void qDestroyTask(qTaskInfo_t tinfo);

/**
* Get the queried table uid
* @param qHandle
* @return
*/
int64_t qGetQueriedTableUid(qTaskInfo_t tinfo);

/**
* Extract the qualified table id list, and than pass them to the TSDB driver to load the required table data blocks.
*
@ -57,7 +57,7 @@ typedef struct SFuncExecFuncs {
|
|||
#define MAX_INTERVAL_TIME_WINDOW 10000000 // maximum allowed time windows in final results
|
||||
|
||||
#define TOP_BOTTOM_QUERY_LIMIT 100
|
||||
#define FUNCTIONS_NAME_MAX_LENGTH 16
|
||||
#define FUNCTIONS_NAME_MAX_LENGTH 32
|
||||
|
||||
typedef struct SResultRowEntryInfo {
|
||||
bool initialized : 1; // output buffer has been initialized
|
||||
|
@ -115,7 +115,7 @@ typedef struct SInputColumnInfoData {
|
|||
int32_t startRowIndex; // handle started row index
|
||||
int32_t numOfRows; // the number of rows needs to be handled
|
||||
int32_t numOfInputCols; // PTS is not included
|
||||
bool colDataAggIsSet; // if agg is set or not
|
||||
bool colDataSMAIsSet; // if agg is set or not
|
||||
SColumnInfoData *pPTS; // primary timestamp column
|
||||
SColumnInfoData **pData;
|
||||
SColumnDataAgg **pColumnDataAgg;
|
||||
|
@ -137,22 +137,22 @@ typedef struct SqlFunctionCtx {
|
|||
int16_t functionId; // function id
|
||||
char *pOutput; // final result output buffer, point to sdata->data
|
||||
int32_t numOfParams;
|
||||
SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
|
||||
SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/
|
||||
int32_t offset;
|
||||
struct SResultRowEntryInfo *resultInfo;
|
||||
SSubsidiaryResInfo subsidiaries;
|
||||
SPoint1 start;
|
||||
SPoint1 end;
|
||||
SFuncExecFuncs fpSet;
|
||||
SScalarFuncExecFuncs sfp;
|
||||
struct SExprInfo *pExpr;
|
||||
struct SSDataBlock *pSrcBlock;
|
||||
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
|
||||
SSerializeDataHandle saveHandle;
|
||||
bool isStream;
|
||||
|
||||
char udfName[TSDB_FUNC_NAME_LEN];
|
||||
// input parameter, e.g., top(k, 20), the number of results of top query is kept in param
|
||||
SFunctParam *param;
|
||||
// corresponding output buffer for timestamp of each result, e.g., diff/csum
|
||||
SColumnInfoData *pTsOutput;
|
||||
int32_t offset;
|
||||
SResultRowEntryInfo *resultInfo;
|
||||
SSubsidiaryResInfo subsidiaries;
|
||||
SPoint1 start;
|
||||
SPoint1 end;
|
||||
SFuncExecFuncs fpSet;
|
||||
SScalarFuncExecFuncs sfp;
|
||||
struct SExprInfo *pExpr;
|
||||
struct SSDataBlock *pSrcBlock;
|
||||
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
|
||||
SSerializeDataHandle saveHandle;
|
||||
char udfName[TSDB_FUNC_NAME_LEN];
|
||||
} SqlFunctionCtx;
|
||||
|
||||
typedef struct tExprNode {
|
||||
|
@ -163,6 +163,7 @@ typedef struct tExprNode {
|
|||
int32_t functionId;
|
||||
int32_t num;
|
||||
struct SFunctionNode *pFunctNode;
|
||||
int32_t functionType;
|
||||
} _function;
|
||||
|
||||
struct {
|
||||
|
@ -182,7 +183,6 @@ struct SScalarParam {
|
|||
};
|
||||
|
||||
void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell);
|
||||
//int32_t getNumOfResult(SqlFunctionCtx *pCtx, int32_t num, SSDataBlock *pResBlock);
|
||||
bool isRowEntryCompleted(struct SResultRowEntryInfo *pEntry);
|
||||
bool isRowEntryInitialized(struct SResultRowEntryInfo *pEntry);
|
||||
|
||||
|
@ -194,32 +194,6 @@ typedef struct SPoint {
|
|||
int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2,
|
||||
int32_t inputType);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// udf api
|
||||
/**
|
||||
* create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf
|
||||
* @return error code
|
||||
*/
|
||||
int32_t udfcOpen();
|
||||
|
||||
/**
|
||||
* destroy udfd proxy
|
||||
* @return error code
|
||||
*/
|
||||
int32_t udfcClose();
|
||||
|
||||
/**
|
||||
* start udfd that serves udf function invocation under dnode startDnodeId
|
||||
* @param startDnodeId
|
||||
* @return
|
||||
*/
|
||||
int32_t udfStartUdfd(int32_t startDnodeId);
|
||||
/**
|
||||
* stop udfd
|
||||
* @return
|
||||
*/
|
||||
int32_t udfStopUdfd();
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -130,6 +130,7 @@ typedef enum EFunctionType {
FUNCTION_TYPE_GROUP_KEY,
FUNCTION_TYPE_CACHE_LAST_ROW,
FUNCTION_TYPE_CACHE_LAST,
FUNCTION_TYPE_TABLE_COUNT,

// distributed splitting functions
FUNCTION_TYPE_APERCENTILE_PARTIAL = 4000,
@@ -228,7 +228,7 @@ static FORCE_INLINE int32_t udfColDataSet(SUdfColumn *pColumn, uint32_t currentR
newSize = 8;
}

while (newSize < data->varLenCol.payloadLen + dataLen) {
while (newSize < (uint32_t)(data->varLenCol.payloadLen + dataLen)) {
newSize = newSize * UDF_MEMORY_EXP_GROWTH;
}

@@ -248,7 +248,7 @@ static FORCE_INLINE int32_t udfColDataSet(SUdfColumn *pColumn, uint32_t currentR
data->varLenCol.payloadLen += dataLen;
}
}
data->numOfRows = (currentRow + 1 > data->numOfRows) ? (currentRow + 1) : data->numOfRows;
data->numOfRows = ((int32_t)(currentRow + 1) > data->numOfRows) ? (int32_t)(currentRow + 1) : data->numOfRows;
return 0;
}
@@ -85,6 +85,32 @@ int32_t callUdfScalarFunc(char *udfName, SScalarParam *input, int32_t numOfCols,

int32_t cleanUpUdfs();

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// udf api
/**
* create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf
* @return error code
*/
int32_t udfcOpen();

/**
* destroy udfd proxy
* @return error code
*/
int32_t udfcClose();

/**
* start udfd that serves udf function invocation under dnode startDnodeId
* @param startDnodeId
* @return
*/
int32_t udfStartUdfd(int32_t startDnodeId);
/**
* stop udfd
* @return
*/
int32_t udfStopUdfd();

#ifdef __cplusplus
}
#endif
@ -42,9 +42,10 @@ extern "C" {
|
|||
|
||||
#define PRIVILEGE_TYPE_MASK(n) (1 << n)
|
||||
|
||||
#define PRIVILEGE_TYPE_ALL PRIVILEGE_TYPE_MASK(0)
|
||||
#define PRIVILEGE_TYPE_READ PRIVILEGE_TYPE_MASK(1)
|
||||
#define PRIVILEGE_TYPE_WRITE PRIVILEGE_TYPE_MASK(2)
|
||||
#define PRIVILEGE_TYPE_ALL PRIVILEGE_TYPE_MASK(0)
|
||||
#define PRIVILEGE_TYPE_READ PRIVILEGE_TYPE_MASK(1)
|
||||
#define PRIVILEGE_TYPE_WRITE PRIVILEGE_TYPE_MASK(2)
|
||||
#define PRIVILEGE_TYPE_SUBSCRIBE PRIVILEGE_TYPE_MASK(3)
|
||||
|
||||
#define PRIVILEGE_TYPE_TEST_MASK(val, mask) (((val) & (mask)) != 0)
|
||||
|
||||
|
@ -132,6 +133,9 @@ typedef struct STableOptions {
|
|||
SNodeList* pWatermark;
|
||||
int64_t watermark1;
|
||||
int64_t watermark2;
|
||||
SNodeList* pDeleteMark;
|
||||
int64_t deleteMark1;
|
||||
int64_t deleteMark2;
|
||||
SNodeList* pRollupFuncs;
|
||||
int32_t ttl;
|
||||
SNodeList* pSma;
|
||||
|
@ -382,6 +386,7 @@ typedef struct SStreamOptions {
|
|||
int8_t triggerType;
|
||||
SNode* pDelay;
|
||||
SNode* pWatermark;
|
||||
SNode* pDeleteMark;
|
||||
int8_t fillHistory;
|
||||
int8_t ignoreExpired;
|
||||
} SStreamOptions;
|
||||
|
@ -423,7 +428,7 @@ typedef struct SDropFunctionStmt {
|
|||
typedef struct SGrantStmt {
|
||||
ENodeType type;
|
||||
char userName[TSDB_USER_LEN];
|
||||
char dbName[TSDB_DB_NAME_LEN];
|
||||
char objName[TSDB_DB_NAME_LEN]; // db or topic
|
||||
int64_t privileges;
|
||||
} SGrantStmt;
|
||||
|
||||
|
|
|
@ -60,6 +60,12 @@ extern "C" {
|
|||
for (SListCell* cell = (NULL != (list) ? (list)->pHead : NULL); \
|
||||
(NULL != cell ? (node = &(cell->pNode), true) : (node = NULL, false)); cell = cell->pNext)
|
||||
|
||||
#define NODES_DESTORY_NODE(node) \
|
||||
do { \
|
||||
nodesDestroyNode((node)); \
|
||||
(node) = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define NODES_DESTORY_LIST(list) \
|
||||
do { \
|
||||
nodesDestroyList((list)); \
|
||||
|
@ -187,6 +193,7 @@ typedef enum ENodeType {
|
|||
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
|
||||
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
|
||||
QUERY_NODE_SHOW_VNODES_STMT,
|
||||
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
|
||||
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
|
||||
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
|
||||
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
|
||||
|
@ -257,7 +264,8 @@ typedef enum ENodeType {
|
|||
QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
|
||||
QUERY_NODE_PHYSICAL_PLAN_DELETE,
|
||||
QUERY_NODE_PHYSICAL_SUBPLAN,
|
||||
QUERY_NODE_PHYSICAL_PLAN
|
||||
QUERY_NODE_PHYSICAL_PLAN,
|
||||
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN
|
||||
} ENodeType;
|
||||
|
||||
/**
|
||||
|
|
|
@ -62,7 +62,8 @@ typedef enum EScanType {
|
|||
SCAN_TYPE_STREAM,
|
||||
SCAN_TYPE_TABLE_MERGE,
|
||||
SCAN_TYPE_BLOCK_INFO,
|
||||
SCAN_TYPE_LAST_ROW
|
||||
SCAN_TYPE_LAST_ROW,
|
||||
SCAN_TYPE_TABLE_COUNT
|
||||
} EScanType;
|
||||
|
||||
typedef struct SScanLogicNode {
|
||||
|
@ -90,6 +91,7 @@ typedef struct SScanLogicNode {
|
|||
SNode* pTagIndexCond;
|
||||
int8_t triggerType;
|
||||
int64_t watermark;
|
||||
int64_t deleteMark;
|
||||
int8_t igExpired;
|
||||
SArray* pSmaIndexes;
|
||||
SNodeList* pGroupTags;
|
||||
|
@ -212,6 +214,7 @@ typedef struct SWindowLogicNode {
|
|||
SNode* pStateExpr;
|
||||
int8_t triggerType;
|
||||
int64_t watermark;
|
||||
int64_t deleteMark;
|
||||
int8_t igExpired;
|
||||
EWindowAlgorithm windowAlgo;
|
||||
EOrder inputTsOrder;
|
||||
|
@ -323,6 +326,8 @@ typedef struct SLastRowScanPhysiNode {
|
|||
bool ignoreNull;
|
||||
} SLastRowScanPhysiNode;
|
||||
|
||||
typedef SLastRowScanPhysiNode STableCountScanPhysiNode;
|
||||
|
||||
typedef struct SSystemTableScanPhysiNode {
|
||||
SScanPhysiNode scan;
|
||||
SEpSet mgmtEpSet;
|
||||
|
@ -437,6 +442,7 @@ typedef struct SWinodwPhysiNode {
|
|||
SNode* pTsEnd; // window end timestamp
|
||||
int8_t triggerType;
|
||||
int64_t watermark;
|
||||
int64_t deleteMark;
|
||||
int8_t igExpired;
|
||||
EOrder inputTsOrder;
|
||||
EOrder outputTsOrder;
|
||||
|
|
|
@@ -106,7 +106,7 @@ typedef struct SValueNode {
char* p;
} datum;
int64_t typeData;
char unit;
int8_t unit;
} SValueNode;

typedef struct SLeftValueNode {
@@ -29,7 +29,7 @@ struct SMetaData;
typedef struct SStmtCallback {
TAOS_STMT* pStmt;
int32_t (*getTbNameFn)(TAOS_STMT*, char**);
int32_t (*setInfoFn)(TAOS_STMT*, STableMeta*, void*, char*, bool, SHashObj*, SHashObj*, const char*);
int32_t (*setInfoFn)(TAOS_STMT*, STableMeta*, void*, SName*, bool, SHashObj*, SHashObj*, const char*);
int32_t (*getExecInfoFn)(TAOS_STMT*, SHashObj**, SHashObj**);
} SStmtCallback;

@@ -108,7 +108,7 @@ int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char*
void* smlInitHandle(SQuery* pQuery);
void smlDestroyHandle(void* pHandle);
int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols, bool format, STableMeta* pTableMeta,
char* tableName, const char* sTableName, int32_t sTableNameLen, char* msgBuf, int16_t msgBufLen);
char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int16_t msgBufLen);
int32_t smlBuildOutput(void* handle, SHashObj* pVgHash);

int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
@@ -34,6 +34,7 @@ typedef struct SPlanContext {
bool showRewrite;
int8_t triggerType;
int64_t watermark;
int64_t deleteMark;
int8_t igExpired;
char* pMsg;
int32_t msgLen;
@ -58,8 +58,7 @@ typedef enum {
|
|||
#define QUERY_RSP_POLICY_QUICK 1
|
||||
|
||||
#define QUERY_MSG_MASK_SHOW_REWRITE() (1 << 0)
|
||||
#define TEST_SHOW_REWRITE_MASK(m) (((m) & QUERY_MSG_MASK_SHOW_REWRITE()) != 0)
|
||||
|
||||
#define TEST_SHOW_REWRITE_MASK(m) (((m)&QUERY_MSG_MASK_SHOW_REWRITE()) != 0)
|
||||
|
||||
typedef struct STableComInfo {
|
||||
uint8_t numOfTags; // the number of tags in schema
|
||||
|
@ -128,7 +127,8 @@ typedef struct SDBVgInfo {
|
|||
int8_t hashMethod;
|
||||
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
|
||||
int64_t stateTs;
|
||||
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
|
||||
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
|
||||
SArray* vgArray;
|
||||
} SDBVgInfo;
|
||||
|
||||
typedef struct SUseDbOutput {
|
||||
|
@ -238,6 +238,7 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t
|
|||
char* parseTagDatatoJson(void* p);
|
||||
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
|
||||
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
|
||||
void freeVgInfo(SDBVgInfo* vgInfo);
|
||||
|
||||
extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen,
|
||||
void* (*mallocFp)(int64_t));
|
||||
|
@ -259,18 +260,27 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
|
|||
#define NEED_CLIENT_HANDLE_ERROR(_code) \
|
||||
(NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
|
||||
NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
|
||||
#define NEED_REDIRECT_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || \
|
||||
(_code) == TSDB_CODE_NODE_NOT_DEPLOYED || (_code) == TSDB_CODE_SYN_NOT_LEADER || \
|
||||
(_code) == TSDB_CODE_APP_NOT_READY || (_code) == TSDB_CODE_RPC_BROKEN_LINK)
|
||||
|
||||
#define SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_SYN_NOT_LEADER || (_code) == TSDB_CODE_SYN_INTERNAL_ERROR || \
|
||||
(_code) == TSDB_CODE_VND_STOPPED || (_code) == TSDB_CODE_APP_IS_STARTING || (_code) == TSDB_CODE_APP_IS_STOPPING)
|
||||
#define SYNC_SELF_LEADER_REDIRECT_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_SYN_NOT_LEADER || (_code) == TSDB_CODE_SYN_RESTORING || (_code) == TSDB_CODE_SYN_INTERNAL_ERROR)
|
||||
#define SYNC_OTHER_LEADER_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_MNODE_NOT_FOUND)
|
||||
|
||||
#define NO_RET_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL)
|
||||
|
||||
#define NEED_REDIRECT_ERROR(_code) \
|
||||
(NO_RET_REDIRECT_ERROR(_code) || SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || \
|
||||
SYNC_SELF_LEADER_REDIRECT_ERROR(_code) || SYNC_OTHER_LEADER_REDIRECT_ERROR(_code))
|
||||
|
||||
#define NEED_CLIENT_RM_TBLMETA_REQ(_type) \
|
||||
((_type) == TDMT_VND_CREATE_TABLE || (_type) == TDMT_MND_CREATE_STB || (_type) == TDMT_VND_DROP_TABLE || \
|
||||
(_type) == TDMT_MND_DROP_STB)
|
||||
|
||||
#define NEED_SCHEDULER_REDIRECT_ERROR(_code) \
|
||||
((_code) == TSDB_CODE_RPC_REDIRECT || (_code) == TSDB_CODE_NODE_NOT_DEPLOYED || \
|
||||
(_code) == TSDB_CODE_SYN_NOT_LEADER || (_code) == TSDB_CODE_APP_NOT_READY)
|
||||
#define NEED_SCHEDULER_REDIRECT_ERROR(_code) \
|
||||
(SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || SYNC_SELF_LEADER_REDIRECT_ERROR(_code) || \
|
||||
SYNC_OTHER_LEADER_REDIRECT_ERROR(_code))
|
||||
|
||||
#define REQUEST_TOTAL_EXEC_TIMES 2
|
||||
|
||||
|
|
|
@@ -35,7 +35,7 @@ typedef struct STdbState {
TTB* pFillStateDb; // todo refactor
TTB* pSessionStateDb;
TTB* pParNameDb;
TXN txn;
TXN* txn;
} STdbState;

// incremental state storage
@ -140,15 +140,40 @@ typedef struct {
|
|||
int8_t type;
|
||||
} SStreamCheckpoint;
|
||||
|
||||
typedef struct {
|
||||
int8_t type;
|
||||
} SStreamTaskDestroy;
|
||||
|
||||
typedef struct {
|
||||
int8_t type;
|
||||
SSDataBlock* pBlock;
|
||||
} SStreamTrigger;
|
||||
|
||||
typedef struct SStreamQueueNode SStreamQueueNode;
|
||||
|
||||
struct SStreamQueueNode {
|
||||
SStreamQueueItem* item;
|
||||
SStreamQueueNode* next;
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
SStreamQueueNode* head;
|
||||
int64_t size;
|
||||
} SStreamQueueRes;
|
||||
|
||||
void streamFreeQitem(SStreamQueueItem* data);
|
||||
|
||||
bool streamQueueResEmpty(const SStreamQueueRes* pRes);
|
||||
int64_t streamQueueResSize(const SStreamQueueRes* pRes);
|
||||
SStreamQueueNode* streamQueueResFront(SStreamQueueRes* pRes);
|
||||
SStreamQueueNode* streamQueueResPop(SStreamQueueRes* pRes);
|
||||
void streamQueueResClear(SStreamQueueRes* pRes);
|
||||
SStreamQueueRes streamQueueBuildRes(SStreamQueueNode* pNode);
|
||||
|
||||
typedef struct {
|
||||
SStreamQueueNode* pHead;
|
||||
} SStreamQueue1;
|
||||
|
||||
bool streamQueueHasTask(const SStreamQueue1* pQueue);
|
||||
int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem);
|
||||
SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
|
||||
|
||||
typedef struct {
|
||||
STaosQueue* queue;
|
||||
STaosQall* qall;
|
||||
|
@ -250,31 +275,6 @@ typedef struct {
|
|||
SEpSet epSet;
|
||||
} SStreamChildEpInfo;
|
||||
|
||||
typedef struct {
|
||||
int32_t srcNodeId;
|
||||
int32_t srcChildId;
|
||||
int64_t stateSaveVer;
|
||||
int64_t stateProcessedVer;
|
||||
} SStreamCheckpointInfo;
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int64_t checkTs;
|
||||
int32_t checkpointId; // incremental
|
||||
int32_t taskId;
|
||||
SArray* checkpointVer; // SArray<SStreamCheckpointInfo>
|
||||
} SStreamMultiVgCheckpointInfo;
|
||||
|
||||
typedef struct {
|
||||
int32_t taskId;
|
||||
int32_t checkpointId; // incremental
|
||||
} SStreamCheckpointKey;
|
||||
|
||||
typedef struct {
|
||||
int32_t taskId;
|
||||
SArray* checkpointVer;
|
||||
} SStreamRecoveringState;
|
||||
|
||||
typedef struct SStreamTask {
|
||||
int64_t streamId;
|
||||
int32_t taskId;
|
||||
|
@ -339,6 +339,10 @@ typedef struct SStreamTask {
|
|||
int64_t checkReqId;
|
||||
SArray* checkReqIds; // shuffle
|
||||
int32_t refCnt;
|
||||
|
||||
int64_t checkpointingId;
|
||||
int32_t checkpointAlignCnt;
|
||||
|
||||
} SStreamTask;
|
||||
|
||||
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
|
||||
|
@ -484,6 +488,60 @@ typedef struct {
|
|||
int32_t tEncodeSStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq);
|
||||
int32_t tDecodeSStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq);
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int64_t checkpointId;
|
||||
int32_t taskId;
|
||||
int32_t nodeId;
|
||||
int64_t expireTime;
|
||||
} SStreamCheckpointSourceReq;
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int64_t checkpointId;
|
||||
int32_t taskId;
|
||||
int32_t nodeId;
|
||||
int64_t expireTime;
|
||||
} SStreamCheckpointSourceRsp;
|
||||
|
||||
int32_t tEncodeSStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckpointSourceReq* pReq);
|
||||
int32_t tDecodeSStreamCheckpointSourceReq(SDecoder* pDecoder, SStreamCheckpointSourceReq* pReq);
|
||||
|
||||
int32_t tEncodeSStreamCheckpointSourceRsp(SEncoder* pEncoder, const SStreamCheckpointSourceRsp* pRsp);
|
||||
int32_t tDecodeSStreamCheckpointSourceRsp(SDecoder* pDecoder, SStreamCheckpointSourceRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
SMsgHead msgHead;
|
||||
int64_t streamId;
|
||||
int64_t checkpointId;
|
||||
int32_t downstreamTaskId;
|
||||
int32_t downstreamNodeId;
|
||||
int32_t upstreamTaskId;
|
||||
int32_t upstreamNodeId;
|
||||
int32_t childId;
|
||||
int64_t expireTime;
|
||||
int8_t taskLevel;
|
||||
} SStreamCheckpointReq;
|
||||
|
||||
typedef struct {
|
||||
SMsgHead msgHead;
|
||||
int64_t streamId;
|
||||
int64_t checkpointId;
|
||||
int32_t downstreamTaskId;
|
||||
int32_t downstreamNodeId;
|
||||
int32_t upstreamTaskId;
|
||||
int32_t upstreamNodeId;
|
||||
int32_t childId;
|
||||
int64_t expireTime;
|
||||
int8_t taskLevel;
|
||||
} SStreamCheckpointRsp;
|
||||
|
||||
int32_t tEncodeSStreamCheckpointReq(SEncoder* pEncoder, const SStreamCheckpointReq* pReq);
|
||||
int32_t tDecodeSStreamCheckpointReq(SDecoder* pDecoder, SStreamCheckpointReq* pReq);
|
||||
|
||||
int32_t tEncodeSStreamCheckpointRsp(SEncoder* pEncoder, const SStreamCheckpointRsp* pRsp);
|
||||
int32_t tDecodeSStreamCheckpointRsp(SDecoder* pDecoder, SStreamCheckpointRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int32_t downstreamTaskId;
|
||||
|
@ -562,7 +620,7 @@ typedef struct SStreamMeta {
|
|||
SHashObj* pTasks;
|
||||
SHashObj* pRecoverStatus;
|
||||
void* ahandle;
|
||||
TXN txn;
|
||||
TXN* txn;
|
||||
FTaskExpand* expandFunc;
|
||||
int32_t vgId;
|
||||
SRWLatch lock;
|
||||
|
@ -573,18 +631,22 @@ void streamMetaClose(SStreamMeta* streamMeta);
|
|||
|
||||
int32_t streamMetaAddTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask);
|
||||
int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t startVer, char* msg, int32_t msgLen);
|
||||
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
|
||||
SStreamTask* streamMetaGetTask(SStreamMeta* pMeta, int32_t taskId);
|
||||
|
||||
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId);
|
||||
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
|
||||
void streamMetaRemoveTask1(SStreamMeta* pMeta, int32_t taskId);
|
||||
void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
|
||||
|
||||
int32_t streamMetaBegin(SStreamMeta* pMeta);
|
||||
int32_t streamMetaCommit(SStreamMeta* pMeta);
|
||||
int32_t streamMetaRollBack(SStreamMeta* pMeta);
|
||||
int32_t streamLoadTasks(SStreamMeta* pMeta);
|
||||
|
||||
// checkpoint
|
||||
int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
|
||||
int32_t streamProcessCheckpointReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointReq* pReq);
|
||||
int32_t streamProcessCheckpointRsp(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointRsp* pRsp);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -25,7 +25,7 @@ extern "C" {
|
|||
#include "tlrucache.h"
|
||||
#include "tmsgcb.h"
|
||||
|
||||
#define SYNC_RESP_TTL_MS 10000000
|
||||
#define SYNC_RESP_TTL_MS 30000
|
||||
#define SYNC_SPEED_UP_HB_TIMER 400
|
||||
#define SYNC_SPEED_UP_AFTER_MS (1000 * 20)
|
||||
#define SYNC_SLOW_DOWN_RANGE 100
|
||||
|
@ -40,13 +40,19 @@ extern "C" {
|
|||
#define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10
|
||||
#define SNAPSHOT_WAIT_MS 1000 * 30
|
||||
|
||||
#define SYNC_MAX_RETRY_BACKOFF 5
|
||||
#define SYNC_LOG_REPL_RETRY_WAIT_MS 100
|
||||
#define SYNC_APPEND_ENTRIES_TIMEOUT_MS 10000
|
||||
#define SYNC_HEART_TIMEOUT_MS 1000 * 8
|
||||
#define SYNC_HEART_TIMEOUT_MS 1000 * 15
|
||||
|
||||
#define SYNC_HEARTBEAT_SLOW_MS 1500
|
||||
#define SYNC_HEARTBEAT_REPLY_SLOW_MS 1500
|
||||
#define SYNC_SNAP_RESEND_MS 1000 * 60
|
||||
|
||||
#define SYNC_MAX_BATCH_SIZE 1
|
||||
#define SYNC_INDEX_BEGIN 0
|
||||
#define SYNC_INDEX_INVALID -1
|
||||
#define SYNC_TERM_INVALID 0xFFFFFFFFFFFFFFFF
|
||||
#define SYNC_TERM_INVALID -1 // 0xFFFFFFFFFFFFFFFF
|
||||
|
||||
typedef enum {
|
||||
SYNC_STRATEGY_NO_SNAPSHOT = 0,
|
||||
|
@ -57,13 +63,14 @@ typedef enum {
|
|||
typedef uint64_t SyncNodeId;
|
||||
typedef int32_t SyncGroupId;
|
||||
typedef int64_t SyncIndex;
|
||||
typedef uint64_t SyncTerm;
|
||||
typedef int64_t SyncTerm;
|
||||
|
||||
typedef struct SSyncNode SSyncNode;
|
||||
typedef struct SWal SWal;
|
||||
typedef struct SSyncRaftEntry SSyncRaftEntry;
|
||||
|
||||
typedef enum {
|
||||
TAOS_SYNC_STATE_OFFLINE = 0,
|
||||
TAOS_SYNC_STATE_FOLLOWER = 100,
|
||||
TAOS_SYNC_STATE_CANDIDATE = 101,
|
||||
TAOS_SYNC_STATE_LEADER = 102,
|
||||
|
@ -132,13 +139,13 @@ typedef struct SSnapshotMeta {
|
|||
typedef struct SSyncFSM {
|
||||
void* data;
|
||||
|
||||
void (*FpCommitCb)(const struct SSyncFSM* pFsm, const SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
|
||||
void (*FpPreCommitCb)(const struct SSyncFSM* pFsm, const SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
|
||||
void (*FpRollBackCb)(const struct SSyncFSM* pFsm, const SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
|
||||
int32_t (*FpCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
|
||||
int32_t (*FpPreCommitCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
|
||||
void (*FpRollBackCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
|
||||
|
||||
void (*FpRestoreFinishCb)(const struct SSyncFSM* pFsm);
|
||||
void (*FpReConfigCb)(const struct SSyncFSM* pFsm, const SRpcMsg* pMsg, const SReConfigCbMeta* pMeta);
|
||||
void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, const SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
|
||||
void (*FpReConfigCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SReConfigCbMeta* pMeta);
|
||||
void (*FpLeaderTransferCb)(const struct SSyncFSM* pFsm, SRpcMsg* pMsg, const SFsmCbMeta* pMeta);
|
||||
bool (*FpApplyQueueEmptyCb)(const struct SSyncFSM* pFsm);
|
||||
int32_t (*FpApplyQueueItems)(const struct SSyncFSM* pFsm);
|
||||
|
||||
|
@ -162,7 +169,10 @@ typedef struct SSyncFSM {
|
|||
// SWal implements it
|
||||
typedef struct SSyncLogStore {
|
||||
SLRUCache* pCache;
|
||||
void* data;
|
||||
int32_t cacheHit;
|
||||
int32_t cacheMiss;
|
||||
|
||||
void* data;
|
||||
|
||||
int32_t (*syncLogUpdateCommitIndex)(struct SSyncLogStore* pLogStore, SyncIndex index);
|
||||
SyncIndex (*syncLogCommitIndex)(struct SSyncLogStore* pLogStore);
|
||||
|
@ -204,18 +214,23 @@ typedef struct SSyncInfo {
|
|||
int32_t (*syncEqCtrlMsg)(const SMsgCb* msgcb, SRpcMsg* pMsg);
|
||||
} SSyncInfo;
|
||||
|
||||
// if state == leader
|
||||
// if restored, display "leader"
|
||||
// if !restored && canRead, display "leader*"
|
||||
// if !restored && !canRead, display "leader**"
|
||||
typedef struct SSyncState {
|
||||
ESyncState state;
|
||||
bool restored;
|
||||
bool canRead;
|
||||
} SSyncState;
|
||||
|
||||
int32_t syncInit();
|
||||
void syncCleanUp();
|
||||
int64_t syncOpen(SSyncInfo* pSyncInfo);
|
||||
void syncStart(int64_t rid);
|
||||
int32_t syncStart(int64_t rid);
|
||||
void syncStop(int64_t rid);
|
||||
void syncPreStop(int64_t rid);
|
||||
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak);
|
||||
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
|
||||
int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg);
|
||||
int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg);
|
||||
int32_t syncBeginSnapshot(int64_t rid, int64_t lastApplyIndex);
|
||||
|
@ -223,6 +238,9 @@ int32_t syncEndSnapshot(int64_t rid);
|
|||
int32_t syncLeaderTransfer(int64_t rid);
|
||||
int32_t syncStepDown(int64_t rid, SyncTerm newTerm);
|
||||
bool syncIsReadyForRead(int64_t rid);
|
||||
bool syncSnapshotSending(int64_t rid);
|
||||
bool syncSnapshotRecving(int64_t rid);
|
||||
int32_t syncSendTimeoutRsp(int64_t rid, int64_t seq);
|
||||
|
||||
SSyncState syncGetState(int64_t rid);
|
||||
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
|
||||
|
|
|
@@ -72,18 +72,25 @@ typedef struct SRpcMsg {
typedef void (*RpcCfp)(void *parent, SRpcMsg *, SEpSet *epset);
typedef bool (*RpcRfp)(int32_t code, tmsg_t msgType);
typedef bool (*RpcTfp)(int32_t code, tmsg_t msgType);
typedef bool (*RpcFFfp)(tmsg_t msgType);
typedef void (*RpcDfp)(void *ahandle);

typedef struct SRpcInit {
char localFqdn[TSDB_FQDN_LEN];
uint16_t localPort; // local port
char *label; // for debug purpose
int32_t numOfThreads; // number of threads to handle connections
int32_t sessions; // number of sessions allowed
int8_t connType; // TAOS_CONN_UDP, TAOS_CONN_TCPC, TAOS_CONN_TCPS
int32_t idleTime; // milliseconds, 0 means idle timer is disabled
int32_t retryLimit; // retry limit
int32_t retryInterval; // retry interval ms
uint16_t localPort; // local port
char *label; // for debug purpose
int32_t numOfThreads; // number of threads to handle connections
int32_t sessions; // number of sessions allowed
int8_t connType; // TAOS_CONN_UDP, TAOS_CONN_TCPC, TAOS_CONN_TCPS
int32_t idleTime; // milliseconds, 0 means idle timer is disabled

int32_t retryMinInterval; // retry init interval
int32_t retryStepFactor; // retry interval factor
int32_t retryMaxInterval; // retry max interval
int64_t retryMaxTimouet;

int32_t failFastThreshold;
int32_t failFastInterval;

int32_t compressSize; // -1: no compress, 0 : all data compressed, size: compress data if larger than size
int8_t encryption; // encrypt or not

@@ -102,6 +109,8 @@ typedef struct SRpcInit {

// destroy client ahandle;
RpcDfp dfp;
// fail fast fp
RpcFFfp ffp;

void *parent;
} SRpcInit;
@ -33,16 +33,16 @@ extern "C" {
|
|||
#define wTrace(...) { if (wDebugFlag & DEBUG_TRACE) { taosPrintLog("WAL ", DEBUG_TRACE, wDebugFlag, __VA_ARGS__); }}
|
||||
// clang-format on
|
||||
|
||||
#define WAL_PROTO_VER 0
|
||||
#define WAL_NOSUFFIX_LEN 20
|
||||
#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
|
||||
#define WAL_LOG_SUFFIX "log"
|
||||
#define WAL_INDEX_SUFFIX "idx"
|
||||
#define WAL_REFRESH_MS 1000
|
||||
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
|
||||
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
|
||||
#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL
|
||||
#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3)
|
||||
#define WAL_PROTO_VER 0
|
||||
#define WAL_NOSUFFIX_LEN 20
|
||||
#define WAL_SUFFIX_AT (WAL_NOSUFFIX_LEN + 1)
|
||||
#define WAL_LOG_SUFFIX "log"
|
||||
#define WAL_INDEX_SUFFIX "idx"
|
||||
#define WAL_REFRESH_MS 1000
|
||||
#define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12)
|
||||
#define WAL_FILE_LEN (WAL_PATH_LEN + 32)
|
||||
#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL
|
||||
#define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3)
|
||||
|
||||
typedef enum {
|
||||
TAOS_WAL_WRITE = 1,
|
||||
|
@ -107,6 +107,8 @@ typedef struct SWal {
|
|||
TdFilePtr pIdxFile;
|
||||
int32_t writeCur;
|
||||
SArray *fileInfoSet; // SArray<SWalFileInfo>
|
||||
// gc
|
||||
SArray *toDeleteFiles; // SArray<SWalFileInfo>
|
||||
// status
|
||||
int64_t totSize;
|
||||
int64_t lastRollSeq;
|
||||
|
@ -170,7 +172,7 @@ int32_t walWriteWithSyncInfo(SWal *, int64_t index, tmsg_t msgType, SWalSyncInfo
|
|||
|
||||
// Assign version automatically and return to caller,
|
||||
// -1 will be returned for failed writes
|
||||
int64_t walAppendLog(SWal *, tmsg_t msgType, SWalSyncInfo syncMeta, const void *body, int32_t bodyLen);
|
||||
int64_t walAppendLog(SWal *, int64_t index, tmsg_t msgType, SWalSyncInfo syncMeta, const void *body, int32_t bodyLen);
|
||||
|
||||
void walFsync(SWal *, bool force);
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@ extern "C" {
|
|||
|
||||
#if !defined(WINDOWS)
|
||||
#include <dirent.h>
|
||||
#include <execinfo.h>
|
||||
#include <libgen.h>
|
||||
#include <sched.h>
|
||||
#include <unistd.h>
|
||||
|
@ -48,6 +49,9 @@ extern "C" {
|
|||
#else
|
||||
#include <argp.h>
|
||||
#include <sys/prctl.h>
|
||||
#if defined(_TD_X86_)
|
||||
#include <cpuid.h>
|
||||
#endif
|
||||
#endif
|
||||
#else
|
||||
|
||||
|
@ -82,6 +86,12 @@ extern "C" {
|
|||
#include <wchar.h>
|
||||
#include <wctype.h>
|
||||
|
||||
#if __AVX__
|
||||
#include <immintrin.h>
|
||||
#elif __SSE4_2__
|
||||
#include <nmmintrin.h>
|
||||
#endif
|
||||
|
||||
#include "osThread.h"
|
||||
|
||||
#include "osAtomic.h"
|
||||
|
|
|
@ -120,12 +120,6 @@ void syslog(int unused, const char *format, ...);
|
|||
#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))
|
||||
#define POINTER_DISTANCE(p1, p2) ((char *)(p1) - (char *)(p2))
|
||||
|
||||
#ifndef NDEBUG
|
||||
#define ASSERT(x) assert(x)
|
||||
#else
|
||||
#define ASSERT(x)
|
||||
#endif
|
||||
|
||||
#ifndef UNUSED
|
||||
#define UNUSED(x) ((void)(x))
|
||||
#endif
|
||||
|
@ -168,22 +162,22 @@ void syslog(int unused, const char *format, ...);
|
|||
} \
|
||||
} while (0)
|
||||
|
||||
#define DEFAULT_DOUBLE_COMP(x, y) \
|
||||
do { \
|
||||
if (isnan(x) && isnan(y)) { \
|
||||
return 0; \
|
||||
} \
|
||||
if (isnan(x)) { \
|
||||
return -1; \
|
||||
} \
|
||||
if (isnan(y)) { \
|
||||
return 1; \
|
||||
} \
|
||||
if ((x) == (y)) { \
|
||||
return 0; \
|
||||
} else { \
|
||||
return (x) < (y) ? -1 : 1; \
|
||||
} \
|
||||
#define DEFAULT_DOUBLE_COMP(x, y) \
|
||||
do { \
|
||||
if (isnan(x) && isnan(y)) { \
|
||||
return 0; \
|
||||
} \
|
||||
if (isnan(x)) { \
|
||||
return -1; \
|
||||
} \
|
||||
if (isnan(y)) { \
|
||||
return 1; \
|
||||
} \
|
||||
if (fabs((x) - (y)) <= DBL_EPSILON) { \
|
||||
return 0; \
|
||||
} else { \
|
||||
return (x) < (y) ? -1 : 1; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define DEFAULT_FLOAT_COMP(x, y) DEFAULT_DOUBLE_COMP(x, y)
|
||||
|
|
|
@@ -62,6 +62,7 @@ int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
bool taosIsDir(const char *dirname);
char *taosDirName(char *dirname);
char *taosDirEntryBaseName(char *dirname);
void taosGetCwd(char *buf, int32_t len);

TdDirPtr taosOpenDir(const char *dirname);
TdDirEntryPtr taosReadDir(TdDirPtr pDir);
@@ -36,6 +36,11 @@ extern int64_t tsStreamMax;
extern float tsNumOfCores;
extern int64_t tsTotalMemoryKB;
extern char *tsProcPath;
extern char tsSIMDBuiltins;
extern char tsSSE42Enable;
extern char tsAVXEnable;
extern char tsAVX2Enable;
extern char tsFMAEnable;

extern char configDir[];
extern char tsDataDir[];
@@ -88,6 +88,7 @@ int32_t taosFsyncFile(TdFilePtr pFile);
int64_t taosReadFile(TdFilePtr pFile, void *buf, int64_t count);
int64_t taosPReadFile(TdFilePtr pFile, void *buf, int64_t count, int64_t offset);
int64_t taosWriteFile(TdFilePtr pFile, const void *buf, int64_t count);
int64_t taosPWriteFile(TdFilePtr pFile, const void *buf, int64_t count, int64_t offset);
void taosFprintfFile(TdFilePtr pFile, const char *format, ...);

int64_t taosGetLineFile(TdFilePtr pFile, char **__restrict ptrBuf);
@@ -37,6 +37,7 @@ void taosMemoryFree(void *ptr);
int64_t taosMemorySize(void *ptr);
void taosPrintBackTrace();
void taosMemoryTrim(int32_t size);
void *taosMemoryMallocAlign(uint32_t alignment, int64_t size);

#define taosMemoryFreeClear(ptr) \
do { \
@@ -29,7 +29,7 @@ typedef dispatch_semaphore_t tsem_t;

int tsem_init(tsem_t *sem, int pshared, unsigned int value);
int tsem_wait(tsem_t *sem);
int tsem_timewait(tsem_t *sim, int64_t nanosecs);
int tsem_timewait(tsem_t *sim, int64_t milis);
int tsem_post(tsem_t *sem);
int tsem_destroy(tsem_t *sem);

@@ -38,7 +38,7 @@ int tsem_destroy(tsem_t *sem);
#define tsem_t sem_t
#define tsem_init sem_init
int tsem_wait(tsem_t *sem);
int tsem_timewait(tsem_t *sim, int64_t nanosecs);
int tsem_timewait(tsem_t *sim, int64_t milis);
#define tsem_post sem_post
#define tsem_destroy sem_destroy
@@ -169,6 +169,9 @@ void taosSetMaskSIGPIPE();
uint32_t taosInetAddr(const char *ipAddr);
const char *taosInetNtoa(struct in_addr ipInt, char *dstStr, int32_t len);

uint64_t taosHton64(uint64_t val);
uint64_t taosNtoh64(uint64_t val);

#ifdef __cplusplus
}
#endif
@@ -62,7 +62,7 @@ typedef int32_t TdUcs4;
int32_t taosUcs4len(TdUcs4 *ucs4);
int64_t taosStr2int64(const char *str);

void taosConvInit(void);
int32_t taosConvInit(void);
void taosConvDestroy();
int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs);
bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len);
@@ -40,6 +40,7 @@ int32_t taosGetOsReleaseName(char *releaseName, int32_t maxLen);
int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores);
int32_t taosGetCpuCores(float *numOfCores);
void taosGetCpuUsage(double *cpu_system, double *cpu_engine);
int32_t taosGetCpuInstructions(char* sse42, char* avx, char* avx2, char* fma);
int32_t taosGetTotalMemory(int64_t *totalKB);
int32_t taosGetProcMemory(int64_t *usedKB);
int32_t taosGetSysMemory(int64_t *usedKB);
@@ -46,6 +46,29 @@ void taosSetTerminalMode();
int32_t taosGetOldTerminalMode();
void taosResetTerminalMode();

#if !defined(WINDOWS)
#define taosPrintTrace(flags, level, dflag) \
{ \
void* array[100]; \
int32_t size = backtrace(array, 100); \
char** strings = backtrace_symbols(array, size); \
if (strings != NULL) { \
taosPrintLog(flags, level, dflag, "obtained %d stack frames", size); \
for (int32_t i = 0; i < size; i++) { \
taosPrintLog(flags, level, dflag, "frame:%d, %s", i, strings[i]); \
} \
} \
\
taosMemoryFree(strings); \
}
#else
#define taosPrintTrace(flags, level, dflag) \
{ \
taosPrintLog(flags, level, dflag, \
"backtrace not implemented on windows, so detailed stack information cannot be printed"); \
}
#endif

#ifdef __cplusplus
}
#endif
@ -35,6 +35,7 @@ extern "C" {
|
|||
#ifdef WINDOWS
|
||||
|
||||
#define CLOCK_REALTIME 0
|
||||
#define CLOCK_MONOTONIC 0
|
||||
|
||||
#define MILLISECOND_PER_SECOND (1000i64)
|
||||
#else
|
||||
|
@ -82,6 +83,13 @@ static FORCE_INLINE int64_t taosGetTimestampNs() {
|
|||
return (int64_t)systemTime.tv_sec * 1000000000LL + (int64_t)systemTime.tv_nsec;
|
||||
}
|
||||
|
||||
//@return timestamp of monotonic clock in millisecond
|
||||
static FORCE_INLINE int64_t taosGetMonoTimestampMs() {
|
||||
struct timespec systemTime = {0};
|
||||
taosClockGetTime(CLOCK_MONOTONIC, &systemTime);
|
||||
return (int64_t)systemTime.tv_sec * 1000LL + (int64_t)systemTime.tv_nsec / 1000000;
|
||||
}
|
||||
|
||||
char *taosStrpTime(const char *buf, const char *fmt, struct tm *tm);
|
||||
struct tm *taosLocalTime(const time_t *timep, struct tm *result);
|
||||
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
|
||||
|
|
|
@ -27,7 +27,8 @@ extern "C" {
|
|||
typedef int32_t (*__compar_fn_t)(const void *, const void *);
|
||||
#endif
|
||||
|
||||
typedef void *(*FCopy)(void *);
|
||||
typedef void *(*__array_item_dup_fn_t)(void *);
|
||||
|
||||
typedef void (*FDelete)(void *);
|
||||
typedef int32_t (*FEncode)(void **buf, const void *dst);
|
||||
typedef void *(*FDecode)(const void *buf, void *dst);
|
||||
|
@ -41,7 +42,6 @@ typedef void *(*FDecode)(const void *buf, void *dst);
|
|||
#define elePtrAt(base, size, idx) (void *)((char *)(base) + (size) * (idx))
|
||||
|
||||
typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, const void *param);
|
||||
typedef void (*__ext_swap_fn_t)(void *p1, void *p2, const void *param);
|
||||
|
||||
/**
|
||||
* quick sort, with the compare function requiring additional parameters support
|
||||
|
|
Some files were not shown because too many files have changed in this diff.