Merge branch '3.0' into refact/fillhistory
This commit is contained in:
commit
ccbea13012
|
@ -18,7 +18,7 @@
|
||||||
注意:修改文档的分支要以`docs/`为开头,以免进行不必要的测试。
|
注意:修改文档的分支要以`docs/`为开头,以免进行不必要的测试。
|
||||||
4. 创建pull request,将自己的分支合并到开发分支`3.0`,我们开发团队将尽快审核。
|
4. 创建pull request,将自己的分支合并到开发分支`3.0`,我们开发团队将尽快审核。
|
||||||
|
|
||||||
如遇任何问题,请添加官方微信TDengineECO。我们的团队会帮忙解决。
|
如遇任何问题,请添加官方微信 tdengine1。我们的团队会帮忙解决。
|
||||||
|
|
||||||
## 给贡献者的礼品
|
## 给贡献者的礼品
|
||||||
|
|
||||||
|
@ -48,4 +48,4 @@ TDengine 社区致力于让更多的开发者理解和使用它。
|
||||||
|
|
||||||
## 联系我们
|
## 联系我们
|
||||||
|
|
||||||
如果您有什么问题需要解决,或者有什么问题需要解答,可以添加微信:TDengineECO
|
如果您有什么问题需要解决,或者有什么问题需要解答,可以添加微信:tdengine1。
|
||||||
|
|
14
README-CN.md
14
README-CN.md
|
@ -52,7 +52,7 @@ TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBench
|
||||||
### Ubuntu 18.04 及以上版本 & Debian:
|
### Ubuntu 18.04 及以上版本 & Debian:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
|
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 为 taos-tools 安装编译需要的软件
|
#### 为 taos-tools 安装编译需要的软件
|
||||||
|
@ -68,14 +68,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
|
||||||
```bash
|
```bash
|
||||||
sudo yum install epel-release
|
sudo yum install epel-release
|
||||||
sudo yum update
|
sudo yum update
|
||||||
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
|
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
|
||||||
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
||||||
```
|
```
|
||||||
|
|
||||||
### CentOS 8 & Fedora
|
### CentOS 8/Fedora/Rocky Linux
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 在 CentOS 上构建 taosTools 安装依赖软件
|
#### 在 CentOS 上构建 taosTools 安装依赖软件
|
||||||
|
@ -88,7 +88,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
||||||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||||
```
|
```
|
||||||
|
|
||||||
#### CentOS 8/Rocky Linux
|
#### CentOS 8/Fedora/Rocky Linux
|
||||||
|
|
||||||
```
|
```
|
||||||
sudo yum install -y epel-release
|
sudo yum install -y epel-release
|
||||||
|
@ -101,7 +101,7 @@ sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson
|
||||||
|
|
||||||
若 powertools 安装失败,可以尝试改用:
|
若 powertools 安装失败,可以尝试改用:
|
||||||
```
|
```
|
||||||
sudo yum config-manager --set-enabled Powertools
|
sudo yum config-manager --set-enabled powertools
|
||||||
```
|
```
|
||||||
|
|
||||||
#### CentOS + devtoolset
|
#### CentOS + devtoolset
|
||||||
|
@ -117,7 +117,7 @@ scl enable devtoolset-9 -- bash
|
||||||
### macOS
|
### macOS
|
||||||
|
|
||||||
```
|
```
|
||||||
brew install argp-standalone pkgconfig
|
brew install argp-standalone pkgconfig geos
|
||||||
```
|
```
|
||||||
|
|
||||||
### 设置 golang 开发环境
|
### 设置 golang 开发环境
|
||||||
|
|
12
README.md
12
README.md
|
@ -60,7 +60,7 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t
|
||||||
### Ubuntu 18.04 and above or Debian
|
### Ubuntu 18.04 and above or Debian
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
|
sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev libgeos-dev
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Install build dependencies for taosTools
|
#### Install build dependencies for taosTools
|
||||||
|
@ -76,14 +76,14 @@ sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-d
|
||||||
```bash
|
```bash
|
||||||
sudo yum install epel-release
|
sudo yum install epel-release
|
||||||
sudo yum update
|
sudo yum update
|
||||||
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel
|
sudo yum install -y gcc gcc-c++ make cmake3 git openssl-devel geos geos-devel
|
||||||
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
|
||||||
```
|
```
|
||||||
|
|
||||||
### CentOS 8 & Fedora
|
### CentOS 8/Fedora/Rocky Linux
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel geos geos-devel
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Install build dependencies for taosTools on CentOS
|
#### Install build dependencies for taosTools on CentOS
|
||||||
|
@ -94,7 +94,7 @@ sudo dnf install -y gcc gcc-c++ make cmake epel-release git openssl-devel
|
||||||
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
|
||||||
```
|
```
|
||||||
|
|
||||||
#### CentOS 8/Rocky Linux
|
#### CentOS 8/Fedora/Rocky Linux
|
||||||
|
|
||||||
```
|
```
|
||||||
sudo yum install -y epel-release
|
sudo yum install -y epel-release
|
||||||
|
@ -124,7 +124,7 @@ scl enable devtoolset-9 -- bash
|
||||||
### macOS
|
### macOS
|
||||||
|
|
||||||
```
|
```
|
||||||
brew install argp-standalone pkgconfig
|
brew install argp-standalone pkgconfig geos
|
||||||
```
|
```
|
||||||
|
|
||||||
### Setup golang environment
|
### Setup golang environment
|
||||||
|
|
|
@ -64,6 +64,13 @@ IF(${TD_WINDOWS})
|
||||||
ON
|
ON
|
||||||
)
|
)
|
||||||
|
|
||||||
|
MESSAGE("build geos Win32")
|
||||||
|
option(
|
||||||
|
BUILD_GEOS
|
||||||
|
"If build geos on Windows"
|
||||||
|
ON
|
||||||
|
)
|
||||||
|
|
||||||
ELSEIF (TD_DARWIN_64)
|
ELSEIF (TD_DARWIN_64)
|
||||||
IF(${BUILD_TEST})
|
IF(${BUILD_TEST})
|
||||||
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
|
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
|
||||||
|
|
|
@ -57,6 +57,8 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
|
||||||
SET(TD_DARWIN TRUE)
|
SET(TD_DARWIN TRUE)
|
||||||
SET(OSTYPE "macOS")
|
SET(OSTYPE "macOS")
|
||||||
ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
|
ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
|
||||||
|
INCLUDE_DIRECTORIES(/usr/local/include)
|
||||||
|
LINK_DIRECTORIES(/usr/local/lib)
|
||||||
|
|
||||||
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
|
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
|
||||||
MESSAGE("Current system arch is arm64")
|
MESSAGE("Current system arch is arm64")
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
IF (DEFINED VERNUMBER)
|
IF (DEFINED VERNUMBER)
|
||||||
SET(TD_VER_NUMBER ${VERNUMBER})
|
SET(TD_VER_NUMBER ${VERNUMBER})
|
||||||
ELSE ()
|
ELSE ()
|
||||||
SET(TD_VER_NUMBER "3.0.4.1")
|
SET(TD_VER_NUMBER "3.0.4.3")
|
||||||
ENDIF ()
|
ENDIF ()
|
||||||
|
|
||||||
IF (DEFINED VERCOMPATIBLE)
|
IF (DEFINED VERCOMPATIBLE)
|
||||||
|
|
|
@ -0,0 +1,12 @@
|
||||||
|
|
||||||
|
# geos
|
||||||
|
ExternalProject_Add(geos
|
||||||
|
GIT_REPOSITORY https://github.com/libgeos/geos.git
|
||||||
|
GIT_TAG 3.11.2
|
||||||
|
SOURCE_DIR "${TD_CONTRIB_DIR}/geos"
|
||||||
|
BINARY_DIR ""
|
||||||
|
CONFIGURE_COMMAND ""
|
||||||
|
BUILD_COMMAND ""
|
||||||
|
INSTALL_COMMAND ""
|
||||||
|
TEST_COMMAND ""
|
||||||
|
)
|
|
@ -2,6 +2,7 @@
|
||||||
# stub
|
# stub
|
||||||
ExternalProject_Add(stub
|
ExternalProject_Add(stub
|
||||||
GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
|
GIT_REPOSITORY https://github.com/coolxv/cpp-stub.git
|
||||||
|
GIT_TAG 5e903b8e
|
||||||
GIT_SUBMODULES "src"
|
GIT_SUBMODULES "src"
|
||||||
SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
|
SOURCE_DIR "${TD_CONTRIB_DIR}/cpp-stub"
|
||||||
BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"
|
BINARY_DIR "${TD_CONTRIB_DIR}/cpp-stub/src"
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# taosadapter
|
# taosadapter
|
||||||
ExternalProject_Add(taosadapter
|
ExternalProject_Add(taosadapter
|
||||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||||
GIT_TAG 565ca21
|
GIT_TAG 3.0
|
||||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
|
||||||
BINARY_DIR ""
|
BINARY_DIR ""
|
||||||
#BUILD_IN_SOURCE TRUE
|
#BUILD_IN_SOURCE TRUE
|
||||||
|
|
|
@ -134,6 +134,11 @@ if(${BUILD_ADDR2LINE})
|
||||||
endif(NOT ${TD_WINDOWS})
|
endif(NOT ${TD_WINDOWS})
|
||||||
endif(${BUILD_ADDR2LINE})
|
endif(${BUILD_ADDR2LINE})
|
||||||
|
|
||||||
|
# geos
|
||||||
|
if(${BUILD_GEOS})
|
||||||
|
cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||||
|
endif()
|
||||||
|
|
||||||
# download dependencies
|
# download dependencies
|
||||||
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||||
|
@ -470,6 +475,15 @@ if(${BUILD_ADDR2LINE})
|
||||||
endif(NOT ${TD_WINDOWS})
|
endif(NOT ${TD_WINDOWS})
|
||||||
endif(${BUILD_ADDR2LINE})
|
endif(${BUILD_ADDR2LINE})
|
||||||
|
|
||||||
|
# geos
|
||||||
|
if(${BUILD_GEOS})
|
||||||
|
option(BUILD_SHARED_LIBS "Build GEOS with shared libraries" OFF)
|
||||||
|
add_subdirectory(geos EXCLUDE_FROM_ALL)
|
||||||
|
target_include_directories(
|
||||||
|
geos_c
|
||||||
|
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/geos/include>
|
||||||
|
)
|
||||||
|
endif(${BUILD_GEOS})
|
||||||
|
|
||||||
# ================================================================================================
|
# ================================================================================================
|
||||||
# Build test
|
# Build test
|
||||||
|
|
|
@ -4,7 +4,7 @@ if(${BUILD_DOCS})
|
||||||
find_package(Doxygen)
|
find_package(Doxygen)
|
||||||
if (DOXYGEN_FOUND)
|
if (DOXYGEN_FOUND)
|
||||||
# Build the doc
|
# Build the doc
|
||||||
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/Doxyfile.in)
|
set(DOXYGEN_IN ${TD_SOURCE_DIR}/docs/doxgen/Doxyfile.in)
|
||||||
set(DOXYGEN_OUT ${CMAKE_BINARY_DIR}/Doxyfile)
|
set(DOXYGEN_OUT ${CMAKE_BINARY_DIR}/Doxyfile)
|
||||||
|
|
||||||
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
|
configure_file(${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
|
||||||
|
|
|
@ -83,7 +83,7 @@ If `maven` is used to manage the projects, what needs to be done is only adding
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.0.0</version>
|
<version>3.2.1</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -285,10 +285,10 @@ You configure the following parameters when creating a consumer:
|
||||||
|
|
||||||
| Parameter | Type | Description | Remarks |
|
| Parameter | Type | Description | Remarks |
|
||||||
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
||||||
| `td.connect.ip` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
| `td.connect.ip` | string | IP address of the server side | |
|
||||||
| `td.connect.user` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
| `td.connect.user` | string | User Name | |
|
||||||
| `td.connect.pass` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
| `td.connect.pass` | string | Password | |
|
||||||
| `td.connect.port` | string | Used in establishing a connection; same as `taos_connect` | Only valid for establishing native connection |
|
| `td.connect.port` | string | Port of the server side | |
|
||||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
|
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192. |
|
||||||
| `client.id` | string | Client ID | Maximum length: 192. |
|
| `client.id` | string | Client ID | Maximum length: 192. |
|
||||||
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
||||||
|
@ -325,6 +325,7 @@ Java programs use the following parameters:
|
||||||
|
|
||||||
| Parameter | Type | Description | Remarks |
|
| Parameter | Type | Description | Remarks |
|
||||||
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| `td.connect.type` | string | connection type: "jni" means native connection, "ws" means websocket connection, the default is "jni" |
|
||||||
| `bootstrap.servers` | string |Connection address, such as `localhost:6030` |
|
| `bootstrap.servers` | string |Connection address, such as `localhost:6030` |
|
||||||
| `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type |
|
| `value.deserializer` | string | Value deserializer; to use this method, implement the `com.taosdata.jdbc.tmq.Deserializer` interface or inherit the `com.taosdata.jdbc.tmq.ReferenceDeserializer` type |
|
||||||
| `value.deserializer.encoding` | string | Specify the encoding for string deserialization | |
|
| `value.deserializer.encoding` | string | Specify the encoding for string deserialization | |
|
||||||
|
@ -399,22 +400,6 @@ from taos.tmq import Consumer
|
||||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||||
```
|
```
|
||||||
|
|
||||||
Python programs use the following parameters:
|
|
||||||
|
|
||||||
| Parameter | Type | Description | Remarks |
|
|
||||||
|:---------:|:----:|:-----------:|:-------:|
|
|
||||||
| `td.connect.ip` | string | Used in establishing a connection||
|
|
||||||
| `td.connect.user` | string | Used in establishing a connection||
|
|
||||||
| `td.connect.pass` | string | Used in establishing a connection||
|
|
||||||
| `td.connect.port` | string | Used in establishing a connection||
|
|
||||||
| `group.id` | string | Consumer group ID; consumers with the same ID are in the same group | **Required**. Maximum length: 192 |
|
|
||||||
| `client.id` | string | Client ID | Maximum length: 192 |
|
|
||||||
| `msg.with.table.name` | string | Specify whether to deserialize table names from messages | pecify `true` or `false` |
|
|
||||||
| `enable.auto.commit` | string | Commit automatically | pecify `true` or `false` |
|
|
||||||
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
|
|
||||||
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
|
|
||||||
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
|
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
<TabItem label="Node.JS" value="Node.JS">
|
<TabItem label="Node.JS" value="Node.JS">
|
||||||
|
|
|
@ -377,7 +377,7 @@ The `pybitand` function implements bitwise addition for multiple columns. If the
|
||||||
|
|
||||||
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
|
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
|
||||||
|
|
||||||
The `pyl2norm` function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
|
The `pyl2norm` function finds the second-order norm for all data in the input columns. This squares the values, takes a cumulative sum, and finds the square root.
|
||||||
<details>
|
<details>
|
||||||
<summary>pyl2norm.py</summary>
|
<summary>pyl2norm.py</summary>
|
||||||
|
|
||||||
|
@ -387,5 +387,16 @@ The `pyl2norm` function finds the second-order norm for all data in the input co
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
|
#### Aggregate Function [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
|
||||||
|
|
||||||
|
The `pycumsum` function finds the cumulative sum for all data in the input columns.
|
||||||
|
<details>
|
||||||
|
<summary>pycumsum.py</summary>
|
||||||
|
|
||||||
|
```c
|
||||||
|
{{#include tests/script/sh/pycumsum.py}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
## Manage and Use UDF
|
## Manage and Use UDF
|
||||||
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
|
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
|
||||||
|
|
|
@ -42,7 +42,6 @@ In TDengine, the data types below can be used when specifying a column or tag.
|
||||||
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
|
||||||
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
|
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
|
||||||
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
| 16 | VARCHAR | User-defined | Alias of BINARY |
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
|
||||||
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
|
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
|
||||||
|
|
|
@ -867,10 +867,16 @@ FIRST(expr)
|
||||||
### INTERP
|
### INTERP
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INTERP(expr)
|
INTERP(expr [, ignore_null_values])
|
||||||
|
|
||||||
|
ignore_null_values: {
|
||||||
|
0
|
||||||
|
| 1
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
|
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned. The value of `ignore_null_values` can be 0 or 1, 1 means null values are ignored. The default value of this parameter is 0.
|
||||||
|
|
||||||
|
|
||||||
**Return value type**: Same as the column being operated upon
|
**Return value type**: Same as the column being operated upon
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,7 @@ CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_typ
|
||||||
```
|
```
|
||||||
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
- OR REPLACE: if the UDF exists, the UDF properties are modified
|
||||||
- function_name: The scalar function name to be used in the SQL statement
|
- function_name: The scalar function name to be used in the SQL statement
|
||||||
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python is supported. If this clause is omitted, C is assumed as the programming language.
|
- LANGUAGE 'C|Python': the programming language of UDF. Now C or Python (v3.7+) is supported. If this clause is omitted, C is assumed as the programming language.
|
||||||
- library_path: For C programming language, The absolute path of the DLL file including the name of the shared object file (.so). For Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
- library_path: For C programming language, The absolute path of the DLL file including the name of the shared object file (.so). For Python programming language, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
|
||||||
- output_type: The data type of the results of the UDF.
|
- output_type: The data type of the results of the UDF.
|
||||||
|
|
||||||
|
|
|
@ -36,7 +36,7 @@ You can use below command to setup Grafana alert notification.
|
||||||
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||||
```
|
```
|
||||||
|
|
||||||
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
|
||||||
|
@ -274,7 +274,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -288,7 +288,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -302,7 +302,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|
|
||||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
### taosadapter\_restful\_http\_request\_summary\_milliseconds table
|
||||||
|
@ -330,7 +330,7 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||metric value|
|
|gauge|DOUBLE||metric value|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|
|
||||||
### taosadapter\_system\_cpu\_percent table
|
### taosadapter\_system\_cpu\_percent table
|
||||||
|
@ -340,6 +340,6 @@ The data of tdinsight dashboard is stored in `log` database (default. You can ch
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||mertic value|
|
|gauge|DOUBLE||mertic value|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|
|
||||||
|
|
|
@ -959,6 +959,7 @@ The preceding example uses the SQL statement `select ts, speed from speed_table`
|
||||||
|
|
||||||
```java
|
```java
|
||||||
Properties config = new Properties();
|
Properties config = new Properties();
|
||||||
|
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||||
config.setProperty("enable.auto.commit", "true");
|
config.setProperty("enable.auto.commit", "true");
|
||||||
config.setProperty("group.id", "group1");
|
config.setProperty("group.id", "group1");
|
||||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
|
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
|
||||||
|
@ -966,12 +967,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
|
||||||
TaosConsumer consumer = new TaosConsumer<>(config);
|
TaosConsumer consumer = new TaosConsumer<>(config);
|
||||||
```
|
```
|
||||||
|
|
||||||
|
- bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if WebSocket connection is used.
|
||||||
- enable.auto.commit: Specifies whether to commit automatically.
|
- enable.auto.commit: Specifies whether to commit automatically.
|
||||||
- group.id: consumer: Specifies the group that the consumer is in.
|
- group.id: consumer: Specifies the group that the consumer is in.
|
||||||
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
|
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
|
||||||
- td.connect.type: Specifies the type connect with TDengine, `jni` or `WebSocket`. default is `jni`
|
- td.connect.type: Specifies the type connect with TDengine, `jni` or `WebSocket`. default is `jni`
|
||||||
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
|
||||||
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
|
||||||
|
- httpPoolSize: Maximum number of concurrent requests on the a connection。It only takes effect when using WebSocket type.
|
||||||
- For more information, see [Consumer Parameters](../../../develop/tmq).
|
- For more information, see [Consumer Parameters](../../../develop/tmq).
|
||||||
|
|
||||||
#### Subscribe to consume data
|
#### Subscribe to consume data
|
||||||
|
@ -1015,10 +1018,20 @@ public abstract class ConsumerLoop {
|
||||||
|
|
||||||
public ConsumerLoop() throws SQLException {
|
public ConsumerLoop() throws SQLException {
|
||||||
Properties config = new Properties();
|
Properties config = new Properties();
|
||||||
|
config.setProperty("td.connect.type", "jni");
|
||||||
|
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||||
|
config.setProperty("td.connect.user", "root");
|
||||||
|
config.setProperty("td.connect.pass", "taosdata");
|
||||||
|
config.setProperty("auto.offset.reset", "earliest");
|
||||||
config.setProperty("msg.with.table.name", "true");
|
config.setProperty("msg.with.table.name", "true");
|
||||||
config.setProperty("enable.auto.commit", "true");
|
config.setProperty("enable.auto.commit", "true");
|
||||||
|
config.setProperty("auto.commit.interval.ms", "1000");
|
||||||
config.setProperty("group.id", "group1");
|
config.setProperty("group.id", "group1");
|
||||||
|
config.setProperty("client.id", "1");
|
||||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||||
|
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||||
|
config.setProperty("experimental.snapshot.enable", "true");
|
||||||
|
|
||||||
|
|
||||||
this.consumer = new TaosConsumer<>(config);
|
this.consumer = new TaosConsumer<>(config);
|
||||||
this.topics = Collections.singletonList("topic_speed");
|
this.topics = Collections.singletonList("topic_speed");
|
||||||
|
@ -1090,12 +1103,19 @@ public abstract class ConsumerLoop {
|
||||||
|
|
||||||
public ConsumerLoop() throws SQLException {
|
public ConsumerLoop() throws SQLException {
|
||||||
Properties config = new Properties();
|
Properties config = new Properties();
|
||||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
|
||||||
config.setProperty("td.connect.type", "ws");
|
config.setProperty("td.connect.type", "ws");
|
||||||
|
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||||
|
config.setProperty("td.connect.user", "root");
|
||||||
|
config.setProperty("td.connect.pass", "taosdata");
|
||||||
|
config.setProperty("auto.offset.reset", "earliest");
|
||||||
config.setProperty("msg.with.table.name", "true");
|
config.setProperty("msg.with.table.name", "true");
|
||||||
config.setProperty("enable.auto.commit", "true");
|
config.setProperty("enable.auto.commit", "true");
|
||||||
|
config.setProperty("auto.commit.interval.ms", "1000");
|
||||||
config.setProperty("group.id", "group2");
|
config.setProperty("group.id", "group2");
|
||||||
|
config.setProperty("client.id", "1");
|
||||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||||
|
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||||
|
config.setProperty("experimental.snapshot.enable", "true");
|
||||||
|
|
||||||
this.consumer = new TaosConsumer<>(config);
|
this.consumer = new TaosConsumer<>(config);
|
||||||
this.topics = Collections.singletonList("topic_speed");
|
this.topics = Collections.singletonList("topic_speed");
|
||||||
|
|
|
@ -62,7 +62,7 @@ The different database framework specifications for various programming language
|
||||||
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
| **Regular Query** | Support | Support | Support | Support | Support | Support |
|
||||||
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
|
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
|
||||||
| **Subscription (TMQ)** | Supported | Support | Support | Not Supported | Not Supported | Support |
|
| **Subscription (TMQ)** | Supported | Support | Support | Not Supported | Not Supported | Support |
|
||||||
| **Schemaless** | Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||||
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
|
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
|
||||||
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
|
||||||
|
|
||||||
|
|
|
@ -149,7 +149,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
|
||||||
Use the `uid` value obtained above as `-E` input.
|
Use the `uid` value obtained above as `-E` input.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
|
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
|
||||||
|
|
|
@ -45,19 +45,19 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
|
|
||||||
### firstEp
|
### firstEp
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------------------------- |
|
| ---------- | ---------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
|
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
|
||||||
| Default | localhost:6030 |
|
| Default | localhost:6030 |
|
||||||
|
|
||||||
### secondEp
|
### secondEp
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------- |
|
| ---------- | ---------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
|
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
|
||||||
| Default | None |
|
| Default | None |
|
||||||
|
|
||||||
### fqdn
|
### fqdn
|
||||||
|
|
||||||
|
@ -65,28 +65,28 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
| ------------- | ------------------------------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | The FQDN of the host where `taosd` will be started. It can be IP address |
|
| Meaning | The FQDN of the host where `taosd` will be started. It can be IP address |
|
||||||
| Default Value | The first hostname configured for the host |
|
| Default Value | The first hostname configured for the host |
|
||||||
| Note | It should be within 96 bytes | |
|
| Note | It should be within 96 bytes | |
|
||||||
|
|
||||||
### serverPort
|
### serverPort
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------------------------------------------------------------------------------------------------- |
|
| ------------- | ----------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | The port for external access after `taosd` is started |
|
| Meaning | The port for external access after `taosd` is started |
|
||||||
| Default Value | 6030 |
|
| Default Value | 6030 |
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
|
- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
|
||||||
:::
|
:::
|
||||||
| Protocol | Default Port | Description | How to configure |
|
| Protocol | Default Port | Description | How to configure |
|
||||||
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
|
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
|
||||||
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
|
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
|
||||||
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
|
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
|
||||||
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
|
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
|
||||||
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters.
|
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
|
||||||
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters.
|
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. |
|
||||||
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
|
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
|
||||||
|
|
||||||
### maxShellConns
|
### maxShellConns
|
||||||
|
|
||||||
|
@ -97,6 +97,24 @@ The parameters described in this document by the effect that they have on the sy
|
||||||
| Value Range | 10-50000000 |
|
| Value Range | 10-50000000 |
|
||||||
| Default Value | 5000 |
|
| Default Value | 5000 |
|
||||||
|
|
||||||
|
### numOfRpcSessions
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ------------------------------------------ |
|
||||||
|
| Applicable | Client/Server |
|
||||||
|
| Meaning | The maximum number of connection to create |
|
||||||
|
| Value Range | 100-100000 |
|
||||||
|
| Default Value | 10000 |
|
||||||
|
|
||||||
|
### timeToGetAvailableConn
|
||||||
|
|
||||||
|
| Attribute | Description |
|
||||||
|
| ------------- | ---------------------------------------------- |
|
||||||
|
| Applicable | Client/Server |
|
||||||
|
| Meaning | The maximum waiting time to get an available connection |
|
||||||
|
| Value Range | 10-50000000(ms) |
|
||||||
|
| Default Value | 500000 |
|
||||||
|
|
||||||
## Monitoring Parameters
|
## Monitoring Parameters
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
@ -104,114 +122,114 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
|
||||||
|
|
||||||
### monitor
|
### monitor
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorProt`.
|
| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorProt`. |
|
||||||
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
|
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
|
||||||
| Default | 0 |
|
| Default | 0 |
|
||||||
|
|
||||||
### monitorFqdn
|
### monitorFqdn
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------- |
|
| ---------- | ------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | FQDN of taosKeeper monitoring service |
|
| Meaning | FQDN of taosKeeper monitoring service |
|
||||||
| Default | None |
|
| Default | None |
|
||||||
|
|
||||||
### monitorPort
|
### monitorPort
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | --------------------------- |
|
| ------------- | ------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Port of taosKeeper monitoring service |
|
| Meaning | Port of taosKeeper monitoring service |
|
||||||
| Default Value | 6043 |
|
| Default Value | 6043 |
|
||||||
|
|
||||||
### monitorInterval
|
### monitorInterval
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------- |
|
| ------------- | ------------------------------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | The interval of collecting system workload |
|
| Meaning | The interval of collecting system workload |
|
||||||
| Unit | second |
|
| Unit | second |
|
||||||
| Value Range | 1-200000 |
|
| Value Range | 1-200000 |
|
||||||
| Default Value | 30 |
|
| Default Value | 30 |
|
||||||
|
|
||||||
### telemetryReporting
|
### telemetryReporting
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ---------------------------------------- |
|
| ------------- | ---------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Switch for allowing TDengine to collect and report service usage information |
|
| Meaning | Switch for allowing TDengine to collect and report service usage information |
|
||||||
| Value Range | 0: Not allowed; 1: Allowed |
|
| Value Range | 0: Not allowed; 1: Allowed |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
### crashReporting
|
### crashReporting
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------- |
|
| ------------- | ---------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning |Switch for allowing TDengine to collect and report crash related information |
|
| Meaning | Switch for allowing TDengine to collect and report crash related information |
|
||||||
| Value Range | 0,1 0: Not allowed; 1: allowed |
|
| Value Range | 0,1 0: Not allowed; 1: allowed |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
|
||||||
|
|
||||||
## Query Parameters
|
## Query Parameters
|
||||||
|
|
||||||
### queryPolicy
|
### queryPolicy
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Execution policy for query statements |
|
| Meaning | Execution policy for query statements |
|
||||||
| Unit | None |
|
| Unit | None |
|
||||||
| Default | 1 |
|
| Default | 1 |
|
||||||
| Value Range | 1: Run queries on vnodes and not on qnodes; 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes; 3: Only run scan operators on vnodes, and run all other operators on qnodes. |
|
| Value Range | 1: Run queries on vnodes and not on qnodes; 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes; 3: Only run scan operators on vnodes, and run all other operators on qnodes. |
|
||||||
|
|
||||||
### querySmaOptimize
|
### querySmaOptimize
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | SMA index optimization policy |
|
| Meaning | SMA index optimization policy |
|
||||||
| Unit | None |
|
| Unit | None |
|
||||||
| Default Value | 0 |
|
| Default Value | 0 |
|
||||||
| Notes |0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
|
| Notes | 0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results. |
|
||||||
|
|
||||||
### countAlwaysReturnValue
|
### countAlwaysReturnValue
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------- |
|
| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
|
| Meaning | count()/hyperloglog() return value or not if the input data is empty or NULL |
|
||||||
| Value Range | 0: Return empty line, 1: Return 0 |
|
| Value Range | 0: Return empty line, 1: Return 0 |
|
||||||
| Default | 1 |
|
| Default | 1 |
|
||||||
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clauses, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
|
| Notes | When this parameter is set to 1, for queries containing GROUP BY, PARTITION BY and INTERVAL clauses, if the input data in certain groups or windows is empty or NULL, the corresponding groups or windows have no return values |
|
||||||
|
|
||||||
### maxNumOfDistinctRes
|
### maxNumOfDistinctRes
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------- |
|
| ------------- | -------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | The maximum number of distinct rows returned |
|
| Meaning | The maximum number of distinct rows returned |
|
||||||
| Value Range | [100,000 - 100,000,000] |
|
| Value Range | [100,000 - 100,000,000] |
|
||||||
| Default Value | 100,000 |
|
| Default Value | 100,000 |
|
||||||
|
|
||||||
### keepColumnName
|
### keepColumnName
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------- |
|
| ------------- | --------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
|
| Meaning | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
|
||||||
| Value Range | 0 means including the function name, 1 means not including the function name. |
|
| Value Range | 0 means including the function name, 1 means not including the function name. |
|
||||||
| Default Value | 0 |
|
| Default Value | 0 |
|
||||||
|
|
||||||
## Locale Parameters
|
## Locale Parameters
|
||||||
|
|
||||||
### timezone
|
### timezone
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------ |
|
| ------------- | ------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | TimeZone |
|
| Meaning | TimeZone |
|
||||||
| Default Value | TimeZone configured in the host |
|
| Default Value | TimeZone configured in the host |
|
||||||
|
|
||||||
|
@ -314,49 +332,49 @@ The charset that takes effect is UTF-8.
|
||||||
|
|
||||||
### dataDir
|
### dataDir
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------ |
|
| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | All data files are stored in this directory |
|
| Meaning | All data files are stored in this directory |
|
||||||
| Default Value | /var/lib/taos |
|
| Default Value | /var/lib/taos |
|
||||||
| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter |
|
| Note | The [Tiered Storage](https://docs.tdengine.com/tdinternal/arch/#tiered-storage) function needs to be used in conjunction with the [KEEP](https://docs.tdengine.com/taos-sql/database/#parameters) parameter |
|
||||||
|
|
||||||
### tempDir
|
### tempDir
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------ |
|
| ---------- | ---------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | The directory where to put all the temporary files generated during system running |
|
| Meaning | The directory where to put all the temporary files generated during system running |
|
||||||
| Default | /tmp |
|
| Default | /tmp |
|
||||||
|
|
||||||
### minimalTmpDirGB
|
### minimalTmpDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------ |
|
| ------------- | ----------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
|
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
| Default Value | 1.0 |
|
| Default Value | 1.0 |
|
||||||
|
|
||||||
### minimalDataDirGB
|
### minimalDataDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
|
| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
| Default Value | 2.0 |
|
| Default Value | 2.0 |
|
||||||
|
|
||||||
## Cluster Parameters
|
## Cluster Parameters
|
||||||
|
|
||||||
### supportVnodes
|
### supportVnodes
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | --------------------------- |
|
| ------------- | ---------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Maximum number of vnodes per dnode |
|
| Meaning | Maximum number of vnodes per dnode |
|
||||||
| Value Range | 0-4096 |
|
| Value Range | 0-4096 |
|
||||||
| Default Value | 2x the CPU cores |
|
| Default Value | 2x the CPU cores |
|
||||||
|
|
||||||
## Performance Tuning
|
## Performance Tuning
|
||||||
|
|
||||||
|
@ -373,345 +391,345 @@ The charset that takes effect is UTF-8.
|
||||||
|
|
||||||
### logDir
|
### logDir
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------------- |
|
| ------------- | ----------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The directory for writing log files |
|
| Meaning | The directory for writing log files |
|
||||||
| Default Value | /var/log/taos |
|
| Default Value | /var/log/taos |
|
||||||
|
|
||||||
### minimalLogDirGB
|
### minimalLogDirGB
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------------------------------- |
|
| ------------- | -------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
|
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
|
||||||
| Unit | GB |
|
| Unit | GB |
|
||||||
| Default Value | 1.0 |
|
| Default Value | 1.0 |
|
||||||
|
|
||||||
### numOfLogLines
|
### numOfLogLines
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ---------------------------- |
|
| ------------- | ------------------------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Maximum number of lines in single log file |
|
| Meaning | Maximum number of lines in single log file |
|
||||||
| Default Value | 10000000 |
|
| Default Value | 10000000 |
|
||||||
|
|
||||||
### asyncLog
|
### asyncLog
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ---------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The mode of writing log file |
|
| Meaning | The mode of writing log file |
|
||||||
| Value Range | 0: sync way; 1: async way |
|
| Value Range | 0: sync way; 1: async way |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
|
||||||
### logKeepDays
|
### logKeepDays
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------------------------------------------------------------- |
|
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | The number of days for log files to be kept |
|
| Meaning | The number of days for log files to be kept |
|
||||||
| Unit | day |
|
| Unit | day |
|
||||||
| Default Value | 0 |
|
| Default Value | 0 |
|
||||||
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time |
|
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time |
|
||||||
|
|
||||||
### debugFlag
|
### debugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------- |
|
| ------------- | --------------------------------------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level |
|
| Meaning | Log level |
|
||||||
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
|
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
|
||||||
| Default Value | 131 or 135, depending on the module |
|
| Default Value | 131 or 135, depending on the module |
|
||||||
|
|
||||||
### tmrDebugFlag
|
### tmrDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of timer module |
|
| Meaning | Log level of timer module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### uDebugFlag
|
### uDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ---------------------- |
|
| ------------- | -------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of common module |
|
| Meaning | Log level of common module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### rpcDebugFlag
|
### rpcDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of rpc module |
|
| Meaning | Log level of rpc module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### jniDebugFlag
|
### jniDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Client Only |
|
| Applicable | Client Only |
|
||||||
| Meaning | Log level of jni module |
|
| Meaning | Log level of jni module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### qDebugFlag
|
### qDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of query module |
|
| Meaning | Log level of query module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### cDebugFlag
|
### cDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | --------------------- |
|
| ------------- | ------------------- |
|
||||||
| Applicable | Client Only |
|
| Applicable | Client Only |
|
||||||
| Meaning | Log level of Client |
|
| Meaning | Log level of Client |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### dDebugFlag
|
### dDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of dnode |
|
| Meaning | Log level of dnode |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | 135 |
|
| Default Value | 135 |
|
||||||
|
|
||||||
### vDebugFlag
|
### vDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of vnode |
|
| Meaning | Log level of vnode |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### mDebugFlag
|
### mDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of mnode module |
|
| Meaning | Log level of mnode module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | 135 |
|
| Default Value | 135 |
|
||||||
|
|
||||||
### wDebugFlag
|
### wDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of WAL module |
|
| Meaning | Log level of WAL module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | 135 |
|
| Default Value | 135 |
|
||||||
|
|
||||||
### sDebugFlag
|
### sDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Log level of sync module |
|
| Meaning | Log level of sync module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | 135 |
|
| Default Value | 135 |
|
||||||
|
|
||||||
### tsdbDebugFlag
|
### tsdbDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------- |
|
| ------------- | ------------------------ |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of TSDB module |
|
| Meaning | Log level of TSDB module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### tqDebugFlag
|
### tqDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------- |
|
| ------------- | ---------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | Log level of TQ module |
|
| Meaning | Log level of TQ module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### fsDebugFlag
|
### fsDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------- |
|
| ------------- | ---------------------- |
|
||||||
| Applicable | Server only |
|
| Applicable | Server only |
|
||||||
| Meaning | Log level of FS module |
|
| Meaning | Log level of FS module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### udfDebugFlag
|
### udfDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of UDF module |
|
| Meaning | Log level of UDF module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### smaDebugFlag
|
### smaDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of SMA module |
|
| Meaning | Log level of SMA module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### idxDebugFlag
|
### idxDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | -------------------- |
|
| ------------- | ------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of index module |
|
| Meaning | Log level of index module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
### tdbDebugFlag
|
### tdbDebugFlag
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ----------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Log level of TDB module |
|
| Meaning | Log level of TDB module |
|
||||||
| Value Range | same as debugFlag |
|
| Value Range | same as debugFlag |
|
||||||
| Default Value | |
|
| Default Value | |
|
||||||
|
|
||||||
## Schemaless Parameters
|
## Schemaless Parameters
|
||||||
|
|
||||||
### smlChildTableName
|
### smlChildTableName
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------- |
|
| ------------- | ------------------------------------------ |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Custom subtable name for schemaless writes |
|
| Meaning | Custom subtable name for schemaless writes |
|
||||||
| Type | String |
|
| Type | String |
|
||||||
| Default Value | None |
|
| Default Value | None |
|
||||||
|
|
||||||
### smlTagName
|
### smlTagName
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------ |
|
| ------------- | ------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Default tag for schemaless writes without tag value specified |
|
| Meaning | Default tag for schemaless writes without tag value specified |
|
||||||
| Type | String |
|
| Type | String |
|
||||||
| Default Value | _tag_null |
|
| Default Value | _tag_null |
|
||||||
|
|
||||||
### smlDataFormat
|
### smlDataFormat
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| ----------- | ----------------------------------------------------------------------------------- |
|
||||||
| Applicable | Client only |
|
| Applicable | Client only |
|
||||||
| Meaning | Whether schemaless columns are consistently ordered, depat, discarded since 3.0.3.0|
|
| Meaning | Whether schemaless columns are consistently ordered, depat, discarded since 3.0.3.0 |
|
||||||
| Value Range | 0: not consistent; 1: consistent. |
|
| Value Range | 0: not consistent; 1: consistent. |
|
||||||
| Default | 0 |
|
| Default | 0 |
|
||||||
|
|
||||||
## Compress Parameters
|
## Compress Parameters
|
||||||
|
|
||||||
### compressMsgSize
|
### compressMsgSize
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ----------------------------- |
|
| ----------- | ------------------------------------------------------------------------------------------------------------------ |
|
||||||
| Applicable | Both Client and Server side |
|
| Applicable | Both Client and Server side |
|
||||||
| Meaning | Whether RPC message is compressed |
|
| Meaning | Whether RPC message is compressed |
|
||||||
| Value Range | -1: none message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
| Value Range | -1: none message is compressed; 0: all messages are compressed; N (N>0): messages exceeding N bytes are compressed |
|
||||||
| Default | -1 |
|
| Default | -1 |
|
||||||
|
|
||||||
|
|
||||||
## Other Parameters
|
## Other Parameters
|
||||||
|
|
||||||
### enableCoreFile
|
### enableCoreFile
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||||
| Applicable | Server and Client |
|
| Applicable | Server and Client |
|
||||||
| Meaning | Whether to generate core file when server crashes |
|
| Meaning | Whether to generate core file when server crashes |
|
||||||
| Value Range | 0: false, 1: true |
|
| Value Range | 0: false, 1: true |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
| Note | The core file is generated under root directory `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start, or under the working directory if `taosd` is started directly on Linux/macOS Shell. |
|
| Note | The core file is generated under root directory `systemctl start taosd`/`launchctl start com.tdengine.taosd` is used to start, or under the working directory if `taosd` is started directly on Linux/macOS Shell. |
|
||||||
|
|
||||||
### enableScience
|
### enableScience
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
| ------------- | ------------------------------------------------------------- |
|
||||||
| Applicable | Only taos-CLI client |
|
| Applicable | Only taos-CLI client |
|
||||||
| Meaning | Whether to show float and double with the scientific notation |
|
| Meaning | Whether to show float and double with the scientific notation |
|
||||||
| Value Range | 0: false, 1: true |
|
| Value Range | 0: false, 1: true |
|
||||||
| Default Value | 0 |
|
| Default Value | 0 |
|
||||||
|
|
||||||
|
|
||||||
### udf
|
### udf
|
||||||
|
|
||||||
| Attribute | Description |
|
| Attribute | Description |
|
||||||
| -------- | ------------------ |
|
| ------------- | ---------------------------------- |
|
||||||
| Applicable | Server Only |
|
| Applicable | Server Only |
|
||||||
| Meaning | Whether the UDF service is enabled |
|
| Meaning | Whether the UDF service is enabled |
|
||||||
| Value Range | 0: disable UDF; 1: enabled UDF |
|
| Value Range | 0: disable UDF; 1: enabled UDF |
|
||||||
| Default Value | 1 |
|
| Default Value | 1 |
|
||||||
|
|
||||||
|
|
||||||
## 3.0 Parameters
|
## 3.0 Parameters
|
||||||
|
|
||||||
| # | **Parameter** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
|
| # | **Parameter** | **Applicable to 2.x ** | **Applicable to 3.0 ** | Current behavior in 3.0 |
|
||||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
| --- | :--------------------: | ---------------------- | ---------------------------- | ----------------------- |
|
||||||
| 1 | firstEp | Yes | Yes | |
|
| 1 | firstEp | Yes | Yes | |
|
||||||
| 2 | secondEp | Yes | Yes | |
|
| 2 | secondEp | Yes | Yes | |
|
||||||
| 3 | fqdn | Yes | Yes | |
|
| 3 | fqdn | Yes | Yes | |
|
||||||
| 4 | serverPort | Yes | Yes | |
|
| 4 | serverPort | Yes | Yes | |
|
||||||
| 5 | maxShellConns | Yes | Yes | |
|
| 5 | maxShellConns | Yes | Yes | |
|
||||||
| 6 | monitor | Yes | Yes | |
|
| 6 | monitor | Yes | Yes | |
|
||||||
| 7 | monitorFqdn | No | Yes | |
|
| 7 | monitorFqdn | No | Yes | |
|
||||||
| 8 | monitorPort | No | Yes | |
|
| 8 | monitorPort | No | Yes | |
|
||||||
| 9 | monitorInterval | Yes | Yes | |
|
| 9 | monitorInterval | Yes | Yes | |
|
||||||
| 10 | queryPolicy | No | Yes | |
|
| 10 | queryPolicy | No | Yes | |
|
||||||
| 11 | querySmaOptimize | No | Yes | |
|
| 11 | querySmaOptimize | No | Yes | |
|
||||||
| 12 | maxNumOfDistinctRes | Yes | Yes | |
|
| 12 | maxNumOfDistinctRes | Yes | Yes | |
|
||||||
| 15 | countAlwaysReturnValue | Yes | Yes | |
|
| 15 | countAlwaysReturnValue | Yes | Yes | |
|
||||||
| 16 | dataDir | Yes | Yes | |
|
| 16 | dataDir | Yes | Yes | |
|
||||||
| 17 | minimalDataDirGB | Yes | Yes | |
|
| 17 | minimalDataDirGB | Yes | Yes | |
|
||||||
| 18 | supportVnodes | No | Yes | |
|
| 18 | supportVnodes | No | Yes | |
|
||||||
| 19 | tempDir | Yes | Yes | |
|
| 19 | tempDir | Yes | Yes | |
|
||||||
| 20 | minimalTmpDirGB | Yes | Yes | |
|
| 20 | minimalTmpDirGB | Yes | Yes | |
|
||||||
| 21 | smlChildTableName | Yes | Yes | |
|
| 21 | smlChildTableName | Yes | Yes | |
|
||||||
| 22 | smlTagName | Yes | Yes | |
|
| 22 | smlTagName | Yes | Yes | |
|
||||||
| 23 | smlDataFormat | No | Yes(discarded since 3.0.3.0) | |
|
| 23 | smlDataFormat | No | Yes(discarded since 3.0.3.0) | |
|
||||||
| 24 | statusInterval | Yes | Yes | |
|
| 24 | statusInterval | Yes | Yes | |
|
||||||
| 25 | logDir | Yes | Yes | |
|
| 25 | logDir | Yes | Yes | |
|
||||||
| 26 | minimalLogDirGB | Yes | Yes | |
|
| 26 | minimalLogDirGB | Yes | Yes | |
|
||||||
| 27 | numOfLogLines | Yes | Yes | |
|
| 27 | numOfLogLines | Yes | Yes | |
|
||||||
| 28 | asyncLog | Yes | Yes | |
|
| 28 | asyncLog | Yes | Yes | |
|
||||||
| 29 | logKeepDays | Yes | Yes | |
|
| 29 | logKeepDays | Yes | Yes | |
|
||||||
| 30 | debugFlag | Yes | Yes | |
|
| 30 | debugFlag | Yes | Yes | |
|
||||||
| 31 | tmrDebugFlag | Yes | Yes | |
|
| 31 | tmrDebugFlag | Yes | Yes | |
|
||||||
| 32 | uDebugFlag | Yes | Yes | |
|
| 32 | uDebugFlag | Yes | Yes | |
|
||||||
| 33 | rpcDebugFlag | Yes | Yes | |
|
| 33 | rpcDebugFlag | Yes | Yes | |
|
||||||
| 34 | jniDebugFlag | Yes | Yes | |
|
| 34 | jniDebugFlag | Yes | Yes | |
|
||||||
| 35 | qDebugFlag | Yes | Yes | |
|
| 35 | qDebugFlag | Yes | Yes | |
|
||||||
| 36 | cDebugFlag | Yes | Yes | |
|
| 36 | cDebugFlag | Yes | Yes | |
|
||||||
| 37 | dDebugFlag | Yes | Yes | |
|
| 37 | dDebugFlag | Yes | Yes | |
|
||||||
| 38 | vDebugFlag | Yes | Yes | |
|
| 38 | vDebugFlag | Yes | Yes | |
|
||||||
| 39 | mDebugFlag | Yes | Yes | |
|
| 39 | mDebugFlag | Yes | Yes | |
|
||||||
| 40 | wDebugFlag | Yes | Yes | |
|
| 40 | wDebugFlag | Yes | Yes | |
|
||||||
| 41 | sDebugFlag | Yes | Yes | |
|
| 41 | sDebugFlag | Yes | Yes | |
|
||||||
| 42 | tsdbDebugFlag | Yes | Yes | |
|
| 42 | tsdbDebugFlag | Yes | Yes | |
|
||||||
| 43 | tqDebugFlag | No | Yes | |
|
| 43 | tqDebugFlag | No | Yes | |
|
||||||
| 44 | fsDebugFlag | Yes | Yes | |
|
| 44 | fsDebugFlag | Yes | Yes | |
|
||||||
| 45 | udfDebugFlag | No | Yes | |
|
| 45 | udfDebugFlag | No | Yes | |
|
||||||
| 46 | smaDebugFlag | No | Yes | |
|
| 46 | smaDebugFlag | No | Yes | |
|
||||||
| 47 | idxDebugFlag | No | Yes | |
|
| 47 | idxDebugFlag | No | Yes | |
|
||||||
| 48 | tdbDebugFlag | No | Yes | |
|
| 48 | tdbDebugFlag | No | Yes | |
|
||||||
| 49 | metaDebugFlag | No | Yes | |
|
| 49 | metaDebugFlag | No | Yes | |
|
||||||
| 50 | timezone | Yes | Yes | |
|
| 50 | timezone | Yes | Yes | |
|
||||||
| 51 | locale | Yes | Yes | |
|
| 51 | locale | Yes | Yes | |
|
||||||
| 52 | charset | Yes | Yes | |
|
| 52 | charset | Yes | Yes | |
|
||||||
| 53 | udf | Yes | Yes | |
|
| 53 | udf | Yes | Yes | |
|
||||||
| 54 | enableCoreFile | Yes | Yes | |
|
| 54 | enableCoreFile | Yes | Yes | |
|
||||||
|
|
|
@ -314,7 +314,6 @@ connection.backoff.ms=5000
|
||||||
topic.prefix=tdengine-source-
|
topic.prefix=tdengine-source-
|
||||||
poll.interval.ms=1000
|
poll.interval.ms=1000
|
||||||
fetch.max.rows=100
|
fetch.max.rows=100
|
||||||
out.format=line
|
|
||||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||||
```
|
```
|
||||||
|
@ -353,7 +352,7 @@ confluent local services connect connector load TDengineSourceConnector --config
|
||||||
|
|
||||||
### View topic data
|
### View topic data
|
||||||
|
|
||||||
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data.
|
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format.
|
||||||
|
|
||||||
````
|
````
|
||||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
||||||
|
@ -424,11 +423,13 @@ The following configuration items apply to TDengine Sink Connector and TDengine
|
||||||
### TDengine Source Connector specific configuration
|
### TDengine Source Connector specific configuration
|
||||||
|
|
||||||
1. `connection.database`: source database name, no default value.
|
1. `connection.database`: source database name, no default value.
|
||||||
2. `topic.prefix`: topic name prefix after data is imported into kafka. Use `topic.prefix` + `connection.database` name as the full topic name. Defaults to the empty string "".
|
2. `topic.prefix`: topic name prefix used when importing data into kafka. Its defaults value is empty string "".
|
||||||
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. Default "1970-01-01 00:00:00".
|
3. `timestamp.initial`: Data synchronization start time. The format is 'yyyy-MM-dd HH:mm:ss'. If it is not set, the data importing to Kafka will be started from the first/oldest row in the database.
|
||||||
4. `poll.interval.ms`: Pull data interval, the unit is ms. Default is 1000.
|
4. `poll.interval.ms`: The time interval for checking newly created tables or removed tables, default value is 1000.
|
||||||
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database. Default is 100.
|
5. `fetch.max.rows`: The maximum number of rows retrieved when retrieving the database, default is 100.
|
||||||
6. `out.format`: The data format. The value could be line or json. The line represents the InfluxDB Line protocol format, and json represents the OpenTSDB JSON format. Default is `line`.
|
6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000.
|
||||||
|
7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>-<stable.name>`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `<topic.prefix>-<connection.database>`.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Other notes
|
## Other notes
|
||||||
|
|
|
@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.0.4.2
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.4.2" />
|
||||||
|
|
||||||
## 3.0.4.1
|
## 3.0.4.1
|
||||||
|
|
||||||
<Release type="tdengine" version="3.0.4.1" />
|
<Release type="tdengine" version="3.0.4.1" />
|
||||||
|
|
|
@ -78,7 +78,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR: {
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||||
memcpy(str + len, row[i], charLen);
|
memcpy(str + len, row[i], charLen);
|
||||||
len += charLen;
|
len += charLen;
|
||||||
|
|
|
@ -76,7 +76,8 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR: {
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||||
memcpy(str + len, row[i], charLen);
|
memcpy(str + len, row[i], charLen);
|
||||||
len += charLen;
|
len += charLen;
|
||||||
|
|
|
@ -6,39 +6,32 @@ import java.sql.Connection;
|
||||||
import java.sql.DriverManager;
|
import java.sql.DriverManager;
|
||||||
import java.sql.SQLException;
|
import java.sql.SQLException;
|
||||||
import java.sql.Statement;
|
import java.sql.Statement;
|
||||||
|
import java.text.SimpleDateFormat;
|
||||||
import java.time.LocalDateTime;
|
import java.time.LocalDateTime;
|
||||||
import java.time.ZoneOffset;
|
import java.time.ZoneOffset;
|
||||||
import java.time.format.DateTimeFormatter;
|
import java.time.format.DateTimeFormatter;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Arrays;
|
import java.util.Comparator;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Random;
|
||||||
|
import java.util.stream.Collectors;
|
||||||
|
|
||||||
public class StmtInsertExample {
|
public class StmtInsertExample {
|
||||||
private static ArrayList<Long> tsToLongArray(String ts) {
|
private static String datePattern = "yyyy-MM-dd HH:mm:ss.SSS";
|
||||||
ArrayList<Long> result = new ArrayList<>();
|
private static DateTimeFormatter formatter = DateTimeFormatter.ofPattern(datePattern);
|
||||||
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");
|
|
||||||
LocalDateTime localDateTime = LocalDateTime.parse(ts, formatter);
|
|
||||||
result.add(localDateTime.toInstant(ZoneOffset.of("+8")).toEpochMilli());
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
private static <T> ArrayList<T> toArray(T v) {
|
private static List<String> getRawData(int size) {
|
||||||
ArrayList<T> result = new ArrayList<>();
|
SimpleDateFormat format = new SimpleDateFormat(datePattern);
|
||||||
result.add(v);
|
List<String> result = new ArrayList<>();
|
||||||
return result;
|
long current = System.currentTimeMillis();
|
||||||
}
|
Random random = new Random();
|
||||||
|
for (int i = 0; i < size; i++) {
|
||||||
private static List<String> getRawData() {
|
String time = format.format(current + i);
|
||||||
return Arrays.asList(
|
int id = random.nextInt(10);
|
||||||
"d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
|
result.add("d" + id + "," + time + ",10.30000,219,0.31000,California.SanFrancisco,2");
|
||||||
"d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
|
}
|
||||||
"d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
|
return result.stream()
|
||||||
"d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
|
.sorted(Comparator.comparing(s -> s.split(",")[0])).collect(Collectors.toList());
|
||||||
"d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
|
|
||||||
"d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
|
|
||||||
"d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
|
|
||||||
"d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private static Connection getConnection() throws SQLException {
|
private static Connection getConnection() throws SQLException {
|
||||||
|
@ -48,9 +41,9 @@ public class StmtInsertExample {
|
||||||
|
|
||||||
private static void createTable(Connection conn) throws SQLException {
|
private static void createTable(Connection conn) throws SQLException {
|
||||||
try (Statement stmt = conn.createStatement()) {
|
try (Statement stmt = conn.createStatement()) {
|
||||||
stmt.execute("CREATE DATABASE power KEEP 3650");
|
stmt.execute("CREATE DATABASE if not exists power KEEP 3650");
|
||||||
stmt.executeUpdate("USE power");
|
stmt.executeUpdate("use power");
|
||||||
stmt.execute("CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " +
|
stmt.execute("CREATE STABLE if not exists meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) " +
|
||||||
"TAGS (location BINARY(64), groupId INT)");
|
"TAGS (location BINARY(64), groupId INT)");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -58,21 +51,54 @@ public class StmtInsertExample {
|
||||||
private static void insertData() throws SQLException {
|
private static void insertData() throws SQLException {
|
||||||
try (Connection conn = getConnection()) {
|
try (Connection conn = getConnection()) {
|
||||||
createTable(conn);
|
createTable(conn);
|
||||||
String psql = "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)";
|
String psql = "INSERT INTO ? USING power.meters TAGS(?, ?) VALUES(?, ?, ?, ?)";
|
||||||
try (TSDBPreparedStatement pst = (TSDBPreparedStatement) conn.prepareStatement(psql)) {
|
try (TSDBPreparedStatement pst = (TSDBPreparedStatement) conn.prepareStatement(psql)) {
|
||||||
for (String line : getRawData()) {
|
String tableName = null;
|
||||||
|
ArrayList<Long> ts = new ArrayList<>();
|
||||||
|
ArrayList<Float> current = new ArrayList<>();
|
||||||
|
ArrayList<Integer> voltage = new ArrayList<>();
|
||||||
|
ArrayList<Float> phase = new ArrayList<>();
|
||||||
|
for (String line : getRawData(100000)) {
|
||||||
String[] ps = line.split(",");
|
String[] ps = line.split(",");
|
||||||
// bind table name and tags
|
if (tableName == null) {
|
||||||
pst.setTableName(ps[0]);
|
// bind table name and tags
|
||||||
pst.setTagString(0, ps[5]);
|
tableName = "power." + ps[0];
|
||||||
pst.setTagInt(1, Integer.valueOf(ps[6]));
|
pst.setTableName(ps[0]);
|
||||||
|
pst.setTagString(0, ps[5]);
|
||||||
|
pst.setTagInt(1, Integer.valueOf(ps[6]));
|
||||||
|
} else {
|
||||||
|
if (!tableName.equals(ps[0])) {
|
||||||
|
pst.setTimestamp(0, ts);
|
||||||
|
pst.setFloat(1, current);
|
||||||
|
pst.setInt(2, voltage);
|
||||||
|
pst.setFloat(3, phase);
|
||||||
|
pst.columnDataAddBatch();
|
||||||
|
pst.columnDataExecuteBatch();
|
||||||
|
|
||||||
|
// bind table name and tags
|
||||||
|
tableName = ps[0];
|
||||||
|
pst.setTableName(ps[0]);
|
||||||
|
pst.setTagString(0, ps[5]);
|
||||||
|
pst.setTagInt(1, Integer.valueOf(ps[6]));
|
||||||
|
ts.clear();
|
||||||
|
current.clear();
|
||||||
|
voltage.clear();
|
||||||
|
phase.clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
// bind values
|
// bind values
|
||||||
pst.setTimestamp(0, tsToLongArray(ps[1])); //ps[1] looks like: 2018-10-03 14:38:05.000
|
// ps[1] looks like: 2018-10-03 14:38:05.000
|
||||||
pst.setFloat(1, toArray(Float.valueOf(ps[2])));
|
LocalDateTime localDateTime = LocalDateTime.parse(ps[1], formatter);
|
||||||
pst.setInt(2, toArray(Integer.valueOf(ps[3])));
|
ts.add(localDateTime.toInstant(ZoneOffset.of("+8")).toEpochMilli());
|
||||||
pst.setFloat(3, toArray(Float.valueOf(ps[4])));
|
current.add(Float.valueOf(ps[2]));
|
||||||
pst.columnDataAddBatch();
|
voltage.add(Integer.valueOf(ps[3]));
|
||||||
|
phase.add(Float.valueOf(ps[4]));
|
||||||
}
|
}
|
||||||
|
pst.setTimestamp(0, ts);
|
||||||
|
pst.setFloat(1, current);
|
||||||
|
pst.setInt(2, voltage);
|
||||||
|
pst.setFloat(3, phase);
|
||||||
|
pst.columnDataAddBatch();
|
||||||
pst.columnDataExecuteBatch();
|
pst.columnDataExecuteBatch();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -53,20 +53,28 @@ public class SubscribeDemo {
|
||||||
|
|
||||||
// create consumer
|
// create consumer
|
||||||
Properties properties = new Properties();
|
Properties properties = new Properties();
|
||||||
|
properties.getProperty(TMQConstants.CONNECT_TYPE, "jni");
|
||||||
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6030");
|
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6030");
|
||||||
|
properties.setProperty(TMQConstants.CONNECT_USER, "root");
|
||||||
|
properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
|
||||||
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
|
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
|
||||||
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
||||||
properties.setProperty(TMQConstants.GROUP_ID, "test");
|
properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
|
||||||
|
properties.setProperty(TMQConstants.GROUP_ID, "test1");
|
||||||
|
properties.setProperty(TMQConstants.CLIENT_ID, "1");
|
||||||
|
properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
|
||||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
||||||
"com.taos.example.MetersDeserializer");
|
"com.taos.example.MetersDeserializer");
|
||||||
|
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
|
||||||
|
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
|
||||||
|
|
||||||
// poll data
|
// poll data
|
||||||
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
||||||
consumer.subscribe(Collections.singletonList(TOPIC));
|
consumer.subscribe(Collections.singletonList(TOPIC));
|
||||||
while (!shutdown.get()) {
|
while (!shutdown.get()) {
|
||||||
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
||||||
for (ConsumerRecord<Meters> recode : meters) {
|
for (ConsumerRecord<Meters> r : meters) {
|
||||||
Meters meter = recode.value();
|
Meters meter = r.value();
|
||||||
System.out.println(meter);
|
System.out.println(meter);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
package com.taos.example;
|
package com.taos.example;
|
||||||
|
|
||||||
|
import com.taosdata.jdbc.tmq.ConsumerRecord;
|
||||||
import com.taosdata.jdbc.tmq.ConsumerRecords;
|
import com.taosdata.jdbc.tmq.ConsumerRecords;
|
||||||
import com.taosdata.jdbc.tmq.TMQConstants;
|
import com.taosdata.jdbc.tmq.TMQConstants;
|
||||||
import com.taosdata.jdbc.tmq.TaosConsumer;
|
import com.taosdata.jdbc.tmq.TaosConsumer;
|
||||||
|
@ -54,18 +55,26 @@ public class WebsocketSubscribeDemo {
|
||||||
Properties properties = new Properties();
|
Properties properties = new Properties();
|
||||||
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6041");
|
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6041");
|
||||||
properties.setProperty(TMQConstants.CONNECT_TYPE, "ws");
|
properties.setProperty(TMQConstants.CONNECT_TYPE, "ws");
|
||||||
|
properties.setProperty(TMQConstants.CONNECT_USER, "root");
|
||||||
|
properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
|
||||||
|
properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
|
||||||
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
|
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
|
||||||
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
|
||||||
properties.setProperty(TMQConstants.GROUP_ID, "test");
|
properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
|
||||||
|
properties.setProperty(TMQConstants.GROUP_ID, "test2");
|
||||||
|
properties.setProperty(TMQConstants.CLIENT_ID, "1");
|
||||||
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
|
||||||
"com.taos.example.MetersDeserializer");
|
"com.taos.example.MetersDeserializer");
|
||||||
|
properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
|
||||||
|
properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
|
||||||
|
|
||||||
// poll data
|
// poll data
|
||||||
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
try (TaosConsumer<Meters> consumer = new TaosConsumer<>(properties)) {
|
||||||
consumer.subscribe(Collections.singletonList(TOPIC));
|
consumer.subscribe(Collections.singletonList(TOPIC));
|
||||||
while (!shutdown.get()) {
|
while (!shutdown.get()) {
|
||||||
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
|
||||||
for (Meters meter : meters) {
|
for (ConsumerRecord<Meters> r : meters) {
|
||||||
|
Meters meter = (Meters) r.value();
|
||||||
System.out.println(meter);
|
System.out.println(meter);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -92,7 +92,7 @@ TDengine 的主要功能如下:
|
||||||
|
|
||||||
## 典型适用场景
|
## 典型适用场景
|
||||||
|
|
||||||
作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
|
作为一个高性能、分布式、支持 SQL 的时序数据库(Time-series Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
|
||||||
|
|
||||||
### 数据源特点和需求
|
### 数据源特点和需求
|
||||||
|
|
||||||
|
|
|
@ -82,7 +82,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.taosdata.jdbc</groupId>
|
<groupId>com.taosdata.jdbc</groupId>
|
||||||
<artifactId>taos-jdbcdriver</artifactId>
|
<artifactId>taos-jdbcdriver</artifactId>
|
||||||
<version>3.0.0</version>
|
<version>3.2.1</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -285,10 +285,10 @@ CREATE TOPIC topic_name AS DATABASE db_name;
|
||||||
|
|
||||||
| 参数名称 | 类型 | 参数说明 | 备注 |
|
| 参数名称 | 类型 | 参数说明 | 备注 |
|
||||||
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
| :----------------------------: | :-----: | -------------------------------------------------------- | ------------------------------------------- |
|
||||||
| `td.connect.ip` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 |
|
| `td.connect.ip` | string | 服务端的 IP 地址 | |
|
||||||
| `td.connect.user` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 |
|
| `td.connect.user` | string | 用户名 | |
|
||||||
| `td.connect.pass` | string | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 |
|
| `td.connect.pass` | string | 密码 | |
|
||||||
| `td.connect.port` | integer | 用于创建连接,同 `taos_connect` | 仅用于建立原生连接 |
|
| `td.connect.port` | integer | 服务端的端口号 | |
|
||||||
| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 |
|
| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192。 |
|
||||||
| `client.id` | string | 客户端 ID | 最大长度:192。 |
|
| `client.id` | string | 客户端 ID | 最大长度:192。 |
|
||||||
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default;从头开始订阅; <br/>`latest`: 仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
|
| `auto.offset.reset` | enum | 消费组订阅的初始位置 | <br />`earliest`: default;从头开始订阅; <br/>`latest`: 仅从最新数据开始订阅; <br/>`none`: 没有提交的 offset 无法订阅 |
|
||||||
|
@ -321,10 +321,11 @@ tmq_conf_destroy(conf);
|
||||||
</TabItem>
|
</TabItem>
|
||||||
<TabItem value="java" label="Java">
|
<TabItem value="java" label="Java">
|
||||||
|
|
||||||
对于 Java 程序,使用如下配置项:
|
对于 Java 程序,还可以使用如下配置项:
|
||||||
|
|
||||||
| 参数名称 | 类型 | 参数说明 |
|
| 参数名称 | 类型 | 参数说明 |
|
||||||
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
| ----------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| `td.connect.type` | string | 连接类型,"jni" 指原生连接,"ws" 指 websocket 连接,默认值为 "jni" |
|
||||||
| `bootstrap.servers` | string | 连接地址,如 `localhost:6030` |
|
| `bootstrap.servers` | string | 连接地址,如 `localhost:6030` |
|
||||||
| `value.deserializer` | string | 值解析方法,使用此方法应实现 `com.taosdata.jdbc.tmq.Deserializer` 接口或继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer` 类 |
|
| `value.deserializer` | string | 值解析方法,使用此方法应实现 `com.taosdata.jdbc.tmq.Deserializer` 接口或继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer` 类 |
|
||||||
| `value.deserializer.encoding` | string | 指定字符串解析的字符集 | |
|
| `value.deserializer.encoding` | string | 指定字符串解析的字符集 | |
|
||||||
|
@ -401,21 +402,6 @@ from taos.tmq import Consumer
|
||||||
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
|
||||||
```
|
```
|
||||||
|
|
||||||
其中,`configs` 为 dict 类型,传递创建 Consumer 的参数。可以配置的参数有:
|
|
||||||
|
|
||||||
| 参数名称 | 类型 | 参数说明 | 备注 |
|
|
||||||
|:------:|:----:|:-------:|:---:|
|
|
||||||
| `td.connect.ip` | string | 用于创建连接||
|
|
||||||
| `td.connect.user` | string | 用于创建连接||
|
|
||||||
| `td.connect.pass` | string | 用于创建连接||
|
|
||||||
| `td.connect.port` | string | 用于创建连接||
|
|
||||||
| `group.id` | string | 消费组 ID,同一消费组共享消费进度 | **必填项**。最大长度:192 |
|
|
||||||
| `client.id` | string | 客户端 ID | 最大长度:192 |
|
|
||||||
| `msg.with.table.name` | string | 是否允许从消息中解析表名,不适用于列订阅 | 合法值:`true`, `false` |
|
|
||||||
| `enable.auto.commit` | string | 启用自动提交 | 合法值:`true`, `false` |
|
|
||||||
| `auto.commit.interval.ms` | string | 以毫秒为单位的自动提交时间间隔 | 默认值:5000 ms |
|
|
||||||
| `auto.offset.reset` | string | 消费组订阅的初始位置 | 可选:`earliest`(default), `latest`, `none` |
|
|
||||||
|
|
||||||
</TabItem>
|
</TabItem>
|
||||||
|
|
||||||
<TabItem label="Node.JS" value="Node.JS">
|
<TabItem label="Node.JS" value="Node.JS">
|
||||||
|
|
|
@ -335,7 +335,7 @@ def init()
|
||||||
def destroy()
|
def destroy()
|
||||||
```
|
```
|
||||||
|
|
||||||
其中 init 完成初始化工作。 destroy 完成清理工作。如果没有初始化工作,无需定义 init 函数。如果没有清理工作,无需定义 destroy 函数。
|
其中 init 完成初始化工作。 destroy 完成清理工作。
|
||||||
|
|
||||||
### Python 和 TDengine之间的数据类型映射
|
### Python 和 TDengine之间的数据类型映射
|
||||||
|
|
||||||
|
@ -386,6 +386,17 @@ pyl2norm 实现了输入列的所有数据的二阶范数,即对每个数据
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
|
### 聚合函数示例 [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
|
||||||
|
|
||||||
|
pycumsum 使用 numpy 计算输入列所有数据的累积和。
|
||||||
|
<details>
|
||||||
|
<summary>pycumsum.py</summary>
|
||||||
|
|
||||||
|
```c
|
||||||
|
{{#include tests/script/sh/pycumsum.py}}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
## 管理和使用 UDF
|
## 管理和使用 UDF
|
||||||
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../12-taos-sql/26-udf.md)
|
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../12-taos-sql/26-udf.md)
|
||||||
|
|
||||||
|
|
|
@ -962,6 +962,7 @@ statement.executeUpdate("create topic if not exists topic_speed as select ts, sp
|
||||||
|
|
||||||
```java
|
```java
|
||||||
Properties config = new Properties();
|
Properties config = new Properties();
|
||||||
|
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||||
config.setProperty("enable.auto.commit", "true");
|
config.setProperty("enable.auto.commit", "true");
|
||||||
config.setProperty("group.id", "group1");
|
config.setProperty("group.id", "group1");
|
||||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
|
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
|
||||||
|
@ -969,12 +970,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
|
||||||
TaosConsumer consumer = new TaosConsumer<>(config);
|
TaosConsumer consumer = new TaosConsumer<>(config);
|
||||||
```
|
```
|
||||||
|
|
||||||
|
- bootstrap.servers: TDengine 服务端所在的`ip:port`,如果使用 WebSocket 连接,则为 taosAdapter 所在的`ip:port`。
|
||||||
- enable.auto.commit: 是否允许自动提交。
|
- enable.auto.commit: 是否允许自动提交。
|
||||||
- group.id: consumer: 所在的 group。
|
- group.id: consumer: 所在的 group。
|
||||||
- value.deserializer: 结果集反序列化方法,可以继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer`,并指定结果集 bean,实现反序列化。也可以继承 `com.taosdata.jdbc.tmq.Deserializer`,根据 SQL 的 resultSet 自定义反序列化方式。
|
- value.deserializer: 结果集反序列化方法,可以继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer`,并指定结果集 bean,实现反序列化。也可以继承 `com.taosdata.jdbc.tmq.Deserializer`,根据 SQL 的 resultSet 自定义反序列化方式。
|
||||||
- td.connect.type: 连接方式。jni:表示使用动态库连接的方式,ws/WebSocket:表示使用 WebSocket 进行数据通信。默认为 jni 方式。
|
- td.connect.type: 连接方式。jni:表示使用动态库连接的方式,ws/WebSocket:表示使用 WebSocket 进行数据通信。默认为 jni 方式。
|
||||||
- httpConnectTimeout:创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
|
- httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
|
||||||
- messageWaitTimeout:数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
|
- messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
|
||||||
|
- httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。
|
||||||
其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)
|
其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)
|
||||||
|
|
||||||
#### 订阅消费数据
|
#### 订阅消费数据
|
||||||
|
@ -1016,10 +1019,19 @@ public abstract class ConsumerLoop {
|
||||||
|
|
||||||
public ConsumerLoop() throws SQLException {
|
public ConsumerLoop() throws SQLException {
|
||||||
Properties config = new Properties();
|
Properties config = new Properties();
|
||||||
|
config.setProperty("td.connect.type", "jni");
|
||||||
|
config.setProperty("bootstrap.servers", "localhost:6030");
|
||||||
|
config.setProperty("td.connect.user", "root");
|
||||||
|
config.setProperty("td.connect.pass", "taosdata");
|
||||||
|
config.setProperty("auto.offset.reset", "earliest");
|
||||||
config.setProperty("msg.with.table.name", "true");
|
config.setProperty("msg.with.table.name", "true");
|
||||||
config.setProperty("enable.auto.commit", "true");
|
config.setProperty("enable.auto.commit", "true");
|
||||||
|
config.setProperty("auto.commit.interval.ms", "1000");
|
||||||
config.setProperty("group.id", "group1");
|
config.setProperty("group.id", "group1");
|
||||||
|
config.setProperty("client.id", "1");
|
||||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||||
|
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||||
|
config.setProperty("experimental.snapshot.enable", "true");
|
||||||
|
|
||||||
this.consumer = new TaosConsumer<>(config);
|
this.consumer = new TaosConsumer<>(config);
|
||||||
this.topics = Collections.singletonList("topic_speed");
|
this.topics = Collections.singletonList("topic_speed");
|
||||||
|
@ -1093,12 +1105,19 @@ public abstract class ConsumerLoop {
|
||||||
|
|
||||||
public ConsumerLoop() throws SQLException {
|
public ConsumerLoop() throws SQLException {
|
||||||
Properties config = new Properties();
|
Properties config = new Properties();
|
||||||
config.setProperty("bootstrap.servers", "localhost:6041");
|
|
||||||
config.setProperty("td.connect.type", "ws");
|
config.setProperty("td.connect.type", "ws");
|
||||||
|
config.setProperty("bootstrap.servers", "localhost:6041");
|
||||||
|
config.setProperty("td.connect.user", "root");
|
||||||
|
config.setProperty("td.connect.pass", "taosdata");
|
||||||
|
config.setProperty("auto.offset.reset", "earliest");
|
||||||
config.setProperty("msg.with.table.name", "true");
|
config.setProperty("msg.with.table.name", "true");
|
||||||
config.setProperty("enable.auto.commit", "true");
|
config.setProperty("enable.auto.commit", "true");
|
||||||
|
config.setProperty("auto.commit.interval.ms", "1000");
|
||||||
config.setProperty("group.id", "group2");
|
config.setProperty("group.id", "group2");
|
||||||
|
config.setProperty("client.id", "1");
|
||||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||||
|
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||||
|
config.setProperty("experimental.snapshot.enable", "true");
|
||||||
|
|
||||||
this.consumer = new TaosConsumer<>(config);
|
this.consumer = new TaosConsumer<>(config);
|
||||||
this.topics = Collections.singletonList("topic_speed");
|
this.topics = Collections.singletonList("topic_speed");
|
||||||
|
|
|
@ -45,7 +45,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
|
||||||
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **数据订阅(TMQ)** | 暂不支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
|
||||||
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
|
||||||
|
|
||||||
|
|
|
@ -869,10 +869,15 @@ FIRST(expr)
|
||||||
### INTERP
|
### INTERP
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INTERP(expr)
|
INTERP(expr [, ignore_null_values])
|
||||||
|
|
||||||
|
ignore_null_values: {
|
||||||
|
0
|
||||||
|
| 1
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
**功能说明**:返回指定时间截面指定列的记录值或插值。
|
**功能说明**:返回指定时间截面指定列的记录值或插值。ignore_null_values 参数的值可以是 0 或 1,为 1 时表示忽略 NULL 值, 缺省值为0。
|
||||||
|
|
||||||
**返回数据类型**:同字段类型。
|
**返回数据类型**:同字段类型。
|
||||||
|
|
||||||
|
|
|
@ -38,7 +38,7 @@ CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE
|
||||||
```
|
```
|
||||||
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
- OR REPLACE: 如果函数已经存在,会修改已有的函数属性。
|
||||||
- function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
|
- function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致;
|
||||||
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言。
|
- LANGUAGE 'C|Python':函数编程语言,目前支持C语言和Python语言(v3.7+)。
|
||||||
- library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;;
|
- library_path:如果编程语言是C,路径是包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件)。如果编程语言是Python,路径是包含 UDF 函数实现的Python文件路径。这个路径需要用英文单引号或英文双引号括起来;;
|
||||||
- output_type:此函数计算结果的数据类型名称;
|
- output_type:此函数计算结果的数据类型名称;
|
||||||
- buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
|
- buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
|
||||||
|
|
|
@ -177,7 +177,7 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
|
||||||
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
|
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
|
||||||
```
|
```
|
||||||
|
|
||||||
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
|
我们提供了一个“-E”选项,用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`:
|
||||||
|
@ -189,7 +189,7 @@ curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifica
|
||||||
使用上面获取的 `uid` 值作为 `-E` 输入。
|
使用上面获取的 `uid` 值作为 `-E` 输入。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
|
||||||
```
|
```
|
||||||
|
|
||||||
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
|
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
|
||||||
|
|
|
@ -79,14 +79,14 @@ taos --dump-config
|
||||||
:::note
|
:::note
|
||||||
确保集群中所有主机在端口 6030 上的 TCP 协议能够互通。(详细的端口情况请参见下表)
|
确保集群中所有主机在端口 6030 上的 TCP 协议能够互通。(详细的端口情况请参见下表)
|
||||||
:::
|
:::
|
||||||
| 协议 | 默认端口 | 用途说明 | 修改方法 |
|
| 协议 | 默认端口 | 用途说明 | 修改方法 |
|
||||||
| :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- |
|
| :--- | :------- | :----------------------------------------------- | :------------------------------------------------------------------------------------------------- |
|
||||||
| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
|
| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
|
||||||
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
|
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
|
||||||
| TCP | 6043 | taosKeeper 监控服务端口。 | 随 taosKeeper 启动参数设置变化。 |
|
| TCP | 6043 | taosKeeper 监控服务端口。 | 随 taosKeeper 启动参数设置变化。 |
|
||||||
| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化|
|
| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化 |
|
||||||
| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化 |
|
| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化 |
|
||||||
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
|
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
|
||||||
|
|
||||||
### maxShellConns
|
### maxShellConns
|
||||||
|
|
||||||
|
@ -97,6 +97,24 @@ taos --dump-config
|
||||||
| 取值范围 | 10-50000000 |
|
| 取值范围 | 10-50000000 |
|
||||||
| 缺省值 | 5000 |
|
| 缺省值 | 5000 |
|
||||||
|
|
||||||
|
### numOfRpcSessions
|
||||||
|
|
||||||
|
| 属性 | 说明 |
|
||||||
|
| -------- | ---------------------------- |
|
||||||
|
| 适用范围 | 客户端和服务端都适用 |
|
||||||
|
| 含义 | 一个客户端能创建的最大连接数 |
|
||||||
|
| 取值范围 | 100-100000 |
|
||||||
|
| 缺省值 | 10000 |
|
||||||
|
|
||||||
|
### timeToGetAvailableConn
|
||||||
|
|
||||||
|
| 属性 | 说明 |
|
||||||
|
| -------- | -------------------------- |
|
||||||
|
| 适用范围 | 客户端和服务端都适用 |
|
||||||
|
| 含义 | 获得可用连接的最长等待时间 |
|
||||||
|
| 取值范围 | 10-50000000(单位为毫秒) |
|
||||||
|
| 缺省值 | 500000 |
|
||||||
|
|
||||||
## 监控相关
|
## 监控相关
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
|
@ -139,43 +157,43 @@ taos --dump-config
|
||||||
|
|
||||||
### telemetryReporting
|
### telemetryReporting
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------------------- |
|
| -------- | ------------------------ |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 |是否上传 telemetry |
|
| 含义 | 是否上传 telemetry |
|
||||||
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
||||||
| 缺省值 | 1 |
|
| 缺省值 | 1 |
|
||||||
|
|
||||||
### crashReporting
|
### crashReporting
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------------------- |
|
| -------- | ------------------------ |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 |是否上传 crash 信息 |
|
| 含义 | 是否上传 crash 信息 |
|
||||||
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
| 取值范围 | 0,1 0: 不上传;1:上传 |
|
||||||
| 缺省值 | 1 |
|
| 缺省值 | 1 |
|
||||||
|
|
||||||
## 查询相关
|
## 查询相关
|
||||||
|
|
||||||
### queryPolicy
|
### queryPolicy
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ----------------------------- |
|
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | 查询语句的执行策略 |
|
| 含义 | 查询语句的执行策略 |
|
||||||
| 单位 | 无 |
|
| 单位 | 无 |
|
||||||
| 缺省值 | 1 |
|
| 缺省值 | 1 |
|
||||||
| 补充说明 | 1: 只使用 vnode,不使用 qnode; 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行; 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 |
|
| 补充说明 | 1: 只使用 vnode,不使用 qnode; 2: 没有扫描算子的子任务在 qnode 执行,带扫描算子的子任务在 vnode 执行; 3: vnode 只运行扫描算子,其余算子均在 qnode 执行 |
|
||||||
|
|
||||||
### querySmaOptimize
|
### querySmaOptimize
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------- |
|
| -------- | ---------------------------------------------------------------------------------------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | sma index 的优化策略 |
|
| 含义 | sma index 的优化策略 |
|
||||||
| 单位 | 无 |
|
| 单位 | 无 |
|
||||||
| 缺省值 | 0 |
|
| 缺省值 | 0 |
|
||||||
| 补充说明 |0: 表示不使用 sma index,永远从原始数据进行查询; 1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询 |
|
| 补充说明 | 0: 表示不使用 sma index,永远从原始数据进行查询; 1: 表示使用 sma index,对符合的语句,直接从预计算的结果进行查询 |
|
||||||
|
|
||||||
### maxNumOfDistinctRes
|
### maxNumOfDistinctRes
|
||||||
|
|
||||||
|
@ -188,21 +206,21 @@ taos --dump-config
|
||||||
|
|
||||||
### keepColumnName
|
### keepColumnName
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------- |
|
| -------- | ----------------------------------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | Last、First、LastRow 函数查询时,返回的列名是否包含函数名。 |
|
| 含义 | Last、First、LastRow 函数查询时,返回的列名是否包含函数名。 |
|
||||||
| 取值范围 | 0 表示包含函数名,1 表示不包含函数名。 |
|
| 取值范围 | 0 表示包含函数名,1 表示不包含函数名。 |
|
||||||
| 缺省值 | 0 |
|
| 缺省值 | 0 |
|
||||||
|
|
||||||
### countAlwaysReturnValue
|
### countAlwaysReturnValue
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------- |
|
| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值 |
|
| 含义 | count/hyperloglog函数在输入数据为空或者NULL的情况下是否返回值 |
|
||||||
| 取值范围 | 0:返回空行,1:返回 0 |
|
| 取值范围 | 0:返回空行,1:返回 0 |
|
||||||
| 缺省值 | 1 |
|
| 缺省值 | 1 |
|
||||||
| 补充说明 | 该参数设置为 1 时,如果查询中含有 GROUP BY,PARTITION BY 以及 INTERVAL 子句且相应的组或窗口内数据为空或者NULL, 对应的组或窗口将不返回查询结果 |
|
| 补充说明 | 该参数设置为 1 时,如果查询中含有 GROUP BY,PARTITION BY 以及 INTERVAL 子句且相应的组或窗口内数据为空或者NULL, 对应的组或窗口将不返回查询结果 |
|
||||||
|
|
||||||
## 区域相关
|
## 区域相关
|
||||||
|
@ -314,20 +332,20 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### dataDir
|
### dataDir
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------------------------ |
|
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
|
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
|
||||||
| 缺省值 | /var/lib/taos |
|
| 缺省值 | /var/lib/taos |
|
||||||
| 补充说明 | [多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8) 功能需要与 [KEEP](https://docs.taosdata.com/taos-sql/database/#%E5%8F%82%E6%95%B0%E8%AF%B4%E6%98%8E) 参数配合使用 |
|
| 补充说明 | [多级存储](https://docs.taosdata.com/tdinternal/arch/#%E5%A4%9A%E7%BA%A7%E5%AD%98%E5%82%A8) 功能需要与 [KEEP](https://docs.taosdata.com/taos-sql/database/#%E5%8F%82%E6%95%B0%E8%AF%B4%E6%98%8E) 参数配合使用 |
|
||||||
|
|
||||||
### tempDir
|
### tempDir
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------------------------ |
|
| -------- | ------------------------------------------------ |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | 该参数指定所有系统运行过程中的临时文件生成的目录 |
|
| 含义 | 该参数指定所有系统运行过程中的临时文件生成的目录 |
|
||||||
| 缺省值 | /tmp |
|
| 缺省值 | /tmp |
|
||||||
|
|
||||||
### minimalTmpDirGB
|
### minimalTmpDirGB
|
||||||
|
|
||||||
|
@ -340,12 +358,12 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### minimalDataDirGB
|
### minimalDataDirGB
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------------------------------ |
|
| -------- | ---------------------------------------------- |
|
||||||
| 适用范围 | 仅服务端适用 |
|
| 适用范围 | 仅服务端适用 |
|
||||||
| 含义 | dataDir 指定的时序数据存储目录所需要保留的最小 |
|
| 含义 | dataDir 指定的时序数据存储目录所需要保留的最小 |
|
||||||
| 单位 | GB |
|
| 单位 | GB |
|
||||||
| 缺省值 | 2.0 |
|
| 缺省值 | 2.0 |
|
||||||
|
|
||||||
## 集群相关
|
## 集群相关
|
||||||
|
|
||||||
|
@ -381,12 +399,12 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### minimalLogDirGB
|
### minimalLogDirGB
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | -------------------------------------------- |
|
| -------- | ------------------------------------------------------ |
|
||||||
| 适用范围 | 服务端和客户端均适用 |
|
| 适用范围 | 服务端和客户端均适用 |
|
||||||
| 含义 | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志 |
|
| 含义 | 当日志文件夹所在磁盘可用空间大小小于该值时,停止写日志 |
|
||||||
| 单位 | GB |
|
| 单位 | GB |
|
||||||
| 缺省值 | 1.0 |
|
| 缺省值 | 1.0 |
|
||||||
|
|
||||||
### numOfLogLines
|
### numOfLogLines
|
||||||
|
|
||||||
|
@ -590,12 +608,12 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### smlChildTableName
|
### smlChildTableName
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------- |
|
| -------- | ------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | schemaless 自定义的子表名的 key |
|
| 含义 | schemaless 自定义的子表名的 key |
|
||||||
| 类型 | 字符串 |
|
| 类型 | 字符串 |
|
||||||
| 缺省值 | 无 |
|
| 缺省值 | 无 |
|
||||||
|
|
||||||
### smlTagName
|
### smlTagName
|
||||||
|
|
||||||
|
@ -608,12 +626,12 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### smlDataFormat
|
### smlDataFormat
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ----------------------------- |
|
| -------- | -------------------------------------------------------- |
|
||||||
| 适用范围 | 仅客户端适用 |
|
| 适用范围 | 仅客户端适用 |
|
||||||
| 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 |
|
| 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 |
|
||||||
| 值域 | 0:不一致;1: 一致 |
|
| 值域 | 0:不一致;1: 一致 |
|
||||||
| 缺省值 | 0 |
|
| 缺省值 | 0 |
|
||||||
|
|
||||||
## 其他
|
## 其他
|
||||||
|
|
||||||
|
@ -629,12 +647,12 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### enableScience
|
### enableScience
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
| -------- | ---------------------------- |
|
||||||
| 适用范围 | 仅客户端 TAOS-CLI 适用 |
|
| 适用范围 | 仅客户端 TAOS-CLI 适用 |
|
||||||
| 含义 | 是否开启科学计数法显示浮点数 |
|
| 含义 | 是否开启科学计数法显示浮点数 |
|
||||||
| 取值范围 | 0:否,1:是 |
|
| 取值范围 | 0:否,1:是 |
|
||||||
| 缺省值 | 0 |
|
| 缺省值 | 0 |
|
||||||
|
|
||||||
### udf
|
### udf
|
||||||
|
|
||||||
|
@ -649,153 +667,153 @@ charset 的有效值是 UTF-8。
|
||||||
|
|
||||||
### compressMsgSize
|
### compressMsgSize
|
||||||
|
|
||||||
| 属性 | 说明 |
|
| 属性 | 说明 |
|
||||||
| -------- | ----------------------------- |
|
| -------- | ------------------------------------------------------------------------------- |
|
||||||
| 适用于 | 服务端和客户端均适用 |
|
| 适用于 | 服务端和客户端均适用 |
|
||||||
| 含义 | 是否对 RPC 消息进行压缩 |
|
| 含义 | 是否对 RPC 消息进行压缩 |
|
||||||
| 取值范围 | -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩 |
|
| 取值范围 | -1: 所有消息都不压缩; 0: 所有消息都压缩; N (N>0): 只有大于 N 个字节的消息才压缩 |
|
||||||
| 缺省值 | -1 |
|
| 缺省值 | -1 |
|
||||||
|
|
||||||
## 3.0 中有效的配置参数列表
|
## 3.0 中有效的配置参数列表
|
||||||
|
|
||||||
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
|
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
|
||||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
| --- | :--------------------: | --------------- | ------------------------------- | ------------------ |
|
||||||
| 1 | firstEp | 是 | 是 | |
|
| 1 | firstEp | 是 | 是 | |
|
||||||
| 2 | secondEp | 是 | 是 | |
|
| 2 | secondEp | 是 | 是 | |
|
||||||
| 3 | fqdn | 是 | 是 | |
|
| 3 | fqdn | 是 | 是 | |
|
||||||
| 4 | serverPort | 是 | 是 | |
|
| 4 | serverPort | 是 | 是 | |
|
||||||
| 5 | maxShellConns | 是 | 是 | |
|
| 5 | maxShellConns | 是 | 是 | |
|
||||||
| 6 | monitor | 是 | 是 | |
|
| 6 | monitor | 是 | 是 | |
|
||||||
| 7 | monitorFqdn | 否 | 是 | |
|
| 7 | monitorFqdn | 否 | 是 | |
|
||||||
| 8 | monitorPort | 否 | 是 | |
|
| 8 | monitorPort | 否 | 是 | |
|
||||||
| 9 | monitorInterval | 是 | 是 | |
|
| 9 | monitorInterval | 是 | 是 | |
|
||||||
| 10 | queryPolicy | 否 | 是 | |
|
| 10 | queryPolicy | 否 | 是 | |
|
||||||
| 11 | querySmaOptimize | 否 | 是 | |
|
| 11 | querySmaOptimize | 否 | 是 | |
|
||||||
| 12 | maxNumOfDistinctRes | 是 | 是 | |
|
| 12 | maxNumOfDistinctRes | 是 | 是 | |
|
||||||
| 15 | countAlwaysReturnValue | 是 | 是 | |
|
| 15 | countAlwaysReturnValue | 是 | 是 | |
|
||||||
| 16 | dataDir | 是 | 是 | |
|
| 16 | dataDir | 是 | 是 | |
|
||||||
| 17 | minimalDataDirGB | 是 | 是 | |
|
| 17 | minimalDataDirGB | 是 | 是 | |
|
||||||
| 18 | supportVnodes | 否 | 是 | |
|
| 18 | supportVnodes | 否 | 是 | |
|
||||||
| 19 | tempDir | 是 | 是 | |
|
| 19 | tempDir | 是 | 是 | |
|
||||||
| 20 | minimalTmpDirGB | 是 | 是 | |
|
| 20 | minimalTmpDirGB | 是 | 是 | |
|
||||||
| 21 | smlChildTableName | 是 | 是 | |
|
| 21 | smlChildTableName | 是 | 是 | |
|
||||||
| 22 | smlTagName | 是 | 是 | |
|
| 22 | smlTagName | 是 | 是 | |
|
||||||
| 23 | smlDataFormat | 否 | 是(从3.0.3.0开始,该配置废弃) | |
|
| 23 | smlDataFormat | 否 | 是(从3.0.3.0开始,该配置废弃) | |
|
||||||
| 24 | statusInterval | 是 | 是 | |
|
| 24 | statusInterval | 是 | 是 | |
|
||||||
| 25 | logDir | 是 | 是 | |
|
| 25 | logDir | 是 | 是 | |
|
||||||
| 26 | minimalLogDirGB | 是 | 是 | |
|
| 26 | minimalLogDirGB | 是 | 是 | |
|
||||||
| 27 | numOfLogLines | 是 | 是 | |
|
| 27 | numOfLogLines | 是 | 是 | |
|
||||||
| 28 | asyncLog | 是 | 是 | |
|
| 28 | asyncLog | 是 | 是 | |
|
||||||
| 29 | logKeepDays | 是 | 是 | |
|
| 29 | logKeepDays | 是 | 是 | |
|
||||||
| 30 | debugFlag | 是 | 是 | |
|
| 30 | debugFlag | 是 | 是 | |
|
||||||
| 31 | tmrDebugFlag | 是 | 是 | |
|
| 31 | tmrDebugFlag | 是 | 是 | |
|
||||||
| 32 | uDebugFlag | 是 | 是 | |
|
| 32 | uDebugFlag | 是 | 是 | |
|
||||||
| 33 | rpcDebugFlag | 是 | 是 | |
|
| 33 | rpcDebugFlag | 是 | 是 | |
|
||||||
| 34 | jniDebugFlag | 是 | 是 | |
|
| 34 | jniDebugFlag | 是 | 是 | |
|
||||||
| 35 | qDebugFlag | 是 | 是 | |
|
| 35 | qDebugFlag | 是 | 是 | |
|
||||||
| 36 | cDebugFlag | 是 | 是 | |
|
| 36 | cDebugFlag | 是 | 是 | |
|
||||||
| 37 | dDebugFlag | 是 | 是 | |
|
| 37 | dDebugFlag | 是 | 是 | |
|
||||||
| 38 | vDebugFlag | 是 | 是 | |
|
| 38 | vDebugFlag | 是 | 是 | |
|
||||||
| 39 | mDebugFlag | 是 | 是 | |
|
| 39 | mDebugFlag | 是 | 是 | |
|
||||||
| 40 | wDebugFlag | 是 | 是 | |
|
| 40 | wDebugFlag | 是 | 是 | |
|
||||||
| 41 | sDebugFlag | 是 | 是 | |
|
| 41 | sDebugFlag | 是 | 是 | |
|
||||||
| 42 | tsdbDebugFlag | 是 | 是 | |
|
| 42 | tsdbDebugFlag | 是 | 是 | |
|
||||||
| 43 | tqDebugFlag | 否 | 是 | |
|
| 43 | tqDebugFlag | 否 | 是 | |
|
||||||
| 44 | fsDebugFlag | 是 | 是 | |
|
| 44 | fsDebugFlag | 是 | 是 | |
|
||||||
| 45 | udfDebugFlag | 否 | 是 | |
|
| 45 | udfDebugFlag | 否 | 是 | |
|
||||||
| 46 | smaDebugFlag | 否 | 是 | |
|
| 46 | smaDebugFlag | 否 | 是 | |
|
||||||
| 47 | idxDebugFlag | 否 | 是 | |
|
| 47 | idxDebugFlag | 否 | 是 | |
|
||||||
| 48 | tdbDebugFlag | 否 | 是 | |
|
| 48 | tdbDebugFlag | 否 | 是 | |
|
||||||
| 49 | metaDebugFlag | 否 | 是 | |
|
| 49 | metaDebugFlag | 否 | 是 | |
|
||||||
| 50 | timezone | 是 | 是 | |
|
| 50 | timezone | 是 | 是 | |
|
||||||
| 51 | locale | 是 | 是 | |
|
| 51 | locale | 是 | 是 | |
|
||||||
| 52 | charset | 是 | 是 | |
|
| 52 | charset | 是 | 是 | |
|
||||||
| 53 | udf | 是 | 是 | |
|
| 53 | udf | 是 | 是 | |
|
||||||
| 54 | enableCoreFile | 是 | 是 | |
|
| 54 | enableCoreFile | 是 | 是 | |
|
||||||
|
|
||||||
## 2.x->3.0 的废弃参数
|
## 2.x->3.0 的废弃参数
|
||||||
|
|
||||||
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
|
| # | **参数** | **适用于 2.X ** | **适用于 3.0 ** | 3.0 版本的当前行为 |
|
||||||
| --- | :---------------------: | --------------- | --------------- | ------------------------------------------------- |
|
| --- | :---------------------: | --------------- | --------------- | ---------------------------------------------------- |
|
||||||
| 1 | arbitrator | 是 | 否 | 通过 RAFT 协议选主 |
|
| 1 | arbitrator | 是 | 否 | 通过 RAFT 协议选主 |
|
||||||
| 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
|
| 2 | numOfThreadsPerCore | 是 | 否 | 有其它参数设置多种线程池的大小 |
|
||||||
| 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
|
| 3 | numOfMnodes | 是 | 否 | 通过 create mnode 命令动态创建 mnode |
|
||||||
| 4 | vnodeBak | 是 | 否 | 3.0 行为未知 |
|
| 4 | vnodeBak | 是 | 否 | 3.0 行为未知 |
|
||||||
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 (暂不支持) |
|
| 5 | balance | 是 | 否 | 负载均衡功能由 split/merge vgroups 实现 (暂不支持) |
|
||||||
| 6 | balanceInterval | 是 | 否 | 随着 balance 参数失效 |
|
| 6 | balanceInterval | 是 | 否 | 随着 balance 参数失效 |
|
||||||
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
| 7 | offlineThreshold | 是 | 否 | 3.0 行为未知 |
|
||||||
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
| 8 | role | 是 | 否 | 由 supportVnode 决定是否能够创建 |
|
||||||
| 9 | dnodeNopLoop | 是 | 否 | 2.6 文档中未找到此参数 |
|
| 9 | dnodeNopLoop | 是 | 否 | 2.6 文档中未找到此参数 |
|
||||||
| 10 | keepTimeOffset | 是 | 否 | 2.6 文档中未找到此参数 |
|
| 10 | keepTimeOffset | 是 | 否 | 2.6 文档中未找到此参数 |
|
||||||
| 11 | rpcTimer | 是 | 否 | 3.0 行为未知 |
|
| 11 | rpcTimer | 是 | 否 | 3.0 行为未知 |
|
||||||
| 12 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
|
| 12 | rpcMaxTime | 是 | 否 | 3.0 行为未知 |
|
||||||
| 13 | rpcForceTcp | 是 | 否 | 默认为 TCP |
|
| 13 | rpcForceTcp | 是 | 否 | 默认为 TCP |
|
||||||
| 14 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
|
| 14 | tcpConnTimeout | 是 | 否 | 3.0 行为未知 |
|
||||||
| 15 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
|
| 15 | syncCheckInterval | 是 | 否 | 3.0 行为未知 |
|
||||||
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
| 16 | maxTmrCtrl | 是 | 否 | 3.0 行为未知 |
|
||||||
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
| 17 | monitorReplica | 是 | 否 | 由 RAFT 协议管理多副本 |
|
||||||
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
| 18 | smlTagNullName | 是 | 否 | 3.0 行为未知 |
|
||||||
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
| 20 | ratioOfQueryCores | 是 | 否 | 由 线程池 相关配置参数决定 |
|
||||||
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 21 | maxStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 22 | maxFirstStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
| 23 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
| 23 | retryStreamCompDelay | 是 | 否 | 3.0 行为未知 |
|
||||||
| 24 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
|
| 24 | streamCompDelayRatio | 是 | 否 | 3.0 行为未知 |
|
||||||
| 25 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
|
| 25 | maxVgroupsPerDb | 是 | 否 | 由 create db 的参数 vgroups 指定实际 vgroups 数量 |
|
||||||
| 26 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
| 26 | maxTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||||
| 27 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
| 27 | minTablesPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||||
| 28 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
| 28 | tableIncStepPerVnode | 是 | 否 | DB 中的所有表近似平均分配到各个 vgroup |
|
||||||
| 29 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
| 29 | cache | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||||
| 30 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
| 30 | blocks | 是 | 否 | 由 buffer 代替 cache\*blocks |
|
||||||
| 31 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
|
| 31 | days | 是 | 否 | 由 create db 的参数 duration 取代 |
|
||||||
| 32 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
|
| 32 | keep | 是 | 否 | 由 create db 的参数 keep 取代 |
|
||||||
| 33 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
|
| 33 | minRows | 是 | 否 | 由 create db 的参数 minRows 取代 |
|
||||||
| 34 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
|
| 34 | maxRows | 是 | 否 | 由 create db 的参数 maxRows 取代 |
|
||||||
| 35 | quorum | 是 | 否 | 由 RAFT 协议决定 |
|
| 35 | quorum | 是 | 否 | 由 RAFT 协议决定 |
|
||||||
| 36 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
|
| 36 | comp | 是 | 否 | 由 create db 的参数 comp 取代 |
|
||||||
| 37 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
|
| 37 | walLevel | 是 | 否 | 由 create db 的参数 wal_level 取代 |
|
||||||
| 38 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
|
| 38 | fsync | 是 | 否 | 由 create db 的参数 wal_fsync_period 取代 |
|
||||||
| 39 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
|
| 39 | replica | 是 | 否 | 由 create db 的参数 replica 取代 |
|
||||||
| 40 | partitions | 是 | 否 | 3.0 行为未知 |
|
| 40 | partitions | 是 | 否 | 3.0 行为未知 |
|
||||||
| 41 | update | 是 | 否 | 允许更新部分列 |
|
| 41 | update | 是 | 否 | 允许更新部分列 |
|
||||||
| 42 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
|
| 42 | cachelast | 是 | 否 | 由 create db 的参数 cacheModel 取代 |
|
||||||
| 43 | maxSQLLength | 是 | 否 | SQL 上限为 1MB,无需参数控制 |
|
| 43 | maxSQLLength | 是 | 否 | SQL 上限为 1MB,无需参数控制 |
|
||||||
| 44 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
|
| 44 | maxWildCardsLength | 是 | 否 | 3.0 行为未知 |
|
||||||
| 45 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
|
| 45 | maxRegexStringLen | 是 | 否 | 3.0 行为未知 |
|
||||||
| 46 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
|
| 46 | maxNumOfOrderedRes | 是 | 否 | 3.0 行为未知 |
|
||||||
| 47 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
|
| 47 | maxConnections | 是 | 否 | 取决于系统配置和系统处理能力,详见后面的 Note |
|
||||||
| 48 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
|
| 48 | mnodeEqualVnodeNum | 是 | 否 | 3.0 行为未知 |
|
||||||
| 49 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
|
| 49 | http | 是 | 否 | http 服务由 taosAdapter 提供 |
|
||||||
| 50 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
|
| 50 | httpEnableRecordSql | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 51 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
|
| 51 | httpMaxThreads | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 52 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
|
| 52 | restfulRowLimit | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 53 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
|
| 53 | httpDbNameMandatory | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 54 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
|
| 54 | httpKeepAlive | 是 | 否 | taosd 不提供 http 服务 |
|
||||||
| 55 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
|
| 55 | enableRecordSql | 是 | 否 | 3.0 行为未知 |
|
||||||
| 56 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
|
| 56 | maxBinaryDisplayWidth | 是 | 否 | 3.0 行为未知 |
|
||||||
| 57 | stream | 是 | 否 | 默认启用连续查询 |
|
| 57 | stream | 是 | 否 | 默认启用连续查询 |
|
||||||
| 58 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
|
| 58 | retrieveBlockingModel | 是 | 否 | 3.0 行为未知 |
|
||||||
| 59 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
|
| 59 | tsdbMetaCompactRatio | 是 | 否 | 3.0 行为未知 |
|
||||||
| 60 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
|
| 60 | defaultJSONStrType | 是 | 否 | 3.0 行为未知 |
|
||||||
| 61 | walFlushSize | 是 | 否 | 3.0 行为未知 |
|
| 61 | walFlushSize | 是 | 否 | 3.0 行为未知 |
|
||||||
| 62 | keepTimeOffset | 是 | 否 | 3.0 行为未知 |
|
| 62 | keepTimeOffset | 是 | 否 | 3.0 行为未知 |
|
||||||
| 63 | flowctrl | 是 | 否 | 3.0 行为未知 |
|
| 63 | flowctrl | 是 | 否 | 3.0 行为未知 |
|
||||||
| 64 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
|
| 64 | slaveQuery | 是 | 否 | 3.0 行为未知: slave vnode 是否能够处理查询? |
|
||||||
| 65 | adjustMaster | 是 | 否 | 3.0 行为未知 |
|
| 65 | adjustMaster | 是 | 否 | 3.0 行为未知 |
|
||||||
| 66 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
|
| 66 | topicBinaryLen | 是 | 否 | 3.0 行为未知 |
|
||||||
| 67 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
|
| 67 | telegrafUseFieldNum | 是 | 否 | 3.0 行为未知 |
|
||||||
| 68 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
|
| 68 | deadLockKillQuery | 是 | 否 | 3.0 行为未知 |
|
||||||
| 69 | clientMerge | 是 | 否 | 3.0 行为未知 |
|
| 69 | clientMerge | 是 | 否 | 3.0 行为未知 |
|
||||||
| 70 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 70 | sdbDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 71 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 71 | odbcDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 72 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 72 | httpDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 73 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 73 | monDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 74 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 74 | cqDebugFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 75 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
| 75 | shortcutFlag | 是 | 否 | 参考 3.0 的 DebugFlag 系列参数 |
|
||||||
| 76 | probeSeconds | 是 | 否 | 3.0 行为未知 |
|
| 76 | probeSeconds | 是 | 否 | 3.0 行为未知 |
|
||||||
| 77 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
|
| 77 | probeKillSeconds | 是 | 否 | 3.0 行为未知 |
|
||||||
| 78 | probeInterval | 是 | 否 | 3.0 行为未知 |
|
| 78 | probeInterval | 是 | 否 | 3.0 行为未知 |
|
||||||
| 79 | lossyColumns | 是 | 否 | 3.0 行为未知 |
|
| 79 | lossyColumns | 是 | 否 | 3.0 行为未知 |
|
||||||
| 80 | fPrecision | 是 | 否 | 3.0 行为未知 |
|
| 80 | fPrecision | 是 | 否 | 3.0 行为未知 |
|
||||||
| 81 | dPrecision | 是 | 否 | 3.0 行为未知 |
|
| 81 | dPrecision | 是 | 否 | 3.0 行为未知 |
|
||||||
| 82 | maxRange | 是 | 否 | 3.0 行为未知 |
|
| 82 | maxRange | 是 | 否 | 3.0 行为未知 |
|
||||||
| 83 | range | 是 | 否 | 3.0 行为未知 |
|
| 83 | range | 是 | 否 | 3.0 行为未知 |
|
||||||
|
|
|
@ -32,7 +32,7 @@ chmod +x TDinsight.sh
|
||||||
- 使用已经存在的 Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。
|
- 使用已经存在的 Grafana Notification Channel `uid`,参数 `-E`。该参数可以使用 `curl -u admin:admin localhost:3000/api/alert-notifications |jq` 来获取。
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E <notifier uid>
|
||||||
```
|
```
|
||||||
|
|
||||||
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
|
运行程序并重启 Grafana 服务,打开面板:`http://localhost:3000/d/tdinsight`。
|
||||||
|
@ -270,7 +270,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -284,7 +284,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|client\_ip|NCHAR|TAG|client ip|
|
|client\_ip|NCHAR|TAG|client ip|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|request\_method|NCHAR|TAG|request method|
|
|request\_method|NCHAR|TAG|request method|
|
||||||
|
@ -298,7 +298,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|
|
||||||
### taosadapter\_restful\_http\_request\_summary\_milliseconds 表
|
### taosadapter\_restful\_http\_request\_summary\_milliseconds 表
|
||||||
|
@ -326,7 +326,7 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|
|
||||||
### taosadapter\_system\_cpu\_percent 表
|
### taosadapter\_system\_cpu\_percent 表
|
||||||
|
@ -336,5 +336,5 @@ TDinsight dashboard 数据来源于 log 库(存放监控数据的默认db,
|
||||||
|field|type|is\_tag|comment|
|
|field|type|is\_tag|comment|
|
||||||
|:----|:---|:-----|:------|
|
|:----|:---|:-----|:------|
|
||||||
|\_ts|TIMESTAMP||timestamp|
|
|\_ts|TIMESTAMP||timestamp|
|
||||||
|guage|DOUBLE||监控指标值|
|
|gauge|DOUBLE||监控指标值|
|
||||||
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
|endpoint|NCHAR|TAG|taosadpater endpoint|
|
||||||
|
|
|
@ -318,7 +318,6 @@ connection.backoff.ms=5000
|
||||||
topic.prefix=tdengine-source-
|
topic.prefix=tdengine-source-
|
||||||
poll.interval.ms=1000
|
poll.interval.ms=1000
|
||||||
fetch.max.rows=100
|
fetch.max.rows=100
|
||||||
out.format=line
|
|
||||||
key.converter=org.apache.kafka.connect.storage.StringConverter
|
key.converter=org.apache.kafka.connect.storage.StringConverter
|
||||||
value.converter=org.apache.kafka.connect.storage.StringConverter
|
value.converter=org.apache.kafka.connect.storage.StringConverter
|
||||||
```
|
```
|
||||||
|
@ -357,7 +356,7 @@ confluent local services connect connector load TDengineSourceConnector --config
|
||||||
|
|
||||||
### 查看 topic 数据
|
### 查看 topic 数据
|
||||||
|
|
||||||
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。
|
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
|
||||||
|
|
||||||
```
|
```
|
||||||
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
|
||||||
|
@ -434,11 +433,12 @@ confluent local services connect connector unload TDengineSourceConnector
|
||||||
### TDengine Source Connector 特有的配置
|
### TDengine Source Connector 特有的配置
|
||||||
|
|
||||||
1. `connection.database`: 源数据库名称,无缺省值。
|
1. `connection.database`: 源数据库名称,无缺省值。
|
||||||
2. `topic.prefix`: 数据导入 kafka 后 topic 名称前缀。 使用 `topic.prefix` + `connection.database` 名称作为完整 topic 名。默认为空字符串 ""。
|
2. `topic.prefix`: 数据导入 kafka 时使用的 topic 名称的前缀。默认为空字符串 ""。
|
||||||
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss'。默认为 "1970-01-01 00:00:00"。
|
3. `timestamp.initial`: 数据同步起始时间。格式为'yyyy-MM-dd HH:mm:ss',若未指定则从指定 DB 中最早的一条记录开始。
|
||||||
4. `poll.interval.ms`: 拉取数据间隔,单位为 ms。默认为 1000。
|
4. `poll.interval.ms`: 检查是否有新建或删除的表的时间间隔,单位为 ms。默认为 1000。
|
||||||
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
|
5. `fetch.max.rows` : 检索数据库时最大检索条数。 默认为 100。
|
||||||
6. `out.format`: 数据格式。取值 line 或 json。line 表示 InfluxDB Line 协议格式, json 表示 OpenTSDB JSON 格式。默认为 line。
|
6. `query.interval.ms`: 从 TDengine 一次读取数据的时间跨度,需要根据表中的数据特征合理配置,避免一次查询的数据量过大或过小;在具体的环境中建议通过测试设置一个较优值,默认值为 1000.
|
||||||
|
7. `topic.per.stable`: 如果设置为true,表示一个超级表对应一个 Kafka topic,topic的命名规则 `<topic.prefix>-<connection.database>-<stable.name>`;如果设置为 false,则指定的 DB 中的所有数据进入一个 Kafka topic,topic 的命名规则为 `<topic.prefix>-<connection.database>`
|
||||||
|
|
||||||
## 其他说明
|
## 其他说明
|
||||||
|
|
||||||
|
|
|
@ -247,4 +247,10 @@ launchctl limit maxfiles
|
||||||
该提示是创建 db 的 vnode 数量不够了,需要的 vnode 不能超过了 dnode 中 vnode 的上限。因为系统默认是一个 dnode 中有 CPU 核数两倍的 vnode,也可以通过配置文件中的参数 supportVnodes 控制。
|
该提示是创建 db 的 vnode 数量不够了,需要的 vnode 不能超过了 dnode 中 vnode 的上限。因为系统默认是一个 dnode 中有 CPU 核数两倍的 vnode,也可以通过配置文件中的参数 supportVnodes 控制。
|
||||||
正常调大 taos.cfg 中 supportVnodes 参数即可。
|
正常调大 taos.cfg 中 supportVnodes 参数即可。
|
||||||
|
|
||||||
|
### 21 【查询】在服务器上的使用 tao-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
|
||||||
|
这种情况是因为客户端与服务器上设置的时区不一致导致的,调整客户端与服务器的时区一致即可解决。
|
||||||
|
|
||||||
|
### 22 【表名】表名确认是存在的,但写入或查询时报表不存在错误,非常奇怪,什么原因?
|
||||||
|
TDengine 中的所有名称,包括数据库名、表名等都是区分大小写的,如果这些名称在程序或 taos-CLI 中没有使用反引号(`)括起来使用,即使你输入的是大写的,引擎也会转化成小写来使用,如果名称前后加上了反引号,引擎就不会再转化成小写,会保持原样来使用。
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
|
||||||
|
|
||||||
import Release from "/components/ReleaseV3";
|
import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
|
## 3.0.4.2
|
||||||
|
|
||||||
|
<Release type="tdengine" version="3.0.4.2" />
|
||||||
|
|
||||||
## 3.0.4.1
|
## 3.0.4.1
|
||||||
|
|
||||||
<Release type="tdengine" version="3.0.4.1" />
|
<Release type="tdengine" version="3.0.4.1" />
|
||||||
|
|
|
@ -14,10 +14,6 @@ import Release from "/components/ReleaseV3";
|
||||||
|
|
||||||
<Release type="tools" version="2.5.0" />
|
<Release type="tools" version="2.5.0" />
|
||||||
|
|
||||||
## 2.5.0
|
|
||||||
|
|
||||||
<Release type="tools" version="2.5.0" />
|
|
||||||
|
|
||||||
## 2.4.12
|
## 2.4.12
|
||||||
|
|
||||||
<Release type="tools" version="2.4.12" />
|
<Release type="tools" version="2.4.12" />
|
||||||
|
|
|
@ -73,7 +73,7 @@ static int32_t init_env() {
|
||||||
taos_free_result(pRes);
|
taos_free_result(pRes);
|
||||||
|
|
||||||
// create database
|
// create database
|
||||||
pRes = taos_query(pConn, "create database tmqdb precision 'ns'");
|
pRes = taos_query(pConn, "create database tmqdb precision 'ns' WAL_RETENTION_PERIOD 3600");
|
||||||
if (taos_errno(pRes) != 0) {
|
if (taos_errno(pRes) != 0) {
|
||||||
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
|
printf("error in create tmqdb, reason:%s\n", taos_errstr(pRes));
|
||||||
goto END;
|
goto END;
|
||||||
|
@ -289,7 +289,7 @@ void consume_repeatly(tmq_t* tmq) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
free(pAssign);
|
tmq_free_assignment(pAssign);
|
||||||
|
|
||||||
// let's do it again
|
// let's do it again
|
||||||
basic_consume_loop(tmq);
|
basic_consume_loop(tmq);
|
||||||
|
|
|
@ -162,6 +162,7 @@ static int l_query(lua_State *L){
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
|
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
|
||||||
lua_pushlstring(L,(char *)row[i], length[i]);
|
lua_pushlstring(L,(char *)row[i], length[i]);
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -161,6 +161,7 @@ static int l_query(lua_State *L){
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
|
//printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
|
||||||
lua_pushlstring(L,(char *)row[i], length[i]);
|
lua_pushlstring(L,(char *)row[i], length[i]);
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -51,7 +51,8 @@ typedef void TAOS_SUB;
|
||||||
#define TSDB_DATA_TYPE_BLOB 18 // binary
|
#define TSDB_DATA_TYPE_BLOB 18 // binary
|
||||||
#define TSDB_DATA_TYPE_MEDIUMBLOB 19
|
#define TSDB_DATA_TYPE_MEDIUMBLOB 19
|
||||||
#define TSDB_DATA_TYPE_BINARY TSDB_DATA_TYPE_VARCHAR // string
|
#define TSDB_DATA_TYPE_BINARY TSDB_DATA_TYPE_VARCHAR // string
|
||||||
#define TSDB_DATA_TYPE_MAX 20
|
#define TSDB_DATA_TYPE_GEOMETRY 20 // geometry
|
||||||
|
#define TSDB_DATA_TYPE_MAX 21
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
TSDB_OPTION_LOCALE,
|
TSDB_OPTION_LOCALE,
|
||||||
|
@ -288,6 +289,7 @@ DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
||||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||||
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,
|
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,
|
||||||
int32_t *numOfAssignment);
|
int32_t *numOfAssignment);
|
||||||
|
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
|
||||||
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||||
|
|
||||||
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
|
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
|
||||||
|
|
|
@ -215,7 +215,7 @@ void colInfoDataCleanup(SColumnInfoData* pColumn, uint32_t numOfRows);
|
||||||
void blockDataCleanup(SSDataBlock* pDataBlock);
|
void blockDataCleanup(SSDataBlock* pDataBlock);
|
||||||
void blockDataEmpty(SSDataBlock* pDataBlock);
|
void blockDataEmpty(SSDataBlock* pDataBlock);
|
||||||
|
|
||||||
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
|
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int32_t extraSize);
|
||||||
|
|
||||||
int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n);
|
int32_t blockDataTrimFirstRows(SSDataBlock* pBlock, size_t n);
|
||||||
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
|
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
|
||||||
|
|
|
@ -124,6 +124,7 @@ extern int32_t tsRedirectFactor;
|
||||||
extern int32_t tsRedirectMaxPeriod;
|
extern int32_t tsRedirectMaxPeriod;
|
||||||
extern int32_t tsMaxRetryWaitTime;
|
extern int32_t tsMaxRetryWaitTime;
|
||||||
extern bool tsUseAdapter;
|
extern bool tsUseAdapter;
|
||||||
|
extern int32_t tsMetaCacheMaxSize;
|
||||||
extern int32_t tsSlowLogThreshold;
|
extern int32_t tsSlowLogThreshold;
|
||||||
extern int32_t tsSlowLogScope;
|
extern int32_t tsSlowLogScope;
|
||||||
|
|
||||||
|
@ -193,7 +194,7 @@ struct SConfig *taosGetCfg();
|
||||||
|
|
||||||
void taosSetAllDebugFlag(int32_t flag, bool rewrite);
|
void taosSetAllDebugFlag(int32_t flag, bool rewrite);
|
||||||
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
|
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
|
||||||
int32_t taosSetCfg(SConfig *pCfg, char *name);
|
int32_t taosApplyLocalCfg(SConfig *pCfg, char *name);
|
||||||
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
|
void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
|
|
|
@ -1642,6 +1642,7 @@ typedef struct {
|
||||||
char fqdn[TSDB_FQDN_LEN];
|
char fqdn[TSDB_FQDN_LEN];
|
||||||
int32_t port;
|
int32_t port;
|
||||||
int8_t force;
|
int8_t force;
|
||||||
|
int8_t unsafe;
|
||||||
} SDropDnodeReq;
|
} SDropDnodeReq;
|
||||||
|
|
||||||
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
|
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
|
||||||
|
@ -3190,7 +3191,8 @@ typedef struct {
|
||||||
char dbFName[TSDB_DB_FNAME_LEN];
|
char dbFName[TSDB_DB_FNAME_LEN];
|
||||||
uint64_t suid;
|
uint64_t suid;
|
||||||
int32_t version;
|
int32_t version;
|
||||||
SArray* pIndex;
|
int32_t indexSize;
|
||||||
|
SArray* pIndex; // STableIndexInfo
|
||||||
} STableIndexRsp;
|
} STableIndexRsp;
|
||||||
|
|
||||||
int32_t tSerializeSTableIndexRsp(void* buf, int32_t bufLen, const STableIndexRsp* pRsp);
|
int32_t tSerializeSTableIndexRsp(void* buf, int32_t bufLen, const STableIndexRsp* pRsp);
|
||||||
|
|
|
@ -70,286 +70,288 @@
|
||||||
#define TK_RESTORE 52
|
#define TK_RESTORE 52
|
||||||
#define TK_NK_IPTOKEN 53
|
#define TK_NK_IPTOKEN 53
|
||||||
#define TK_FORCE 54
|
#define TK_FORCE 54
|
||||||
#define TK_LOCAL 55
|
#define TK_UNSAFE 55
|
||||||
#define TK_QNODE 56
|
#define TK_LOCAL 56
|
||||||
#define TK_BNODE 57
|
#define TK_QNODE 57
|
||||||
#define TK_SNODE 58
|
#define TK_BNODE 58
|
||||||
#define TK_MNODE 59
|
#define TK_SNODE 59
|
||||||
#define TK_VNODE 60
|
#define TK_MNODE 60
|
||||||
#define TK_DATABASE 61
|
#define TK_VNODE 61
|
||||||
#define TK_USE 62
|
#define TK_DATABASE 62
|
||||||
#define TK_FLUSH 63
|
#define TK_USE 63
|
||||||
#define TK_TRIM 64
|
#define TK_FLUSH 64
|
||||||
#define TK_COMPACT 65
|
#define TK_TRIM 65
|
||||||
#define TK_IF 66
|
#define TK_COMPACT 66
|
||||||
#define TK_NOT 67
|
#define TK_IF 67
|
||||||
#define TK_EXISTS 68
|
#define TK_NOT 68
|
||||||
#define TK_BUFFER 69
|
#define TK_EXISTS 69
|
||||||
#define TK_CACHEMODEL 70
|
#define TK_BUFFER 70
|
||||||
#define TK_CACHESIZE 71
|
#define TK_CACHEMODEL 71
|
||||||
#define TK_COMP 72
|
#define TK_CACHESIZE 72
|
||||||
#define TK_DURATION 73
|
#define TK_COMP 73
|
||||||
#define TK_NK_VARIABLE 74
|
#define TK_DURATION 74
|
||||||
#define TK_MAXROWS 75
|
#define TK_NK_VARIABLE 75
|
||||||
#define TK_MINROWS 76
|
#define TK_MAXROWS 76
|
||||||
#define TK_KEEP 77
|
#define TK_MINROWS 77
|
||||||
#define TK_PAGES 78
|
#define TK_KEEP 78
|
||||||
#define TK_PAGESIZE 79
|
#define TK_PAGES 79
|
||||||
#define TK_TSDB_PAGESIZE 80
|
#define TK_PAGESIZE 80
|
||||||
#define TK_PRECISION 81
|
#define TK_TSDB_PAGESIZE 81
|
||||||
#define TK_REPLICA 82
|
#define TK_PRECISION 82
|
||||||
#define TK_VGROUPS 83
|
#define TK_REPLICA 83
|
||||||
#define TK_SINGLE_STABLE 84
|
#define TK_VGROUPS 84
|
||||||
#define TK_RETENTIONS 85
|
#define TK_SINGLE_STABLE 85
|
||||||
#define TK_SCHEMALESS 86
|
#define TK_RETENTIONS 86
|
||||||
#define TK_WAL_LEVEL 87
|
#define TK_SCHEMALESS 87
|
||||||
#define TK_WAL_FSYNC_PERIOD 88
|
#define TK_WAL_LEVEL 88
|
||||||
#define TK_WAL_RETENTION_PERIOD 89
|
#define TK_WAL_FSYNC_PERIOD 89
|
||||||
#define TK_WAL_RETENTION_SIZE 90
|
#define TK_WAL_RETENTION_PERIOD 90
|
||||||
#define TK_WAL_ROLL_PERIOD 91
|
#define TK_WAL_RETENTION_SIZE 91
|
||||||
#define TK_WAL_SEGMENT_SIZE 92
|
#define TK_WAL_ROLL_PERIOD 92
|
||||||
#define TK_STT_TRIGGER 93
|
#define TK_WAL_SEGMENT_SIZE 93
|
||||||
#define TK_TABLE_PREFIX 94
|
#define TK_STT_TRIGGER 94
|
||||||
#define TK_TABLE_SUFFIX 95
|
#define TK_TABLE_PREFIX 95
|
||||||
#define TK_NK_COLON 96
|
#define TK_TABLE_SUFFIX 96
|
||||||
#define TK_MAX_SPEED 97
|
#define TK_NK_COLON 97
|
||||||
#define TK_START 98
|
#define TK_MAX_SPEED 98
|
||||||
#define TK_TIMESTAMP 99
|
#define TK_START 99
|
||||||
#define TK_END 100
|
#define TK_TIMESTAMP 100
|
||||||
#define TK_TABLE 101
|
#define TK_END 101
|
||||||
#define TK_NK_LP 102
|
#define TK_TABLE 102
|
||||||
#define TK_NK_RP 103
|
#define TK_NK_LP 103
|
||||||
#define TK_STABLE 104
|
#define TK_NK_RP 104
|
||||||
#define TK_ADD 105
|
#define TK_STABLE 105
|
||||||
#define TK_COLUMN 106
|
#define TK_ADD 106
|
||||||
#define TK_MODIFY 107
|
#define TK_COLUMN 107
|
||||||
#define TK_RENAME 108
|
#define TK_MODIFY 108
|
||||||
#define TK_TAG 109
|
#define TK_RENAME 109
|
||||||
#define TK_SET 110
|
#define TK_TAG 110
|
||||||
#define TK_NK_EQ 111
|
#define TK_SET 111
|
||||||
#define TK_USING 112
|
#define TK_NK_EQ 112
|
||||||
#define TK_TAGS 113
|
#define TK_USING 113
|
||||||
#define TK_BOOL 114
|
#define TK_TAGS 114
|
||||||
#define TK_TINYINT 115
|
#define TK_BOOL 115
|
||||||
#define TK_SMALLINT 116
|
#define TK_TINYINT 116
|
||||||
#define TK_INT 117
|
#define TK_SMALLINT 117
|
||||||
#define TK_INTEGER 118
|
#define TK_INT 118
|
||||||
#define TK_BIGINT 119
|
#define TK_INTEGER 119
|
||||||
#define TK_FLOAT 120
|
#define TK_BIGINT 120
|
||||||
#define TK_DOUBLE 121
|
#define TK_FLOAT 121
|
||||||
#define TK_BINARY 122
|
#define TK_DOUBLE 122
|
||||||
#define TK_NCHAR 123
|
#define TK_BINARY 123
|
||||||
#define TK_UNSIGNED 124
|
#define TK_NCHAR 124
|
||||||
#define TK_JSON 125
|
#define TK_UNSIGNED 125
|
||||||
#define TK_VARCHAR 126
|
#define TK_JSON 126
|
||||||
#define TK_MEDIUMBLOB 127
|
#define TK_VARCHAR 127
|
||||||
#define TK_BLOB 128
|
#define TK_MEDIUMBLOB 128
|
||||||
#define TK_VARBINARY 129
|
#define TK_BLOB 129
|
||||||
#define TK_DECIMAL 130
|
#define TK_VARBINARY 130
|
||||||
#define TK_COMMENT 131
|
#define TK_GEOMETRY 131
|
||||||
#define TK_MAX_DELAY 132
|
#define TK_DECIMAL 132
|
||||||
#define TK_WATERMARK 133
|
#define TK_COMMENT 133
|
||||||
#define TK_ROLLUP 134
|
#define TK_MAX_DELAY 134
|
||||||
#define TK_TTL 135
|
#define TK_WATERMARK 135
|
||||||
#define TK_SMA 136
|
#define TK_ROLLUP 136
|
||||||
#define TK_DELETE_MARK 137
|
#define TK_TTL 137
|
||||||
#define TK_FIRST 138
|
#define TK_SMA 138
|
||||||
#define TK_LAST 139
|
#define TK_DELETE_MARK 139
|
||||||
#define TK_SHOW 140
|
#define TK_FIRST 140
|
||||||
#define TK_PRIVILEGES 141
|
#define TK_LAST 141
|
||||||
#define TK_DATABASES 142
|
#define TK_SHOW 142
|
||||||
#define TK_TABLES 143
|
#define TK_PRIVILEGES 143
|
||||||
#define TK_STABLES 144
|
#define TK_DATABASES 144
|
||||||
#define TK_MNODES 145
|
#define TK_TABLES 145
|
||||||
#define TK_QNODES 146
|
#define TK_STABLES 146
|
||||||
#define TK_FUNCTIONS 147
|
#define TK_MNODES 147
|
||||||
#define TK_INDEXES 148
|
#define TK_QNODES 148
|
||||||
#define TK_ACCOUNTS 149
|
#define TK_FUNCTIONS 149
|
||||||
#define TK_APPS 150
|
#define TK_INDEXES 150
|
||||||
#define TK_CONNECTIONS 151
|
#define TK_ACCOUNTS 151
|
||||||
#define TK_LICENCES 152
|
#define TK_APPS 152
|
||||||
#define TK_GRANTS 153
|
#define TK_CONNECTIONS 153
|
||||||
#define TK_QUERIES 154
|
#define TK_LICENCES 154
|
||||||
#define TK_SCORES 155
|
#define TK_GRANTS 155
|
||||||
#define TK_TOPICS 156
|
#define TK_QUERIES 156
|
||||||
#define TK_VARIABLES 157
|
#define TK_SCORES 157
|
||||||
#define TK_CLUSTER 158
|
#define TK_TOPICS 158
|
||||||
#define TK_BNODES 159
|
#define TK_VARIABLES 159
|
||||||
#define TK_SNODES 160
|
#define TK_CLUSTER 160
|
||||||
#define TK_TRANSACTIONS 161
|
#define TK_BNODES 161
|
||||||
#define TK_DISTRIBUTED 162
|
#define TK_SNODES 162
|
||||||
#define TK_CONSUMERS 163
|
#define TK_TRANSACTIONS 163
|
||||||
#define TK_SUBSCRIPTIONS 164
|
#define TK_DISTRIBUTED 164
|
||||||
#define TK_VNODES 165
|
#define TK_CONSUMERS 165
|
||||||
#define TK_ALIVE 166
|
#define TK_SUBSCRIPTIONS 166
|
||||||
#define TK_LIKE 167
|
#define TK_VNODES 167
|
||||||
#define TK_TBNAME 168
|
#define TK_ALIVE 168
|
||||||
#define TK_QTAGS 169
|
#define TK_LIKE 169
|
||||||
#define TK_AS 170
|
#define TK_TBNAME 170
|
||||||
#define TK_INDEX 171
|
#define TK_QTAGS 171
|
||||||
#define TK_FUNCTION 172
|
#define TK_AS 172
|
||||||
#define TK_INTERVAL 173
|
#define TK_INDEX 173
|
||||||
#define TK_COUNT 174
|
#define TK_FUNCTION 174
|
||||||
#define TK_LAST_ROW 175
|
#define TK_INTERVAL 175
|
||||||
#define TK_TOPIC 176
|
#define TK_COUNT 176
|
||||||
#define TK_META 177
|
#define TK_LAST_ROW 177
|
||||||
#define TK_CONSUMER 178
|
#define TK_TOPIC 178
|
||||||
#define TK_GROUP 179
|
#define TK_META 179
|
||||||
#define TK_DESC 180
|
#define TK_CONSUMER 180
|
||||||
#define TK_DESCRIBE 181
|
#define TK_GROUP 181
|
||||||
#define TK_RESET 182
|
#define TK_DESC 182
|
||||||
#define TK_QUERY 183
|
#define TK_DESCRIBE 183
|
||||||
#define TK_CACHE 184
|
#define TK_RESET 184
|
||||||
#define TK_EXPLAIN 185
|
#define TK_QUERY 185
|
||||||
#define TK_ANALYZE 186
|
#define TK_CACHE 186
|
||||||
#define TK_VERBOSE 187
|
#define TK_EXPLAIN 187
|
||||||
#define TK_NK_BOOL 188
|
#define TK_ANALYZE 188
|
||||||
#define TK_RATIO 189
|
#define TK_VERBOSE 189
|
||||||
#define TK_NK_FLOAT 190
|
#define TK_NK_BOOL 190
|
||||||
#define TK_OUTPUTTYPE 191
|
#define TK_RATIO 191
|
||||||
#define TK_AGGREGATE 192
|
#define TK_NK_FLOAT 192
|
||||||
#define TK_BUFSIZE 193
|
#define TK_OUTPUTTYPE 193
|
||||||
#define TK_LANGUAGE 194
|
#define TK_AGGREGATE 194
|
||||||
#define TK_REPLACE 195
|
#define TK_BUFSIZE 195
|
||||||
#define TK_STREAM 196
|
#define TK_LANGUAGE 196
|
||||||
#define TK_INTO 197
|
#define TK_REPLACE 197
|
||||||
#define TK_PAUSE 198
|
#define TK_STREAM 198
|
||||||
#define TK_RESUME 199
|
#define TK_INTO 199
|
||||||
#define TK_TRIGGER 200
|
#define TK_PAUSE 200
|
||||||
#define TK_AT_ONCE 201
|
#define TK_RESUME 201
|
||||||
#define TK_WINDOW_CLOSE 202
|
#define TK_TRIGGER 202
|
||||||
#define TK_IGNORE 203
|
#define TK_AT_ONCE 203
|
||||||
#define TK_EXPIRED 204
|
#define TK_WINDOW_CLOSE 204
|
||||||
#define TK_FILL_HISTORY 205
|
#define TK_IGNORE 205
|
||||||
#define TK_UPDATE 206
|
#define TK_EXPIRED 206
|
||||||
#define TK_SUBTABLE 207
|
#define TK_FILL_HISTORY 207
|
||||||
#define TK_UNTREATED 208
|
#define TK_UPDATE 208
|
||||||
#define TK_KILL 209
|
#define TK_SUBTABLE 209
|
||||||
#define TK_CONNECTION 210
|
#define TK_UNTREATED 210
|
||||||
#define TK_TRANSACTION 211
|
#define TK_KILL 211
|
||||||
#define TK_BALANCE 212
|
#define TK_CONNECTION 212
|
||||||
#define TK_VGROUP 213
|
#define TK_TRANSACTION 213
|
||||||
#define TK_LEADER 214
|
#define TK_BALANCE 214
|
||||||
#define TK_MERGE 215
|
#define TK_VGROUP 215
|
||||||
#define TK_REDISTRIBUTE 216
|
#define TK_LEADER 216
|
||||||
#define TK_SPLIT 217
|
#define TK_MERGE 217
|
||||||
#define TK_DELETE 218
|
#define TK_REDISTRIBUTE 218
|
||||||
#define TK_INSERT 219
|
#define TK_SPLIT 219
|
||||||
#define TK_NULL 220
|
#define TK_DELETE 220
|
||||||
#define TK_NK_QUESTION 221
|
#define TK_INSERT 221
|
||||||
#define TK_NK_ARROW 222
|
#define TK_NULL 222
|
||||||
#define TK_ROWTS 223
|
#define TK_NK_QUESTION 223
|
||||||
#define TK_QSTART 224
|
#define TK_NK_ARROW 224
|
||||||
#define TK_QEND 225
|
#define TK_ROWTS 225
|
||||||
#define TK_QDURATION 226
|
#define TK_QSTART 226
|
||||||
#define TK_WSTART 227
|
#define TK_QEND 227
|
||||||
#define TK_WEND 228
|
#define TK_QDURATION 228
|
||||||
#define TK_WDURATION 229
|
#define TK_WSTART 229
|
||||||
#define TK_IROWTS 230
|
#define TK_WEND 230
|
||||||
#define TK_ISFILLED 231
|
#define TK_WDURATION 231
|
||||||
#define TK_CAST 232
|
#define TK_IROWTS 232
|
||||||
#define TK_NOW 233
|
#define TK_ISFILLED 233
|
||||||
#define TK_TODAY 234
|
#define TK_CAST 234
|
||||||
#define TK_TIMEZONE 235
|
#define TK_NOW 235
|
||||||
#define TK_CLIENT_VERSION 236
|
#define TK_TODAY 236
|
||||||
#define TK_SERVER_VERSION 237
|
#define TK_TIMEZONE 237
|
||||||
#define TK_SERVER_STATUS 238
|
#define TK_CLIENT_VERSION 238
|
||||||
#define TK_CURRENT_USER 239
|
#define TK_SERVER_VERSION 239
|
||||||
#define TK_CASE 240
|
#define TK_SERVER_STATUS 240
|
||||||
#define TK_WHEN 241
|
#define TK_CURRENT_USER 241
|
||||||
#define TK_THEN 242
|
#define TK_CASE 242
|
||||||
#define TK_ELSE 243
|
#define TK_WHEN 243
|
||||||
#define TK_BETWEEN 244
|
#define TK_THEN 244
|
||||||
#define TK_IS 245
|
#define TK_ELSE 245
|
||||||
#define TK_NK_LT 246
|
#define TK_BETWEEN 246
|
||||||
#define TK_NK_GT 247
|
#define TK_IS 247
|
||||||
#define TK_NK_LE 248
|
#define TK_NK_LT 248
|
||||||
#define TK_NK_GE 249
|
#define TK_NK_GT 249
|
||||||
#define TK_NK_NE 250
|
#define TK_NK_LE 250
|
||||||
#define TK_MATCH 251
|
#define TK_NK_GE 251
|
||||||
#define TK_NMATCH 252
|
#define TK_NK_NE 252
|
||||||
#define TK_CONTAINS 253
|
#define TK_MATCH 253
|
||||||
#define TK_IN 254
|
#define TK_NMATCH 254
|
||||||
#define TK_JOIN 255
|
#define TK_CONTAINS 255
|
||||||
#define TK_INNER 256
|
#define TK_IN 256
|
||||||
#define TK_SELECT 257
|
#define TK_JOIN 257
|
||||||
#define TK_DISTINCT 258
|
#define TK_INNER 258
|
||||||
#define TK_WHERE 259
|
#define TK_SELECT 259
|
||||||
#define TK_PARTITION 260
|
#define TK_DISTINCT 260
|
||||||
#define TK_BY 261
|
#define TK_WHERE 261
|
||||||
#define TK_SESSION 262
|
#define TK_PARTITION 262
|
||||||
#define TK_STATE_WINDOW 263
|
#define TK_BY 263
|
||||||
#define TK_EVENT_WINDOW 264
|
#define TK_SESSION 264
|
||||||
#define TK_SLIDING 265
|
#define TK_STATE_WINDOW 265
|
||||||
#define TK_FILL 266
|
#define TK_EVENT_WINDOW 266
|
||||||
#define TK_VALUE 267
|
#define TK_SLIDING 267
|
||||||
#define TK_VALUE_F 268
|
#define TK_FILL 268
|
||||||
#define TK_NONE 269
|
#define TK_VALUE 269
|
||||||
#define TK_PREV 270
|
#define TK_VALUE_F 270
|
||||||
#define TK_NULL_F 271
|
#define TK_NONE 271
|
||||||
#define TK_LINEAR 272
|
#define TK_PREV 272
|
||||||
#define TK_NEXT 273
|
#define TK_NULL_F 273
|
||||||
#define TK_HAVING 274
|
#define TK_LINEAR 274
|
||||||
#define TK_RANGE 275
|
#define TK_NEXT 275
|
||||||
#define TK_EVERY 276
|
#define TK_HAVING 276
|
||||||
#define TK_ORDER 277
|
#define TK_RANGE 277
|
||||||
#define TK_SLIMIT 278
|
#define TK_EVERY 278
|
||||||
#define TK_SOFFSET 279
|
#define TK_ORDER 279
|
||||||
#define TK_LIMIT 280
|
#define TK_SLIMIT 280
|
||||||
#define TK_OFFSET 281
|
#define TK_SOFFSET 281
|
||||||
#define TK_ASC 282
|
#define TK_LIMIT 282
|
||||||
#define TK_NULLS 283
|
#define TK_OFFSET 283
|
||||||
#define TK_ABORT 284
|
#define TK_ASC 284
|
||||||
#define TK_AFTER 285
|
#define TK_NULLS 285
|
||||||
#define TK_ATTACH 286
|
#define TK_ABORT 286
|
||||||
#define TK_BEFORE 287
|
#define TK_AFTER 287
|
||||||
#define TK_BEGIN 288
|
#define TK_ATTACH 288
|
||||||
#define TK_BITAND 289
|
#define TK_BEFORE 289
|
||||||
#define TK_BITNOT 290
|
#define TK_BEGIN 290
|
||||||
#define TK_BITOR 291
|
#define TK_BITAND 291
|
||||||
#define TK_BLOCKS 292
|
#define TK_BITNOT 292
|
||||||
#define TK_CHANGE 293
|
#define TK_BITOR 293
|
||||||
#define TK_COMMA 294
|
#define TK_BLOCKS 294
|
||||||
#define TK_CONCAT 295
|
#define TK_CHANGE 295
|
||||||
#define TK_CONFLICT 296
|
#define TK_COMMA 296
|
||||||
#define TK_COPY 297
|
#define TK_CONCAT 297
|
||||||
#define TK_DEFERRED 298
|
#define TK_CONFLICT 298
|
||||||
#define TK_DELIMITERS 299
|
#define TK_COPY 299
|
||||||
#define TK_DETACH 300
|
#define TK_DEFERRED 300
|
||||||
#define TK_DIVIDE 301
|
#define TK_DELIMITERS 301
|
||||||
#define TK_DOT 302
|
#define TK_DETACH 302
|
||||||
#define TK_EACH 303
|
#define TK_DIVIDE 303
|
||||||
#define TK_FAIL 304
|
#define TK_DOT 304
|
||||||
#define TK_FILE 305
|
#define TK_EACH 305
|
||||||
#define TK_FOR 306
|
#define TK_FAIL 306
|
||||||
#define TK_GLOB 307
|
#define TK_FILE 307
|
||||||
#define TK_ID 308
|
#define TK_FOR 308
|
||||||
#define TK_IMMEDIATE 309
|
#define TK_GLOB 309
|
||||||
#define TK_IMPORT 310
|
#define TK_ID 310
|
||||||
#define TK_INITIALLY 311
|
#define TK_IMMEDIATE 311
|
||||||
#define TK_INSTEAD 312
|
#define TK_IMPORT 312
|
||||||
#define TK_ISNULL 313
|
#define TK_INITIALLY 313
|
||||||
#define TK_KEY 314
|
#define TK_INSTEAD 314
|
||||||
#define TK_MODULES 315
|
#define TK_ISNULL 315
|
||||||
#define TK_NK_BITNOT 316
|
#define TK_KEY 316
|
||||||
#define TK_NK_SEMI 317
|
#define TK_MODULES 317
|
||||||
#define TK_NOTNULL 318
|
#define TK_NK_BITNOT 318
|
||||||
#define TK_OF 319
|
#define TK_NK_SEMI 319
|
||||||
#define TK_PLUS 320
|
#define TK_NOTNULL 320
|
||||||
#define TK_PRIVILEGE 321
|
#define TK_OF 321
|
||||||
#define TK_RAISE 322
|
#define TK_PLUS 322
|
||||||
#define TK_RESTRICT 323
|
#define TK_PRIVILEGE 323
|
||||||
#define TK_ROW 324
|
#define TK_RAISE 324
|
||||||
#define TK_SEMI 325
|
#define TK_RESTRICT 325
|
||||||
#define TK_STAR 326
|
#define TK_ROW 326
|
||||||
#define TK_STATEMENT 327
|
#define TK_SEMI 327
|
||||||
#define TK_STRICT 328
|
#define TK_STAR 328
|
||||||
#define TK_STRING 329
|
#define TK_STATEMENT 329
|
||||||
#define TK_TIMES 330
|
#define TK_STRICT 330
|
||||||
#define TK_VALUES 331
|
#define TK_STRING 331
|
||||||
#define TK_VARIABLE 332
|
#define TK_TIMES 332
|
||||||
#define TK_VIEW 333
|
#define TK_VALUES 333
|
||||||
#define TK_WAL 334
|
#define TK_VARIABLE 334
|
||||||
|
#define TK_VIEW 335
|
||||||
|
#define TK_WAL 336
|
||||||
|
|
||||||
|
|
||||||
#define TK_NK_SPACE 600
|
#define TK_NK_SPACE 600
|
||||||
|
|
|
@ -269,7 +269,7 @@ typedef struct {
|
||||||
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
|
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
|
||||||
|
|
||||||
#define IS_VAR_DATA_TYPE(t) \
|
#define IS_VAR_DATA_TYPE(t) \
|
||||||
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
|
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
|
||||||
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
||||||
|
|
||||||
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
|
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
|
||||||
|
@ -316,6 +316,8 @@ static FORCE_INLINE bool isNull(const void *val, int32_t type) {
|
||||||
return *(uint32_t *)val == TSDB_DATA_UINT_NULL;
|
return *(uint32_t *)val == TSDB_DATA_UINT_NULL;
|
||||||
case TSDB_DATA_TYPE_UBIGINT:
|
case TSDB_DATA_TYPE_UBIGINT:
|
||||||
return *(uint64_t *)val == TSDB_DATA_UBIGINT_NULL;
|
return *(uint64_t *)val == TSDB_DATA_UBIGINT_NULL;
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
|
return varDataLen(val) == sizeof(int8_t) && *(uint8_t *)varDataVal(val) == TSDB_DATA_GEOMETRY_NULL;
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return false;
|
return false;
|
||||||
|
|
|
@ -214,7 +214,7 @@ int32_t catalogGetSTableMeta(SCatalog* pCatalog, SRequestConnInfo* pConn, const
|
||||||
|
|
||||||
int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
|
int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
|
||||||
|
|
||||||
int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
|
int32_t catalogAsyncUpdateTableMeta(SCatalog* pCtg, STableMetaRsp* pMsg);
|
||||||
|
|
||||||
int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
|
int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
|
||||||
|
|
||||||
|
|
|
@ -158,6 +158,17 @@ typedef enum EFunctionType {
|
||||||
FUNCTION_TYPE_STDDEV_PARTIAL,
|
FUNCTION_TYPE_STDDEV_PARTIAL,
|
||||||
FUNCTION_TYPE_STDDEV_MERGE,
|
FUNCTION_TYPE_STDDEV_MERGE,
|
||||||
|
|
||||||
|
// geometry functions
|
||||||
|
FUNCTION_TYPE_GEOM_FROM_TEXT = 4250,
|
||||||
|
FUNCTION_TYPE_AS_TEXT,
|
||||||
|
FUNCTION_TYPE_MAKE_POINT,
|
||||||
|
FUNCTION_TYPE_INTERSECTS,
|
||||||
|
FUNCTION_TYPE_EQUALS,
|
||||||
|
FUNCTION_TYPE_TOUCHES,
|
||||||
|
FUNCTION_TYPE_COVERS,
|
||||||
|
FUNCTION_TYPE_CONTAINS,
|
||||||
|
FUNCTION_TYPE_CONTAINS_PROPERLY,
|
||||||
|
|
||||||
// user defined funcion
|
// user defined funcion
|
||||||
FUNCTION_TYPE_UDF = 10000
|
FUNCTION_TYPE_UDF = 10000
|
||||||
} EFunctionType;
|
} EFunctionType;
|
||||||
|
|
|
@ -109,7 +109,7 @@ typedef uint16_t VarDataLenT; // maxVarDataLen: 65535
|
||||||
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
|
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
|
||||||
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
|
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
|
||||||
#define IS_VAR_DATA_TYPE(t) \
|
#define IS_VAR_DATA_TYPE(t) \
|
||||||
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
|
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
|
||||||
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
|
||||||
|
|
||||||
static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
|
static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
|
||||||
|
|
|
@ -0,0 +1,41 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef TDENGINE_GEOM_FUNC_H
|
||||||
|
#define TDENGINE_GEOM_FUNC_H
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include "function.h"
|
||||||
|
|
||||||
|
int32_t makePointFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
|
||||||
|
int32_t geomFromTextFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
int32_t asTextFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
|
||||||
|
int32_t intersectsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
int32_t equalsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
int32_t touchesFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
int32_t coversFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
int32_t containsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
int32_t containsProperlyFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif // TDENGINE_GEOM_FUNC_H
|
|
@ -0,0 +1,60 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef TDENGINE_GEOS_WRAPPER_H
|
||||||
|
#define TDENGINE_GEOS_WRAPPER_H
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <stdint.h>
|
||||||
|
#include "os.h"
|
||||||
|
|
||||||
|
#include "tgeosctx.h"
|
||||||
|
|
||||||
|
void geosFreeBuffer(void *buffer);
|
||||||
|
|
||||||
|
int32_t initCtxMakePoint();
|
||||||
|
int32_t doMakePoint(double x, double y, unsigned char **outputGeom, size_t *size);
|
||||||
|
|
||||||
|
int32_t initCtxGeomFromText();
|
||||||
|
int32_t doGeomFromText(const char *inputWKT, unsigned char **outputGeom, size_t *size);
|
||||||
|
|
||||||
|
int32_t initCtxAsText();
|
||||||
|
int32_t doAsText(const unsigned char *inputGeom, size_t size, char **outputWKT);
|
||||||
|
|
||||||
|
int32_t initCtxRelationFunc();
|
||||||
|
int32_t doIntersects(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
|
||||||
|
bool swapped, char *res);
|
||||||
|
int32_t doEquals(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
|
||||||
|
bool swapped, char *res);
|
||||||
|
int32_t doTouches(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
|
||||||
|
bool swapped, char *res);
|
||||||
|
int32_t doCovers(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
|
||||||
|
bool swapped, char *res);
|
||||||
|
int32_t doContains(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
|
||||||
|
bool swapped, char *res);
|
||||||
|
int32_t doContainsProperly(const GEOSGeometry *geom1, const GEOSPreparedGeometry *preparedGeom1, const GEOSGeometry *geom2,
|
||||||
|
bool swapped, char *res);
|
||||||
|
|
||||||
|
int32_t readGeometry(const unsigned char *input, GEOSGeometry **outputGeom, const GEOSPreparedGeometry **outputPreparedGeom);
|
||||||
|
void destroyGeometry(GEOSGeometry **geom, const GEOSPreparedGeometry **preparedGeom);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif /*TDENGINE_GEOS_WRAPPER_H*/
|
|
@ -249,6 +249,7 @@ typedef struct SDropDnodeStmt {
|
||||||
char fqdn[TSDB_FQDN_LEN];
|
char fqdn[TSDB_FQDN_LEN];
|
||||||
int32_t port;
|
int32_t port;
|
||||||
bool force;
|
bool force;
|
||||||
|
bool unsafe;
|
||||||
} SDropDnodeStmt;
|
} SDropDnodeStmt;
|
||||||
|
|
||||||
typedef struct SAlterDnodeStmt {
|
typedef struct SAlterDnodeStmt {
|
||||||
|
|
|
@ -90,28 +90,23 @@ typedef struct STbVerInfo {
|
||||||
int32_t tversion;
|
int32_t tversion;
|
||||||
} STbVerInfo;
|
} STbVerInfo;
|
||||||
|
|
||||||
/*
|
#pragma pack(push, 1)
|
||||||
* ASSERT(sizeof(SCTableMeta) == 24)
|
|
||||||
* ASSERT(tableType == TSDB_CHILD_TABLE)
|
|
||||||
* The cached child table meta info. For each child table, 24 bytes are required to keep the essential table info.
|
|
||||||
*/
|
|
||||||
typedef struct SCTableMeta {
|
typedef struct SCTableMeta {
|
||||||
int32_t vgId : 24;
|
|
||||||
int8_t tableType;
|
|
||||||
uint64_t uid;
|
uint64_t uid;
|
||||||
uint64_t suid;
|
uint64_t suid;
|
||||||
|
int32_t vgId;
|
||||||
|
int8_t tableType;
|
||||||
} SCTableMeta;
|
} SCTableMeta;
|
||||||
|
#pragma pack(pop)
|
||||||
|
|
||||||
/*
|
|
||||||
* Note that the first 24 bytes of STableMeta are identical to SCTableMeta, it is safe to cast a STableMeta to be a
|
#pragma pack(push, 1)
|
||||||
* SCTableMeta.
|
|
||||||
*/
|
|
||||||
typedef struct STableMeta {
|
typedef struct STableMeta {
|
||||||
// BEGIN: KEEP THIS PART SAME WITH SCTableMeta
|
// BEGIN: KEEP THIS PART SAME WITH SCTableMeta
|
||||||
int32_t vgId : 24;
|
|
||||||
int8_t tableType;
|
|
||||||
uint64_t uid;
|
uint64_t uid;
|
||||||
uint64_t suid;
|
uint64_t suid;
|
||||||
|
int32_t vgId;
|
||||||
|
int8_t tableType;
|
||||||
// END: KEEP THIS PART SAME WITH SCTableMeta
|
// END: KEEP THIS PART SAME WITH SCTableMeta
|
||||||
|
|
||||||
// if the table is TSDB_CHILD_TABLE, the following information is acquired from the corresponding super table meta
|
// if the table is TSDB_CHILD_TABLE, the following information is acquired from the corresponding super table meta
|
||||||
|
@ -121,6 +116,7 @@ typedef struct STableMeta {
|
||||||
STableComInfo tableInfo;
|
STableComInfo tableInfo;
|
||||||
SSchema schema[];
|
SSchema schema[];
|
||||||
} STableMeta;
|
} STableMeta;
|
||||||
|
#pragma pack(pop)
|
||||||
|
|
||||||
typedef struct SDBVgInfo {
|
typedef struct SDBVgInfo {
|
||||||
int32_t vgVersion;
|
int32_t vgVersion;
|
||||||
|
@ -130,7 +126,7 @@ typedef struct SDBVgInfo {
|
||||||
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
|
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
|
||||||
int64_t stateTs;
|
int64_t stateTs;
|
||||||
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
|
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
|
||||||
SArray* vgArray;
|
SArray* vgArray; // SVgroupInfo
|
||||||
} SDBVgInfo;
|
} SDBVgInfo;
|
||||||
|
|
||||||
typedef struct SUseDbOutput {
|
typedef struct SUseDbOutput {
|
||||||
|
|
|
@ -446,6 +446,7 @@ int32_t* taosGetErrno();
|
||||||
#define TSDB_CODE_VND_NOT_CATCH_UP TAOS_DEF_ERROR_CODE(0, 0x0532) // internal
|
#define TSDB_CODE_VND_NOT_CATCH_UP TAOS_DEF_ERROR_CODE(0, 0x0532) // internal
|
||||||
#define TSDB_CODE_VND_ALREADY_IS_VOTER TAOS_DEF_ERROR_CODE(0, 0x0533) // internal
|
#define TSDB_CODE_VND_ALREADY_IS_VOTER TAOS_DEF_ERROR_CODE(0, 0x0533) // internal
|
||||||
#define TSDB_CODE_VND_DIR_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0534)
|
#define TSDB_CODE_VND_DIR_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0534)
|
||||||
|
#define TSDB_CODE_VND_META_DATA_UNSAFE_DELETE TAOS_DEF_ERROR_CODE(0, 0x0535)
|
||||||
|
|
||||||
// tsdb
|
// tsdb
|
||||||
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
|
||||||
|
|
|
@ -32,7 +32,7 @@ extern "C" {
|
||||||
#define TD_VER_MAX UINT64_MAX // TODO: use the real max version from query handle
|
#define TD_VER_MAX UINT64_MAX // TODO: use the real max version from query handle
|
||||||
|
|
||||||
// Bytes for each type.
|
// Bytes for each type.
|
||||||
extern const int32_t TYPE_BYTES[16];
|
extern const int32_t TYPE_BYTES[17];
|
||||||
|
|
||||||
// TODO: replace and remove code below
|
// TODO: replace and remove code below
|
||||||
#define CHAR_BYTES sizeof(char)
|
#define CHAR_BYTES sizeof(char)
|
||||||
|
@ -53,10 +53,11 @@ extern const int32_t TYPE_BYTES[16];
|
||||||
#define TSDB_DATA_BIGINT_NULL 0x8000000000000000LL
|
#define TSDB_DATA_BIGINT_NULL 0x8000000000000000LL
|
||||||
#define TSDB_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL
|
#define TSDB_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL
|
||||||
|
|
||||||
#define TSDB_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN
|
#define TSDB_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN
|
||||||
#define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000LL // an NAN
|
#define TSDB_DATA_DOUBLE_NULL 0x7FFFFF0000000000LL // an NAN
|
||||||
#define TSDB_DATA_NCHAR_NULL 0xFFFFFFFF
|
#define TSDB_DATA_NCHAR_NULL 0xFFFFFFFF
|
||||||
#define TSDB_DATA_BINARY_NULL 0xFF
|
#define TSDB_DATA_BINARY_NULL 0xFF
|
||||||
|
#define TSDB_DATA_GEOMETRY_NULL 0xFF
|
||||||
|
|
||||||
#define TSDB_DATA_UTINYINT_NULL 0xFF
|
#define TSDB_DATA_UTINYINT_NULL 0xFF
|
||||||
#define TSDB_DATA_USMALLINT_NULL 0xFFFF
|
#define TSDB_DATA_USMALLINT_NULL 0xFFFF
|
||||||
|
@ -253,6 +254,7 @@ typedef enum ELogicConditionType {
|
||||||
#define TSDB_IPv4ADDR_LEN 16
|
#define TSDB_IPv4ADDR_LEN 16
|
||||||
#define TSDB_FILENAME_LEN 128
|
#define TSDB_FILENAME_LEN 128
|
||||||
#define TSDB_SHOW_SQL_LEN 2048
|
#define TSDB_SHOW_SQL_LEN 2048
|
||||||
|
#define TSDB_SHOW_SCHEMA_JSON_LEN TSDB_MAX_COLUMNS * 256
|
||||||
#define TSDB_SLOW_QUERY_SQL_LEN 512
|
#define TSDB_SLOW_QUERY_SQL_LEN 512
|
||||||
#define TSDB_SHOW_SUBQUERY_LEN 1000
|
#define TSDB_SHOW_SUBQUERY_LEN 1000
|
||||||
|
|
||||||
|
@ -267,8 +269,8 @@ typedef enum ELogicConditionType {
|
||||||
#define TSDB_DNODE_CONFIG_LEN 128
|
#define TSDB_DNODE_CONFIG_LEN 128
|
||||||
#define TSDB_DNODE_VALUE_LEN 256
|
#define TSDB_DNODE_VALUE_LEN 256
|
||||||
|
|
||||||
#define TSDB_ACTIVE_KEY_LEN 109 // history 109:?
|
#define TSDB_ACTIVE_KEY_LEN 109
|
||||||
#define TSDB_CONN_ACTIVE_KEY_LEN 257 // history 257:?
|
#define TSDB_CONN_ACTIVE_KEY_LEN 255
|
||||||
|
|
||||||
#define TSDB_DEFAULT_PKT_SIZE 65480 // same as RPC_MAX_UDP_SIZE
|
#define TSDB_DEFAULT_PKT_SIZE 65480 // same as RPC_MAX_UDP_SIZE
|
||||||
|
|
||||||
|
@ -410,6 +412,8 @@ typedef enum ELogicConditionType {
|
||||||
#define TSDB_MAX_FIELD_LEN 65519 // 16384:65519
|
#define TSDB_MAX_FIELD_LEN 65519 // 16384:65519
|
||||||
#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
||||||
#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
||||||
|
#define TSDB_MAX_GEOMETRY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
|
||||||
|
|
||||||
#define PRIMARYKEY_TIMESTAMP_COL_ID 1
|
#define PRIMARYKEY_TIMESTAMP_COL_ID 1
|
||||||
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))
|
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,44 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||||
|
*
|
||||||
|
* This program is free software: you can use, redistribute, and/or modify
|
||||||
|
* it under the terms of the GNU Affero General Public License, version 3
|
||||||
|
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||||
|
*
|
||||||
|
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||||
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||||
|
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||||
|
*
|
||||||
|
* You should have received a copy of the GNU Affero General Public License
|
||||||
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _TD_UTIL_GEOS_CTX_H_
|
||||||
|
#define _TD_UTIL_GEOS_CTX_H_
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <geos_c.h>
|
||||||
|
|
||||||
|
typedef struct SGeosContext {
|
||||||
|
GEOSContextHandle_t handle;
|
||||||
|
|
||||||
|
GEOSWKTReader *WKTReader;
|
||||||
|
GEOSWKTWriter *WKTWriter;
|
||||||
|
|
||||||
|
GEOSWKBReader *WKBReader;
|
||||||
|
GEOSWKBWriter *WKBWriter;
|
||||||
|
|
||||||
|
char errMsg[512];
|
||||||
|
} SGeosContext;
|
||||||
|
|
||||||
|
SGeosContext* getThreadLocalGeosCtx();
|
||||||
|
void destroyThreadLocalGeosCtx();
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif /*_TD_UTIL_GEOS_CTX_H_*/
|
|
@ -123,6 +123,16 @@ else
|
||||||
echo "Unknown cpuType: ${cpuType}"
|
echo "Unknown cpuType: ${cpuType}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
# check the tdengine cloud base image existed or not
|
||||||
|
if [ "$cloudBuild" == "y" ]; then
|
||||||
|
CloudBase=$(docker images | grep tdengine/tdengine-cloud-base ||:)
|
||||||
|
if [[ "$CloudBase" == "" ]]; then
|
||||||
|
echo "Rebuild tdengine cloud base image..."
|
||||||
|
docker build --rm -f "${communityDir}/packaging/docker/DockerfileCloud.base" -t tdengine/tdengine-cloud-base "." --build-arg cpuType=${cpuTypeAlias}
|
||||||
|
else
|
||||||
|
echo "Already found tdengine cloud base image"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
|
docker build --rm -f "${Dockerfile}" --network=host -t tdengine/tdengine-${dockername}:${version} "." --build-arg pkgFile=${pkgFile} --build-arg dirName=${dirName} --build-arg cpuType=${cpuTypeAlias}
|
||||||
if [ "$cloudBuild" != "y" ]; then
|
if [ "$cloudBuild" != "y" ]; then
|
||||||
|
|
|
@ -175,7 +175,7 @@ if [ "$pagMode" == "lite" ]; then
|
||||||
fi
|
fi
|
||||||
chmod a+x ${install_dir}/install_client.sh
|
chmod a+x ${install_dir}/install_client.sh
|
||||||
|
|
||||||
if [[ $productName == "TDengine" ]]; then
|
if [[ $productName == "TDengine" ]] && [ "$verMode" != "cloud" ]; then
|
||||||
# Copy example code
|
# Copy example code
|
||||||
mkdir -p ${install_dir}/examples
|
mkdir -p ${install_dir}/examples
|
||||||
examples_dir="${top_dir}/examples"
|
examples_dir="${top_dir}/examples"
|
||||||
|
@ -191,7 +191,7 @@ if [[ $productName == "TDengine" ]]; then
|
||||||
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
|
if [ "$verMode" == "cluster" ]; then
|
||||||
# Copy connector
|
# Copy connector
|
||||||
connector_dir="${code_dir}/connector"
|
connector_dir="${code_dir}/connector"
|
||||||
mkdir -p ${install_dir}/connector
|
mkdir -p ${install_dir}/connector
|
||||||
|
|
|
@ -69,25 +69,30 @@ if [ "$pagMode" == "lite" ]; then
|
||||||
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark "
|
bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark "
|
||||||
taostools_bin_files=""
|
taostools_bin_files=""
|
||||||
else
|
else
|
||||||
|
if [ "$verMode" == "cloud" ]; then
|
||||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
|
taostools_bin_files=" ${build_dir}/bin/taosdump \
|
||||||
|
${build_dir}/bin/taosBenchmark"
|
||||||
|
else
|
||||||
|
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
|
||||||
&& echo "TDinsight.sh downloaded!" \
|
&& echo "TDinsight.sh downloaded!" \
|
||||||
|| echo "failed to download TDinsight.sh"
|
|| echo "failed to download TDinsight.sh"
|
||||||
# download TDinsight caches
|
# download TDinsight caches
|
||||||
orig_pwd=$(pwd)
|
orig_pwd=$(pwd)
|
||||||
tdinsight_caches=""
|
tdinsight_caches=""
|
||||||
cd ${build_dir}/bin/ && \
|
cd ${build_dir}/bin/ && \
|
||||||
chmod +x TDinsight.sh
|
chmod +x TDinsight.sh
|
||||||
./TDinsight.sh --download-only ||:
|
./TDinsight.sh --download-only ||:
|
||||||
# tdinsight_caches=$(./TDinsight.sh --download-only | xargs -I printf "${build_dir}/bin/{} ")
|
# tdinsight_caches=$(./TDinsight.sh --download-only | xargs -I printf "${build_dir}/bin/{} ")
|
||||||
cd $orig_pwd
|
cd $orig_pwd
|
||||||
echo "TDinsight caches: $tdinsight_caches"
|
echo "TDinsight caches: $tdinsight_caches"
|
||||||
|
|
||||||
taostools_bin_files=" ${build_dir}/bin/taosdump \
|
taostools_bin_files=" ${build_dir}/bin/taosdump \
|
||||||
${build_dir}/bin/taosBenchmark \
|
${build_dir}/bin/taosBenchmark \
|
||||||
${build_dir}/bin/TDinsight.sh \
|
${build_dir}/bin/TDinsight.sh \
|
||||||
${build_dir}/bin/tdengine-datasource.zip \
|
${build_dir}/bin/tdengine-datasource.zip \
|
||||||
${build_dir}/bin/tdengine-datasource.zip.md5sum"
|
${build_dir}/bin/tdengine-datasource.zip.md5sum"
|
||||||
|
fi
|
||||||
|
|
||||||
[ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"
|
[ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"
|
||||||
explorer_bin_files=$(find ${build_dir}/bin/ -name '*-explorer')
|
explorer_bin_files=$(find ${build_dir}/bin/ -name '*-explorer')
|
||||||
|
|
||||||
|
@ -334,14 +339,14 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo
|
||||||
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
|
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
|
||||||
|
|
||||||
# Copy connector
|
# Copy connector
|
||||||
if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
|
if [ "$verMode" == "cluster" ]; then
|
||||||
connector_dir="${code_dir}/connector"
|
connector_dir="${code_dir}/connector"
|
||||||
mkdir -p ${install_dir}/connector
|
mkdir -p ${install_dir}/connector
|
||||||
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
|
||||||
tmp_pwd=`pwd`
|
tmp_pwd=`pwd`
|
||||||
cd ${install_dir}/connector
|
cd ${install_dir}/connector
|
||||||
if [ ! -d taos-connector-jdbc ];then
|
if [ ! -d taos-connector-jdbc ];then
|
||||||
git clone -b 3.1.0 --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
|
git clone -b 3.2.1 --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
|
||||||
fi
|
fi
|
||||||
cd taos-connector-jdbc
|
cd taos-connector-jdbc
|
||||||
mvn clean package -Dmaven.test.skip=true
|
mvn clean package -Dmaven.test.skip=true
|
||||||
|
@ -424,7 +429,7 @@ if [ "$exitcode" != "0" ]; then
|
||||||
exit $exitcode
|
exit $exitcode
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -n "${taostools_bin_files}" ]; then
|
if [ -n "${taostools_bin_files}" ] && [ "$verMode" != "cloud" ]; then
|
||||||
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!"|| echo "failed to download TDinsight.sh"
|
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${taostools_install_dir}/bin/TDinsight.sh && echo "TDinsight.sh downloaded!"|| echo "failed to download TDinsight.sh"
|
||||||
if [ "$osType" != "Darwin" ]; then
|
if [ "$osType" != "Darwin" ]; then
|
||||||
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
|
tar -zcv -f "$(basename ${taostools_pkg_name}).tar.gz" "$(basename ${taostools_install_dir})" --remove-files || :
|
||||||
|
|
|
@ -232,9 +232,9 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *
|
||||||
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
|
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
|
||||||
//SArray *smlJsonParseTags(char *start, char *end);
|
//SArray *smlJsonParseTags(char *start, char *end);
|
||||||
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
|
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
|
||||||
void* nodeListGet(NodeList* list, const void *key, int32_t len, _equal_fn_sml fn);
|
//void* nodeListGet(NodeList* list, const void *key, int32_t len, _equal_fn_sml fn);
|
||||||
int nodeListSet(NodeList** list, const void *key, int32_t len, void* value, _equal_fn_sml fn);
|
//int nodeListSet(NodeList** list, const void *key, int32_t len, void* value, _equal_fn_sml fn);
|
||||||
int nodeListSize(NodeList* list);
|
//int nodeListSize(NodeList* list);
|
||||||
bool smlDoubleToInt64OverFlow(double num);
|
bool smlDoubleToInt64OverFlow(double num);
|
||||||
int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
|
int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
|
||||||
bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
|
bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
|
||||||
|
|
|
@ -656,7 +656,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
|
||||||
} else {
|
} else {
|
||||||
tscInfo("set cfg:%s to %s", pItem->name, str);
|
tscInfo("set cfg:%s to %s", pItem->name, str);
|
||||||
if (TSDB_OPTION_SHELL_ACTIVITY_TIMER == option || TSDB_OPTION_USE_ADAPTER == option) {
|
if (TSDB_OPTION_SHELL_ACTIVITY_TIMER == option || TSDB_OPTION_USE_ADAPTER == option) {
|
||||||
code = taosSetCfg(pCfg, pItem->name);
|
code = taosApplyLocalCfg(pCfg, pItem->name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -224,7 +224,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
|
||||||
return TSDB_CODE_TSC_INVALID_VALUE;
|
return TSDB_CODE_TSC_INVALID_VALUE;
|
||||||
}
|
}
|
||||||
|
|
||||||
catalogUpdateTableMeta(pCatalog, rsp);
|
catalogAsyncUpdateTableMeta(pCatalog, rsp);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -491,7 +491,8 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
|
||||||
pResInfo->userFields[i].bytes = pSchema[i].bytes;
|
pResInfo->userFields[i].bytes = pSchema[i].bytes;
|
||||||
pResInfo->userFields[i].type = pSchema[i].type;
|
pResInfo->userFields[i].type = pSchema[i].type;
|
||||||
|
|
||||||
if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR) {
|
if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR ||
|
||||||
|
pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE;
|
pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE;
|
||||||
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
|
} else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
|
||||||
pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
|
@ -815,7 +816,7 @@ int32_t handleAlterTbExecRes(void* res, SCatalog* pCatalog) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) {
|
int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) {
|
||||||
return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res);
|
return catalogAsyncUpdateTableMeta(pCatalog, (STableMetaRsp*)res);
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t handleQueryExecRsp(SRequestObj* pRequest) {
|
int32_t handleQueryExecRsp(SRequestObj* pRequest) {
|
||||||
|
|
|
@ -579,7 +579,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
|
||||||
dv = GET_DOUBLE_VAL(row[i]);
|
dv = GET_DOUBLE_VAL(row[i]);
|
||||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
|
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetDoubleFp, i, (jdouble)dv);
|
||||||
} break;
|
} break;
|
||||||
case TSDB_DATA_TYPE_BINARY: {
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
memcpy(tmp, row[i], length[i]); // handle the case that terminated does not exist
|
memcpy(tmp, row[i], length[i]); // handle the case that terminated does not exist
|
||||||
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp));
|
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetStringFp, i, (*env)->NewStringUTF(env, tmp));
|
||||||
|
|
||||||
|
|
|
@ -393,9 +393,10 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
|
||||||
} break;
|
} break;
|
||||||
|
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
case TSDB_DATA_TYPE_NCHAR: {
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
|
||||||
if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
|
if (fields[i].type == TSDB_DATA_TYPE_BINARY || fields[i].type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
if (ASSERT(charLen <= fields[i].bytes && charLen >= 0)) {
|
if (ASSERT(charLen <= fields[i].bytes && charLen >= 0)) {
|
||||||
tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
|
tscError("taos_print_row error binary. charLen:%d, fields[i].bytes:%d", charLen, fields[i].bytes);
|
||||||
}
|
}
|
||||||
|
@ -475,6 +476,8 @@ const char *taos_data_type(int type) {
|
||||||
return "TSDB_DATA_TYPE_NCHAR";
|
return "TSDB_DATA_TYPE_NCHAR";
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
return "TSDB_DATA_TYPE_JSON";
|
return "TSDB_DATA_TYPE_JSON";
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
|
return "TSDB_DATA_TYPE_GEOMETRY";
|
||||||
case TSDB_DATA_TYPE_UTINYINT:
|
case TSDB_DATA_TYPE_UTINYINT:
|
||||||
return "TSDB_DATA_TYPE_UTINYINT";
|
return "TSDB_DATA_TYPE_UTINYINT";
|
||||||
case TSDB_DATA_TYPE_USMALLINT:
|
case TSDB_DATA_TYPE_USMALLINT:
|
||||||
|
|
|
@ -56,7 +56,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
|
||||||
cJSON_AddItemToObject(column, "name", cname);
|
cJSON_AddItemToObject(column, "name", cname);
|
||||||
cJSON* ctype = cJSON_CreateNumber(s->type);
|
cJSON* ctype = cJSON_CreateNumber(s->type);
|
||||||
cJSON_AddItemToObject(column, "type", ctype);
|
cJSON_AddItemToObject(column, "type", ctype);
|
||||||
if (s->type == TSDB_DATA_TYPE_BINARY) {
|
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(column, "length", cbytes);
|
cJSON_AddItemToObject(column, "length", cbytes);
|
||||||
|
@ -77,7 +77,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
|
||||||
cJSON_AddItemToObject(tag, "name", tname);
|
cJSON_AddItemToObject(tag, "name", tname);
|
||||||
cJSON* ttype = cJSON_CreateNumber(s->type);
|
cJSON* ttype = cJSON_CreateNumber(s->type);
|
||||||
cJSON_AddItemToObject(tag, "type", ttype);
|
cJSON_AddItemToObject(tag, "type", ttype);
|
||||||
if (s->type == TSDB_DATA_TYPE_BINARY) {
|
if (s->type == TSDB_DATA_TYPE_BINARY || s->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = s->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(tag, "length", cbytes);
|
cJSON_AddItemToObject(tag, "length", cbytes);
|
||||||
|
@ -130,7 +130,7 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
|
||||||
cJSON* colType = cJSON_CreateNumber(field->type);
|
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||||
cJSON_AddItemToObject(json, "colType", colType);
|
cJSON_AddItemToObject(json, "colType", colType);
|
||||||
|
|
||||||
if (field->type == TSDB_DATA_TYPE_BINARY) {
|
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||||
|
@ -155,7 +155,7 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
|
||||||
cJSON_AddItemToObject(json, "colName", colName);
|
cJSON_AddItemToObject(json, "colName", colName);
|
||||||
cJSON* colType = cJSON_CreateNumber(field->type);
|
cJSON* colType = cJSON_CreateNumber(field->type);
|
||||||
cJSON_AddItemToObject(json, "colType", colType);
|
cJSON_AddItemToObject(json, "colType", colType);
|
||||||
if (field->type == TSDB_DATA_TYPE_BINARY) {
|
if (field->type == TSDB_DATA_TYPE_BINARY || field->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
int32_t length = field->bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||||
|
@ -457,7 +457,7 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
||||||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.type);
|
||||||
cJSON_AddItemToObject(json, "colType", colType);
|
cJSON_AddItemToObject(json, "colType", colType);
|
||||||
|
|
||||||
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY) {
|
if (vAlterTbReq.type == TSDB_DATA_TYPE_BINARY || vAlterTbReq.type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
int32_t length = vAlterTbReq.bytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||||
|
@ -478,7 +478,7 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
|
||||||
cJSON_AddItemToObject(json, "colName", colName);
|
cJSON_AddItemToObject(json, "colName", colName);
|
||||||
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
|
cJSON* colType = cJSON_CreateNumber(vAlterTbReq.colModType);
|
||||||
cJSON_AddItemToObject(json, "colType", colType);
|
cJSON_AddItemToObject(json, "colType", colType);
|
||||||
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY) {
|
if (vAlterTbReq.colModType == TSDB_DATA_TYPE_BINARY || vAlterTbReq.colModType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
|
int32_t length = vAlterTbReq.colModBytes - VARSTR_HEADER_SIZE;
|
||||||
cJSON* cbytes = cJSON_CreateNumber(length);
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
cJSON_AddItemToObject(json, "colLength", cbytes);
|
cJSON_AddItemToObject(json, "colLength", cbytes);
|
||||||
|
|
|
@ -24,72 +24,91 @@ int64_t smlToMilli[3] = {3600000LL, 60000LL, 1000LL};
|
||||||
int64_t smlFactorNS[3] = {NANOSECOND_PER_MSEC, NANOSECOND_PER_USEC, 1};
|
int64_t smlFactorNS[3] = {NANOSECOND_PER_MSEC, NANOSECOND_PER_USEC, 1};
|
||||||
int64_t smlFactorS[3] = {1000LL, 1000000LL, 1000000000LL};
|
int64_t smlFactorS[3] = {1000LL, 1000000LL, 1000000000LL};
|
||||||
|
|
||||||
void *nodeListGet(NodeList *list, const void *key, int32_t len, _equal_fn_sml fn) {
|
//void *nodeListGet(NodeList *list, const void *key, int32_t len, _equal_fn_sml fn) {
|
||||||
NodeList *tmp = list;
|
// NodeList *tmp = list;
|
||||||
while (tmp) {
|
// while (tmp) {
|
||||||
if (fn == NULL) {
|
// if (fn == NULL) {
|
||||||
if (tmp->data.used && tmp->data.keyLen == len && memcmp(tmp->data.key, key, len) == 0) {
|
// if (tmp->data.used && tmp->data.keyLen == len && memcmp(tmp->data.key, key, len) == 0) {
|
||||||
return tmp->data.value;
|
// return tmp->data.value;
|
||||||
}
|
// }
|
||||||
} else {
|
// } else {
|
||||||
if (tmp->data.used && fn(tmp->data.key, key) == 0) {
|
// if (tmp->data.used && fn(tmp->data.key, key) == 0) {
|
||||||
return tmp->data.value;
|
// return tmp->data.value;
|
||||||
}
|
// }
|
||||||
}
|
// }
|
||||||
|
//
|
||||||
|
// tmp = tmp->next;
|
||||||
|
// }
|
||||||
|
// return NULL;
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//int nodeListSet(NodeList **list, const void *key, int32_t len, void *value, _equal_fn_sml fn) {
|
||||||
|
// NodeList *tmp = *list;
|
||||||
|
// while (tmp) {
|
||||||
|
// if (!tmp->data.used) break;
|
||||||
|
// if (fn == NULL) {
|
||||||
|
// if (tmp->data.keyLen == len && memcmp(tmp->data.key, key, len) == 0) {
|
||||||
|
// return -1;
|
||||||
|
// }
|
||||||
|
// } else {
|
||||||
|
// if (tmp->data.keyLen == len && fn(tmp->data.key, key) == 0) {
|
||||||
|
// return -1;
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// tmp = tmp->next;
|
||||||
|
// }
|
||||||
|
// if (tmp) {
|
||||||
|
// tmp->data.key = key;
|
||||||
|
// tmp->data.keyLen = len;
|
||||||
|
// tmp->data.value = value;
|
||||||
|
// tmp->data.used = true;
|
||||||
|
// } else {
|
||||||
|
// NodeList *newNode = (NodeList *)taosMemoryCalloc(1, sizeof(NodeList));
|
||||||
|
// if (newNode == NULL) {
|
||||||
|
// return -1;
|
||||||
|
// }
|
||||||
|
// newNode->data.key = key;
|
||||||
|
// newNode->data.keyLen = len;
|
||||||
|
// newNode->data.value = value;
|
||||||
|
// newNode->data.used = true;
|
||||||
|
// newNode->next = *list;
|
||||||
|
// *list = newNode;
|
||||||
|
// }
|
||||||
|
// return 0;
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//int nodeListSize(NodeList *list) {
|
||||||
|
// int cnt = 0;
|
||||||
|
// while (list) {
|
||||||
|
// if (list->data.used)
|
||||||
|
// cnt++;
|
||||||
|
// else
|
||||||
|
// break;
|
||||||
|
// list = list->next;
|
||||||
|
// }
|
||||||
|
// return cnt;
|
||||||
|
//}
|
||||||
|
|
||||||
tmp = tmp->next;
|
static int32_t smlCheckAuth(SSmlHandle *info, SRequestConnInfo* conn, const char* pTabName, AUTH_TYPE type){
|
||||||
}
|
SUserAuthInfo pAuth = {0};
|
||||||
return NULL;
|
snprintf(pAuth.user, sizeof(pAuth.user), "%s", info->taos->user);
|
||||||
}
|
if (NULL == pTabName) {
|
||||||
|
tNameSetDbName(&pAuth.tbName, info->taos->acctId, info->pRequest->pDb, strlen(info->pRequest->pDb));
|
||||||
int nodeListSet(NodeList **list, const void *key, int32_t len, void *value, _equal_fn_sml fn) {
|
|
||||||
NodeList *tmp = *list;
|
|
||||||
while (tmp) {
|
|
||||||
if (!tmp->data.used) break;
|
|
||||||
if (fn == NULL) {
|
|
||||||
if (tmp->data.keyLen == len && memcmp(tmp->data.key, key, len) == 0) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if (tmp->data.keyLen == len && fn(tmp->data.key, key) == 0) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
tmp = tmp->next;
|
|
||||||
}
|
|
||||||
if (tmp) {
|
|
||||||
tmp->data.key = key;
|
|
||||||
tmp->data.keyLen = len;
|
|
||||||
tmp->data.value = value;
|
|
||||||
tmp->data.used = true;
|
|
||||||
} else {
|
} else {
|
||||||
NodeList *newNode = (NodeList *)taosMemoryCalloc(1, sizeof(NodeList));
|
toName(info->taos->acctId, info->pRequest->pDb, pTabName, &pAuth.tbName);
|
||||||
if (newNode == NULL) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
newNode->data.key = key;
|
|
||||||
newNode->data.keyLen = len;
|
|
||||||
newNode->data.value = value;
|
|
||||||
newNode->data.used = true;
|
|
||||||
newNode->next = *list;
|
|
||||||
*list = newNode;
|
|
||||||
}
|
}
|
||||||
return 0;
|
pAuth.type = type;
|
||||||
}
|
|
||||||
|
|
||||||
int nodeListSize(NodeList *list) {
|
int32_t code = TSDB_CODE_SUCCESS;
|
||||||
int cnt = 0;
|
SUserAuthRes authRes = {0};
|
||||||
while (list) {
|
|
||||||
if (list->data.used)
|
|
||||||
cnt++;
|
|
||||||
else
|
|
||||||
break;
|
|
||||||
list = list->next;
|
|
||||||
}
|
|
||||||
return cnt;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
code = catalogChkAuth(info->pCatalog, conn, &pAuth, &authRes);
|
||||||
|
|
||||||
|
|
||||||
|
return (code == TSDB_CODE_SUCCESS) ? (authRes.pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;
|
||||||
|
|
||||||
|
}
|
||||||
inline bool smlDoubleToInt64OverFlow(double num) {
|
inline bool smlDoubleToInt64OverFlow(double num) {
|
||||||
if (num >= (double)INT64_MAX || num <= (double)INT64_MIN) return true;
|
if (num >= (double)INT64_MAX || num <= (double)INT64_MIN) return true;
|
||||||
return false;
|
return false;
|
||||||
|
@ -555,7 +574,7 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
|
||||||
return TSDB_CODE_SML_INVALID_DATA;
|
return TSDB_CODE_SML_INVALID_DATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((colField[*index].type == TSDB_DATA_TYPE_VARCHAR &&
|
if (((colField[*index].type == TSDB_DATA_TYPE_VARCHAR || colField[*index].type == TSDB_DATA_TYPE_GEOMETRY) &&
|
||||||
(colField[*index].bytes - VARSTR_HEADER_SIZE) < kv->length) ||
|
(colField[*index].bytes - VARSTR_HEADER_SIZE) < kv->length) ||
|
||||||
(colField[*index].type == TSDB_DATA_TYPE_NCHAR &&
|
(colField[*index].type == TSDB_DATA_TYPE_NCHAR &&
|
||||||
((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) {
|
((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) {
|
||||||
|
@ -586,7 +605,7 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (type == TSDB_DATA_TYPE_BINARY && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
|
if ((type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_GEOMETRY) && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
|
||||||
result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE;
|
result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE;
|
||||||
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
|
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
|
||||||
result = (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
result = (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
|
@ -594,7 +613,7 @@ static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
|
||||||
|
|
||||||
if (type == TSDB_DATA_TYPE_NCHAR) {
|
if (type == TSDB_DATA_TYPE_NCHAR) {
|
||||||
result = result * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
|
result = result * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE;
|
||||||
} else if (type == TSDB_DATA_TYPE_BINARY) {
|
} else if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
result = result + VARSTR_HEADER_SIZE;
|
result = result + VARSTR_HEADER_SIZE;
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
|
@ -638,7 +657,7 @@ static int32_t smlCheckMeta(SSchema *schema, int32_t length, SArray *cols, bool
|
||||||
}
|
}
|
||||||
|
|
||||||
static int32_t getBytes(uint8_t type, int32_t length) {
|
static int32_t getBytes(uint8_t type, int32_t length) {
|
||||||
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
|
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
return smlFindNearestPowerOf2(length, type);
|
return smlFindNearestPowerOf2(length, type);
|
||||||
} else {
|
} else {
|
||||||
return tDataTypes[type].bytes;
|
return tDataTypes[type].bytes;
|
||||||
|
@ -813,6 +832,10 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
||||||
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
|
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
|
||||||
|
|
||||||
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) {
|
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) {
|
||||||
|
code = smlCheckAuth(info, &conn, NULL, AUTH_TYPE_WRITE);
|
||||||
|
if(code != TSDB_CODE_SUCCESS){
|
||||||
|
goto end;
|
||||||
|
}
|
||||||
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas create table:%s", info->id, pName.tname);
|
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas create table:%s", info->id, pName.tname);
|
||||||
SArray *pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField));
|
SArray *pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField));
|
||||||
SArray *pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField));
|
SArray *pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField));
|
||||||
|
@ -857,6 +880,10 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
if (action != SCHEMA_ACTION_NULL) {
|
if (action != SCHEMA_ACTION_NULL) {
|
||||||
|
code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
|
||||||
|
if(code != TSDB_CODE_SUCCESS){
|
||||||
|
goto end;
|
||||||
|
}
|
||||||
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas change table tag, table:%s, action:%d", info->id, pName.tname,
|
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas change table tag, table:%s, action:%d", info->id, pName.tname,
|
||||||
action);
|
action);
|
||||||
SArray *pColumns =
|
SArray *pColumns =
|
||||||
|
@ -927,6 +954,10 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
if (action != SCHEMA_ACTION_NULL) {
|
if (action != SCHEMA_ACTION_NULL) {
|
||||||
|
code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
|
||||||
|
if(code != TSDB_CODE_SUCCESS){
|
||||||
|
goto end;
|
||||||
|
}
|
||||||
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas change table col, table:%s, action:%d", info->id, pName.tname,
|
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas change table col, table:%s, action:%d", info->id, pName.tname,
|
||||||
action);
|
action);
|
||||||
SArray *pColumns =
|
SArray *pColumns =
|
||||||
|
@ -1367,6 +1398,11 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
||||||
conn.requestObjRefId = info->pRequest->self;
|
conn.requestObjRefId = info->pRequest->self;
|
||||||
conn.mgmtEps = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
|
conn.mgmtEps = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
|
||||||
|
|
||||||
|
code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
|
||||||
|
if(code != TSDB_CODE_SUCCESS){
|
||||||
|
return code;
|
||||||
|
}
|
||||||
|
|
||||||
SVgroupInfo vg;
|
SVgroupInfo vg;
|
||||||
code = catalogGetTableHashVgroup(info->pCatalog, &conn, &pName, &vg);
|
code = catalogGetTableHashVgroup(info->pCatalog, &conn, &pName, &vg);
|
||||||
if (code != TSDB_CODE_SUCCESS) {
|
if (code != TSDB_CODE_SUCCESS) {
|
||||||
|
@ -1586,9 +1622,7 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL
|
||||||
|
|
||||||
do {
|
do {
|
||||||
code = smlModifyDBSchemas(info);
|
code = smlModifyDBSchemas(info);
|
||||||
if (code == 0 || code == TSDB_CODE_SML_INVALID_DATA || code == TSDB_CODE_PAR_TOO_MANY_COLUMNS
|
if (code != TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER && code != TSDB_CODE_SDB_OBJ_CREATING && code != TSDB_CODE_MND_TRANS_CONFLICT) {
|
||||||
|| code == TSDB_CODE_PAR_INVALID_TAGS_NUM || code == TSDB_CODE_PAR_INVALID_TAGS_LENGTH
|
|
||||||
|| code == TSDB_CODE_PAR_INVALID_ROW_LENGTH || code == TSDB_CODE_MND_FIELD_VALUE_OVERFLOW) {
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
taosMsleep(100);
|
taosMsleep(100);
|
||||||
|
|
|
@ -2584,6 +2584,14 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void tmq_free_assignment(tmq_topic_assignment* pAssignment) {
|
||||||
|
if (pAssignment == NULL) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
taosMemoryFree(pAssignment);
|
||||||
|
}
|
||||||
|
|
||||||
int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_t offset) {
|
int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_t offset) {
|
||||||
if (tmq == NULL) {
|
if (tmq == NULL) {
|
||||||
tscError("invalid tmq handle, null");
|
tscError("invalid tmq handle, null");
|
||||||
|
|
|
@ -280,7 +280,9 @@ static const SSysDbTableSchema topicSchema[] = {
|
||||||
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
|
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
|
||||||
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
|
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
|
||||||
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
|
{.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
|
||||||
// TODO config
|
{.name = "schema", .bytes = TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
|
||||||
|
{.name = "meta", .bytes = 4 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
|
||||||
|
{.name = "type", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -445,12 +445,11 @@ int32_t blockDataSplitRows(SSDataBlock* pBlock, bool hasVarCol, int32_t startInd
|
||||||
|
|
||||||
size_t headerSize = sizeof(int32_t);
|
size_t headerSize = sizeof(int32_t);
|
||||||
size_t colHeaderSize = sizeof(int32_t) * numOfCols;
|
size_t colHeaderSize = sizeof(int32_t) * numOfCols;
|
||||||
size_t payloadSize = pageSize - (headerSize + colHeaderSize);
|
|
||||||
|
|
||||||
// TODO speedup by checking if the whole page can fit in firstly.
|
// TODO speedup by checking if the whole page can fit in firstly.
|
||||||
if (!hasVarCol) {
|
if (!hasVarCol) {
|
||||||
size_t rowSize = blockDataGetRowSize(pBlock);
|
size_t rowSize = blockDataGetRowSize(pBlock);
|
||||||
int32_t capacity = payloadSize / (rowSize + numOfCols * bitmapChar / 8.0);
|
int32_t capacity = blockDataGetCapacityInRow(pBlock, pageSize, headerSize + colHeaderSize);
|
||||||
if (capacity <= 0) {
|
if (capacity <= 0) {
|
||||||
return TSDB_CODE_FAILED;
|
return TSDB_CODE_FAILED;
|
||||||
}
|
}
|
||||||
|
@ -1532,10 +1531,10 @@ SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index) {
|
||||||
return taosArrayGet(pBlock->pDataBlock, index);
|
return taosArrayGet(pBlock->pDataBlock, index);
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
|
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int32_t extraSize) {
|
||||||
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||||
|
|
||||||
int32_t payloadSize = pageSize - blockDataGetSerialMetaSize(numOfCols);
|
int32_t payloadSize = pageSize - extraSize;
|
||||||
int32_t rowSize = pBlock->info.rowSize;
|
int32_t rowSize = pBlock->info.rowSize;
|
||||||
int32_t nRows = payloadSize / rowSize;
|
int32_t nRows = payloadSize / rowSize;
|
||||||
ASSERT(nRows >= 1);
|
ASSERT(nRows >= 1);
|
||||||
|
@ -1928,7 +1927,8 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
|
||||||
case TSDB_DATA_TYPE_DOUBLE:
|
case TSDB_DATA_TYPE_DOUBLE:
|
||||||
printf(" %15lf |", *(double*)var);
|
printf(" %15lf |", *(double*)var);
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_VARCHAR: {
|
case TSDB_DATA_TYPE_VARCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
char* pData = colDataGetVarData(pColInfoData, j);
|
char* pData = colDataGetVarData(pColInfoData, j);
|
||||||
int32_t dataSize = TMIN(sizeof(pBuf) - 1, varDataLen(pData));
|
int32_t dataSize = TMIN(sizeof(pBuf) - 1, varDataLen(pData));
|
||||||
memset(pBuf, 0, dataSize + 1);
|
memset(pBuf, 0, dataSize + 1);
|
||||||
|
@ -1970,7 +1970,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
||||||
if (len >= size - 1) return dumpBuf;
|
if (len >= size - 1) return dumpBuf;
|
||||||
|
|
||||||
for (int32_t j = 0; j < rows; j++) {
|
for (int32_t j = 0; j < rows; j++) {
|
||||||
len += snprintf(dumpBuf + len, size - len, "%s %d|", flag, j);
|
len += snprintf(dumpBuf + len, size - len, "%s|", flag);
|
||||||
if (len >= size - 1) return dumpBuf;
|
if (len >= size - 1) return dumpBuf;
|
||||||
|
|
||||||
for (int32_t k = 0; k < colNum; k++) {
|
for (int32_t k = 0; k < colNum; k++) {
|
||||||
|
@ -2033,7 +2033,8 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
||||||
len += snprintf(dumpBuf + len, size - len, " %15d |", *(bool*)var);
|
len += snprintf(dumpBuf + len, size - len, " %15d |", *(bool*)var);
|
||||||
if (len >= size - 1) return dumpBuf;
|
if (len >= size - 1) return dumpBuf;
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_VARCHAR: {
|
case TSDB_DATA_TYPE_VARCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
memset(pBuf, 0, sizeof(pBuf));
|
memset(pBuf, 0, sizeof(pBuf));
|
||||||
char* pData = colDataGetVarData(pColInfoData, j);
|
char* pData = colDataGetVarData(pColInfoData, j);
|
||||||
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
|
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
|
||||||
|
@ -2052,7 +2053,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
||||||
} break;
|
} break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
len += snprintf(dumpBuf + len, size - len, "\n");
|
len += snprintf(dumpBuf + len, size - len, "%d\n", j);
|
||||||
if (len >= size - 1) return dumpBuf;
|
if (len >= size - 1) return dumpBuf;
|
||||||
}
|
}
|
||||||
len += snprintf(dumpBuf + len, size - len, "%s |end\n", flag);
|
len += snprintf(dumpBuf + len, size - len, "%s |end\n", flag);
|
||||||
|
@ -2139,7 +2140,8 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
case TSDB_DATA_TYPE_VARCHAR: { // TSDB_DATA_TYPE_BINARY
|
case TSDB_DATA_TYPE_VARCHAR: // TSDB_DATA_TYPE_BINARY
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
if (colDataIsNull_s(pColInfoData, j)) {
|
if (colDataIsNull_s(pColInfoData, j)) {
|
||||||
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, pColInfoData->info.type, TD_VTYPE_NULL, NULL,
|
tdAppendColValToRow(&rb, PRIMARYKEY_TIMESTAMP_COL_ID + k, pColInfoData->info.type, TD_VTYPE_NULL, NULL,
|
||||||
false, offset, k);
|
false, offset, k);
|
||||||
|
|
|
@ -1148,7 +1148,8 @@ static void debugPrintTagVal(int8_t type, const void *val, int32_t vlen, const c
|
||||||
switch (type) {
|
switch (type) {
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
case TSDB_DATA_TYPE_VARCHAR:
|
case TSDB_DATA_TYPE_VARCHAR:
|
||||||
case TSDB_DATA_TYPE_NCHAR: {
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
char tmpVal[32] = {0};
|
char tmpVal[32] = {0};
|
||||||
strncpy(tmpVal, val, vlen > 31 ? 31 : vlen);
|
strncpy(tmpVal, val, vlen > 31 ? 31 : vlen);
|
||||||
printf("%s:%d type:%d vlen:%d, val:\"%s\"\n", tag, ln, (int32_t)type, vlen, tmpVal);
|
printf("%s:%d type:%d vlen:%d, val:\"%s\"\n", tag, ln, (int32_t)type, vlen, tmpVal);
|
||||||
|
@ -3542,5 +3543,6 @@ void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_
|
||||||
NULL, // TSDB_DATA_TYPE_VARBINARY
|
NULL, // TSDB_DATA_TYPE_VARBINARY
|
||||||
NULL, // TSDB_DATA_TYPE_DECIMAL
|
NULL, // TSDB_DATA_TYPE_DECIMAL
|
||||||
NULL, // TSDB_DATA_TYPE_BLOB
|
NULL, // TSDB_DATA_TYPE_BLOB
|
||||||
NULL // TSDB_DATA_TYPE_MEDIUMBLOB
|
NULL, // TSDB_DATA_TYPE_MEDIUMBLOB
|
||||||
|
NULL // TSDB_DATA_TYPE_GEOMETRY
|
||||||
};
|
};
|
||||||
|
|
|
@ -117,12 +117,10 @@ int32_t tsRedirectFactor = 2;
|
||||||
int32_t tsRedirectMaxPeriod = 1000;
|
int32_t tsRedirectMaxPeriod = 1000;
|
||||||
int32_t tsMaxRetryWaitTime = 10000;
|
int32_t tsMaxRetryWaitTime = 10000;
|
||||||
bool tsUseAdapter = false;
|
bool tsUseAdapter = false;
|
||||||
|
int32_t tsMetaCacheMaxSize = -1; // MB
|
||||||
int32_t tsSlowLogThreshold = 3; // seconds
|
int32_t tsSlowLogThreshold = 3; // seconds
|
||||||
int32_t tsSlowLogScope = SLOW_LOG_TYPE_ALL;
|
int32_t tsSlowLogScope = SLOW_LOG_TYPE_ALL;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* denote if the server needs to compress response message at the application layer to client, including query rsp,
|
* denote if the server needs to compress response message at the application layer to client, including query rsp,
|
||||||
* metricmeta rsp, and multi-meter query rsp message body. The client compress the submit message to server.
|
* metricmeta rsp, and multi-meter query rsp message body. The client compress the submit message to server.
|
||||||
|
@ -351,6 +349,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
|
||||||
if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;
|
if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;
|
||||||
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1;
|
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1;
|
||||||
if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, 1) != 0) return -1;
|
if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, 1) != 0) return -1;
|
||||||
|
if (cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, 1) != 0) return -1;
|
||||||
if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, true) != 0) return -1;
|
if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, true) != 0) return -1;
|
||||||
if (cfgAddString(pCfg, "slowLogScope", "", true) != 0) return -1;
|
if (cfgAddString(pCfg, "slowLogScope", "", true) != 0) return -1;
|
||||||
|
|
||||||
|
@ -788,6 +787,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
|
||||||
tsUseAdapter = cfgGetItem(pCfg, "useAdapter")->bval;
|
tsUseAdapter = cfgGetItem(pCfg, "useAdapter")->bval;
|
||||||
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
|
tsEnableCrashReport = cfgGetItem(pCfg, "crashReporting")->bval;
|
||||||
tsQueryMaxConcurrentTables = cfgGetItem(pCfg, "queryMaxConcurrentTables")->i64;
|
tsQueryMaxConcurrentTables = cfgGetItem(pCfg, "queryMaxConcurrentTables")->i64;
|
||||||
|
tsMetaCacheMaxSize = cfgGetItem(pCfg, "metaCacheMaxSize")->i32;
|
||||||
tsSlowLogThreshold = cfgGetItem(pCfg, "slowLogThreshold")->i32;
|
tsSlowLogThreshold = cfgGetItem(pCfg, "slowLogThreshold")->i32;
|
||||||
if (taosSetSlowLogScope(cfgGetItem(pCfg, "slowLogScope")->str)) {
|
if (taosSetSlowLogScope(cfgGetItem(pCfg, "slowLogScope")->str)) {
|
||||||
return -1;
|
return -1;
|
||||||
|
@ -916,7 +916,7 @@ void taosLocalCfgForbiddenToChange(char *name, bool *forbidden) {
|
||||||
*forbidden = false;
|
*forbidden = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
|
||||||
int32_t len = strlen(name);
|
int32_t len = strlen(name);
|
||||||
char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0};
|
char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0};
|
||||||
strntolower(lowcaseName, name, TMIN(CFG_NAME_MAX_LEN, len));
|
strntolower(lowcaseName, name, TMIN(CFG_NAME_MAX_LEN, len));
|
||||||
|
@ -1051,6 +1051,12 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
case 'e': {
|
||||||
|
if (strcasecmp("metaCacheMaxSize", name) == 0) {
|
||||||
|
atomic_store_32(&tsMetaCacheMaxSize, cfgGetItem(pCfg, "metaCacheMaxSize")->i32);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
case 'i': {
|
case 'i': {
|
||||||
if (strcasecmp("minimalTmpDirGB", name) == 0) {
|
if (strcasecmp("minimalTmpDirGB", name) == 0) {
|
||||||
tsTempSpace.reserved = (int64_t)(((double)cfgGetItem(pCfg, "minimalTmpDirGB")->fval) * 1024 * 1024 * 1024);
|
tsTempSpace.reserved = (int64_t)(((double)cfgGetItem(pCfg, "minimalTmpDirGB")->fval) * 1024 * 1024 * 1024);
|
||||||
|
|
|
@ -1566,21 +1566,21 @@ int32_t tDeserializeSGetUserAuthRspImpl(SDecoder *pDecoder, SGetUserAuthRsp *pRs
|
||||||
char db[TSDB_DB_FNAME_LEN] = {0};
|
char db[TSDB_DB_FNAME_LEN] = {0};
|
||||||
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
|
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
|
||||||
int32_t len = strlen(db);
|
int32_t len = strlen(db);
|
||||||
taosHashPut(pRsp->createdDbs, db, len, db, len);
|
taosHashPut(pRsp->createdDbs, db, len, db, len + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int32_t i = 0; i < numOfReadDbs; ++i) {
|
for (int32_t i = 0; i < numOfReadDbs; ++i) {
|
||||||
char db[TSDB_DB_FNAME_LEN] = {0};
|
char db[TSDB_DB_FNAME_LEN] = {0};
|
||||||
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
|
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
|
||||||
int32_t len = strlen(db);
|
int32_t len = strlen(db);
|
||||||
taosHashPut(pRsp->readDbs, db, len, db, len);
|
taosHashPut(pRsp->readDbs, db, len, db, len + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int32_t i = 0; i < numOfWriteDbs; ++i) {
|
for (int32_t i = 0; i < numOfWriteDbs; ++i) {
|
||||||
char db[TSDB_DB_FNAME_LEN] = {0};
|
char db[TSDB_DB_FNAME_LEN] = {0};
|
||||||
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
|
if (tDecodeCStrTo(pDecoder, db) < 0) return -1;
|
||||||
int32_t len = strlen(db);
|
int32_t len = strlen(db);
|
||||||
taosHashPut(pRsp->writeDbs, db, len, db, len);
|
taosHashPut(pRsp->writeDbs, db, len, db, len + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!tDecodeIsEnd(pDecoder)) {
|
if (!tDecodeIsEnd(pDecoder)) {
|
||||||
|
@ -1701,6 +1701,7 @@ int32_t tSerializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq)
|
||||||
if (tEncodeCStr(&encoder, pReq->fqdn) < 0) return -1;
|
if (tEncodeCStr(&encoder, pReq->fqdn) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pReq->port) < 0) return -1;
|
if (tEncodeI32(&encoder, pReq->port) < 0) return -1;
|
||||||
if (tEncodeI8(&encoder, pReq->force) < 0) return -1;
|
if (tEncodeI8(&encoder, pReq->force) < 0) return -1;
|
||||||
|
if (tEncodeI8(&encoder, pReq->unsafe) < 0) return -1;
|
||||||
tEndEncode(&encoder);
|
tEndEncode(&encoder);
|
||||||
|
|
||||||
int32_t tlen = encoder.pos;
|
int32_t tlen = encoder.pos;
|
||||||
|
@ -1717,6 +1718,12 @@ int32_t tDeserializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq
|
||||||
if (tDecodeCStrTo(&decoder, pReq->fqdn) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pReq->fqdn) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &pReq->port) < 0) return -1;
|
if (tDecodeI32(&decoder, &pReq->port) < 0) return -1;
|
||||||
if (tDecodeI8(&decoder, &pReq->force) < 0) return -1;
|
if (tDecodeI8(&decoder, &pReq->force) < 0) return -1;
|
||||||
|
if (!tDecodeIsEnd(&decoder)) {
|
||||||
|
if (tDecodeI8(&decoder, &pReq->unsafe) < 0) return -1;
|
||||||
|
} else {
|
||||||
|
pReq->unsafe = false;
|
||||||
|
}
|
||||||
|
|
||||||
tEndDecode(&decoder);
|
tEndDecode(&decoder);
|
||||||
|
|
||||||
tDecoderClear(&decoder);
|
tDecoderClear(&decoder);
|
||||||
|
@ -3409,6 +3416,7 @@ int32_t tSerializeSTableIndexRsp(void *buf, int32_t bufLen, const STableIndexRsp
|
||||||
if (tEncodeCStr(&encoder, pRsp->dbFName) < 0) return -1;
|
if (tEncodeCStr(&encoder, pRsp->dbFName) < 0) return -1;
|
||||||
if (tEncodeU64(&encoder, pRsp->suid) < 0) return -1;
|
if (tEncodeU64(&encoder, pRsp->suid) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pRsp->version) < 0) return -1;
|
if (tEncodeI32(&encoder, pRsp->version) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pRsp->indexSize) < 0) return -1;
|
||||||
int32_t num = taosArrayGetSize(pRsp->pIndex);
|
int32_t num = taosArrayGetSize(pRsp->pIndex);
|
||||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||||
if (num > 0) {
|
if (num > 0) {
|
||||||
|
@ -3454,6 +3462,7 @@ int32_t tDeserializeSTableIndexRsp(void *buf, int32_t bufLen, STableIndexRsp *pR
|
||||||
if (tDecodeCStrTo(&decoder, pRsp->dbFName) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, pRsp->dbFName) < 0) return -1;
|
||||||
if (tDecodeU64(&decoder, &pRsp->suid) < 0) return -1;
|
if (tDecodeU64(&decoder, &pRsp->suid) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &pRsp->version) < 0) return -1;
|
if (tDecodeI32(&decoder, &pRsp->version) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, &pRsp->indexSize) < 0) return -1;
|
||||||
int32_t num = 0;
|
int32_t num = 0;
|
||||||
if (tDecodeI32(&decoder, &num) < 0) return -1;
|
if (tDecodeI32(&decoder, &num) < 0) return -1;
|
||||||
if (num > 0) {
|
if (num > 0) {
|
||||||
|
@ -3728,6 +3737,7 @@ int32_t tSerializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
|
||||||
if (tEncodeCStr(&encoder, pIndexRsp->dbFName) < 0) return -1;
|
if (tEncodeCStr(&encoder, pIndexRsp->dbFName) < 0) return -1;
|
||||||
if (tEncodeU64(&encoder, pIndexRsp->suid) < 0) return -1;
|
if (tEncodeU64(&encoder, pIndexRsp->suid) < 0) return -1;
|
||||||
if (tEncodeI32(&encoder, pIndexRsp->version) < 0) return -1;
|
if (tEncodeI32(&encoder, pIndexRsp->version) < 0) return -1;
|
||||||
|
if (tEncodeI32(&encoder, pIndexRsp->indexSize) < 0) return -1;
|
||||||
int32_t num = taosArrayGetSize(pIndexRsp->pIndex);
|
int32_t num = taosArrayGetSize(pIndexRsp->pIndex);
|
||||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||||
for (int32_t i = 0; i < num; ++i) {
|
for (int32_t i = 0; i < num; ++i) {
|
||||||
|
@ -3790,6 +3800,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
|
||||||
if (tDecodeCStrTo(&decoder, tableIndexRsp.dbFName) < 0) return -1;
|
if (tDecodeCStrTo(&decoder, tableIndexRsp.dbFName) < 0) return -1;
|
||||||
if (tDecodeU64(&decoder, &tableIndexRsp.suid) < 0) return -1;
|
if (tDecodeU64(&decoder, &tableIndexRsp.suid) < 0) return -1;
|
||||||
if (tDecodeI32(&decoder, &tableIndexRsp.version) < 0) return -1;
|
if (tDecodeI32(&decoder, &tableIndexRsp.version) < 0) return -1;
|
||||||
|
if (tDecodeI32(&decoder, &tableIndexRsp.indexSize) < 0) return -1;
|
||||||
int32_t num = 0;
|
int32_t num = 0;
|
||||||
if (tDecodeI32(&decoder, &num) < 0) return -1;
|
if (tDecodeI32(&decoder, &num) < 0) return -1;
|
||||||
if (num > 0) {
|
if (num > 0) {
|
||||||
|
|
|
@ -126,6 +126,9 @@ void tdSCellValPrint(SCellVal *pVal, int8_t colType) {
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
printf("JSON ");
|
printf("JSON ");
|
||||||
break;
|
break;
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
|
printf("GEOMETRY ");
|
||||||
|
break;
|
||||||
case TSDB_DATA_TYPE_VARBINARY:
|
case TSDB_DATA_TYPE_VARBINARY:
|
||||||
printf("VARBIN ");
|
printf("VARBIN ");
|
||||||
break;
|
break;
|
||||||
|
@ -353,7 +356,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
varDataLen += sizeof(VarDataLenT);
|
varDataLen += sizeof(VarDataLenT);
|
||||||
if (pTColumn->type == TSDB_DATA_TYPE_VARCHAR) {
|
if (pTColumn->type == TSDB_DATA_TYPE_VARCHAR || pTColumn->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
varDataLen += CHAR_BYTES;
|
varDataLen += CHAR_BYTES;
|
||||||
if (maxVarDataLen < CHAR_BYTES + sizeof(VarDataLenT)) {
|
if (maxVarDataLen < CHAR_BYTES + sizeof(VarDataLenT)) {
|
||||||
maxVarDataLen = CHAR_BYTES + sizeof(VarDataLenT);
|
maxVarDataLen = CHAR_BYTES + sizeof(VarDataLenT);
|
||||||
|
|
|
@ -296,7 +296,7 @@ static void writeDataToDisk(STSBuf* pTSBuf) {
|
||||||
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &pBlock->tag.nType, sizeof(pBlock->tag.nType));
|
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &pBlock->tag.nType, sizeof(pBlock->tag.nType));
|
||||||
|
|
||||||
int32_t trueLen = pBlock->tag.nLen;
|
int32_t trueLen = pBlock->tag.nLen;
|
||||||
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) {
|
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR || pBlock->tag.nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &pBlock->tag.nLen, sizeof(pBlock->tag.nLen));
|
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, &pBlock->tag.nLen, sizeof(pBlock->tag.nLen));
|
||||||
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, pBlock->tag.pz, (size_t)pBlock->tag.nLen);
|
metaLen += (int32_t)taosWriteFile(pTSBuf->pFile, pBlock->tag.pz, (size_t)pBlock->tag.nLen);
|
||||||
} else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) {
|
} else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) {
|
||||||
|
@ -378,7 +378,7 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) {
|
||||||
|
|
||||||
// NOTE: mix types tags are not supported
|
// NOTE: mix types tags are not supported
|
||||||
size_t sz = 0;
|
size_t sz = 0;
|
||||||
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) {
|
if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR || pBlock->tag.nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
char* tp = taosMemoryRealloc(pBlock->tag.pz, pBlock->tag.nLen + 1);
|
char* tp = taosMemoryRealloc(pBlock->tag.pz, pBlock->tag.nLen + 1);
|
||||||
ASSERT(tp != NULL);
|
ASSERT(tp != NULL);
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,7 @@
|
||||||
#include "ttypes.h"
|
#include "ttypes.h"
|
||||||
#include "tcompression.h"
|
#include "tcompression.h"
|
||||||
|
|
||||||
const int32_t TYPE_BYTES[16] = {
|
const int32_t TYPE_BYTES[17] = {
|
||||||
-1, // TSDB_DATA_TYPE_NULL
|
-1, // TSDB_DATA_TYPE_NULL
|
||||||
CHAR_BYTES, // TSDB_DATA_TYPE_BOOL
|
CHAR_BYTES, // TSDB_DATA_TYPE_BOOL
|
||||||
CHAR_BYTES, // TSDB_DATA_TYPE_TINYINT
|
CHAR_BYTES, // TSDB_DATA_TYPE_TINYINT
|
||||||
|
@ -34,6 +34,7 @@ const int32_t TYPE_BYTES[16] = {
|
||||||
INT_BYTES, // TSDB_DATA_TYPE_UINT
|
INT_BYTES, // TSDB_DATA_TYPE_UINT
|
||||||
sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT
|
sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT
|
||||||
TSDB_MAX_JSON_TAG_LEN, // TSDB_DATA_TYPE_JSON
|
TSDB_MAX_JSON_TAG_LEN, // TSDB_DATA_TYPE_JSON
|
||||||
|
sizeof(VarDataOffsetT), // TSDB_DATA_TYPE_GEOMETRY
|
||||||
};
|
};
|
||||||
|
|
||||||
tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
|
tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
|
||||||
|
@ -56,6 +57,7 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
|
||||||
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt},
|
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt},
|
||||||
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint},
|
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint},
|
||||||
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString},
|
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString},
|
||||||
|
{TSDB_DATA_TYPE_GEOMETRY, 8, 1, "GEOMETRY", 0, 0, tsCompressString, tsDecompressString},
|
||||||
};
|
};
|
||||||
|
|
||||||
static float floatMin = -FLT_MAX, floatMax = FLT_MAX;
|
static float floatMin = -FLT_MAX, floatMax = FLT_MAX;
|
||||||
|
@ -125,6 +127,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
|
||||||
*((int64_t *)val) = GET_INT64_VAL(src);
|
*((int64_t *)val) = GET_INT64_VAL(src);
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
varDataCopy(val, src);
|
varDataCopy(val, src);
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
|
|
|
@ -121,7 +121,8 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
|
||||||
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TSDB_DATA_TYPE_BINARY: { // todo refactor, extract a method
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: { // todo refactor, extract a method
|
||||||
pVar->pz = taosMemoryCalloc(len + 1, sizeof(char));
|
pVar->pz = taosMemoryCalloc(len + 1, sizeof(char));
|
||||||
memcpy(pVar->pz, pz, len);
|
memcpy(pVar->pz, pz, len);
|
||||||
pVar->nLen = (int32_t)len;
|
pVar->nLen = (int32_t)len;
|
||||||
|
@ -140,7 +141,7 @@ void taosVariantDestroy(SVariant *pVar) {
|
||||||
if (pVar == NULL) return;
|
if (pVar == NULL) return;
|
||||||
|
|
||||||
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR ||
|
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR ||
|
||||||
pVar->nType == TSDB_DATA_TYPE_JSON) {
|
pVar->nType == TSDB_DATA_TYPE_JSON || pVar->nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
taosMemoryFreeClear(pVar->pz);
|
taosMemoryFreeClear(pVar->pz);
|
||||||
pVar->nLen = 0;
|
pVar->nLen = 0;
|
||||||
}
|
}
|
||||||
|
@ -152,7 +153,7 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
|
||||||
|
|
||||||
pDst->nType = pSrc->nType;
|
pDst->nType = pSrc->nType;
|
||||||
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR ||
|
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR ||
|
||||||
pSrc->nType == TSDB_DATA_TYPE_JSON) {
|
pSrc->nType == TSDB_DATA_TYPE_JSON || pSrc->nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
|
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
|
||||||
char *p = taosMemoryRealloc(pDst->pz, len);
|
char *p = taosMemoryRealloc(pDst->pz, len);
|
||||||
ASSERT(p);
|
ASSERT(p);
|
||||||
|
@ -184,7 +185,7 @@ int32_t taosVariantCompare(const SVariant *p1, const SVariant *p2) {
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (p1->nType == TSDB_DATA_TYPE_BINARY || p1->nType == TSDB_DATA_TYPE_NCHAR) {
|
if (p1->nType == TSDB_DATA_TYPE_BINARY || p1->nType == TSDB_DATA_TYPE_NCHAR || p1->nType == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
if (p1->nLen == p2->nLen) {
|
if (p1->nLen == p2->nLen) {
|
||||||
return memcmp(p1->pz, p2->pz, p1->nLen);
|
return memcmp(p1->pz, p2->pz, p1->nLen);
|
||||||
} else {
|
} else {
|
||||||
|
@ -237,6 +238,7 @@ char *taosVariantGet(SVariant *pVar, int32_t type) {
|
||||||
return (char *)&pVar->f;
|
return (char *)&pVar->f;
|
||||||
case TSDB_DATA_TYPE_BINARY:
|
case TSDB_DATA_TYPE_BINARY:
|
||||||
case TSDB_DATA_TYPE_JSON:
|
case TSDB_DATA_TYPE_JSON:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY:
|
||||||
return (char *)pVar->pz;
|
return (char *)pVar->pz;
|
||||||
case TSDB_DATA_TYPE_NCHAR:
|
case TSDB_DATA_TYPE_NCHAR:
|
||||||
return (char *)pVar->ucs4;
|
return (char *)pVar->ucs4;
|
||||||
|
|
|
@ -235,7 +235,8 @@ int32_t debugPrintSColVal(SColVal *cv, int8_t type) {
|
||||||
case TSDB_DATA_TYPE_DOUBLE:
|
case TSDB_DATA_TYPE_DOUBLE:
|
||||||
printf("%lf ", cv->value.d);
|
printf("%lf ", cv->value.d);
|
||||||
break;
|
break;
|
||||||
case TSDB_DATA_TYPE_VARCHAR: {
|
case TSDB_DATA_TYPE_VARCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
char tv[15] = {0};
|
char tv[15] = {0};
|
||||||
snprintf(tv, 15, "%s", cv->value.pData);
|
snprintf(tv, 15, "%s", cv->value.pData);
|
||||||
printf("%s ", tv);
|
printf("%s ", tv);
|
||||||
|
@ -337,7 +338,8 @@ static int32_t checkSColVal(const char *rawVal, SColVal *cv, int8_t type) {
|
||||||
sscanf(rawVal, "%lf", &rawSVal.d);
|
sscanf(rawVal, "%lf", &rawSVal.d);
|
||||||
EXPECT_DOUBLE_EQ(cv->value.d, rawSVal.d);
|
EXPECT_DOUBLE_EQ(cv->value.d, rawSVal.d);
|
||||||
} break;
|
} break;
|
||||||
case TSDB_DATA_TYPE_VARCHAR: {
|
case TSDB_DATA_TYPE_VARCHAR:
|
||||||
|
case TSDB_DATA_TYPE_GEOMETRY: {
|
||||||
EXPECT_STRCASEEQ(rawVal, (const char *)cv->value.pData);
|
EXPECT_STRCASEEQ(rawVal, (const char *)cv->value.pData);
|
||||||
} break;
|
} break;
|
||||||
case TSDB_DATA_TYPE_TIMESTAMP: {
|
case TSDB_DATA_TYPE_TIMESTAMP: {
|
||||||
|
|
|
@ -32,6 +32,7 @@ bool mndIsDbReady(SMnode *pMnode, SDbObj *pDb);
|
||||||
|
|
||||||
SSdbRaw *mndDbActionEncode(SDbObj *pDb);
|
SSdbRaw *mndDbActionEncode(SDbObj *pDb);
|
||||||
const char *mndGetDbStr(const char *src);
|
const char *mndGetDbStr(const char *src);
|
||||||
|
const char *mndGetStableStr(const char *src);
|
||||||
|
|
||||||
int32_t mndProcessCompactDbReq(SRpcMsg *pReq);
|
int32_t mndProcessCompactDbReq(SRpcMsg *pReq);
|
||||||
|
|
||||||
|
|
|
@ -521,6 +521,7 @@ typedef struct {
|
||||||
char* physicalPlan;
|
char* physicalPlan;
|
||||||
SSchemaWrapper schema;
|
SSchemaWrapper schema;
|
||||||
int64_t stbUid;
|
int64_t stbUid;
|
||||||
|
char stbName[TSDB_TABLE_FNAME_LEN];
|
||||||
// forbid condition
|
// forbid condition
|
||||||
int64_t ntbUid;
|
int64_t ntbUid;
|
||||||
SArray* ntbColIds;
|
SArray* ntbColIds;
|
||||||
|
|
|
@ -47,6 +47,7 @@ int32_t mndAllocStbSchemas(const SStbObj *pOld, SStbObj *pNew);
|
||||||
int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId);
|
int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbFullName, int64_t suid, col_id_t colId);
|
||||||
void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pStb, int32_t *pContLen, void *alterOriData,
|
void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pStb, int32_t *pContLen, void *alterOriData,
|
||||||
int32_t alterOriDataLen);
|
int32_t alterOriDataLen);
|
||||||
|
int32_t mndSetForceDropCreateStbRedoActions(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, SStbObj *pStb);
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,7 +40,7 @@ int32_t mndAddCreateVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *p
|
||||||
int32_t mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup);
|
int32_t mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup);
|
||||||
int32_t mndAddAlterVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, tmsg_t msgType);
|
int32_t mndAddAlterVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, tmsg_t msgType);
|
||||||
int32_t mndAddDropVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid, bool isRedo);
|
int32_t mndAddDropVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid, bool isRedo);
|
||||||
int32_t mndSetMoveVgroupsInfoToTrans(SMnode *, STrans *pTrans, int32_t dropDnodeId, bool force);
|
int32_t mndSetMoveVgroupsInfoToTrans(SMnode *, STrans *pTrans, int32_t dropDnodeId, bool force, bool unsafe);
|
||||||
int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup,
|
int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup,
|
||||||
SArray *pArray);
|
SArray *pArray);
|
||||||
int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs,
|
int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs,
|
||||||
|
|
|
@ -1543,6 +1543,13 @@ const char *mndGetDbStr(const char *src) {
|
||||||
return pos;
|
return pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const char *mndGetStableStr(const char *src) {
|
||||||
|
char *pos = strstr(src, TS_PATH_DELIMITER);
|
||||||
|
if (pos != NULL) ++pos;
|
||||||
|
if (pos == NULL) return src;
|
||||||
|
return mndGetDbStr(pos);
|
||||||
|
}
|
||||||
|
|
||||||
static int64_t getValOfDiffPrecision(int8_t unit, int64_t val) {
|
static int64_t getValOfDiffPrecision(int8_t unit, int64_t val) {
|
||||||
int64_t v = 0;
|
int64_t v = 0;
|
||||||
switch (unit) {
|
switch (unit) {
|
||||||
|
|
|
@ -44,6 +44,11 @@ static const char *offlineReason[] = {
|
||||||
"unknown",
|
"unknown",
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum {
|
||||||
|
DND_ACTIVE_CODE,
|
||||||
|
DND_CONN_ACTIVE_CODE,
|
||||||
|
};
|
||||||
|
|
||||||
static int32_t mndCreateDefaultDnode(SMnode *pMnode);
|
static int32_t mndCreateDefaultDnode(SMnode *pMnode);
|
||||||
static SSdbRaw *mndDnodeActionEncode(SDnodeObj *pDnode);
|
static SSdbRaw *mndDnodeActionEncode(SDnodeObj *pDnode);
|
||||||
static SSdbRow *mndDnodeActionDecode(SSdbRaw *pRaw);
|
static SSdbRow *mndDnodeActionDecode(SSdbRaw *pRaw);
|
||||||
|
@ -227,6 +232,14 @@ static int32_t mndDnodeActionDelete(SSdb *pSdb, SDnodeObj *pDnode) {
|
||||||
static int32_t mndDnodeActionUpdate(SSdb *pSdb, SDnodeObj *pOld, SDnodeObj *pNew) {
|
static int32_t mndDnodeActionUpdate(SSdb *pSdb, SDnodeObj *pOld, SDnodeObj *pNew) {
|
||||||
mTrace("dnode:%d, perform update action, old row:%p new row:%p", pOld->id, pOld, pNew);
|
mTrace("dnode:%d, perform update action, old row:%p new row:%p", pOld->id, pOld, pNew);
|
||||||
pOld->updateTime = pNew->updateTime;
|
pOld->updateTime = pNew->updateTime;
|
||||||
|
#ifdef TD_ENTERPRISE
|
||||||
|
if (strncmp(pOld->active, pNew->active, TSDB_ACTIVE_KEY_LEN) != 0) {
|
||||||
|
strncpy(pOld->active, pNew->active, TSDB_ACTIVE_KEY_LEN);
|
||||||
|
}
|
||||||
|
if (strncmp(pOld->connActive, pNew->connActive, TSDB_CONN_ACTIVE_KEY_LEN) != 0) {
|
||||||
|
strncpy(pOld->connActive, pNew->connActive, TSDB_CONN_ACTIVE_KEY_LEN);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -635,6 +648,69 @@ _OVER:
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfgReq, int8_t action) {
|
||||||
|
SSdbRaw *pRaw = NULL;
|
||||||
|
STrans *pTrans = NULL;
|
||||||
|
SDnodeObj *pDnode = NULL;
|
||||||
|
bool cfgAll = pCfgReq->dnodeId == -1;
|
||||||
|
|
||||||
|
SSdb *pSdb = pMnode->pSdb;
|
||||||
|
void *pIter = NULL;
|
||||||
|
while (1) {
|
||||||
|
if (cfgAll) {
|
||||||
|
pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode);
|
||||||
|
if (pIter == NULL) break;
|
||||||
|
} else if(!(pDnode = mndAcquireDnode(pMnode, pCfgReq->dnodeId))) {
|
||||||
|
goto _OVER;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!pTrans) {
|
||||||
|
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "config-dnode");
|
||||||
|
if (!pTrans) goto _OVER;
|
||||||
|
if (mndTrancCheckConflict(pMnode, pTrans) != 0) goto _OVER;
|
||||||
|
}
|
||||||
|
|
||||||
|
SDnodeObj tmpDnode = *pDnode;
|
||||||
|
if (action == DND_ACTIVE_CODE) {
|
||||||
|
strncpy(tmpDnode.active, pCfgReq->value, TSDB_ACTIVE_KEY_LEN);
|
||||||
|
} else if (action == DND_CONN_ACTIVE_CODE) {
|
||||||
|
strncpy(tmpDnode.connActive, pCfgReq->value, TSDB_CONN_ACTIVE_KEY_LEN);
|
||||||
|
} else {
|
||||||
|
terrno = TSDB_CODE_INVALID_CFG;
|
||||||
|
goto _OVER;
|
||||||
|
}
|
||||||
|
|
||||||
|
pRaw = mndDnodeActionEncode(&tmpDnode);
|
||||||
|
if (pRaw == NULL || mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
|
||||||
|
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
|
||||||
|
pRaw = NULL;
|
||||||
|
|
||||||
|
mInfo("dnode:%d, config dnode, cfg:%d, app:%p config:%s value:%s", pDnode->id, pCfgReq->dnodeId, pReq->info.ahandle,
|
||||||
|
pCfgReq->config, pCfgReq->value);
|
||||||
|
|
||||||
|
if (cfgAll) {
|
||||||
|
sdbRelease(pSdb, pDnode);
|
||||||
|
pDnode = NULL;
|
||||||
|
} else {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pTrans && mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
||||||
|
|
||||||
|
terrno = 0;
|
||||||
|
|
||||||
|
_OVER:
|
||||||
|
if (cfgAll) {
|
||||||
|
sdbRelease(pSdb, pDnode);
|
||||||
|
} else {
|
||||||
|
mndReleaseDnode(pMnode, pDnode);
|
||||||
|
}
|
||||||
|
mndTransDrop(pTrans);
|
||||||
|
sdbFreeRaw(pRaw);
|
||||||
|
return terrno;
|
||||||
|
}
|
||||||
|
|
||||||
static int32_t mndProcessDnodeListReq(SRpcMsg *pReq) {
|
static int32_t mndProcessDnodeListReq(SRpcMsg *pReq) {
|
||||||
SMnode *pMnode = pReq->info.node;
|
SMnode *pMnode = pReq->info.node;
|
||||||
SSdb *pSdb = pMnode->pSdb;
|
SSdb *pSdb = pMnode->pSdb;
|
||||||
|
@ -804,7 +880,7 @@ int32_t mndProcessRestoreDnodeReqImpl(SRpcMsg *pReq){
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SMnodeObj *pMObj, SQnodeObj *pQObj,
|
static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SMnodeObj *pMObj, SQnodeObj *pQObj,
|
||||||
SSnodeObj *pSObj, int32_t numOfVnodes, bool force) {
|
SSnodeObj *pSObj, int32_t numOfVnodes, bool force, bool unsafe) {
|
||||||
int32_t code = -1;
|
int32_t code = -1;
|
||||||
SSdbRaw *pRaw = NULL;
|
SSdbRaw *pRaw = NULL;
|
||||||
STrans *pTrans = NULL;
|
STrans *pTrans = NULL;
|
||||||
|
@ -844,7 +920,7 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SM
|
||||||
|
|
||||||
if (numOfVnodes > 0) {
|
if (numOfVnodes > 0) {
|
||||||
mInfo("trans:%d, %d vnodes on dnode:%d will be dropped", pTrans->id, numOfVnodes, pDnode->id);
|
mInfo("trans:%d, %d vnodes on dnode:%d will be dropped", pTrans->id, numOfVnodes, pDnode->id);
|
||||||
if (mndSetMoveVgroupsInfoToTrans(pMnode, pTrans, pDnode->id, force) != 0) goto _OVER;
|
if (mndSetMoveVgroupsInfoToTrans(pMnode, pTrans, pDnode->id, force, unsafe) != 0) goto _OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
||||||
|
@ -871,11 +947,18 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
mInfo("dnode:%d, start to drop, ep:%s:%d", dropReq.dnodeId, dropReq.fqdn, dropReq.port);
|
mInfo("dnode:%d, start to drop, ep:%s:%d, force:%s, unsafe:%s",
|
||||||
|
dropReq.dnodeId, dropReq.fqdn, dropReq.port, dropReq.force?"true":"false", dropReq.unsafe?"true":"false");
|
||||||
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) {
|
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) {
|
||||||
goto _OVER;
|
goto _OVER;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool force = dropReq.force;
|
||||||
|
if(dropReq.unsafe)
|
||||||
|
{
|
||||||
|
force = true;
|
||||||
|
}
|
||||||
|
|
||||||
pDnode = mndAcquireDnode(pMnode, dropReq.dnodeId);
|
pDnode = mndAcquireDnode(pMnode, dropReq.dnodeId);
|
||||||
if (pDnode == NULL) {
|
if (pDnode == NULL) {
|
||||||
int32_t err = terrno;
|
int32_t err = terrno;
|
||||||
|
@ -903,7 +986,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id);
|
int32_t numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id);
|
||||||
if ((numOfVnodes > 0 || pMObj != NULL || pSObj != NULL || pQObj != NULL) && !dropReq.force) {
|
if ((numOfVnodes > 0 || pMObj != NULL || pSObj != NULL || pQObj != NULL) && !force) {
|
||||||
if (!mndIsDnodeOnline(pDnode, taosGetTimestampMs())) {
|
if (!mndIsDnodeOnline(pDnode, taosGetTimestampMs())) {
|
||||||
terrno = TSDB_CODE_DNODE_OFFLINE;
|
terrno = TSDB_CODE_DNODE_OFFLINE;
|
||||||
mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
|
mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
|
||||||
|
@ -912,7 +995,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
code = mndDropDnode(pMnode, pReq, pDnode, pMObj, pQObj, pSObj, numOfVnodes, dropReq.force);
|
code = mndDropDnode(pMnode, pReq, pDnode, pMObj, pQObj, pSObj, numOfVnodes, force, dropReq.unsafe);
|
||||||
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||||
|
|
||||||
_OVER:
|
_OVER:
|
||||||
|
@ -970,6 +1053,34 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
|
||||||
|
|
||||||
strcpy(dcfgReq.config, "monitor");
|
strcpy(dcfgReq.config, "monitor");
|
||||||
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag);
|
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag);
|
||||||
|
#ifdef TD_ENTERPRISE
|
||||||
|
} else if (strncasecmp(cfgReq.config, "activeCode", 10) == 0 || strncasecmp(cfgReq.config, "cActiveCode", 11) == 0) {
|
||||||
|
int8_t opt = strncasecmp(cfgReq.config, "a", 1) == 0 ? DND_ACTIVE_CODE : DND_CONN_ACTIVE_CODE;
|
||||||
|
int8_t index = opt == DND_ACTIVE_CODE ? 10 : 11;
|
||||||
|
if (' ' != cfgReq.config[index] && 0 != cfgReq.config[index]) {
|
||||||
|
mError("dnode:%d, failed to config activeCode since invalid conf:%s", cfgReq.dnodeId, cfgReq.config);
|
||||||
|
terrno = TSDB_CODE_INVALID_CFG;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
int32_t vlen = strlen(cfgReq.value);
|
||||||
|
if (vlen > 0 && ((opt == DND_ACTIVE_CODE && vlen != (TSDB_ACTIVE_KEY_LEN - 1)) ||
|
||||||
|
(opt == DND_CONN_ACTIVE_CODE &&
|
||||||
|
(vlen > (TSDB_CONN_ACTIVE_KEY_LEN - 1) || vlen < (TSDB_ACTIVE_KEY_LEN - 1))))) {
|
||||||
|
mError("dnode:%d, failed to config activeCode since invalid vlen:%d. conf:%s, val:%s", cfgReq.dnodeId, vlen,
|
||||||
|
cfgReq.config, cfgReq.value);
|
||||||
|
terrno = TSDB_CODE_INVALID_OPTION;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
strcpy(dcfgReq.config, opt == DND_ACTIVE_CODE ? "activeCode" : "cActiveCode");
|
||||||
|
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value);
|
||||||
|
|
||||||
|
if (mndConfigDnode(pMnode, pReq, &cfgReq, opt) != 0) {
|
||||||
|
mError("dnode:%d, failed to config activeCode since %s", cfgReq.dnodeId, terrstr());
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
#endif
|
||||||
} else {
|
} else {
|
||||||
bool findOpt = false;
|
bool findOpt = false;
|
||||||
for (int32_t d = 0; d < optionSize; ++d) {
|
for (int32_t d = 0; d < optionSize; ++d) {
|
||||||
|
@ -1023,7 +1134,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
|
||||||
tSerializeSDCfgDnodeReq(pBuf, bufLen, &dcfgReq);
|
tSerializeSDCfgDnodeReq(pBuf, bufLen, &dcfgReq);
|
||||||
mInfo("dnode:%d, send config req to dnode, app:%p config:%s value:%s", cfgReq.dnodeId, pReq->info.ahandle,
|
mInfo("dnode:%d, send config req to dnode, app:%p config:%s value:%s", cfgReq.dnodeId, pReq->info.ahandle,
|
||||||
dcfgReq.config, dcfgReq.value);
|
dcfgReq.config, dcfgReq.value);
|
||||||
SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen, .info = pReq->info};
|
SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen};
|
||||||
tmsgSendReq(&epSet, &rpcMsg);
|
tmsgSendReq(&epSet, &rpcMsg);
|
||||||
code = 0;
|
code = 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -285,6 +285,7 @@ void dumpTopic(SSdb *pSdb, SJson *json) {
|
||||||
tjsonAddStringToObject(item, "subType", i642str(pObj->subType));
|
tjsonAddStringToObject(item, "subType", i642str(pObj->subType));
|
||||||
tjsonAddStringToObject(item, "withMeta", i642str(pObj->withMeta));
|
tjsonAddStringToObject(item, "withMeta", i642str(pObj->withMeta));
|
||||||
tjsonAddStringToObject(item, "stbUid", i642str(pObj->stbUid));
|
tjsonAddStringToObject(item, "stbUid", i642str(pObj->stbUid));
|
||||||
|
tjsonAddStringToObject(item, "stbName", mndGetStableStr(pObj->stbName));
|
||||||
tjsonAddStringToObject(item, "sqlLen", i642str(pObj->sqlLen));
|
tjsonAddStringToObject(item, "sqlLen", i642str(pObj->sqlLen));
|
||||||
tjsonAddStringToObject(item, "astLen", i642str(pObj->astLen));
|
tjsonAddStringToObject(item, "astLen", i642str(pObj->astLen));
|
||||||
tjsonAddStringToObject(item, "sqlLen", i642str(pObj->sqlLen));
|
tjsonAddStringToObject(item, "sqlLen", i642str(pObj->sqlLen));
|
||||||
|
|
|
@ -562,7 +562,7 @@ static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int32_t le
|
||||||
return msg;
|
return msg;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) {
|
if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||||
int32_t bytes = len > 0 ? (int32_t)(len - VARSTR_HEADER_SIZE) : len;
|
int32_t bytes = len > 0 ? (int32_t)(len - VARSTR_HEADER_SIZE) : len;
|
||||||
|
|
||||||
snprintf(buf, buflen - 1, "%s(%d)", tDataTypes[type].name, type == TSDB_DATA_TYPE_NCHAR ? bytes / 4 : bytes);
|
snprintf(buf, buflen - 1, "%s(%d)", tDataTypes[type].name, type == TSDB_DATA_TYPE_NCHAR ? bytes / 4 : bytes);
|
||||||
|
|
|
@ -695,7 +695,7 @@ static int32_t mndSetDropMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnode
|
||||||
if (totalMnodes == 2) {
|
if (totalMnodes == 2) {
|
||||||
if (force) {
|
if (force) {
|
||||||
mError("cant't force drop dnode, since a mnode on it and replica is 2");
|
mError("cant't force drop dnode, since a mnode on it and replica is 2");
|
||||||
terrno = TSDB_CODE_DNODE_OFFLINE;
|
terrno = TSDB_CODE_MNODE_ONLY_TWO_MNODE;
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
mInfo("vgId:1, has %d mnodes, exec redo log first", totalMnodes);
|
mInfo("vgId:1, has %d mnodes, exec redo log first", totalMnodes);
|
||||||
|
|
|
@ -1114,6 +1114,7 @@ int32_t mndGetTableSma(SMnode *pMnode, char *tbFName, STableIndexRsp *rsp, bool
|
||||||
return code;
|
return code;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rsp->indexSize += sizeof(info) + pSma->exprLen + 1;
|
||||||
*exist = true;
|
*exist = true;
|
||||||
|
|
||||||
sdbRelease(pSdb, pSma);
|
sdbRelease(pSdb, pSma);
|
||||||
|
|
|
@ -687,6 +687,31 @@ static int32_t mndSetCreateStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int32_t mndSetForceDropCreateStbRedoActions(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, SStbObj *pStb) {
|
||||||
|
SSdb *pSdb = pMnode->pSdb;
|
||||||
|
int32_t contLen;
|
||||||
|
|
||||||
|
void *pReq = mndBuildVCreateStbReq(pMnode, pVgroup, pStb, &contLen, NULL, 0);
|
||||||
|
if (pReq == NULL) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
STransAction action = {0};
|
||||||
|
action.mTraceId = pTrans->mTraceId;
|
||||||
|
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
|
||||||
|
action.pCont = pReq;
|
||||||
|
action.contLen = contLen;
|
||||||
|
action.msgType = TDMT_VND_CREATE_STB;
|
||||||
|
action.acceptableCode = TSDB_CODE_TDB_STB_ALREADY_EXIST;
|
||||||
|
action.retryCode = TSDB_CODE_TDB_STB_NOT_EXIST;
|
||||||
|
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
|
||||||
|
taosMemoryFree(pReq);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int32_t mndSetCreateStbUndoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
|
static int32_t mndSetCreateStbUndoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb) {
|
||||||
SSdb *pSdb = pMnode->pSdb;
|
SSdb *pSdb = pMnode->pSdb;
|
||||||
SVgObj *pVgroup = NULL;
|
SVgObj *pVgroup = NULL;
|
||||||
|
@ -1448,7 +1473,7 @@ static int32_t mndAlterStbTagBytes(SMnode *pMnode, const SStbObj *pOld, SStbObj
|
||||||
|
|
||||||
SSchema *pTag = pNew->pTags + tag;
|
SSchema *pTag = pNew->pTags + tag;
|
||||||
|
|
||||||
if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR)) {
|
if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR || pTag->type == TSDB_DATA_TYPE_GEOMETRY)) {
|
||||||
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
|
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
@ -1568,7 +1593,7 @@ static int32_t mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO
|
||||||
}
|
}
|
||||||
|
|
||||||
SSchema *pCol = pNew->pColumns + col;
|
SSchema *pCol = pNew->pColumns + col;
|
||||||
if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR)) {
|
if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR || pCol->type == TSDB_DATA_TYPE_GEOMETRY)) {
|
||||||
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
|
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -109,6 +109,7 @@ SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) {
|
||||||
SDB_SET_INT8(pRaw, dataPos, pTopic->withMeta, TOPIC_ENCODE_OVER);
|
SDB_SET_INT8(pRaw, dataPos, pTopic->withMeta, TOPIC_ENCODE_OVER);
|
||||||
|
|
||||||
SDB_SET_INT64(pRaw, dataPos, pTopic->stbUid, TOPIC_ENCODE_OVER);
|
SDB_SET_INT64(pRaw, dataPos, pTopic->stbUid, TOPIC_ENCODE_OVER);
|
||||||
|
SDB_SET_BINARY(pRaw, dataPos, pTopic->stbName, TSDB_TABLE_FNAME_LEN, TOPIC_ENCODE_OVER);
|
||||||
SDB_SET_INT32(pRaw, dataPos, pTopic->sqlLen, TOPIC_ENCODE_OVER);
|
SDB_SET_INT32(pRaw, dataPos, pTopic->sqlLen, TOPIC_ENCODE_OVER);
|
||||||
SDB_SET_BINARY(pRaw, dataPos, pTopic->sql, pTopic->sqlLen, TOPIC_ENCODE_OVER);
|
SDB_SET_BINARY(pRaw, dataPos, pTopic->sql, pTopic->sqlLen, TOPIC_ENCODE_OVER);
|
||||||
SDB_SET_INT32(pRaw, dataPos, pTopic->astLen, TOPIC_ENCODE_OVER);
|
SDB_SET_INT32(pRaw, dataPos, pTopic->astLen, TOPIC_ENCODE_OVER);
|
||||||
|
@ -196,6 +197,7 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) {
|
||||||
SDB_GET_INT8(pRaw, dataPos, &pTopic->withMeta, TOPIC_DECODE_OVER);
|
SDB_GET_INT8(pRaw, dataPos, &pTopic->withMeta, TOPIC_DECODE_OVER);
|
||||||
|
|
||||||
SDB_GET_INT64(pRaw, dataPos, &pTopic->stbUid, TOPIC_DECODE_OVER);
|
SDB_GET_INT64(pRaw, dataPos, &pTopic->stbUid, TOPIC_DECODE_OVER);
|
||||||
|
SDB_GET_BINARY(pRaw, dataPos, pTopic->stbName, TSDB_TABLE_FNAME_LEN, TOPIC_DECODE_OVER);
|
||||||
SDB_GET_INT32(pRaw, dataPos, &pTopic->sqlLen, TOPIC_DECODE_OVER);
|
SDB_GET_INT32(pRaw, dataPos, &pTopic->sqlLen, TOPIC_DECODE_OVER);
|
||||||
pTopic->sql = taosMemoryCalloc(pTopic->sqlLen, sizeof(char));
|
pTopic->sql = taosMemoryCalloc(pTopic->sqlLen, sizeof(char));
|
||||||
if (pTopic->sql == NULL) {
|
if (pTopic->sql == NULL) {
|
||||||
|
@ -460,6 +462,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
strcpy(topicObj.stbName, pCreate->subStbName);
|
||||||
topicObj.stbUid = pStb->uid;
|
topicObj.stbUid = pStb->uid;
|
||||||
mndReleaseStb(pMnode, pStb);
|
mndReleaseStb(pMnode, pStb);
|
||||||
}
|
}
|
||||||
|
@ -830,6 +833,43 @@ int32_t mndGetNumOfTopics(SMnode *pMnode, char *dbName, int32_t *pNumOfTopics) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void schemaToJson(SSchema *schema, int32_t nCols, char *schemaJson){
|
||||||
|
char* string = NULL;
|
||||||
|
cJSON* columns = cJSON_CreateArray();
|
||||||
|
if (columns == NULL) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
for (int i = 0; i < nCols; i++) {
|
||||||
|
cJSON* column = cJSON_CreateObject();
|
||||||
|
SSchema* s = schema + i;
|
||||||
|
cJSON* cname = cJSON_CreateString(s->name);
|
||||||
|
cJSON_AddItemToObject(column, "name", cname);
|
||||||
|
cJSON* ctype = cJSON_CreateString(tDataTypes[s->type].name);
|
||||||
|
cJSON_AddItemToObject(column, "type", ctype);
|
||||||
|
int32_t length = 0;
|
||||||
|
if (s->type == TSDB_DATA_TYPE_BINARY) {
|
||||||
|
length = s->bytes - VARSTR_HEADER_SIZE;
|
||||||
|
} else if (s->type == TSDB_DATA_TYPE_NCHAR || s->type == TSDB_DATA_TYPE_JSON) {
|
||||||
|
length = (s->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||||
|
} else{
|
||||||
|
length = s->bytes;
|
||||||
|
}
|
||||||
|
cJSON* cbytes = cJSON_CreateNumber(length);
|
||||||
|
cJSON_AddItemToObject(column, "length", cbytes);
|
||||||
|
cJSON_AddItemToArray(columns, column);
|
||||||
|
}
|
||||||
|
string = cJSON_PrintUnformatted(columns);
|
||||||
|
cJSON_Delete(columns);
|
||||||
|
|
||||||
|
size_t len = strlen(string);
|
||||||
|
if(string && len <= TSDB_SHOW_SCHEMA_JSON_LEN){
|
||||||
|
STR_TO_VARSTR(schemaJson, string);
|
||||||
|
}else{
|
||||||
|
mError("mndRetrieveTopic build schema error json:%p, json len:%zu", string, len);
|
||||||
|
}
|
||||||
|
taosMemoryFree(string);
|
||||||
|
}
|
||||||
|
|
||||||
static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity) {
|
static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity) {
|
||||||
SMnode *pMnode = pReq->info.node;
|
SMnode *pMnode = pReq->info.node;
|
||||||
SSdb *pSdb = pMnode->pSdb;
|
SSdb *pSdb = pMnode->pSdb;
|
||||||
|
@ -868,6 +908,49 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
|
||||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
colDataSetVal(pColInfo, numOfRows, (const char *)sql, false);
|
colDataSetVal(pColInfo, numOfRows, (const char *)sql, false);
|
||||||
|
|
||||||
|
char *schemaJson = taosMemoryMalloc(TSDB_SHOW_SCHEMA_JSON_LEN + VARSTR_HEADER_SIZE);
|
||||||
|
if(pTopic->subType == TOPIC_SUB_TYPE__COLUMN){
|
||||||
|
schemaToJson(pTopic->schema.pSchema, pTopic->schema.nCols, schemaJson);
|
||||||
|
}else if(pTopic->subType == TOPIC_SUB_TYPE__TABLE){
|
||||||
|
SStbObj *pStb = mndAcquireStb(pMnode, pTopic->stbName);
|
||||||
|
if (pStb == NULL) {
|
||||||
|
terrno = TSDB_CODE_MND_STB_NOT_EXIST;
|
||||||
|
taosMemoryFree(schemaJson);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
schemaToJson(pStb->pColumns, pStb->numOfColumns, schemaJson);
|
||||||
|
|
||||||
|
mndReleaseStb(pMnode, pStb);
|
||||||
|
}else{
|
||||||
|
STR_TO_VARSTR(schemaJson, "NULL");
|
||||||
|
}
|
||||||
|
|
||||||
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
|
colDataSetVal(pColInfo, numOfRows, (const char *)schemaJson, false);
|
||||||
|
taosMemoryFree(schemaJson);
|
||||||
|
|
||||||
|
char mete[4 + VARSTR_HEADER_SIZE] = {0};
|
||||||
|
if(pTopic->withMeta){
|
||||||
|
STR_TO_VARSTR(mete, "yes");
|
||||||
|
}else{
|
||||||
|
STR_TO_VARSTR(mete, "no");
|
||||||
|
}
|
||||||
|
|
||||||
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
|
colDataSetVal(pColInfo, numOfRows, (const char *)mete, false);
|
||||||
|
|
||||||
|
char type[8 + VARSTR_HEADER_SIZE] = {0};
|
||||||
|
if(pTopic->subType == TOPIC_SUB_TYPE__COLUMN){
|
||||||
|
STR_TO_VARSTR(type, "column");
|
||||||
|
}else if(pTopic->subType == TOPIC_SUB_TYPE__TABLE){
|
||||||
|
STR_TO_VARSTR(type, "stable");
|
||||||
|
}else{
|
||||||
|
STR_TO_VARSTR(type, "db");
|
||||||
|
}
|
||||||
|
|
||||||
|
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||||
|
colDataSetVal(pColInfo, numOfRows, (const char *)type, false);
|
||||||
|
|
||||||
numOfRows++;
|
numOfRows++;
|
||||||
sdbRelease(pSdb, pTopic);
|
sdbRelease(pSdb, pTopic);
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,6 +23,7 @@
|
||||||
#include "mndTrans.h"
|
#include "mndTrans.h"
|
||||||
#include "mndUser.h"
|
#include "mndUser.h"
|
||||||
#include "tmisce.h"
|
#include "tmisce.h"
|
||||||
|
#include "mndStb.h"
|
||||||
|
|
||||||
#define VGROUP_VER_NUMBER 1
|
#define VGROUP_VER_NUMBER 1
|
||||||
#define VGROUP_RESERVE_SIZE 64
|
#define VGROUP_RESERVE_SIZE 64
|
||||||
|
@ -1378,7 +1379,7 @@ int32_t mndAddDropVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgOb
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t vnIndex,
|
int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t vnIndex,
|
||||||
SArray *pArray, bool force) {
|
SArray *pArray, bool force, bool unsafe) {
|
||||||
SVgObj newVg = {0};
|
SVgObj newVg = {0};
|
||||||
memcpy(&newVg, pVgroup, sizeof(SVgObj));
|
memcpy(&newVg, pVgroup, sizeof(SVgObj));
|
||||||
|
|
||||||
|
@ -1455,7 +1456,7 @@ int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
|
||||||
mInfo("vgId:%d, will add 1 vnode and force remove 1 vnode", pVgroup->vgId);
|
mInfo("vgId:%d, will add 1 vnode and force remove 1 vnode", pVgroup->vgId);
|
||||||
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVg, pArray) != 0) return -1;
|
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVg, pArray) != 0) return -1;
|
||||||
newVg.replica--;
|
newVg.replica--;
|
||||||
SVnodeGid del = newVg.vnodeGid[vnIndex];
|
//SVnodeGid del = newVg.vnodeGid[vnIndex];
|
||||||
newVg.vnodeGid[vnIndex] = newVg.vnodeGid[newVg.replica];
|
newVg.vnodeGid[vnIndex] = newVg.vnodeGid[newVg.replica];
|
||||||
memset(&newVg.vnodeGid[newVg.replica], 0, sizeof(SVnodeGid));
|
memset(&newVg.vnodeGid[newVg.replica], 0, sizeof(SVnodeGid));
|
||||||
{
|
{
|
||||||
|
@ -1476,7 +1477,31 @@ int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
|
||||||
if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVg, &newVg.vnodeGid[vnIndex]) != 0) return -1;
|
if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, &newVg, &newVg.vnodeGid[vnIndex]) != 0) return -1;
|
||||||
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg) != 0) return -1;
|
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg) != 0) return -1;
|
||||||
|
|
||||||
if (newVg.replica == 1) {
|
if(newVg.replica == 1){
|
||||||
|
if(force && !unsafe){
|
||||||
|
terrno = TSDB_CODE_VND_META_DATA_UNSAFE_DELETE;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
SSdb *pSdb = pMnode->pSdb;
|
||||||
|
void *pIter = NULL;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
SStbObj *pStb = NULL;
|
||||||
|
pIter = sdbFetch(pSdb, SDB_STB, pIter, (void **)&pStb);
|
||||||
|
if (pIter == NULL) break;
|
||||||
|
|
||||||
|
if (strcmp(pStb->db, pDb->name) == 0) {
|
||||||
|
if (mndSetForceDropCreateStbRedoActions(pMnode, pTrans, &newVg, pStb) != 0) {
|
||||||
|
sdbCancelFetch(pSdb, pIter);
|
||||||
|
sdbRelease(pSdb, pStb);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sdbRelease(pSdb, pStb);
|
||||||
|
}
|
||||||
|
|
||||||
mInfo("vgId:%d, all data is dropped since replica=1", pVgroup->vgId);
|
mInfo("vgId:%d, all data is dropped since replica=1", pVgroup->vgId);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1498,7 +1523,7 @@ int32_t mndSetMoveVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int32_t mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t delDnodeId, bool force) {
|
int32_t mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t delDnodeId, bool force, bool unsafe) {
|
||||||
int32_t code = 0;
|
int32_t code = 0;
|
||||||
SArray *pArray = mndBuildDnodesArray(pMnode, delDnodeId);
|
SArray *pArray = mndBuildDnodesArray(pMnode, delDnodeId);
|
||||||
if (pArray == NULL) return -1;
|
if (pArray == NULL) return -1;
|
||||||
|
@ -1521,7 +1546,7 @@ int32_t mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t del
|
||||||
if (vnIndex != -1) {
|
if (vnIndex != -1) {
|
||||||
mInfo("vgId:%d, vnode:%d will be removed from dnode:%d, force:%d", pVgroup->vgId, vnIndex, delDnodeId, force);
|
mInfo("vgId:%d, vnode:%d will be removed from dnode:%d, force:%d", pVgroup->vgId, vnIndex, delDnodeId, force);
|
||||||
SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName);
|
SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName);
|
||||||
code = mndSetMoveVgroupInfoToTrans(pMnode, pTrans, pDb, pVgroup, vnIndex, pArray, force);
|
code = mndSetMoveVgroupInfoToTrans(pMnode, pTrans, pDb, pVgroup, vnIndex, pArray, force, unsafe);
|
||||||
mndReleaseDb(pMnode, pDb);
|
mndReleaseDb(pMnode, pDb);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2092,27 +2117,32 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb
|
||||||
mInfo("db:%s, vgId:%d, will add 2 vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId,
|
mInfo("db:%s, vgId:%d, will add 2 vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId,
|
||||||
pVgroup->vnodeGid[0].dnodeId);
|
pVgroup->vnodeGid[0].dnodeId);
|
||||||
|
|
||||||
//add first
|
//add second
|
||||||
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVgroup, pArray) != 0) return -1;
|
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVgroup, pArray) != 0) return -1;
|
||||||
|
|
||||||
|
//learner stage
|
||||||
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||||
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_LEARNER;
|
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_LEARNER;
|
||||||
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
if (mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &newVgroup.vnodeGid[1]) != 0) return -1;
|
if (mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &newVgroup.vnodeGid[1]) != 0) return -1;
|
||||||
|
|
||||||
|
//follower stage
|
||||||
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||||
if (mndAddAlterVnodeTypeAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0)
|
if (mndAddAlterVnodeTypeAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0)
|
||||||
return -1;
|
return -1;
|
||||||
|
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
||||||
|
return -1;
|
||||||
|
|
||||||
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
|
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
|
||||||
|
|
||||||
//add second
|
//add third
|
||||||
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVgroup, pArray) != 0) return -1;
|
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVgroup, pArray) != 0) return -1;
|
||||||
|
|
||||||
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||||
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||||
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_VOTER;
|
||||||
|
|
||||||
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
|
||||||
return -1;
|
return -1;
|
||||||
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0)
|
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0)
|
||||||
|
|
|
@ -305,10 +305,6 @@ void tsdbUntakeReadSnap(STsdbReader *pReader, STsdbReadSnap *pSnap, bool proa
|
||||||
// tsdbMerge.c ==============================================================================================
|
// tsdbMerge.c ==============================================================================================
|
||||||
int32_t tsdbMerge(STsdb *pTsdb);
|
int32_t tsdbMerge(STsdb *pTsdb);
|
||||||
|
|
||||||
#define TSDB_CACHE_NO(c) ((c).cacheLast == 0)
|
|
||||||
#define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0)
|
|
||||||
#define TSDB_CACHE_LAST(c) (((c).cacheLast & 2) > 0)
|
|
||||||
|
|
||||||
// tsdbDiskData ==============================================================================================
|
// tsdbDiskData ==============================================================================================
|
||||||
int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder);
|
int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder);
|
||||||
void *tDiskDataBuilderDestroy(SDiskDataBuilder *pBuilder);
|
void *tDiskDataBuilderDestroy(SDiskDataBuilder *pBuilder);
|
||||||
|
@ -346,13 +342,18 @@ struct STsdbFS {
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
rocksdb_t *db;
|
rocksdb_t *db;
|
||||||
rocksdb_options_t *options;
|
rocksdb_comparator_t *my_comparator;
|
||||||
rocksdb_flushoptions_t *flushoptions;
|
rocksdb_cache_t *blockcache;
|
||||||
rocksdb_writeoptions_t *writeoptions;
|
rocksdb_block_based_table_options_t *tableoptions;
|
||||||
rocksdb_readoptions_t *readoptions;
|
rocksdb_options_t *options;
|
||||||
rocksdb_writebatch_t *writebatch;
|
rocksdb_flushoptions_t *flushoptions;
|
||||||
TdThreadMutex rMutex;
|
rocksdb_writeoptions_t *writeoptions;
|
||||||
|
rocksdb_readoptions_t *readoptions;
|
||||||
|
rocksdb_writebatch_t *writebatch;
|
||||||
|
rocksdb_writebatch_t *rwritebatch;
|
||||||
|
TdThreadMutex rMutex;
|
||||||
|
STSchema *pTSchema;
|
||||||
} SRocksCache;
|
} SRocksCache;
|
||||||
|
|
||||||
struct STsdb {
|
struct STsdb {
|
||||||
|
@ -782,7 +783,7 @@ typedef struct SLDataIter {
|
||||||
#define tMergeTreeGetRow(_t) (&((_t)->pIter->rInfo.row))
|
#define tMergeTreeGetRow(_t) (&((_t)->pIter->rInfo.row))
|
||||||
int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
|
int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
|
||||||
STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
|
STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
|
||||||
bool destroyLoadInfo, const char *idStr, bool strictTimeRange, SLDataIter* pLDataIter);
|
bool destroyLoadInfo, const char *idStr, bool strictTimeRange, SLDataIter *pLDataIter);
|
||||||
void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter);
|
void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter);
|
||||||
bool tMergeTreeNext(SMergeTree *pMTree);
|
bool tMergeTreeNext(SMergeTree *pMTree);
|
||||||
bool tMergeTreeIgnoreEarlierTs(SMergeTree *pMTree);
|
bool tMergeTreeIgnoreEarlierTs(SMergeTree *pMTree);
|
||||||
|
@ -822,13 +823,15 @@ typedef struct SCacheRowsReader {
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
TSKEY ts;
|
TSKEY ts;
|
||||||
|
int8_t dirty;
|
||||||
SColVal colVal;
|
SColVal colVal;
|
||||||
} SLastCol;
|
} SLastCol;
|
||||||
|
|
||||||
int32_t tsdbOpenCache(STsdb *pTsdb);
|
int32_t tsdbOpenCache(STsdb *pTsdb);
|
||||||
void tsdbCloseCache(STsdb *pTsdb);
|
void tsdbCloseCache(STsdb *pTsdb);
|
||||||
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row);
|
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row);
|
||||||
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype);
|
int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype);
|
||||||
|
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int8_t ltype);
|
||||||
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
|
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
|
||||||
|
|
||||||
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb);
|
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb);
|
||||||
|
|
|
@ -199,7 +199,7 @@ void tqClose(STQ*);
|
||||||
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
|
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
|
||||||
int tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg);
|
int tqRegisterPushHandle(STQ* pTq, void* handle, SRpcMsg* pMsg);
|
||||||
int tqUnregisterPushHandle(STQ* pTq, void* pHandle);
|
int tqUnregisterPushHandle(STQ* pTq, void* pHandle);
|
||||||
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
|
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
|
||||||
|
|
||||||
int tqCommit(STQ*);
|
int tqCommit(STQ*);
|
||||||
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
|
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
|
||||||
|
@ -405,6 +405,10 @@ struct SVnode {
|
||||||
#define VND_IS_RSMA(v) ((v)->config.isRsma == 1)
|
#define VND_IS_RSMA(v) ((v)->config.isRsma == 1)
|
||||||
#define VND_IS_TSMA(v) ((v)->config.isTsma == 1)
|
#define VND_IS_TSMA(v) ((v)->config.isTsma == 1)
|
||||||
|
|
||||||
|
#define TSDB_CACHE_NO(c) ((c).cacheLast == 0)
|
||||||
|
#define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0)
|
||||||
|
#define TSDB_CACHE_LAST(c) (((c).cacheLast & 2) > 0)
|
||||||
|
|
||||||
struct STbUidStore {
|
struct STbUidStore {
|
||||||
tb_uid_t suid;
|
tb_uid_t suid;
|
||||||
SArray* tbUids;
|
SArray* tbUids;
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue