Merge remote-tracking branch 'origin/3.0' into feature/3.0_debug_wxy
commit d7a525f21f
@@ -4,9 +4,6 @@
[submodule "src/connector/hivemq-tdengine-extension"]
	path = src/connector/hivemq-tdengine-extension
	url = git@github.com:taosdata/hivemq-tdengine-extension.git
[submodule "deps/jemalloc"]
	path = deps/jemalloc
	url = https://github.com/jemalloc/jemalloc
[submodule "deps/TSZ"]
	path = deps/TSZ
	url = https://github.com/taosdata/TSZ.git
@@ -84,6 +84,12 @@ ELSE ()
ENDIF ()
ENDIF ()

option(
    JEMALLOC_ENABLED
    "If build with jemalloc"
    OFF
)

option(
    BUILD_SANITIZER
    "If build sanitizer"
@@ -0,0 +1,14 @@

# jemalloc
ExternalProject_Add(jemalloc
    GIT_REPOSITORY https://github.com/jemalloc/jemalloc.git
    GIT_TAG 5.3.0
    SOURCE_DIR "${TD_CONTRIB_DIR}/jemalloc"
    BINARY_DIR ""
    CONFIGURE_COMMAND ""
    BUILD_COMMAND ""
    INSTALL_COMMAND ""
    TEST_COMMAND ""
    GIT_SHALLOW true
    GIT_PROGRESS true
)
@@ -2,7 +2,7 @@
# zlib
ExternalProject_Add(taos-tools
    GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
    GIT_TAG 817cb6a
    GIT_TAG 2.1.1
    SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
    BINARY_DIR ""
    #BUILD_IN_SOURCE TRUE
@@ -27,6 +27,10 @@ else ()
    cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

if(TD_LINUX_64 AND JEMALLOC_ENABLED)
    cat("${TD_SUPPORT_DIR}/jemalloc_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif()

# pthread
if(${BUILD_PTHREAD})
    cat("${TD_SUPPORT_DIR}/pthread_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@@ -399,6 +403,18 @@ if(${BUILD_ADDR2LINE})
    endif(NOT ${TD_WINDOWS})
endif(${BUILD_ADDR2LINE})

# jemalloc
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
    include(ExternalProject)
    ExternalProject_Add(jemalloc
        PREFIX "jemalloc"
        SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
        BUILD_IN_SOURCE 1
        CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
        BUILD_COMMAND ${MAKE}
    )
    INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
ENDIF ()

# ================================================================================================
# Build test
(File diff suppressed because it is too large.)
@@ -16,15 +16,15 @@ toc_max_heading_level: 4
SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: the absolute value of the specified column
**Description**: the absolute value of the specified field.

**Return type**: UBIGINT if the input value is an integer; DOUBLE if the input value is of FLOAT/DOUBLE type.
**Return type**: same as the original data type of the specified field.

**Applicable data types**: numeric types.

**Nested subquery support**: applicable to both inner and outer queries.

**Applicable to**: tables and super tables
**Applicable to**: tables and super tables.

**Usage notes**: can only be used with normal columns and with selection and projection functions; cannot be used together with aggregation functions.
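As a quick illustration of the syntax above (not taken from the original document), a minimal query follows; the super table `meters` and numeric column `current` are hypothetical names used only for this sketch:

```sql
SELECT ABS(current) FROM meters WHERE current < 0;
```

Each returned value keeps the original data type of `current`, as described above.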
@ -34,15 +34,15 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的反余弦结果
|
||||
**功能说明**:获得指定字段的反余弦结果。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
||||
|
@ -52,15 +52,15 @@ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的反正弦结果
|
||||
**功能说明**:获得指定字段的反正弦结果。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
||||
|
@ -71,15 +71,15 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的反正切结果
|
||||
**功能说明**:获得指定字段的反正切结果。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
||||
|
@@ -90,20 +90,17 @@ SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
```

**Description**: the value of the specified column rounded up to the nearest integer.
**Description**: the value of the specified field rounded up to the nearest integer.

**Return type**: same as the original data type of the specified column. For example, if the original data type of the column is FLOAT the returned type is also FLOAT; if it is DOUBLE the returned type is also DOUBLE.
**Return type**: same as the original data type of the specified field.

**Applicable data types**: numeric types.

**Applicable to**: standard tables and super tables.
**Applicable to**: tables and super tables.

**Nested subquery support**: applicable to both inner and outer queries.

**Usage notes**:

- Arithmetic operations +, -, \*, / are supported, e.g. ceil(col1) + ceil(col2).
- Can only be used with normal columns and with selection and projection functions; cannot be used together with aggregation functions.
**Usage notes**: can only be used with normal columns and with selection and projection functions; cannot be used together with aggregation functions.
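A minimal sketch of the CEIL syntax, again using the hypothetical table `meters` and column `current`:

```sql
SELECT CEIL(current) FROM meters;
```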
#### COS
@ -111,15 +108,15 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的余弦结果
|
||||
**功能说明**:获得指定字段的余弦结果。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
||||
|
@ -129,24 +126,24 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的向下取整数的结果。
|
||||
**功能说明**:获得指定字段的向下取整数的结果。
|
||||
其他使用说明参见 CEIL 函数描述。
|
||||
|
||||
#### LOG
|
||||
|
||||
```sql
|
||||
SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
|
||||
SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列对于底数 base 的对数
|
||||
**功能说明**:获得指定字段对于底数 base 的对数。如果 base 参数省略,则返回指定字段的自然对数值。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
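An illustrative sketch of the two LOG forms described above, with an explicit base and with the base omitted (natural logarithm); `meters` and `voltage` are hypothetical names:

```sql
SELECT LOG(voltage, 2) FROM meters;
SELECT LOG(voltage) FROM meters;
```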
||||
|
@ -157,15 +154,15 @@ SELECT LOG(field_name, base) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的指数为 power 的幂
|
||||
**功能说明**:获得指定字段的指数为 power 的幂。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
||||
|
@ -176,7 +173,7 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的四舍五入的结果。
|
||||
**功能说明**:获得指定字段的四舍五入的结果。
|
||||
其他使用说明参见 CEIL 函数描述。
|
||||
|
||||
|
||||
|
@ -186,15 +183,15 @@ SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的正弦结果
|
||||
**功能说明**:获得指定字段的正弦结果。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
||||
|
@ -204,15 +201,15 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的平方根
|
||||
**功能说明**:获得指定字段的平方根。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
||||
|
@ -222,15 +219,15 @@ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:获得指定列的正切结果
|
||||
**功能说明**:获得指定字段的正切结果。
|
||||
|
||||
**返回结果类型**:DOUBLE。如果输入值为 NULL,输出值也为 NULL
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
**使用说明**:只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用。
|
||||
|
||||
|
@ -246,13 +243,13 @@ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**功能说明**:以字符计数的字符串长度。
|
||||
|
||||
**返回结果类型**:INT。如果输入值为NULL,输出值为NULL。
|
||||
**返回结果类型**:BIGINT。
|
||||
|
||||
**适用数据类型**:VARCHAR, NCHAR
|
||||
**适用数据类型**:VARCHAR, NCHAR。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
#### CONCAT
|
||||
|
||||
|
@ -262,13 +259,13 @@ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHER
|
|||
|
||||
**功能说明**:字符串连接函数。
|
||||
|
||||
**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果输入值为NULL,输出值为NULL。
|
||||
**返回结果类型**:如果所有参数均为 VARCHAR 类型,则结果类型为 VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。
|
||||
|
||||
**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为2个,最大参数个数为8个。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
#### CONCAT_WS
|
||||
|
@ -279,13 +276,13 @@ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | st
|
|||
|
||||
**功能说明**:带分隔符的字符串连接函数。
|
||||
|
||||
**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果输入值为NULL,输出值为NULL。如果separator值不为NULL,其他输入为NULL,输出为空串。
|
||||
**返回结果类型**:如果所有参数均为VARCHAR类型,则结果类型为VARCHAR。如果参数包含NCHAR类型,则结果类型为NCHAR。如果参数包含NULL值,则输出值为NULL。
|
||||
|
||||
**适用数据类型**:VARCHAR, NCHAR。 该函数最小参数个数为3个,最大参数个数为9个。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
#### LENGTH
|
||||
|
@ -296,13 +293,13 @@ SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**功能说明**:以字节计数的字符串长度。
|
||||
|
||||
**返回结果类型**:INT。
|
||||
**返回结果类型**:BIGINT。
|
||||
|
||||
**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
#### LOWER
|
||||
|
@ -313,13 +310,13 @@ SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**功能说明**:将字符串参数值转换为全小写字母。
|
||||
|
||||
**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
|
||||
**返回结果类型**:与输入字段的原始类型相同。
|
||||
|
||||
**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
|
||||
**适用数据类型**:VARCHAR, NCHAR。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
#### LTRIM
|
||||
|
@ -330,13 +327,13 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**功能说明**:返回清除左边空格后的字符串。
|
||||
|
||||
**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
|
||||
**返回结果类型**:与输入字段的原始类型相同。
|
||||
|
||||
**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
|
||||
**适用数据类型**:VARCHAR, NCHAR。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
#### RTRIM
|
||||
|
@ -347,13 +344,13 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**功能说明**:返回清除右边空格后的字符串。
|
||||
|
||||
**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
|
||||
**返回结果类型**:与输入字段的原始类型相同。
|
||||
|
||||
**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
|
||||
**适用数据类型**:VARCHAR, NCHAR。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
#### SUBSTR
|
||||
|
@ -362,15 +359,15 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。
|
||||
**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。如果输入参数 len 被忽略,返回的子串包含从 pos 开始的整个字串。
|
||||
|
||||
**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
|
||||
**返回结果类型**:与输入字段的原始类型相同。
|
||||
|
||||
**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。输入参数pos可以为正数,也可以为负数。如果pos是正数,表示开始位置从字符串开头正数计算。如果pos为负数,表示开始位置从字符串结尾倒数计算。如果输入参数len被忽略,返回的子串包含从pos开始的整个字串。
|
||||
**适用数据类型**:VARCHAR, NCHAR。输入参数 pos 可以为正数,也可以为负数。如果 pos 是正数,表示开始位置从字符串开头正数计算。如果 pos 为负数,表示开始位置从字符串结尾倒数计算。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
#### UPPER
|
||||
|
@ -381,13 +378,13 @@ SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**功能说明**:将字符串参数值转换为全大写字母。
|
||||
|
||||
**返回结果类型**:同输入类型。如果输入值为NULL,输出值为NULL。
|
||||
**返回结果类型**:与输入字段的原始类型相同。
|
||||
|
||||
**适用数据类型**:输入参数是 VARCHAR 类型或者 NCHAR 类型的字符串或者列。
|
||||
**适用数据类型**:VARCHAR, NCHAR。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
### 转换函数
|
||||
|
@@ -400,16 +397,19 @@ SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: data type conversion function. The input parameter expression can be a normal column, a constant, a scalar function, or any arithmetic combination of them; it can only be used in the SELECT clause.
**Description**: data type conversion function. Returns the result of converting expression to the type specified by type_name; it can only be used in the SELECT clause.

**Return type**: the type specified in CAST (type_name), which can be BIGINT, BIGINT UNSIGNED, BINARY, VARCHAR, NCHAR, or TIMESTAMP.
**Return type**: the type specified in CAST (type_name).

**Applicable data types**: the input parameter expression can be of any type except BLOB, MEDIUMBLOB, and JSON
**Applicable data types**: the input parameter expression can be of any type except BLOB, MEDIUMBLOB, and JSON.

**Nested subquery support**: applicable to both inner and outer queries.

**Applicable to**: tables and super tables.

**Usage notes**:

- Unsupported type conversions raise an error directly.
- If the input value is NULL, the output value is also NULL.
- For supported types where certain values cannot be converted correctly, the output of the conversion function prevails. The cases currently possible are:
  1) When converting a string to a numeric type, invalid characters may be present; for example, "a" may be converted to 0 without raising an error.
  2) When converting to a numeric type, a value larger than the range type_name can represent overflows without raising an error.
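A short sketch of CAST, assuming `meters` is a table with a numeric column `voltage` and a TIMESTAMP primary key `ts` (both names are hypothetical):

```sql
SELECT CAST(voltage AS BIGINT) FROM meters;
SELECT CAST(ts AS BIGINT) FROM meters;
```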
@@ -418,20 +418,23 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
#### TO_ISO8601

```sql
SELECT TO_ISO8601(ts_val | ts_col) FROM { tb_name | stb_name } [WHERE clause];
SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
```

**Description**: converts a UNIX timestamp to a date-time string in ISO8601 format, appending the client time zone information.
**Description**: converts a UNIX timestamp to a date-time string in ISO8601 format, appending time zone information. The timezone parameter lets the user attach an arbitrary time zone to the output; if it is omitted, the output carries the current client system time zone.

**Return type**: VARCHAR.

**Applicable data types**: a UNIX timestamp constant or a column of TIMESTAMP type
**Applicable data types**: INTEGER, TIMESTAMP.

**Applicable to**: tables and super tables.
**Nested subquery support**: applicable to both inner and outer queries.

**Applicable to**: tables and super tables.

**Usage notes**:

- If the input is a UNIX timestamp constant, the precision of the returned value is determined by the number of digits of the timestamp;
- Accepted formats for the timezone parameter are: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]. For example, TO_ISO8601(1, "+00:00").
- If the input is an integer representing a UNIX timestamp, the precision of the returned value is determined by the number of digits of the timestamp;
- If the input is a column of TIMESTAMP type, the precision of the returned timestamp matches the time precision configured for the current DATABASE.
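An illustrative query based on the syntax above; `meters` and its timestamp column `ts` are assumed, not taken from the original:

```sql
SELECT TO_ISO8601(ts, "+00:00") FROM meters LIMIT 1;
```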
@ -443,32 +446,34 @@ SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
|
|||
|
||||
**功能说明**: 将字符串常量转换为 JSON 类型。
|
||||
|
||||
**返回结果数据类型**: JSON
|
||||
**返回结果数据类型**: JSON。
|
||||
|
||||
**适用数据类型**: JSON 字符串,形如 '{ "literal" : literal }'。'{}'表示空值。键必须为字符串字面量,值可以为数值字面量、字符串字面量、布尔字面量或空值字面量。str_literal中不支持转义符。
|
||||
|
||||
**适用于**: 表和超级表
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**: 表和超级表。
|
||||
|
||||
|
||||
#### TO_UNIXTIMESTAMP
|
||||
|
||||
```sql
|
||||
SELECT TO_UNIXTIMESTAMP(datetime_string | ts_col) FROM { tb_name | stb_name } [WHERE clause];
|
||||
SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。
|
||||
|
||||
**返回结果数据类型**:长整型 INT64。
|
||||
**返回结果数据类型**:BIGINT。
|
||||
|
||||
**应用字段**:字符串常量或是 VARCHAR/NCHAR 类型的列。
|
||||
**应用字段**:VARCHAR, NCHAR。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 0。
|
||||
- 输入的日期时间字符串须符合 ISO8601/RFC3339 标准,无法转换的字符串格式将返回 NULL。
|
||||
- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
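
A hedged example of converting an ISO8601 string with TO_UNIXTIMESTAMP, assuming a hypothetical table `meters`:

```sql
SELECT TO_UNIXTIMESTAMP('2022-06-30T08:00:00+08:00') FROM meters LIMIT 1;
```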
|
||||
|
||||
|
||||
|
@ -488,11 +493,13 @@ INSERT INTO tb_name VALUES (NOW(), ...);
|
|||
|
||||
**功能说明**:返回客户端当前系统时间。
|
||||
|
||||
**返回结果数据类型**:TIMESTAMP 时间戳类型。
|
||||
**返回结果数据类型**:TIMESTAMP。
|
||||
|
||||
**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
|
@ -504,40 +511,42 @@ INSERT INTO tb_name VALUES (NOW(), ...);
|
|||
#### TIMEDIFF
|
||||
|
||||
```sql
|
||||
SELECT TIMEDIFF(ts_val1 | datetime_string1 | ts_col1, ts_val2 | datetime_string2 | ts_col2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
|
||||
SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。
|
||||
|
||||
**返回结果数据类型**:长整型 INT64。
|
||||
**返回结果数据类型**:BIGINT。输入包含不符合时间日期格式字符串则返回 NULL。
|
||||
|
||||
**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。
|
||||
**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
**使用说明**:
|
||||
- 支持的时间单位 time_unit 如下:
|
||||
1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
|
||||
1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
|
||||
- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
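
A sketch of TIMEDIFF with an explicit time_unit; the subtable `d1001` and its timestamp column `ts` are hypothetical names used only for illustration:

```sql
SELECT TIMEDIFF(ts, '2022-01-01 00:00:00', 1h) FROM d1001;
```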
|
||||
|
||||
|
||||
#### TIMETRUNCATE
|
||||
|
||||
```sql
|
||||
SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name | stb_name } [WHERE clause];
|
||||
SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。
|
||||
|
||||
**返回结果数据类型**:TIMESTAMP 时间戳类型。
|
||||
**返回结果数据类型**:TIMESTAMP。
|
||||
|
||||
**应用字段**:UNIX 时间戳,日期时间格式的字符串,或者 TIMESTAMP 类型的列。
|
||||
**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
- 支持的时间单位 time_unit 如下:
|
||||
1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天)。
|
||||
1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
|
||||
- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
|
||||
|
||||
|
||||
|
@ -549,11 +558,11 @@ SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
|
|||
|
||||
**功能说明**:返回客户端当前时区信息。
|
||||
|
||||
**返回结果数据类型**:VARCHAR 类型。
|
||||
**返回结果数据类型**:VARCHAR。
|
||||
|
||||
**应用字段**:无
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
||||
#### TODAY
|
||||
|
@ -566,11 +575,11 @@ INSERT INTO tb_name VALUES (TODAY(), ...);
|
|||
|
||||
**功能说明**:返回客户端当日零时的系统时间。
|
||||
|
||||
**返回结果数据类型**:TIMESTAMP 时间戳类型。
|
||||
**返回结果数据类型**:TIMESTAMP。
|
||||
|
||||
**应用字段**:在 WHERE 或 INSERT 语句中使用时只能作用于 TIMESTAMP 类型的字段。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
|
@ -585,19 +594,37 @@ INSERT INTO tb_name VALUES (TODAY(), ...);
|
|||
|
||||
TDengine 支持针对数据的聚合查询。提供如下聚合函数。
|
||||
|
||||
### APERCENTILE
|
||||
|
||||
```sql
|
||||
SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。
|
||||
|
||||
**返回数据类型**: DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**说明**:
|
||||
- P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。
|
||||
- algo_type 取值为 "default" 或 "t-digest"。 输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用t-digest算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。
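
An illustrative call selecting the t-digest algorithm; `meters` and `current` are hypothetical names:

```sql
SELECT APERCENTILE(current, 99, "t-digest") FROM meters;
```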
|
||||
|
||||
### AVG
|
||||
|
||||
```sql
|
||||
SELECT AVG(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:统计表/超级表中某列的平均值。
|
||||
**功能说明**:统计指定字段的平均值。
|
||||
|
||||
**返回数据类型**:双精度浮点数 Double。
|
||||
**返回数据类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
||||
### COUNT
|
||||
|
@ -606,19 +633,18 @@ SELECT AVG(field_name) FROM tb_name [WHERE clause];
|
|||
SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:统计表/超级表中记录行数或某列的非空值个数。
|
||||
**功能说明**:统计指定字段的记录行数。
|
||||
|
||||
**返回数据类型**:长整型 INT64。
|
||||
**返回数据类型**:BIGINT。
|
||||
|
||||
**适用数据类型**:应用全部字段。
|
||||
**适用数据类型**:全部类型字段。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
- 可以使用星号(\*)来替代具体的字段,使用星号(\*)返回全部记录数量。
|
||||
- 针对同一表的(不包含 NULL 值)字段查询结果均相同。
|
||||
- 如果统计对象是具体的列,则返回该列中非 NULL 值的记录数量。
|
||||
- 如果统计字段是具体的列,则返回该列中非 NULL 值的记录数量。
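
A minimal sketch contrasting COUNT(*) with COUNT on a concrete column, where only non-NULL values are counted in the latter; `meters` and `voltage` are hypothetical:

```sql
SELECT COUNT(*) FROM meters;
SELECT COUNT(voltage) FROM meters;
```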
|
||||
|
||||
|
||||
### ELAPSED
|
||||
|
@ -629,17 +655,18 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
|
|||
|
||||
**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。
|
||||
|
||||
**返回结果类型**:Double
|
||||
**返回结果类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:Timestamp类型
|
||||
**适用数据类型**:TIMESTAMP。
|
||||
|
||||
**支持的版本**:2.6.0.0 及以后的版本。
|
||||
|
||||
**适用于**: 表,超级表,嵌套查询的外层查询
|
||||
|
||||
**说明**:
|
||||
- field_name参数只能是表的第一列,即timestamp主键列。
|
||||
- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit参数未指定时,以数据库的时间分辨率为时间单位。
|
||||
- field_name参数只能是表的第一列,即 TIMESTAMP 类型的主键列。
|
||||
- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下:
|
||||
1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
|
||||
- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。
|
||||
- order by asc/desc不影响差值的计算结果。
|
||||
- 对于超级表,需要和group by tbname子句组合使用,不可以直接使用。
|
||||
|
@ -647,6 +674,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
|
|||
- 对于嵌套查询,仅当内层查询会输出隐式时间戳列时有效。例如select elapsed(ts) from (select diff(value) from sub1)语句,diff函数会让内层查询输出隐式时间戳列,此为主键列,可以用于elapsed函数的第一个参数。相反,例如select elapsed(ts) from (select * from sub1) 语句,ts列输出到外层时已经没有了主键列的含义,无法使用elapsed函数。此外,elapsed函数作为一个与时间线强依赖的函数,形如select elapsed(ts) from (select diff(value) from st group by tbname)尽管会返回一条计算结果,但并无实际意义,这种用法后续也将被限制。
|
||||
- 不支持与leastsquares、diff、derivative、top、bottom、last_row、interp等函数混合使用。
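
A hedged sketch of ELAPSED combined with an INTERVAL window, assuming a hypothetical subtable `d1001` whose first column is the TIMESTAMP primary key `ts`:

```sql
SELECT ELAPSED(ts, 1s) FROM d1001
  WHERE ts >= '2022-01-01 00:00:00' AND ts < '2022-01-02 00:00:00'
  INTERVAL(1h);
```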
|
||||
|
||||
|
||||
### LEASTSQUARES
|
||||
|
||||
```sql
|
||||
|
@ -662,32 +690,17 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
|
|||
**适用于**:表。
|
||||
|
||||
|
||||
### MODE
|
||||
|
||||
```sql
|
||||
SELECT MODE(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出空。
|
||||
|
||||
**返回数据类型**:同应用的字段。
|
||||
|
||||
**适用数据类型**: 数值类型。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
||||
### SPREAD
|
||||
|
||||
```sql
|
||||
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:统计表/超级表中某列的最大值和最小值之差。
|
||||
**功能说明**:统计表中某列的最大值和最小值之差。
|
||||
|
||||
**返回数据类型**:双精度浮点数。
|
||||
**返回数据类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型或TIMESTAMP类型。
|
||||
**适用数据类型**:INTEGER, TIMESTAMP。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
@ -700,7 +713,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
|
|||
|
||||
**功能说明**:统计表中某列的均方差。
|
||||
|
||||
**返回数据类型**:双精度浮点数 Double。
|
||||
**返回数据类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
|
@ -715,7 +728,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause];
|
|||
|
||||
**功能说明**:统计表/超级表中某列的和。
|
||||
|
||||
**返回数据类型**:双精度浮点数 Double 和长整型 INT64。
|
||||
**返回数据类型**:DOUBLE, BIGINT。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
|
@ -729,10 +742,10 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
|
|||
```
|
||||
|
||||
**功能说明**:
|
||||
- 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,但是求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。
|
||||
- 采用 hyperloglog 算法,返回某列的基数。该算法在数据量很大的情况下,可以明显降低内存的占用,求出来的基数是个估算值,标准误差(标准误差是多次实验,每次的平均数的标准差,不是与真实结果的误差)为 0.81%。
|
||||
- 在数据量较少的时候该算法不是很准确,可以使用 select count(data) from (select unique(col) as data from table) 的方法。
|
||||
|
||||
**返回结果类型**:整形。
|
||||
**返回结果类型**:INTEGER。
|
||||
|
||||
**适用数据类型**:任何类型。
|
||||
|
||||
|
@ -747,7 +760,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
|
|||
|
||||
**功能说明**:统计数据按照用户指定区间的分布。
|
||||
|
||||
**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为双精度浮点类型 DOUBLE,否则为长整形 INT64。
|
||||
**返回结果类型**:如归一化参数 normalized 设置为 1,返回结果为 DOUBLE 类型,否则为 BIGINT 类型。
|
||||
|
||||
**适用数据类型**:数值型字段。
|
||||
|
||||
|
@ -769,25 +782,27 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
|
|||
3. normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。
|
||||
|
||||
|
||||
### PERCENTILE
|
||||
|
||||
```sql
|
||||
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:统计表中某列的值百分比分位数。
|
||||
|
||||
**返回数据类型**: DOUBLE。
|
||||
|
||||
**应用字段**:数值类型。
|
||||
|
||||
**适用于**:表。
|
||||
|
||||
**使用说明**:*P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX。
|
||||
|
||||
|
||||
## 选择函数
|
||||
|
||||
选择函数根据语义在查询结果集中选择一行或多行结果返回。用户可以同时指定输出 ts 列或其他列(包括 tbname 和标签列),这样就可以方便地知道被选出的值是源于哪个数据行的。
|
||||
|
||||
### APERCENTILE
|
||||
|
||||
```sql
|
||||
SELECT APERCENTILE(field_name, P[, algo_type])
|
||||
FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。
|
||||
|
||||
**返回数据类型**: 双精度浮点数 Double。
|
||||
|
||||
**适用数据类型**:数值类型。P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。如果不指定 algo_type 则使用默认算法 。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
|
||||
### BOTTOM
|
||||
|
||||
```sql
|
||||
|
@ -922,21 +937,41 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
|
|||
**适用于**:表和超级表。
|
||||
|
||||
|
||||
### PERCENTILE
|
||||
### MODE
|
||||
|
||||
```sql
|
||||
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
|
||||
SELECT MODE(field_name) FROM tb_name [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:统计表中某列的值百分比分位数。
|
||||
**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出NULL。
|
||||
|
||||
**返回数据类型**: 双精度浮点数 Double。
|
||||
**返回数据类型**:与输入数据类型一致。
|
||||
|
||||
**应用字段**:数值类型。
|
||||
**适用数据类型**:全部类型字段。
|
||||
|
||||
**适用于**:表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:*P*值取值范围 0≤*P*≤100,为 0 的时候等同于 MIN,为 100 的时候等同于 MAX。
|
||||
|
||||
### SAMPLE
|
||||
|
||||
```sql
|
||||
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。
|
||||
|
||||
**返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。
|
||||
|
||||
**适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。
|
||||
|
||||
**嵌套子查询支持**: 适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
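
An illustrative SAMPLE query on a super table, forced onto per-child-table time lines as the note above requires; `meters` and `current` are hypothetical names:

```sql
SELECT SAMPLE(current, 10) FROM meters PARTITION BY tbname;
```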
|
||||
|
||||
|
||||
### TAIL
|
||||
|
@ -951,7 +986,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
|
|||
|
||||
**返回数据类型**:同应用的字段。
|
||||
|
||||
**适用数据类型**:适合于除时间主列外的任何类型。
|
||||
**适用数据类型**:适合于除时间主键列外的任何类型。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
|
||||
|
@ -968,7 +1003,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
|
|||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
|
@ -1003,19 +1038,19 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**功能说明**:累加和(Cumulative sum),输出行与输入行数相同。
|
||||
|
||||
**返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。 返回结果中同时带有每行记录对应的时间戳。
|
||||
**返回结果类型**: 输入列如果是整数类型返回值为长整型 (int64_t),浮点数返回值为双精度浮点数(Double)。无符号整数类型返回值为无符号长整型(uint64_t)。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**嵌套子查询支持**: 适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
|
||||
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
|
||||
- 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
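
A sketch of CSUM reduced to single time lines with PARTITION BY tbname, per the note above; `meters` and `current` are hypothetical:

```sql
SELECT CSUM(current) FROM meters PARTITION BY tbname;
```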
|
||||
|
||||
|
||||
### DERIVATIVE
|
||||
|
@ -1026,14 +1061,16 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
|
|||
|
||||
**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。
|
||||
|
||||
**返回数据类型**:双精度浮点数。
|
||||
**返回数据类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表、超级表
|
||||
|
||||
**使用说明**: DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
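
A minimal example combining DERIVATIVE with the `_rowts` pseudo column mentioned above; the subtable `d1001` and column `current` are hypothetical:

```sql
SELECT _rowts, DERIVATIVE(current, 10m, 0) FROM d1001;
```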
|
||||
|
||||
### DIFF
|
||||
|
||||
|
@ -1047,9 +1084,12 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
|
|||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**: 输出结果行数是范围内总行数减一,第一行没有结果输出。
|
||||
**使用说明**:
|
||||
|
||||
- 输出结果行数是范围内总行数减一,第一行没有结果输出。
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。
|
||||
|
||||
|
||||
### IRATE
|
||||
|
@ -1060,11 +1100,12 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
|
|||
|
||||
**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。
|
||||
|
||||
**返回数据类型**:双精度浮点数 Double。
|
||||
**返回数据类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
|
||||
### MAVG
|
||||
|
||||
|
@ -1074,40 +1115,20 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。
|
||||
|
||||
**返回结果类型**: 返回双精度浮点数类型。
|
||||
**返回结果类型**: DOUBLE。
|
||||
|
||||
**适用数据类型**: 数值类型。
|
||||
|
||||
**嵌套子查询支持**: 适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
|
||||
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
|
||||
- 使用在超级表上的时候,需要搭配 Group by tbname使用,将结果强制规约到单个时间线。
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||
|
||||
### SAMPLE
|
||||
|
||||
```sql
|
||||
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。
|
||||
|
||||
**返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。
|
||||
|
||||
**适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。
|
||||
|
||||
**嵌套子查询支持**: 适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表
|
||||
|
||||
**使用说明**:
|
||||
|
||||
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
|
||||
- 使用在超级表上的时候,需要搭配 Group by tbname 使用,将结果强制规约到单个时间线。
|
||||
|
||||
### STATECOUNT
|
||||
|
||||
|
@ -1119,10 +1140,10 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
|
|||
|
||||
**参数范围**:
|
||||
|
||||
- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。
|
||||
- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。
|
||||
- val : 数值型
|
||||
|
||||
**返回结果类型**:整形。
|
||||
**返回结果类型**:INTEGER。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
|
@ -1132,7 +1153,7 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)
|
||||
- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
|
||||
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
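
An illustrative STATECOUNT query, assuming a hypothetical subtable `d1001` with a numeric column `voltage`:

```sql
SELECT STATECOUNT(voltage, "GT", 220) FROM d1001;
```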
|
||||
|
||||
|
||||
|
@ -1146,11 +1167,11 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
|
|||
|
||||
**参数范围**:
|
||||
|
||||
- oper : LT (小于)、GT(大于)、LE(小于等于)、GE(大于等于)、NE(不等于)、EQ(等于),不区分大小写。
|
||||
- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。
|
||||
- val : 数值型
|
||||
- unit : 时间长度的单位,范围[1s、1m、1h ],不足一个单位舍去。默认为 1s。
|
||||
- unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。
|
||||
|
||||
**返回结果类型**:整形。
|
||||
**返回结果类型**:INTEGER。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
|
@ -1160,7 +1181,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 该函数可以应用在普通表上,在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)
|
||||
- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
|
||||
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
|
||||
|
||||
|
||||
|
@ -1172,13 +1193,13 @@ SELECT TWA(field_name) FROM tb_name WHERE clause;
|
|||
|
||||
**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。
|
||||
|
||||
**返回数据类型**:双精度浮点数 Double。
|
||||
**返回数据类型**:DOUBLE。
|
||||
|
||||
**适用数据类型**:数值类型。
|
||||
|
||||
**适用于**:表、超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**: TWA 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。
|
||||
**使用说明**: TWA 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
|
||||
|
||||
|
||||
## 系统信息函数
|
||||
|
|
|
@ -80,21 +80,16 @@ taos --dump-config
|
|||
| 补充说明 | RESTful 服务在 2.4.0.0 之前(不含)由 taosd 提供,默认端口为 6041; 在 2.4.0.0 及后续版本由 taosAdapter,默认端口为 6041 |
|
||||
|
||||
:::note
|
||||
确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。(详细的端口情况请参见下表)
|
||||
确保集群中所有主机在端口 6030 上的 TCP 协议能够互通。(详细的端口情况请参见下表)
|
||||
:::
|
||||
| 协议 | 默认端口 | 用途说明 | 修改方法 |
|
||||
| :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| TCP | 6030 | 客户端与服务端之间通讯。 | 由配置文件设置 serverPort 决定。 |
|
||||
| TCP | 6035 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
|
||||
| TCP | 6040 | 多节点集群的节点间数据同步。 | 随 serverPort 端口变化。 |
|
||||
| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
|
||||
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
|
||||
| TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 |
|
||||
| TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 |
|
||||
| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
|
||||
| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化(2.3.0.1+以上版本)。 |
|
||||
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
|
||||
| UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 |
|
||||
| UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
|
||||
|
||||
### maxShellConns
|
||||
|
||||
|
@ -105,26 +100,6 @@ taos --dump-config
|
|||
| 取值范围 | 10-50000000 |
|
||||
| 缺省值 | 5000 |
|
||||
|
||||
### maxConnections
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 一个数据库连接所容许的 dnode 连接数 |
|
||||
| 取值范围 | 1-100000 |
|
||||
| 缺省值 | 5000 |
|
||||
| 补充说明 | 实际测试下来,如果默认没有配,选 50 个 worker thread 会产生 Network unavailable |
|
||||
|
||||
### rpcForceTcp
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------------------------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | 强制使用 TCP 传输 |
|
||||
| 取值范围 | 0: 不开启 1: 开启 |
|
||||
| 缺省值 | 0 |
|
||||
| 补充说明 | 在网络比较差的环境中,建议开启。<br/>2.0 版本新增。 |
|
||||
|
||||
## 监控相关
|
||||
|
||||
### monitor
|
||||
|
@ -132,10 +107,26 @@ taos --dump-config
|
|||
| 属性 | 说明 |
|
||||
| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽、HTTP 请求量的监控记录,记录信息存储在`LOG`库中。 |
|
||||
| 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽的监控记录,监控信息将通过 HTTP 协议发送给由 `monitorFqdn` 和 `monitorPort` 指定的 TaosKeeper 监控服务 |
|
||||
| 取值范围 | 0:关闭监控服务, 1:激活监控服务。 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### monitorFqdn
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | TaosKeeper 监控服务的 FQDN |
|
||||
| 缺省值 | 无 |
|
||||
|
||||
### monitorPort
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | TaosKeeper 监控服务的端口号 |
|
||||
| 缺省值 | 6043 |
|
||||
|
||||
### monitorInterval
|
||||
|
||||
| 属性 | 说明 |
|
||||
|
@ -143,9 +134,10 @@ taos --dump-config
|
|||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 监控数据库记录系统参数(CPU/内存)的时间间隔 |
|
||||
| 单位 | 秒 |
|
||||
| 取值范围 | 1-600 |
|
||||
| 取值范围 | 1-200000 |
|
||||
| 缺省值 | 30 |
|
||||
|
||||
|
||||
### telemetryReporting
|
||||
|
||||
| 属性 | 说明 |
|
||||
|
@ -167,19 +159,10 @@ taos --dump-config
|
|||
| 缺省值 | 无 |
|
||||
| 补充说明 | 计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。<br/>(2.0.15 以前的版本中,此参数的单位是字节) |
|
||||
|
||||
### ratioOfQueryCores
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 设置查询线程的最大数量。 |
|
||||
| 缺省值 | 1 |
|
||||
| 补充说明 | 最小值 0 表示只有 1 个查询线程 <br/> 最大值 2 表示最大建立 2 倍 CPU 核数的查询线程。<br/>默认为 1,表示最大和 CPU 核数相等的查询线程。<br/>该值可以为小数,即 0.5 表示最大建立 CPU 核数一半的查询线程。 |
|
||||
|
||||
### maxNumOfDistinctRes
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------- | --- |
|
||||
| -------- | -------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 允许返回的 distinct 结果最大行数 |
|
||||
| 取值范围 | 默认值为 10 万,最大值 1 亿 |
|
||||
|
@ -301,96 +284,6 @@ charset 的有效值是 UTF-8。
|
|||
| 含义 | 数据文件目录,所有的数据文件都将写入该目录 |
|
||||
| 缺省值 | /var/lib/taos |
|
||||
|
||||
### cache
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 内存块的大小 |
|
||||
| 单位 | MB |
|
||||
| 缺省值 | 16 |
|
||||
|
||||
### blocks
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ----------------------------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 每个 vnode(tsdb)中有多少 cache 大小的内存块。因此一个 vnode 的用的内存大小粗略为(cache \* blocks) |
|
||||
| 缺省值 | 6 |
|
||||
|
||||
### days
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 数据文件存储数据的时间跨度 |
|
||||
| 单位 | 天 |
|
||||
| 缺省值 | 10 |
|
||||
|
||||
### keep
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 数据保留的天数 |
|
||||
| 单位 | 天 |
|
||||
| 缺省值 | 3650 |
|
||||
|
||||
### minRows
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 文件块中记录的最小条数 |
|
||||
| 缺省值 | 100 |
|
||||
|
||||
### maxRows
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 文件块中记录的最大条数 |
|
||||
| 缺省值 | 4096 |
|
||||
|
||||
### walLevel
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | WAL 级别 |
|
||||
| 取值范围 | 0: 不写WAL; <br/> 1:写 WAL, 但不执行 fsync <br/> 2:写 WAL, 而且执行 fsync |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### fsync
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 当 WAL 设置为 2 时,执行 fsync 的周期 |
|
||||
| 单位 | 毫秒 |
|
||||
| 取值范围 | 最小为 0,表示每次写入,立即执行 fsync <br/> 最大为 180000(三分钟) |
|
||||
| 缺省值 | 3000 |
|
||||
|
||||
### update
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 允许更新已存在的数据行 |
|
||||
| 取值范围 | 0:不允许更新 <br/> 1:允许整行更新 <br/> 2:允许部分列更新。(2.1.7.0 版本开始此参数支持设为 2,在此之前取值只能是 [0, 1]) |
|
||||
| 缺省值 | 0 |
|
||||
| 补充说明 | 2.0.8.0 版本之前,不支持此参数。 |
|
||||
|
||||
### cacheLast
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 是否在内存中缓存子表的最近数据 |
|
||||
| 取值范围 | 0:关闭 <br/> 1:缓存子表最近一行数据 <br/> 2:缓存子表每一列的最近的非 NULL 值 <br/> 3:同时打开缓存最近行和列功能。(2.1.2.0 版本开始此参数支持 0 ~ 3 的取值范围,在此之前取值只能是 [0, 1]) |
|
||||
| 缺省值 | 0 |
|
||||
| 补充说明 | 2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。 |
|
||||
|
||||
### minimalTmpDirGB
|
||||
|
||||
| 属性 | 说明 |
|
||||
|
@ -409,110 +302,19 @@ charset 的有效值是 UTF-8。
|
|||
| 单位 | GB |
|
||||
| 缺省值 | 2.0 |
|
||||
|
||||
### vnodeBak
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 删除 vnode 时是否备份 vnode 目录 |
|
||||
| 取值范围 | 0:否,1:是 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
## 集群相关
|
||||
|
||||
### numOfMnodes
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 系统中管理节点个数 |
|
||||
| 缺省值 | 3 |
|
||||
|
||||
### replica
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 副本个数 |
|
||||
| 取值范围 | 1-3 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### quorum
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 多副本环境下指令执行的确认数要求 |
|
||||
| 取值范围 | 1,2 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### role
|
||||
### supportVnodes
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ----------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | dnode 的可选角色 |
|
||||
| 取值范围 | 0:any(既可作为 mnode,也可分配 vnode) <br/> 1:mgmt(只能作为 mnode,不能分配 vnode) <br/> 2:dnode(不能作为 mnode,只能分配 vnode) |
|
||||
| 缺省值 | 0 |
|
||||
|
||||
### balance
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 是否启动负载均衡 |
|
||||
| 取值范围 | 0,1 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### balanceInterval
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 管理节点在正常运行状态下,检查负载均衡的时间间隔 |
|
||||
| 单位 | 秒 |
|
||||
| 取值范围 | 1-30000 |
|
||||
| 缺省值 | 300 |
|
||||
|
||||
### arbitrator
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 系统中裁决器的 endpoint,其格式如 firstEp |
|
||||
| 缺省值 | 空 |
|
||||
| 含义 | dnode 支持的最大 vnode 数目 |
|
||||
| 取值范围 | 0-4096 |
|
||||
| 缺省值 | 256 |
|
||||
|
||||
## 时间相关
|
||||
|
||||
### precision
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端 |
|
||||
| 含义 | 创建数据库时使用的时间精度 |
|
||||
| 取值范围 | ms: millisecond; us: microsecond ; ns: nanosecond |
|
||||
| 缺省值 | ms |
|
||||
|
||||
### rpcTimer
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | rpc 重试时长 |
|
||||
| 单位 | 毫秒 |
|
||||
| 取值范围 | 100-3000 |
|
||||
| 缺省值 | 300 |
|
||||
|
||||
### rpcMaxTime
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | rpc 等待应答最大时长 |
|
||||
| 单位 | 秒 |
|
||||
| 取值范围 | 100-7200 |
|
||||
| 缺省值 | 600 |
|
||||
|
||||
### statusInterval
|
||||
|
||||
| 属性 | 说明 |
|
||||
|
@ -533,105 +335,8 @@ charset 的有效值是 UTF-8。
|
|||
| 取值范围 | 1-120 |
|
||||
| 缺省值 | 3 |
|
||||
|
||||
### tableMetaKeepTimer
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 表的元数据 cache 时长 |
|
||||
| 单位 | 秒 |
|
||||
| 取值范围 | 1-8640000 |
|
||||
| 缺省值 | 7200 |
|
||||
|
||||
### maxTmrCtrl
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | 定时器个数 |
|
||||
| 单位 | 个 |
|
||||
| 取值范围 | 8-2048 |
|
||||
| 缺省值 | 512 |
|
||||
|
||||
### offlineThreshold
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | dnode 离线阈值,超过该时间将导致 dnode 离线 |
|
||||
| 单位 | 秒 |
|
||||
| 取值范围 | 5-7200000 |
|
||||
| 缺省值 | 86400\*10(10 天) |
|
||||
|
||||
## 性能调优
|
||||
|
||||
### numOfThreadsPerCore
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ----------------------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | 每个 CPU 核生成的队列消费者线程数量 |
|
||||
| 缺省值 | 1.0 |
|
||||
|
||||
### ratioOfQueryThreads
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 设置查询线程的最大数量 |
|
||||
| 取值范围 | 0:表示只有 1 个查询线程 <br/> 1:表示最大和 CPU 核数相等的查询线程 <br/> 2:表示最大建立 2 倍 CPU 核数的查询线程。 |
|
||||
| 缺省值 | 1 |
|
||||
| 补充说明 | 该值可以为小数,即 0.5 表示最大建立 CPU 核数一半的查询线程。 |
|
||||
|
||||
### maxVgroupsPerDb
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 每个 DB 中 能够使用的最大 vnode 个数 |
|
||||
| 取值范围 | 0-8192 |
|
||||
| 缺省值 | 0 |
|
||||
|
||||
### maxTablesPerVnode
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 每个 vnode 中能够创建的最大表个数 |
|
||||
| 缺省值 | 1000000 |
|
||||
|
||||
### minTablesPerVnode
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 每个 vnode 中必须创建表的最小数量 |
|
||||
| 缺省值 | 1000 |
|
||||
|
||||
### tableIncStepPerVnode
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 每个 vnode 中超过最小表数,i.e. minTablesPerVnode, 后递增步长 |
|
||||
| 缺省值 | 1000 |
|
||||
|
||||
### maxNumOfOrderedRes
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | 支持超级表时间排序允许的最多记录数限制 |
|
||||
| 缺省值 | 10 万 |
|
||||
|
||||
### mnodeEqualVnodeNum
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 将一个 mnode 等同于 vnode 消耗的个数 |
|
||||
| 缺省值 | 4 |
|
||||
|
||||
### numOfCommitThreads
|
||||
|
||||
| 属性 | 说明 |
|
||||
|
@ -642,23 +347,6 @@ charset 的有效值是 UTF-8。
|
|||
|
||||
## 压缩相关
|
||||
|
||||
### comp
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ----------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 文件压缩标志位 |
|
||||
| 取值范围 | 0:关闭,1:一阶段压缩,2:两阶段压缩 |
|
||||
| 缺省值 | 2 |
|
||||
|
||||
### tsdbMetaCompactRatio
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------------------------------------- |
|
||||
| 含义 | tsdb meta 文件中冗余数据超过多少阈值,开启 meta 文件的压缩功能 |
|
||||
| 取值范围 | 0:不开启,[1-100]:冗余数据比例 |
|
||||
| 缺省值 | 0 |
|
||||
|
||||
### compressMsgSize
|
||||
|
||||
| 属性 | 说明 |
|
||||
|
@ -680,165 +368,6 @@ charset 的有效值是 UTF-8。
|
|||
| 缺省值 | -1 |
|
||||
| 补充说明 | 2.3.0.0 版本新增。 |
|
||||
|
||||
### lossyColumns
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| 适用范围 | 服务器端 |
|
||||
| 含义 | 配置要进行有损压缩的浮点数据类型 |
|
||||
| 取值范围 | 空字符串:关闭有损压缩 <br/> float:只对 float 类型进行有损压缩 <br/>double:只对 double 类型进行有损压缩 <br/> float \| double:float double 都进行有损压缩 |
|
||||
| 缺省值 | 空字符串 |
|
||||
| 补充说明 | 有损压缩默认为关闭状态,只有配置后才生效 |
|
||||
|
||||
### fPrecision
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------- |
|
||||
| 适用范围 | 服务器端 |
|
||||
| 含义 | 设置 float 类型浮点数压缩精度 |
|
||||
| 取值范围 | 0.1 ~ 0.00000001 |
|
||||
| 缺省值 | 0.00000001 |
|
||||
| 补充说明 | 小于此值的浮点数尾数部分将被截取 |
|
||||
|
||||
### dPrecision
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------------- |
|
||||
| 适用范围 | 服务器端 |
|
||||
| 含义 | 设置 double 类型浮点数压缩精度 |
|
||||
| 取值范围 | 0.1 ~ 0.0000000000000001 |
|
||||
| 缺省值 | 0.0000000000000001 |
|
||||
| 补充说明 | 小于此值的浮点数尾数部分将被截取 |
|
||||
|
||||
## 连续查询相关
|
||||
|
||||
### stream
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 是否启用连续查询(流计算功能) |
|
||||
| 取值范围 | 0:不允许 <br/> 1:允许 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### minSlidingTime
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ----------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 最小滑动窗口时长 |
|
||||
| 单位 | 毫秒 |
|
||||
| 取值范围 | 10-1000000 |
|
||||
| 缺省值 | 10 |
|
||||
| 补充说明 | 支持 us 补值后,这个值就是 1us 了。 |
|
||||
|
||||
### minIntervalTime
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 时间窗口最小值 |
|
||||
| 单位 | 毫秒 |
|
||||
| 取值范围 | 1-1000000 |
|
||||
| 缺省值 | 10 |
|
||||
|
||||
### maxStreamCompDelay
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 连续查询启动最大延迟 |
|
||||
| 单位 | 毫秒 |
|
||||
| 取值范围 | 10-1000000000 |
|
||||
| 缺省值 | 20000 |
|
||||
|
||||
### maxFirstStreamCompDelay
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 第一次连续查询启动最大延迟 |
|
||||
| 单位 | 毫秒 |
|
||||
| 取值范围 | 10-1000000000 |
|
||||
| 缺省值 | 10000 |
|
||||
|
||||
### retryStreamCompDelay
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 连续查询重试等待间隔 |
|
||||
| 单位 | 毫秒 |
|
||||
| 取值范围 | 10-1000000000 |
|
||||
| 缺省值 | 10 |
|
||||
|
||||
### streamCompDelayRatio
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 连续查询的延迟时间计算系数,实际延迟时间为本参数乘以计算时间窗口 |
|
||||
| 取值范围 | 0.1-0.9 |
|
||||
| 缺省值 | 0.1 |
|
||||
|
||||
:::info
|
||||
为避免多个 stream 同时执行占用太多系统资源,程序中对 stream 的执行时间人为增加了一些随机的延时。<br/>maxFirstStreamCompDelay 是 stream 第一次执行前最少要等待的时间。<br/>streamCompDelayRatio 是延迟时间的计算系数,它乘以查询的 interval 后为延迟时间基准。<br/>maxStreamCompDelay 是延迟时间基准的上限。<br/>实际延迟时间为一个不超过延迟时间基准的随机值。<br/>stream 某次计算失败后需要重试,retryStreamCompDelay 是重试的等待时间基准。<br/>实际重试等待时间为不超过等待时间基准的随机值。
|
||||
|
||||
:::
|
||||
|
||||
## HTTP 相关
|
||||
|
||||
:::note
|
||||
HTTP 服务在 2.4.0.0(不含)以前的版本中由 taosd 提供,在 2.4.0.0 以后(含)由 taosAdapter 提供。
|
||||
本节的配置参数仅在 2.4.0.0(不含)以前的版本中生效。如果您使用的是 2.4.0.0(含)及以后的版本请参考[文档](/reference/taosadapter/)。
|
||||
|
||||
:::
|
||||
|
||||
### http
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 服务器内部的 http 服务开关。 |
|
||||
| 取值范围 | 0:关闭 http 服务, 1:激活 http 服务。 |
|
||||
| 缺省值 | 1 |
|
||||
|
||||
### httpEnableRecordSql
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | --------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 记录通过 RESTFul 接口,产生的 SQL 调用。 |
|
||||
| 缺省值 | 0 |
|
||||
| 补充说明 | 生成的文件(httpnote.0/httpnote.1),与服务端日志所在目录相同。 |
|
||||
|
||||
### httpMaxThreads
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | RESTFul 接口的线程数。taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
|
||||
| 缺省值 | 2 |
|
||||
|
||||
### restfulRowLimit
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ----------------------------------------------------------------------------------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | RESTFul 接口单次返回的记录条数。taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
|
||||
| 缺省值 | 10240 |
|
||||
| 补充说明 | 最大 10,000,000 |
|
||||
|
||||
### httpDBNameMandatory
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 是否在 URL 中输入 数据库名称 |
|
||||
| 取值范围 | 0:不开启,1:开启 |
|
||||
| 缺省值 | 0 |
|
||||
| 补充说明 | 2.3 版本新增。 |
|
||||
|
||||
## 日志相关
|
||||
|
||||
### logDir
|
||||
|
@ -894,50 +423,23 @@ HTTP 服务在 2.4.0.0(不含)以前的版本中由 taosd 提供,在 2.4.0
|
|||
| 取值范围 | 131(输出错误和警告日志),135(输出错误、警告和调试日志),143(输出错误、警告、调试和跟踪日志) |
|
||||
| 缺省值 | 131 或 135(不同模块有不同的默认值) |
|
||||
|
||||
### mDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 管理模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | 135 |
|
||||
|
||||
### dDebugFlag
|
||||
### tmrDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | dnode 模块的日志开关 |
|
||||
| 含义 | 定时器模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | 135 |
|
||||
| 缺省值 | |
|
||||
|
||||
### sDebugFlag
|
||||
### uDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | sync 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | 135 |
|
||||
|
||||
### wDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | WAL 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | 135 |
|
||||
|
||||
### sdbDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | sdb 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | 135 |
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | 共用功能模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### rpcDebugFlag
|
||||
|
||||
|
@ -948,12 +450,21 @@ HTTP 服务在 2.4.0.0(不含)以前的版本中由 taosd 提供,在 2.4.0
|
|||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### tmrDebugFlag
|
||||
### jniDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------ |
|
||||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | jni 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### qDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | 定时器模块的日志开关 |
|
||||
| 含义 | query 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
|
@ -966,158 +477,114 @@ HTTP 服务在 2.4.0.0(不含)以前的版本中由 taosd 提供,在 2.4.0
|
|||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### jniDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------ |
|
||||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | jni 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### odbcDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------- |
|
||||
| 适用范围 | 仅客户端适用 |
|
||||
| 含义 | odbc 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### uDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ---------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | 共用功能模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### httpDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | http 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### mqttDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------- |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | mqtt 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### monitorDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | 监控模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### qDebugFlag
|
||||
### dDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 含义 | 查询模块的日志开关 |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | dnode 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
| 缺省值 | 135 |
|
||||
|
||||
### vDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | -------------------- |
|
||||
| 适用范围 | 服务端和客户端均适用 |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | vnode 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | |
|
||||
|
||||
### mDebugFlag
|
||||
|
||||
| 属性 | 说明 |
|
||||
| -------- | ------------------ |
|
||||
| 适用范围 | 仅服务端适用 |
|
||||
| 含义 | mnode 模块的日志开关 |
|
||||
| 取值范围 | 同上 |
|
||||
| 缺省值 | 135 |
|
||||
|
||||
### wDebugFlag

| Attribute   | Description                  |
| ----------- | ---------------------------- |
| Applicable  | Server only                  |
| Meaning     | Log switch of the wal module |
| Value Range | Same as above                |
| Default     | 135                          |

### sDebugFlag

| Attribute   | Description                   |
| ----------- | ----------------------------- |
| Applicable  | Both server and client        |
| Meaning     | Log switch of the sync module |
| Value Range | Same as above                 |
| Default     | 135                           |

### tsdbDebugFlag

| Attribute   | Description                   |
| ----------- | ----------------------------- |
| Applicable  | Server only                   |
| Meaning     | Log switch of the TSDB module |
| Meaning     | Log switch of the tsdb module |
| Value Range | Same as above                 |
| Default     |                               |

### cqDebugFlag
### tqDebugFlag

| Attribute   | Description                 |
| ----------- | --------------------------- |
| Applicable  | Server only                 |
| Meaning     | Log switch of the tq module |
| Value Range | Same as above               |
| Default     |                             |
### fsDebugFlag

| Attribute   | Description                 |
| ----------- | --------------------------- |
| Applicable  | Server only                 |
| Meaning     | Log switch of the fs module |
| Value Range | Same as above               |
| Default     |                             |

### udfDebugFlag

| Attribute   | Description                               |
| ----------- | ----------------------------------------- |
| Applicable  | Both server and client                    |
| Meaning     | Log switch of the continuous query module |
| Applicable  | Server only                               |
| Meaning     | Log switch of the UDF module              |
| Value Range | Same as above                             |
| Default     |                                           |

### smaDebugFlag

| Attribute   | Description                  |
| ----------- | ---------------------------- |
| Applicable  | Server only                  |
| Meaning     | Log switch of the sma module |
| Value Range | Same as above                |
| Default     |                              |

### idxDebugFlag

| Attribute   | Description                    |
| ----------- | ------------------------------ |
| Applicable  | Server only                    |
| Meaning     | Log switch of the index module |
| Value Range | Same as above                  |
| Default     |                                |

### tdbDebugFlag

| Attribute   | Description                  |
| ----------- | ---------------------------- |
| Applicable  | Server only                  |
| Meaning     | Log switch of the tdb module |
| Value Range | Same as above                |
| Default     |                              |
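The per-module debug flags above share the value range of the general debugFlag, and several default to 135. A minimal sketch of how they would be set, assuming these switches can be written to `taos.cfg` by name exactly as listed in this section (values are illustrative only):

```text
# taos.cfg (illustrative values, not recommendations)
wDebugFlag    135
tsdbDebugFlag 135
smaDebugFlag  135
```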
## Client Only

### maxSQLLength

| Attribute   | Description                                       |
| ----------- | ------------------------------------------------- |
| Applicable  | Client only                                       |
| Meaning     | Maximum length allowed for a single SQL statement |
| Unit        | bytes                                             |
| Value Range | 65480-1048576                                     |
| Default     | 1048576                                           |
### tscEnableRecordSql

| Attribute   | Description                                                                                                             |
| ----------- | ----------------------------------------------------------------------------------------------------------------------- |
| Meaning     | Whether to record client SQL statements to a file                                                                        |
| Value Range | 0: no, 1: yes                                                                                                             |
| Default     | 0                                                                                                                         |
| Note        | The generated files (tscnote-xxxx.0/tscnote-xxx.1, where xxxx is the pid) are in the same directory as the client log.   |
### maxBinaryDisplayWidth

| Attribute   | Description                                                                                          |
| ----------- | ---------------------------------------------------------------------------------------------------- |
| Meaning     | Maximum display width of binary and nchar fields in the taos shell; content beyond the limit is hidden |
| Value Range | 5 and above                                                                                            |
| Default     | 30                                                                                                     |

:::info
The effective limit is computed as follows: if the length of the field value exceeds maxBinaryDisplayWidth, the display limit is the larger of the **field name length** and **maxBinaryDisplayWidth**; otherwise it is the larger of the **field name length** and the **field value length**. The option can be changed dynamically in the shell with the command `set max_binary_display_width nn`.

:::
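A quick usage example of the command named above, run inside the taos shell (the width 60 is an arbitrary illustrative value):

```text
taos> set max_binary_display_width 60;
```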
### maxWildCardsLength

| Attribute   | Description                                                          |
| ----------- | -------------------------------------------------------------------- |
| Meaning     | Maximum allowed length of the wildcard string for the LIKE operator  |
| Unit        | bytes                                                                 |
| Value Range | 0-16384                                                               |
| Default     | 100                                                                   |
| Note        | Added in version 2.1.6.1.                                             |
### clientMerge

| Attribute   | Description                                                   |
| ----------- | ------------------------------------------------------------- |
| Meaning     | Whether the client may deduplicate data before it is written  |
| Value Range | 0: disabled, 1: enabled                                        |
| Default     | 0                                                              |
| Note        | Added in version 2.3.                                          |
### maxRegexStringLen

| Attribute   | Description                                    |
| ----------- | ---------------------------------------------- |
| Meaning     | Maximum allowed length of a regular expression |
| Value Range | Default 128, maximum 16384                     |
| Default     | 128                                            |
| Note        | Added in version 2.3.                          |
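For reference, a minimal client-side `taos.cfg` sketch tying the client-only parameters above together; the values simply repeat the defaults listed in their tables and are not tuning advice:

```text
# taos.cfg (client side, illustrative)
maxSQLLength        1048576
maxWildCardsLength  100
maxRegexStringLen   128
```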
## Others

### enableCoreFile
@ -41,10 +41,8 @@ extern int32_t tsCompressMsgSize;
extern int32_t tsCompressColData;
extern int32_t tsMaxNumOfDistinctResults;
extern int32_t tsCompatibleModel;
extern bool    tsEnableSlaveQuery;
extern bool    tsPrintAuth;
extern int64_t tsTickPerMin[3];

extern int32_t tsCountAlwaysReturnValue;

// multi-process

@ -92,8 +90,6 @@ extern uint16_t tsTelemPort;
extern int32_t tsQueryBufferSize;  // maximum allowed usage buffer size in MB for each data node during query processing
extern int64_t tsQueryBufferSizeBytes;  // maximum allowed usage buffer size in byte for each data node
extern bool    tsRetrieveBlockingModel;  // retrieve threads will be blocked
extern bool    tsKeepOriginalColumnName;
extern bool    tsDeadLockKillQuery;

// query client
extern int32_t tsQueryPolicy;

@ -102,11 +98,6 @@ extern int32_t tsQuerySmaOptimize;
// client
extern int32_t tsMinSlidingTime;
extern int32_t tsMinIntervalTime;
extern int32_t tsMaxStreamComputDelay;
extern int32_t tsStreamCompStartDelay;
extern int32_t tsRetryStreamCompDelay;
extern float   tsStreamComputDelayRatio;  // the delayed computing ration of the whole time window
extern int64_t tsMaxRetentWindow;

// build info
extern char version[];
@ -55,7 +55,6 @@ enum {
  TASK_INPUT_STATUS__NORMAL = 1,
  TASK_INPUT_STATUS__BLOCKED,
  TASK_INPUT_STATUS__RECOVER,
  TASK_INPUT_STATUS__PROCESSING,
  TASK_INPUT_STATUS__STOP,
  TASK_INPUT_STATUS__FAILED,
};

@ -320,17 +319,6 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeSStreamTask(SStreamTask* pTask);

static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) {
#if 0
  while (1) {
    int8_t inputStatus =
        atomic_val_compare_exchange_8(&pTask->inputStatus, TASK_INPUT_STATUS__NORMAL, TASK_INPUT_STATUS__PROCESSING);
    if (inputStatus == TASK_INPUT_STATUS__NORMAL) {
      break;
    }
    ASSERT(0);
  }
#endif

  if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
    SStreamDataSubmit* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit*)pItem);
    if (pSubmitClone == NULL) {

@ -443,13 +431,14 @@ typedef struct {
typedef struct {
  int64_t streamId;
  int32_t taskId;
  int32_t sourceTaskId;
  int32_t sourceVg;
  int32_t upstreamTaskId;
  int32_t upstreamNodeId;
} SStreamTaskRecoverReq;

typedef struct {
  int64_t streamId;
  int32_t taskId;
  int32_t rspTaskId;
  int32_t reqTaskId;
  int8_t  inputStatus;
} SStreamTaskRecoverRsp;
@ -210,7 +210,7 @@ SyncGroupId syncGetVgId(int64_t rid);
void        syncGetEpSet(int64_t rid, SEpSet* pEpSet);
void        syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
int32_t     syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak);
int32_t     syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize);
int32_t     syncProposeBatch(int64_t rid, SRpcMsg** pMsgPArr, bool* pIsWeakArr, int32_t arrSize);
bool        syncEnvIsStart();
const char* syncStr(ESyncState state);
bool        syncIsRestoreFinish(int64_t rid);

@ -238,7 +238,7 @@ typedef struct SyncClientRequestBatch {
  char data[];  // block2, block3
} SyncClientRequestBatch;

SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMeta* raftArr, int32_t arrSize,
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg** rpcMsgPArr, SRaftMeta* raftArr, int32_t arrSize,
                                                    int32_t vgId);
void syncClientRequestBatch2RpcMsg(const SyncClientRequestBatch* pSyncMsg, SRpcMsg* pRpcMsg);
void syncClientRequestBatchDestroy(SyncClientRequestBatch* pMsg);
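The batch-propose entry points above now take an array of message pointers (`SRpcMsg**`) rather than an array of message values. A minimal caller-side sketch, using only the signature declared in this header; `proposeTwo`, `rid`, `pMsg1` and `pMsg2` are hypothetical names and the header path is assumed:

```c
#include "sync.h"  // assumed header that declares syncProposeBatch()

// Illustrative only: hand two already-prepared messages to the batch API.
static int32_t proposeTwo(int64_t rid, SRpcMsg* pMsg1, SRpcMsg* pMsg2) {
  SRpcMsg* msgPtrs[2] = {pMsg1, pMsg2};  // pointer array, not a value array
  bool     isWeak[2]  = {false, false};  // one weak-consistency flag per message
  return syncProposeBatch(rid, msgPtrs, isWeak, 2);
}
```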
@ -257,14 +257,13 @@ static const SSysTableMeta infosMeta[] = {
    {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)},
    {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)},
    {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)},
    {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
    {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
    // {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
    // {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
    {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)},
    {TSDB_INS_TABLE_USER_DATABASES, userDBSchema, tListLen(userDBSchema)},
    {TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
    {TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
    {TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)},
    {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
    {TSDB_INS_TABLE_USER_TABLES, userTblsSchema, tListLen(userTblsSchema)},
    {TSDB_INS_TABLE_USER_TAGS, userTagsSchema, tListLen(userTagsSchema)},
    // {TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
@ -1713,7 +1713,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
    size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);

    int32_t rows = pDataBlock->info.rows;
    printf("%s |block type %d |child id %d|group id %zX\n", flag, (int32_t)pDataBlock->info.type,
    printf("%s |block type %d |child id %d|group id %" PRIu64 "\n", flag, (int32_t)pDataBlock->info.type,
           pDataBlock->info.childId, pDataBlock->info.groupId);
    for (int32_t j = 0; j < rows; j++) {
      printf("%s |", flag);
@ -35,7 +35,6 @@ int32_t tsNumOfSupportVnodes = 256;
|
|||
// common
|
||||
int32_t tsMaxShellConns = 50000;
|
||||
int32_t tsShellActivityTimer = 3; // second
|
||||
bool tsEnableSlaveQuery = true;
|
||||
bool tsPrintAuth = false;
|
||||
|
||||
// multi process
|
||||
|
@ -118,20 +117,6 @@ int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
|
|||
// 1 database precision unit for interval time range, changed accordingly
|
||||
int32_t tsMinIntervalTime = 1;
|
||||
|
||||
// 20sec, the maximum value of stream computing delay, changed accordingly
|
||||
int32_t tsMaxStreamComputDelay = 20000;
|
||||
|
||||
// 10sec, the first stream computing delay time after system launched successfully, changed accordingly
|
||||
int32_t tsStreamCompStartDelay = 10000;
|
||||
|
||||
// the stream computing delay time after executing failed, change accordingly
|
||||
int32_t tsRetryStreamCompDelay = 10 * 1000;
|
||||
|
||||
// The delayed computing ration. 10% of the whole computing time window by default.
|
||||
float tsStreamComputDelayRatio = 0.1f;
|
||||
|
||||
int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance
|
||||
|
||||
// the maximum allowed query buffer size during query processing for each data node.
|
||||
// -1 no limit (default)
|
||||
// 0 no query allowed, queries are disabled
|
||||
|
@ -142,12 +127,6 @@ int64_t tsQueryBufferSizeBytes = -1;
|
|||
// in retrieve blocking model, the retrieve threads will wait for the completion of the query processing.
|
||||
bool tsRetrieveBlockingModel = false;
|
||||
|
||||
// last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name
|
||||
bool tsKeepOriginalColumnName = false;
|
||||
|
||||
// kill long query
|
||||
bool tsDeadLockKillQuery = false;
|
||||
|
||||
// tsdb config
|
||||
// For backward compatibility
|
||||
bool tsdbForceKeepFile = false;
|
||||
|
@ -330,11 +309,10 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
|
|||
if (cfgAddString(pCfg, "fqdn", defaultFqdn, 1) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "serverPort", defaultServerPort, 1, 65056, 1) != 0) return -1;
|
||||
if (cfgAddDir(pCfg, "tempDir", tsTempDir, 1) != 0) return -1;
|
||||
if (cfgAddFloat(pCfg, "minimalTempDirGB", 1.0f, 0.001f, 10000000, 1) != 0) return -1;
|
||||
if (cfgAddFloat(pCfg, "minimalTmpDirGB", 1.0f, 0.001f, 10000000, 1) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "shellActivityTimer", tsShellActivityTimer, 1, 120, 1) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "compressMsgSize", tsCompressMsgSize, -1, 100000000, 1) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "compressColData", tsCompressColData, -1, 100000000, 1) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "keepColumnName", tsKeepOriginalColumnName, 1) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 3, 1) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
|
||||
if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
|
||||
|
@ -383,15 +361,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "maxStreamCompDelay", tsMaxStreamComputDelay, 10, 1000000000, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "maxFirstStreamCompDelay", tsStreamCompStartDelay, 1000, 1000000000, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "retryStreamCompDelay", tsRetryStreamCompDelay, 10, 1000000000, 0) != 0) return -1;
|
||||
if (cfgAddFloat(pCfg, "streamCompDelayRatio", tsStreamComputDelayRatio, 0.1, 0.9, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, 0) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "retrieveBlockingModel", tsRetrieveBlockingModel, 0) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, 0) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "slaveQuery", tsEnableSlaveQuery, 0) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "deadLockKillQuery", tsDeadLockKillQuery, 0) != 0) return -1;
|
||||
|
||||
if (cfgAddInt32(pCfg, "multiProcess", tsMultiProcess, 0, 2, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "mnodeShmSize", tsMnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
|
||||
|
@ -399,7 +371,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
if (cfgAddInt32(pCfg, "qnodeShmSize", tsQnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "snodeShmSize", tsSnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "bnodeShmSize", tsBnodeShmSize, TSDB_MAX_MSG_SIZE * 2 + 1024, INT32_MAX, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "mumOfShmThreads", tsNumOfShmThreads, 1, 1024, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "numOfShmThreads", tsNumOfShmThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfRpcThreads = tsNumOfCores / 2;
|
||||
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
|
||||
|
@ -409,25 +381,21 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfMnodeQueryThreads = tsNumOfCores * 2;
|
||||
tsNumOfMnodeQueryThreads = TRANGE(tsNumOfMnodeQueryThreads, 4, 8);
|
||||
if (cfgAddInt32(pCfg, "numOfMnodeQueryThreads", tsNumOfMnodeQueryThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfMnodeReadThreads = tsNumOfCores / 8;
|
||||
tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfMnodeReadThreads", tsNumOfMnodeReadThreads, 1, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeQueryThreads = tsNumOfCores * 2;
|
||||
tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 1, 1024, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 4, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeStreamThreads = tsNumOfCores / 4;
|
||||
tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeStreamThreads", tsNumOfVnodeStreamThreads, 1, 1024, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeStreamThreads", tsNumOfVnodeStreamThreads, 4, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeFetchThreads = tsNumOfCores / 4;
|
||||
tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 4, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfVnodeWriteThreads = tsNumOfCores;
|
||||
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
|
||||
|
@ -447,11 +415,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
|
|||
|
||||
tsNumOfSnodeSharedThreads = tsNumOfCores / 4;
|
||||
tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeSharedThreads, 1, 1024, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeSharedThreads, 2, 1024, 0) != 0) return -1;
|
||||
|
||||
tsNumOfSnodeUniqueThreads = tsNumOfCores / 4;
|
||||
tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeUniqueThreads, 1, 1024, 0) != 0) return -1;
|
||||
if (cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeUniqueThreads, 2, 1024, 0) != 0) return -1;
|
||||
|
||||
tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1;
|
||||
tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, TSDB_MAX_MSG_SIZE * 10000L);
|
||||
|
@ -532,7 +500,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
|
|||
|
||||
tstrncpy(tsTempDir, cfgGetItem(pCfg, "tempDir")->str, PATH_MAX);
|
||||
taosExpandDir(tsTempDir, tsTempDir, PATH_MAX);
|
||||
tsTempSpace.reserved = cfgGetItem(pCfg, "minimalTempDirGB")->fval;
|
||||
tsTempSpace.reserved = cfgGetItem(pCfg, "minimalTmpDirGB")->fval;
|
||||
if (taosMulMkDir(tsTempDir) != 0) {
|
||||
uError("failed to create tempDir:%s since %s", tsTempDir, terrstr());
|
||||
return -1;
|
||||
|
@ -545,7 +513,6 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
|
|||
tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32;
|
||||
tsCompressMsgSize = cfgGetItem(pCfg, "compressMsgSize")->i32;
|
||||
tsCompressColData = cfgGetItem(pCfg, "compressColData")->i32;
|
||||
tsKeepOriginalColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
|
||||
tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
|
||||
tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
|
||||
tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
|
||||
|
@ -579,15 +546,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
tsMinIntervalTime = cfgGetItem(pCfg, "minIntervalTime")->i32;
|
||||
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
|
||||
tsCountAlwaysReturnValue = cfgGetItem(pCfg, "countAlwaysReturnValue")->i32;
|
||||
tsMaxStreamComputDelay = cfgGetItem(pCfg, "maxStreamCompDelay")->i32;
|
||||
tsStreamCompStartDelay = cfgGetItem(pCfg, "maxFirstStreamCompDelay")->i32;
|
||||
tsRetryStreamCompDelay = cfgGetItem(pCfg, "retryStreamCompDelay")->i32;
|
||||
tsStreamComputDelayRatio = cfgGetItem(pCfg, "streamCompDelayRatio")->fval;
|
||||
tsQueryBufferSize = cfgGetItem(pCfg, "queryBufferSize")->i32;
|
||||
tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
|
||||
tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval;
|
||||
tsEnableSlaveQuery = cfgGetItem(pCfg, "slaveQuery")->bval;
|
||||
tsDeadLockKillQuery = cfgGetItem(pCfg, "deadLockKillQuery")->i32;
|
||||
|
||||
tsMultiProcess = cfgGetItem(pCfg, "multiProcess")->bval;
|
||||
tsMnodeShmSize = cfgGetItem(pCfg, "mnodeShmSize")->i32;
|
||||
|
@ -598,7 +559,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
|
||||
tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32;
|
||||
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
|
||||
tsNumOfMnodeQueryThreads = cfgGetItem(pCfg, "numOfMnodeQueryThreads")->i32;
|
||||
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
|
||||
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
|
||||
tsNumOfVnodeStreamThreads = cfgGetItem(pCfg, "numOfVnodeStreamThreads")->i32;
|
||||
|
@ -673,9 +633,7 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
break;
|
||||
}
|
||||
case 'd': {
|
||||
if (strcasecmp("deadLockKillQuery", name) == 0) {
|
||||
tsDeadLockKillQuery = cfgGetItem(pCfg, "deadLockKillQuery")->i32;
|
||||
} else if (strcasecmp("dDebugFlag", name) == 0) {
|
||||
if (strcasecmp("dDebugFlag", name) == 0) {
|
||||
dDebugFlag = cfgGetItem(pCfg, "dDebugFlag")->i32;
|
||||
}
|
||||
break;
|
||||
|
@ -732,9 +690,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
break;
|
||||
}
|
||||
case 'k': {
|
||||
if (strcasecmp("keepColumnName", name) == 0) {
|
||||
tsKeepOriginalColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 'l': {
|
||||
|
@ -758,10 +713,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
tsMaxShellConns = cfgGetItem(pCfg, "maxShellConns")->i32;
|
||||
} else if (strcasecmp("maxNumOfDistinctRes", name) == 0) {
|
||||
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
|
||||
} else if (strcasecmp("maxStreamCompDelay", name) == 0) {
|
||||
tsMaxStreamComputDelay = cfgGetItem(pCfg, "maxStreamCompDelay")->i32;
|
||||
} else if (strcasecmp("maxFirstStreamCompDelay", name) == 0) {
|
||||
tsStreamCompStartDelay = cfgGetItem(pCfg, "maxFirstStreamCompDelay")->i32;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -772,8 +723,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
break;
|
||||
}
|
||||
case 'i': {
|
||||
if (strcasecmp("minimalTempDirGB", name) == 0) {
|
||||
tsTempSpace.reserved = cfgGetItem(pCfg, "minimalTempDirGB")->fval;
|
||||
if (strcasecmp("minimalTmpDirGB", name) == 0) {
|
||||
tsTempSpace.reserved = cfgGetItem(pCfg, "minimalTmpDirGB")->fval;
|
||||
} else if (strcasecmp("minimalDataDirGB", name) == 0) {
|
||||
tsDataSpace.reserved = cfgGetItem(pCfg, "minimalDataDirGB")->fval;
|
||||
} else if (strcasecmp("minSlidingTime", name) == 0) {
|
||||
|
@ -834,8 +785,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
tsNumOfRpcThreads = cfgGetItem(pCfg, "numOfRpcThreads")->i32;
|
||||
} else if (strcasecmp("numOfCommitThreads", name) == 0) {
|
||||
tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32;
|
||||
} else if (strcasecmp("numOfMnodeQueryThreads", name) == 0) {
|
||||
tsNumOfMnodeQueryThreads = cfgGetItem(pCfg, "numOfMnodeQueryThreads")->i32;
|
||||
} else if (strcasecmp("numOfMnodeReadThreads", name) == 0) {
|
||||
tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32;
|
||||
} else if (strcasecmp("numOfVnodeQueryThreads", name) == 0) {
|
||||
|
@ -883,9 +832,7 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
break;
|
||||
}
|
||||
case 'r': {
|
||||
if (strcasecmp("retryStreamCompDelay", name) == 0) {
|
||||
tsRetryStreamCompDelay = cfgGetItem(pCfg, "retryStreamCompDelay")->i32;
|
||||
} else if (strcasecmp("retrieveBlockingModel", name) == 0) {
|
||||
if (strcasecmp("retrieveBlockingModel", name) == 0) {
|
||||
tsRetrieveBlockingModel = cfgGetItem(pCfg, "retrieveBlockingModel")->bval;
|
||||
} else if (strcasecmp("rpcQueueMemoryAllowed", name) == 0) {
|
||||
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
|
||||
|
@ -913,10 +860,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
|||
tsNumOfSupportVnodes = cfgGetItem(pCfg, "supportVnodes")->i32;
|
||||
} else if (strcasecmp("statusInterval", name) == 0) {
|
||||
tsStatusInterval = cfgGetItem(pCfg, "statusInterval")->i32;
|
||||
} else if (strcasecmp("streamCompDelayRatio", name) == 0) {
|
||||
tsStreamComputDelayRatio = cfgGetItem(pCfg, "streamCompDelayRatio")->fval;
|
||||
} else if (strcasecmp("slaveQuery", name) == 0) {
|
||||
tsEnableSlaveQuery = cfgGetItem(pCfg, "slaveQuery")->bval;
|
||||
} else if (strcasecmp("snodeShmSize", name) == 0) {
|
||||
tsSnodeShmSize = cfgGetItem(pCfg, "snodeShmSize")->i32;
|
||||
} else if (strcasecmp("serverPort", name) == 0) {
|
||||
|
|
|
@ -14,4 +14,7 @@ target_include_directories(
    taosd
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/node_mgmt/inc"
)
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
    add_dependencies(taosd jemalloc)
ENDIF ()
target_link_libraries(taosd dnode)
@ -179,7 +179,7 @@ static int32_t sndProcessTaskRecoverRsp(SSnode *pNode, SRpcMsg *pMsg) {
  SStreamMeta *pMeta = pNode->pMeta;

  SStreamTaskRecoverRsp *pRsp = pMsg->pCont;
  int32_t                taskId = pRsp->taskId;
  int32_t                taskId = pRsp->rspTaskId;
  SStreamTask           *pTask = *(SStreamTask **)taosHashGet(pMeta->pHash, &taskId, sizeof(int32_t));
  streamProcessRecoverRsp(pTask, pRsp);
  return 0;
@ -293,7 +293,10 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
  tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
  ret = tdbTbcMoveTo(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &c);
  if (ret < 0 || c) {
    ASSERT(0);
    tdbTbcClose(pUidIdxc);

    terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
    // ASSERT(0);
    return -1;
  }

@ -980,6 +983,9 @@ static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) {
  tbDbKey.version = pME->version;
  tbDbKey.uid = pME->uid;

  metaDebug("vgId:%d, start to save table version:%" PRId64 "uid: %" PRId64, TD_VID(pMeta->pVnode), pME->version,
            pME->uid);

  pKey = &tbDbKey;
  kLen = sizeof(tbDbKey);

@ -1012,6 +1018,9 @@ static int metaSaveToTbDb(SMeta *pMeta, const SMetaEntry *pME) {
  return 0;

_err:
  metaError("vgId:%d, failed to save table version:%" PRId64 "uid: %" PRId64 " %s", TD_VID(pMeta->pVnode), pME->version,
            pME->uid, tstrerror(terrno));

  taosMemoryFree(pVal);
  return -1;
}
@ -599,14 +599,14 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
  SSubmitReq *pReq = NULL;
  // TODO: the schema update should be handled
  if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) < 0) {
    smaError("vgId:%d, build submit req for rsma table %" PRIi64 "l evel %" PRIi8 " failed since %s", SMA_VID(pSma),
    smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma),
             suid, pItem->level, terrstr());
    goto _err;
  }

  if (pReq && tdProcessSubmitReq(sinkTsdb, output->info.version, pReq) < 0) {
    taosMemoryFreeClear(pReq);
    smaError("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " failed since %s",
    smaError("vgId:%d, process submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
             SMA_VID(pSma), suid, pItem->level, terrstr());
    goto _err;
  }
@ -0,0 +1,288 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "sma.h"
|
||||
|
||||
static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppData);
|
||||
static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
|
||||
|
||||
// SRsmaSnapReader ========================================
|
||||
struct SRsmaSnapReader {
|
||||
SSma* pSma;
|
||||
int64_t sver;
|
||||
int64_t ever;
|
||||
|
||||
// for data file
|
||||
int8_t rsmaDataDone[TSDB_RETENTION_L2];
|
||||
STsdbSnapReader* pDataReader[TSDB_RETENTION_L2];
|
||||
|
||||
// for qtaskinfo file
|
||||
int8_t qTaskDone;
|
||||
SQTaskFReader* pQTaskFReader;
|
||||
};
|
||||
|
||||
int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader) {
|
||||
int32_t code = 0;
|
||||
SRsmaSnapReader* pReader = NULL;
|
||||
|
||||
// alloc
|
||||
pReader = (SRsmaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
|
||||
if (pReader == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
pReader->pSma = pSma;
|
||||
pReader->sver = sver;
|
||||
pReader->ever = ever;
|
||||
|
||||
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
|
||||
if (pSma->pRSmaTsdb[i]) {
|
||||
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, &pReader->pDataReader[i]);
|
||||
if (code < 0) {
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
}
|
||||
*ppReader = pReader;
|
||||
smaInfo("vgId:%d vnode snapshot rsma reader opened succeed", SMA_VID(pSma));
|
||||
return TSDB_CODE_SUCCESS;
|
||||
_err:
|
||||
smaError("vgId:%d vnode snapshot rsma reader opened failed since %s", SMA_VID(pSma), tstrerror(code));
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
|
||||
static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppData) {
|
||||
int32_t code = 0;
|
||||
SSma* pSma = pReader->pSma;
|
||||
|
||||
_exit:
|
||||
smaInfo("vgId:%d vnode snapshot rsma read qtaskinfo succeed", SMA_VID(pSma));
|
||||
return code;
|
||||
|
||||
_err:
|
||||
smaError("vgId:%d vnode snapshot rsma read qtaskinfo failed since %s", SMA_VID(pSma), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData) {
|
||||
int32_t code = 0;
|
||||
|
||||
*ppData = NULL;
|
||||
|
||||
smaInfo("vgId:%d vnode snapshot rsma read entry", SMA_VID(pReader->pSma));
|
||||
// read rsma1/rsma2 file
|
||||
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
|
||||
STsdbSnapReader* pTsdbSnapReader = pReader->pDataReader[i];
|
||||
if (!pTsdbSnapReader) {
|
||||
continue;
|
||||
}
|
||||
if (!pReader->rsmaDataDone[i]) {
|
||||
smaInfo("vgId:%d vnode snapshot rsma read level %d not done", SMA_VID(pReader->pSma), i);
|
||||
code = tsdbSnapRead(pTsdbSnapReader, ppData);
|
||||
if (code) {
|
||||
goto _err;
|
||||
} else {
|
||||
if (*ppData) {
|
||||
goto _exit;
|
||||
} else {
|
||||
pReader->rsmaDataDone[i] = 1;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
smaInfo("vgId:%d vnode snapshot rsma read level %d is done", SMA_VID(pReader->pSma), i);
|
||||
}
|
||||
}
|
||||
|
||||
// read qtaskinfo file
|
||||
if (!pReader->qTaskDone) {
|
||||
code = rsmaSnapReadQTaskInfo(pReader, ppData);
|
||||
if (code) {
|
||||
goto _err;
|
||||
} else {
|
||||
if (*ppData) {
|
||||
goto _exit;
|
||||
} else {
|
||||
pReader->qTaskDone = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_exit:
|
||||
smaInfo("vgId:%d vnode snapshot rsma read succeed", SMA_VID(pReader->pSma));
|
||||
return code;
|
||||
|
||||
_err:
|
||||
smaError("vgId:%d vnode snapshot rsma read failed since %s", SMA_VID(pReader->pSma), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
|
||||
int32_t code = 0;
|
||||
SRsmaSnapReader* pReader = *ppReader;
|
||||
|
||||
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
|
||||
if (pReader->pDataReader[i]) {
|
||||
tsdbSnapReaderClose(&pReader->pDataReader[i]);
|
||||
}
|
||||
}
|
||||
|
||||
if (pReader->pQTaskFReader) {
|
||||
// TODO: close for qtaskinfo
|
||||
smaInfo("vgId:%d vnode snapshot rsma reader closed for qTaskInfo", SMA_VID(pReader->pSma));
|
||||
}
|
||||
|
||||
|
||||
smaInfo("vgId:%d vnode snapshot rsma reader closed", SMA_VID(pReader->pSma));
|
||||
|
||||
taosMemoryFreeClear(*ppReader);
|
||||
return code;
|
||||
}
|
||||
|
||||
// SRsmaSnapWriter ========================================
|
||||
struct SRsmaSnapWriter {
|
||||
SSma* pSma;
|
||||
int64_t sver;
|
||||
int64_t ever;
|
||||
|
||||
// config
|
||||
int64_t commitID;
|
||||
|
||||
// for data file
|
||||
STsdbSnapWriter* pDataWriter[TSDB_RETENTION_L2];
|
||||
|
||||
// for qtaskinfo file
|
||||
SQTaskFReader* pQTaskFReader;
|
||||
SQTaskFWriter* pQTaskFWriter;
|
||||
};
|
||||
|
||||
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter) {
|
||||
int32_t code = 0;
|
||||
SRsmaSnapWriter* pWriter = NULL;
|
||||
|
||||
// alloc
|
||||
pWriter = (SRsmaSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter));
|
||||
if (pWriter == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _err;
|
||||
}
|
||||
pWriter->pSma = pSma;
|
||||
pWriter->sver = sver;
|
||||
pWriter->ever = ever;
|
||||
|
||||
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
|
||||
if (pSma->pRSmaTsdb[i]) {
|
||||
code = tsdbSnapWriterOpen(pSma->pRSmaTsdb[i], sver, ever, &pWriter->pDataWriter[i]);
|
||||
if (code < 0) {
|
||||
goto _err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// qtaskinfo
|
||||
// TODO
|
||||
|
||||
*ppWriter = pWriter;
|
||||
|
||||
smaInfo("vgId:%d rsma snapshot writer open succeed", TD_VID(pSma->pVnode));
|
||||
return code;
|
||||
|
||||
_err:
|
||||
smaError("vgId:%d rsma snapshot writer open failed since %s", TD_VID(pSma->pVnode), tstrerror(code));
|
||||
*ppWriter = NULL;
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
|
||||
int32_t code = 0;
|
||||
SRsmaSnapWriter* pWriter = *ppWriter;
|
||||
|
||||
if (rollback) {
|
||||
ASSERT(0);
|
||||
// code = tsdbFSRollback(pWriter->pTsdb->pFS);
|
||||
// if (code) goto _err;
|
||||
} else {
|
||||
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
|
||||
if (pWriter->pDataWriter[i]) {
|
||||
code = tsdbSnapWriterClose(&pWriter->pDataWriter[i], rollback);
|
||||
if (code) goto _err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
taosMemoryFree(pWriter);
|
||||
*ppWriter = NULL;
|
||||
|
||||
smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
|
||||
return code;
|
||||
|
||||
_err:
|
||||
smaError("vgId:%d vnode snapshot rsma writer close failed since %s", SMA_VID(pWriter->pSma), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
|
||||
int32_t code = 0;
|
||||
SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
|
||||
|
||||
// rsma1/rsma2
|
||||
if (pHdr->type == SNAP_DATA_RSMA1) {
|
||||
pHdr->type = SNAP_DATA_TSDB;
|
||||
code = tsdbSnapWrite(pWriter->pDataWriter[0], pData, nData);
|
||||
} else if (pHdr->type == SNAP_DATA_RSMA2) {
|
||||
pHdr->type = SNAP_DATA_TSDB;
|
||||
code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData);
|
||||
} else if (pHdr->type == SNAP_DATA_QTASK) {
|
||||
code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData);
|
||||
}
|
||||
if (code < 0) goto _err;
|
||||
|
||||
_exit:
|
||||
smaInfo("vgId:%d rsma snapshot write for data %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
smaError("vgId:%d rsma snapshot write for data %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
|
||||
int32_t code = 0;
|
||||
|
||||
if (pWriter->pQTaskFWriter == NULL) {
|
||||
// SDelFile* pDelFile = pWriter->fs.pDelFile;
|
||||
|
||||
// // reader
|
||||
// if (pDelFile) {
|
||||
// code = tsdbDelFReaderOpen(&pWriter->pDelFReader, pDelFile, pTsdb, NULL);
|
||||
// if (code) goto _err;
|
||||
|
||||
// code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR, NULL);
|
||||
// if (code) goto _err;
|
||||
// }
|
||||
|
||||
// // writer
|
||||
// SDelFile delFile = {.commitID = pWriter->commitID, .offset = 0, .size = 0};
|
||||
// code = tsdbDelFWriterOpen(&pWriter->pDelFWriter, &delFile, pTsdb);
|
||||
// if (code) goto _err;
|
||||
}
|
||||
smaInfo("vgId:%d vnode snapshot rsma write qtaskinfo succeed", SMA_VID(pWriter->pSma));
|
||||
_exit:
|
||||
return code;
|
||||
|
||||
_err:
|
||||
smaError("vgId:%d vnode snapshot rsma write qtaskinfo failed since %s", SMA_VID(pWriter->pSma), tstrerror(code));
|
||||
return code;
|
||||
}
|
|
@ -796,7 +796,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {

int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg) {
  SStreamTaskRecoverRsp* pRsp = pMsg->pCont;
  int32_t                taskId = pRsp->taskId;
  int32_t                taskId = pRsp->rspTaskId;
  SStreamTask**          ppTask = (SStreamTask**)taosHashGet(pTq->pStreamTasks, &taskId, sizeof(int32_t));
  if (ppTask) {
    streamProcessRecoverRsp(*ppTask, pRsp);
@ -46,11 +46,6 @@ void tsdbCloseCache(SLRUCache *pCache) {
  }
}

/* static void getTableCacheKeyS(tb_uid_t uid, const char *cacheType, char *key, int *len) { */
/*   snprintf(key, 30, "%" PRIi64 "%s", uid, cacheType); */
/*   *len = strlen(key); */
/* } */

static void getTableCacheKey(tb_uid_t uid, int cacheType, char *key, int *len) {
  if (cacheType == 0) {  // last_row
    *(uint64_t *)key = (uint64_t)uid;

@ -649,44 +644,44 @@ _err:
  return code;
}

static int32_t tsRowFromTsdbRow(STSchema *pTSchema, TSDBROW *pRow, STSRow **ppRow) {
  int32_t code = 0;
/* static int32_t tsRowFromTsdbRow(STSchema *pTSchema, TSDBROW *pRow, STSRow **ppRow) { */
/*   int32_t code = 0; */

  SColVal *pColVal = &(SColVal){0};
/*   SColVal *pColVal = &(SColVal){0}; */

  if (pRow->type == 0) {
    *ppRow = tdRowDup(pRow->pTSRow);
  } else {
    SArray *pArray = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal));
    if (pArray == NULL) {
      code = TSDB_CODE_OUT_OF_MEMORY;
      goto _exit;
    }
/*   if (pRow->type == 0) { */
/*     *ppRow = tdRowDup(pRow->pTSRow); */
/*   } else { */
/*     SArray *pArray = taosArrayInit(pTSchema->numOfCols, sizeof(SColVal)); */
/*     if (pArray == NULL) { */
/*       code = TSDB_CODE_OUT_OF_MEMORY; */
/*       goto _exit; */
/*     } */

    TSDBKEY   key = TSDBROW_KEY(pRow);
    STColumn *pTColumn = &pTSchema->columns[0];
    *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.ts = key.ts});
/*     TSDBKEY   key = TSDBROW_KEY(pRow); */
/*     STColumn *pTColumn = &pTSchema->columns[0]; */
/*     *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.ts = key.ts}); */

    if (taosArrayPush(pArray, pColVal) == NULL) {
      code = TSDB_CODE_OUT_OF_MEMORY;
      goto _exit;
    }
/*     if (taosArrayPush(pArray, pColVal) == NULL) { */
/*       code = TSDB_CODE_OUT_OF_MEMORY; */
/*       goto _exit; */
/*     } */

    for (int16_t iCol = 1; iCol < pTSchema->numOfCols; iCol++) {
      tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal);
      if (taosArrayPush(pArray, pColVal) == NULL) {
        code = TSDB_CODE_OUT_OF_MEMORY;
        goto _exit;
      }
    }
/*     for (int16_t iCol = 1; iCol < pTSchema->numOfCols; iCol++) { */
/*       tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal); */
/*       if (taosArrayPush(pArray, pColVal) == NULL) { */
/*         code = TSDB_CODE_OUT_OF_MEMORY; */
/*         goto _exit; */
/*       } */
/*     } */

    code = tdSTSRowNew(pArray, pTSchema, ppRow);
    if (code) goto _exit;
  }
/*     code = tdSTSRowNew(pArray, pTSchema, ppRow); */
/*     if (code) goto _exit; */
/*   } */

_exit:
  return code;
}
/* _exit: */
/*   return code; */
/* } */

static bool tsdbKeyDeleted(TSDBKEY *key, SArray *pSkyline, int64_t *iSkyline) {
  bool deleted = false;
@ -413,7 +413,7 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
    SyncClientRequestBatch *pSyncMsg = syncClientRequestBatchFromRpcMsg(pMsg);
    ASSERT(pSyncMsg != NULL);
    code = syncNodeOnClientRequestBatchCb(pSyncNode, pSyncMsg);
    syncClientRequestBatchDestroyDeep(pSyncMsg);
    syncClientRequestBatchDestroy(pSyncMsg);
  } else if (pMsg->msgType == TDMT_SYNC_REQUEST_VOTE) {
    SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pMsg);
    ASSERT(pSyncMsg != NULL);
@ -318,6 +318,7 @@ typedef struct STableScanInfo {
  int32_t currentTable;
  int8_t  scanMode;
  int8_t  noTable;
  int8_t  assignBlockUid;
} STableScanInfo;

typedef struct STableMergeScanInfo {
@ -264,6 +264,12 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
    }

    taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &keyInfo);
    if (pTaskInfo->tableqinfoList.map == NULL) {
      pTaskInfo->tableqinfoList.map =
          taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
    }

    taosHashPut(pTaskInfo->tableqinfoList.map, uid, sizeof(uid), &keyInfo.groupId, sizeof(keyInfo.groupId));
  }

  if (keyBuf != NULL) {
@ -116,7 +116,8 @@ void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
|
|||
}
|
||||
|
||||
static void mergeJoinJoinLeftRight(struct SOperatorInfo* pOperator, SSDataBlock* pRes, int32_t currRow,
|
||||
SSDataBlock* pLeftBlock, int32_t leftPos, SSDataBlock* pRightBlock, int32_t rightPos) {
|
||||
SSDataBlock* pLeftBlock, int32_t leftPos, SSDataBlock* pRightBlock,
|
||||
int32_t rightPos) {
|
||||
SJoinOperatorInfo* pJoinInfo = pOperator->info;
|
||||
|
||||
for (int32_t i = 0; i < pOperator->exprSupp.numOfExprs; ++i) {
|
||||
|
@ -129,7 +130,7 @@ static void mergeJoinJoinLeftRight(struct SOperatorInfo* pOperator, SSDataBlock*
|
|||
int32_t rowIndex = -1;
|
||||
|
||||
SColumnInfoData* pSrc = NULL;
|
||||
if (pJoinInfo->pLeft->info.blockId == blockId) {
|
||||
if (pLeftBlock->info.blockId == blockId) {
|
||||
pSrc = taosArrayGet(pLeftBlock->pDataBlock, slotId);
|
||||
rowIndex = leftPos;
|
||||
} else {
|
||||
|
@ -144,7 +145,128 @@ static void mergeJoinJoinLeftRight(struct SOperatorInfo* pOperator, SSDataBlock*
|
|||
colDataAppend(pDst, currRow, p, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
typedef struct SRowLocation {
|
||||
SSDataBlock* pDataBlock;
|
||||
int32_t pos;
|
||||
} SRowLocation;
|
||||
|
||||
// pBlock[tsSlotId][startPos, endPos) == timestamp,
|
||||
static int32_t mergeJoinGetBlockRowsEqualTs(SSDataBlock* pBlock, int16_t tsSlotId, int32_t startPos, int64_t timestamp,
|
||||
int32_t* pEndPos, SArray* rowLocations, SArray* createdBlocks) {
|
||||
int32_t numRows = pBlock->info.rows;
|
||||
ASSERT(startPos < numRows);
|
||||
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, tsSlotId);
|
||||
|
||||
int32_t i = startPos;
|
||||
for (; i < numRows; ++i) {
|
||||
char* pNextVal = colDataGetData(pCol, i);
|
||||
if (timestamp != *(int64_t*)pNextVal) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
int32_t endPos = i;
|
||||
*pEndPos = endPos;
|
||||
|
||||
if (endPos - startPos == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
SSDataBlock* block = pBlock;
|
||||
bool createdNewBlock = false;
|
||||
if (endPos == numRows) {
|
||||
block = blockDataExtractBlock(pBlock, startPos, endPos-startPos);
|
||||
taosArrayPush(createdBlocks, &block);
|
||||
createdNewBlock = true;
|
||||
}
|
||||
SRowLocation location = {0};
|
||||
for (int32_t j = startPos; j < endPos; ++j) {
|
||||
location.pDataBlock = block;
|
||||
location.pos = ( createdNewBlock ? j - startPos : j);
|
||||
taosArrayPush(rowLocations, &location);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
// whichChild == 0, left child of join; whichChild ==1, right child of join
|
||||
static int32_t mergeJoinGetDownStreamRowsEqualTimeStamp(SOperatorInfo* pOperator, int32_t whichChild, int16_t tsSlotId,
|
||||
SSDataBlock* startDataBlock, int32_t startPos,
|
||||
int64_t timestamp, SArray* rowLocations,
|
||||
SArray* createdBlocks) {
|
||||
ASSERT(whichChild == 0 || whichChild == 1);
|
||||
|
||||
SJoinOperatorInfo* pJoinInfo = pOperator->info;
|
||||
int32_t endPos = -1;
|
||||
SSDataBlock* dataBlock = startDataBlock;
|
||||
mergeJoinGetBlockRowsEqualTs(dataBlock, tsSlotId, startPos, timestamp, &endPos, rowLocations, createdBlocks);
|
||||
while (endPos == dataBlock->info.rows) {
|
||||
SOperatorInfo* ds = pOperator->pDownstream[whichChild];
|
||||
dataBlock = ds->fpSet.getNextFn(ds);
|
||||
if (whichChild == 0) {
|
||||
pJoinInfo->leftPos = 0;
|
||||
pJoinInfo->pLeft = dataBlock;
|
||||
} else if (whichChild == 1) {
|
||||
pJoinInfo->rightPos = 0;
|
||||
pJoinInfo->pRight = dataBlock;
|
||||
}
|
||||
|
||||
if (dataBlock == NULL) {
|
||||
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
|
||||
endPos = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
mergeJoinGetBlockRowsEqualTs(dataBlock, tsSlotId, 0, timestamp, &endPos, rowLocations, createdBlocks);
|
||||
}
|
||||
if (endPos != -1) {
|
||||
if (whichChild == 0) {
|
||||
pJoinInfo->leftPos = endPos;
|
||||
} else if (whichChild == 1) {
|
||||
pJoinInfo->rightPos = endPos;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t timestamp, SSDataBlock* pRes,
|
||||
int32_t* nRows) {
|
||||
SJoinOperatorInfo* pJoinInfo = pOperator->info;
|
||||
SArray* leftRowLocations = taosArrayInit(8, sizeof(SRowLocation));
|
||||
SArray* leftCreatedBlocks = taosArrayInit(8, POINTER_BYTES);
|
||||
|
||||
SArray* rightRowLocations = taosArrayInit(8, sizeof(SRowLocation));
|
||||
SArray* rightCreatedBlocks = taosArrayInit(8, POINTER_BYTES);
|
||||
|
||||
mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 0, pJoinInfo->leftCol.slotId, pJoinInfo->pLeft,
|
||||
pJoinInfo->leftPos, timestamp, leftRowLocations, leftCreatedBlocks);
|
||||
mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 1, pJoinInfo->rightCol.slotId, pJoinInfo->pRight,
|
||||
pJoinInfo->rightPos, timestamp, rightRowLocations, rightCreatedBlocks);
|
||||
|
||||
size_t leftNumJoin = taosArrayGetSize(leftRowLocations);
|
||||
size_t rightNumJoin = taosArrayGetSize(rightRowLocations);
|
||||
for (int32_t i = 0; i < leftNumJoin; ++i) {
|
||||
for (int32_t j = 0; j < rightNumJoin; ++j) {
|
||||
SRowLocation* leftRow = taosArrayGet(leftRowLocations, i);
|
||||
SRowLocation* rightRow = taosArrayGet(rightRowLocations, j);
|
||||
mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock,
|
||||
rightRow->pos);
|
||||
++*nRows;
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < taosArrayGetSize(rightCreatedBlocks); ++i) {
|
||||
SSDataBlock* pBlock = taosArrayGetP(rightCreatedBlocks, i);
|
||||
blockDataDestroy(pBlock);
|
||||
}
|
||||
taosArrayDestroy(rightCreatedBlocks);
|
||||
taosArrayDestroy(rightRowLocations);
|
||||
for (int i = 0; i < taosArrayGetSize(leftCreatedBlocks); ++i) {
|
||||
SSDataBlock* pBlock = taosArrayGetP(leftCreatedBlocks, i);
|
||||
blockDataDestroy(pBlock);
|
||||
}
|
||||
taosArrayDestroy(leftCreatedBlocks);
|
||||
taosArrayDestroy(leftRowLocations);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static bool mergeJoinGetNextTimestamp(SOperatorInfo* pOperator, int64_t* pLeftTs, int64_t* pRightTs) {
|
||||
|
@ -195,18 +317,15 @@ static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes)
|
|||
while (1) {
|
||||
int64_t leftTs = 0;
|
||||
int64_t rightTs = 0;
|
||||
bool hasNextTs = mergeJoinGetNextTimestamp(pOperator, &leftTs, &rightTs);
|
||||
bool hasNextTs = mergeJoinGetNextTimestamp(pOperator, &leftTs, &rightTs);
|
||||
if (!hasNextTs) {
|
||||
break;
|
||||
}
|
||||
|
||||
if (leftTs == rightTs) {
|
||||
mergeJoinJoinLeftRight(pOperator, pRes, nrows,
|
||||
pJoinInfo->pLeft, pJoinInfo->leftPos, pJoinInfo->pRight, pJoinInfo->rightPos);
|
||||
pJoinInfo->leftPos += 1;
|
||||
pJoinInfo->rightPos += 1;
|
||||
|
||||
nrows += 1;
|
||||
mergeJoinJoinLeftRight(pOperator, pRes, nrows, pJoinInfo->pLeft, pJoinInfo->leftPos, pJoinInfo->pRight,
|
||||
pJoinInfo->rightPos);
|
||||
mergeJoinJoinDownstreamTsRanges(pOperator, leftTs, pRes, &nrows);
|
||||
} else if (asc && leftTs < rightTs || !asc && leftTs > rightTs) {
|
||||
pJoinInfo->leftPos += 1;
|
||||
|
||||
|
|
|
@ -408,6 +408,10 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
      pBlock->info.groupId = *groupId;
    }

    if (pTableScanInfo->assignBlockUid) {
      pBlock->info.groupId = pBlock->info.uid;
    }

    pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
    pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;

@ -616,6 +620,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
  pInfo->scanFlag = MAIN_SCAN;
  pInfo->pColMatchInfo = pColList;
  pInfo->currentGroupId = -1;
  pInfo->assignBlockUid = pTableScanNode->assignBlockUid;

  pOperator->name = "TableScanOperator";  // for debug purpose
  pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;
@ -1091,6 +1091,8 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {

    // the pDataBlock are always the same one, no need to call this again
    setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, scanFlag, true);
    blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);

    hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL);
  }

@ -2098,9 +2100,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
    SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);

    switch (pSliceInfo->fillType) {
      case TSDB_FILL_NULL:
      case TSDB_FILL_NULL: {
        colDataAppendNULL(pDst, rows);
        pResBlock->info.rows += 1;
        break;
      }

      case TSDB_FILL_SET_VALUE: {
        SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal;

@ -2118,9 +2122,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
          GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i);
          colDataAppend(pDst, rows, (char*)&v, false);
        }
      } break;
        pResBlock->info.rows += 1;
        break;
      }

      case TSDB_FILL_LINEAR:
      case TSDB_FILL_LINEAR: {
#if 0
        if (pCtx->start.key == INT64_MIN || pCtx->start.key > pCtx->startTs
            || pCtx->end.key == INT64_MIN || pCtx->end.key < pCtx->startTs) {

@ -2151,17 +2157,22 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
          }
        }
#endif
        // TODO: pResBlock->info.rows += 1;
        break;

      }
      case TSDB_FILL_PREV: {
        SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, srcSlot);
        colDataAppend(pDst, rows, pkey->pData, false);
      } break;
        pResBlock->info.rows += 1;
        break;
      }

      case TSDB_FILL_NEXT: {
        char* p = colDataGetData(pSrc, rowIndex);
        colDataAppend(pDst, rows, p, colDataIsNull_s(pSrc, rowIndex));
      } break;
        pResBlock->info.rows += 1;
        break;
      }

      case TSDB_FILL_NONE:
      default:

@ -2169,7 +2180,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
      }
    }

    pResBlock->info.rows += 1;
}

static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {

@ -2221,6 +2231,8 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
  SInterval*     pInterval = &pSliceInfo->interval;
  SOperatorInfo* downstream = pOperator->pDownstream[0];

  blockDataCleanup(pResBlock);

  int32_t numOfRows = 0;
  while (1) {
    SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
@ -1522,6 +1522,7 @@ static const char* jkTableScanPhysiPlanWatermark = "Watermark";
static const char* jkTableScanPhysiPlanIgnoreExpired = "IgnoreExpired";
static const char* jkTableScanPhysiPlanGroupTags = "GroupTags";
static const char* jkTableScanPhysiPlanGroupSort = "GroupSort";
static const char* jkTableScanPhysiPlanAssignBlockUid = "AssignBlockUid";

static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
  const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;

@ -1578,6 +1579,9 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanGroupSort, pNode->groupSort);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanAssignBlockUid, pNode->assignBlockUid);
  }

  return code;
}

@ -1637,6 +1641,9 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanGroupSort, &pNode->groupSort);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanAssignBlockUid, &pNode->assignBlockUid);
  }

  return code;
}

@ -4525,7 +4532,6 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
    case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
      return jsonToPhysiScanNode(pJson, pObj);
    case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:

      return jsonToPhysiLastRowScanNode(pJson, pObj);
    case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
    case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
@ -82,6 +82,7 @@ static bool columnNodeEqual(const SColumnNode* a, const SColumnNode* b) {
  COMPARE_STRING_FIELD(dbName);
  COMPARE_STRING_FIELD(tableName);
  COMPARE_STRING_FIELD(colName);
  COMPARE_STRING_FIELD(tableAlias);
  return true;
}
@ -2336,7 +2336,7 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp
  FOREACH(pAggTarget, pAgg->pTargets) {
    SNode* pScanTarget = NULL;
    FOREACH(pScanTarget, pScanNode->node.pTargets) {
      if (0 == strcmp(((SColumnNode*)pAggTarget)->colName, ((SColumnNode*)pAggTarget)->colName)) {
      if (0 == strcmp(((SColumnNode*)pAggTarget)->colName, ((SColumnNode*)pScanTarget)->colName)) {
        nodesListAppend(pScanTargets, nodesCloneNode(pScanTarget));
        break;
      }
@ -168,20 +168,20 @@ int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, v
  return TSDB_CODE_SUCCESS;
}

// Note: no more task error processing, handled in function internal
int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) {
  if (TSDB_CODE_SCH_IGNORE_ERROR == errCode) {
    return TSDB_CODE_SCH_IGNORE_ERROR;
  }

  int8_t status = 0;
  if (schJobNeedToStop(pJob, &status)) {
    SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(status));
  int8_t jobStatus = 0;
  if (schJobNeedToStop(pJob, &jobStatus)) {
    SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(jobStatus));
    SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
  }

  if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXEC) {
    SCH_TASK_ELOG("task already not in EXEC status, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
  int8_t taskStatus = SCH_GET_TASK_STATUS(pTask);
  if (taskStatus == JOB_TASK_STATUS_FAIL || taskStatus == JOB_TASK_STATUS_SUCC) {
    SCH_TASK_ELOG("task already done, status:%s", jobTaskStatusStr(taskStatus));
    SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
  }
@ -32,10 +32,10 @@ typedef struct {

static SStreamGlobalEnv streamEnv;

int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamExec(SStreamTask* pTask);
int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum);

int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb);
int32_t streamDispatch(SStreamTask* pTask);
int32_t streamDispatchReqToData(const SStreamDispatchReq* pReq, SStreamDataBlock* pData);
int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData);
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data);
@ -189,7 +189,7 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
#if 0
  if (pTask->execType != TASK_EXEC__NONE) {
#endif
    streamExec(pTask, pTask->pMsgCb);
    streamExec(pTask);
#if 0
  } else {
    ASSERT(pTask->sinkType != TASK_SINK__NONE);
@ -208,7 +208,7 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
  // 3.2 dispatch / sink
  if (pTask->dispatchType != TASK_DISPATCH__NONE) {
    ASSERT(pTask->sinkType == TASK_SINK__NONE);
    streamDispatch(pTask, pTask->pMsgCb);
    streamDispatch(pTask);
  }

  return 0;
@ -233,26 +233,55 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp) {
    return 0;
  }
  // continue dispatch
  streamDispatch(pTask, pTask->pMsgCb);
  streamDispatch(pTask);
  return 0;
}

int32_t streamProcessRunReq(SStreamTask* pTask) {
  streamExec(pTask, pTask->pMsgCb);
  streamExec(pTask);

  if (pTask->dispatchType != TASK_DISPATCH__NONE) {
    streamDispatch(pTask, pTask->pMsgCb);
    streamDispatch(pTask);
  }
  return 0;
}

int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg) {
  //
int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pRsp) {
  void* buf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamTaskRecoverRsp));
  ((SMsgHead*)buf)->vgId = htonl(pReq->upstreamNodeId);

  SStreamTaskRecoverRsp* pCont = POINTER_SHIFT(buf, sizeof(SMsgHead));
  pCont->inputStatus = pTask->inputStatus;
  pCont->streamId = pTask->streamId;
  pCont->reqTaskId = pTask->taskId;
  pCont->rspTaskId = pReq->upstreamTaskId;

  pRsp->pCont = buf;
  pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamTaskRecoverRsp);
  tmsgSendRsp(pRsp);
  return 0;
}

int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp) {
  //
  if (pRsp->inputStatus == TASK_INPUT_STATUS__NORMAL) {
    pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;

    streamProcessRunReq(pTask);

    if (pTask->isDataScan) {
      // scan data to recover
      pTask->inputStatus = TASK_INPUT_STATUS__RECOVER;
      pTask->taskStatus = TASK_STATUS__RECOVERING;
      qStreamPrepareRecover(pTask->exec.executor, pTask->startVer, pTask->recoverSnapVer);
      if (streamPipelineExec(pTask, 100) < 0) {
        return -1;
      }
    } else {
      pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
      pTask->taskStatus = TASK_STATUS__NORMAL;
    }
  }

  return 0;
}
@ -262,10 +291,10 @@ int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, S
  streamTaskEnqueueRetrieve(pTask, pReq, pRsp);

  ASSERT(pTask->execType != TASK_EXEC__NONE);
  streamExec(pTask, pTask->pMsgCb);
  streamExec(pTask);

  ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);
  streamDispatch(pTask, pTask->pMsgCb);
  streamDispatch(pTask);

  return 0;
}
@ -438,7 +438,7 @@ FAIL:
  return code;
}

int32_t streamDispatch(SStreamTask* pTask, SMsgCb* pMsgCb) {
int32_t streamDispatch(SStreamTask* pTask) {
  ASSERT(pTask->dispatchType != TASK_DISPATCH__NONE);
#if 1
  int8_t old =
@ -141,7 +141,7 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum) {

    if (pTask->dispatchType != TASK_DISPATCH__NONE) {
      ASSERT(pTask->sinkType == TASK_SINK__NONE);
      streamDispatch(pTask, pTask->pMsgCb);
      streamDispatch(pTask);
    }
  }

@ -229,7 +229,7 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
}

// TODO: handle version
int32_t streamExec(SStreamTask* pTask, SMsgCb* pMsgCb) {
int32_t streamExec(SStreamTask* pTask) {
  SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
  if (pRes == NULL) return -1;
  while (1) {
@ -19,8 +19,8 @@ int32_t tEncodeStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamTaskRecover
  if (tStartEncode(pEncoder) < 0) return -1;
  if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->sourceTaskId) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->sourceVg) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1;
  if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1;
  tEndEncode(pEncoder);
  return pEncoder->pos;
}
@ -29,8 +29,8 @@ int32_t tDecodeStreamTaskRecoverReq(SDecoder* pDecoder, SStreamTaskRecoverReq* p
  if (tStartDecode(pDecoder) < 0) return -1;
  if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->sourceTaskId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->sourceVg) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1;
  tEndDecode(pDecoder);
  return 0;
}
@ -38,7 +38,8 @@ int32_t tDecodeStreamTaskRecoverReq(SDecoder* pDecoder, SStreamTaskRecoverReq* p
int32_t tEncodeStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamTaskRecoverRsp* pRsp) {
  if (tStartEncode(pEncoder) < 0) return -1;
  if (tEncodeI64(pEncoder, pRsp->streamId) < 0) return -1;
  if (tEncodeI32(pEncoder, pRsp->taskId) < 0) return -1;
  if (tEncodeI32(pEncoder, pRsp->reqTaskId) < 0) return -1;
  if (tEncodeI32(pEncoder, pRsp->rspTaskId) < 0) return -1;
  if (tEncodeI8(pEncoder, pRsp->inputStatus) < 0) return -1;
  tEndEncode(pEncoder);
  return pEncoder->pos;
@ -47,7 +48,8 @@ int32_t tEncodeStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamTaskRecover
int32_t tDecodeStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamTaskRecoverRsp* pReq) {
  if (tStartDecode(pDecoder) < 0) return -1;
  if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->reqTaskId) < 0) return -1;
  if (tDecodeI32(pDecoder, &pReq->rspTaskId) < 0) return -1;
  if (tDecodeI8(pDecoder, &pReq->inputStatus) < 0) return -1;
  tEndDecode(pDecoder);
  return 0;
@ -125,7 +127,7 @@ int32_t streamProcessFailRecoverReq(SStreamTask* pTask, SMStreamTaskRecoverReq*
  }

  if (pTask->taskStatus == TASK_STATUS__RECOVERING) {
    if (streamPipelineExec(pTask, 10) < 0) {
    if (streamPipelineExec(pTask, 100) < 0) {
      // set fail
      return -1;
    }
@ -170,7 +170,7 @@ void syncNodeStart(SSyncNode* pSyncNode);
void syncNodeStartStandBy(SSyncNode* pSyncNode);
void syncNodeClose(SSyncNode* pSyncNode);
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak);
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize);
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg** pMsgPArr, bool* pIsWeakArr, int32_t arrSize);

// option
bool syncNodeSnapshotEnable(SSyncNode* pSyncNode);
@ -677,7 +677,7 @@ int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak) {
  return ret;
}

int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize) {
int32_t syncProposeBatch(int64_t rid, SRpcMsg** pMsgPArr, bool* pIsWeakArr, int32_t arrSize) {
  if (arrSize < 0) {
    terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
    return -1;
@ -690,18 +690,18 @@ int32_t syncProposeBatch(int64_t rid, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_
  }
  ASSERT(rid == pSyncNode->rid);

  int32_t ret = syncNodeProposeBatch(pSyncNode, pMsgArr, pIsWeakArr, arrSize);
  int32_t ret = syncNodeProposeBatch(pSyncNode, pMsgPArr, pIsWeakArr, arrSize);
  taosReleaseRef(tsNodeRefId, pSyncNode->rid);
  return ret;
}

static bool syncNodeBatchOK(SRpcMsg* pMsgArr, int32_t arrSize) {
static bool syncNodeBatchOK(SRpcMsg** pMsgPArr, int32_t arrSize) {
  for (int32_t i = 0; i < arrSize; ++i) {
    if (pMsgArr[i].msgType == TDMT_SYNC_CONFIG_CHANGE) {
    if (pMsgPArr[i]->msgType == TDMT_SYNC_CONFIG_CHANGE) {
      return false;
    }

    if (pMsgArr[i].msgType == TDMT_SYNC_CONFIG_CHANGE_FINISH) {
    if (pMsgPArr[i]->msgType == TDMT_SYNC_CONFIG_CHANGE_FINISH) {
      return false;
    }
  }
@ -709,8 +709,8 @@ static bool syncNodeBatchOK(SRpcMsg* pMsgArr, int32_t arrSize) {
  return true;
}

int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWeakArr, int32_t arrSize) {
  if (!syncNodeBatchOK(pMsgArr, arrSize)) {
int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg** pMsgPArr, bool* pIsWeakArr, int32_t arrSize) {
  if (!syncNodeBatchOK(pMsgPArr, arrSize)) {
    syncNodeErrorLog(pSyncNode, "sync propose batch error");
    terrno = TSDB_CODE_SYN_BATCH_ERROR;
    return -1;
@ -736,16 +736,23 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWe

  SRaftMeta raftArr[SYNC_MAX_BATCH_SIZE];
  for (int i = 0; i < arrSize; ++i) {
    do {
      char eventLog[128];
      snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d, batch:%d", TMSG_INFO(pMsgPArr[i]->msgType),
               pMsgPArr[i]->msgType, arrSize);
      syncNodeEventLog(pSyncNode, eventLog);
    } while (0);

    SRespStub stub;
    stub.createTime = taosGetTimestampMs();
    stub.rpcMsg = pMsgArr[i];
    stub.rpcMsg = *(pMsgPArr[i]);
    uint64_t seqNum = syncRespMgrAdd(pSyncNode->pSyncRespMgr, &stub);

    raftArr[i].isWeak = pIsWeakArr[i];
    raftArr[i].seqNum = seqNum;
  }

  SyncClientRequestBatch* pSyncMsg = syncClientRequestBatchBuild(pMsgArr, raftArr, arrSize, pSyncNode->vgId);
  SyncClientRequestBatch* pSyncMsg = syncClientRequestBatchBuild(pMsgPArr, raftArr, arrSize, pSyncNode->vgId);
  ASSERT(pSyncMsg != NULL);

  SRpcMsg rpcMsg;
@ -759,7 +766,7 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWe
  SRpcMsg* msgArr = syncClientRequestBatchRpcMsgArr(pSyncMsg);
  ASSERT(arrSize == pSyncMsg->dataCount);
  for (int i = 0; i < arrSize; ++i) {
    pMsgArr[i].info.conn.applyIndex = msgArr[i].info.conn.applyIndex;
    pMsgPArr[i]->info.conn.applyIndex = msgArr[i].info.conn.applyIndex;
    syncRespMgrDel(pSyncNode->pSyncRespMgr, raftArr[i].seqNum);
  }

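The batch-propose path above now takes an array of SRpcMsg pointers instead of a flat SRpcMsg array, and dereferences each pointer when it snapshots the message into its SRespStub. A minimal caller sketch against the new signature; the helper name and message setup are invented for illustration, and only the syncProposeBatch prototype comes from the hunks above:

#include <stdbool.h>
#include <stdint.h>

typedef struct SRpcMsg SRpcMsg;  // treated as opaque here; the real type lives in trpc.h

// Prototype as changed by this commit.
int32_t syncProposeBatch(int64_t rid, SRpcMsg **pMsgPArr, bool *pIsWeakArr, int32_t arrSize);

// Hypothetical caller: propose two already-prepared messages as one batch.
// The caller keeps ownership of the SRpcMsg objects; only pointers are passed.
static int32_t proposeTwo(int64_t rid, SRpcMsg *pFirst, SRpcMsg *pSecond) {
  SRpcMsg *pMsgPArr[2]  = {pFirst, pSecond};  // array of pointers, not of copies
  bool     isWeakArr[2] = {false, false};     // per-message weak-consistency flags
  return syncProposeBatch(rid, pMsgPArr, isWeakArr, 2);
}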
@ -790,9 +797,11 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg* pMsgArr, bool* pIsWe
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
  int32_t ret = 0;

  char eventLog[128];
  snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d", TMSG_INFO(pMsg->msgType), pMsg->msgType);
  syncNodeEventLog(pSyncNode, eventLog);
  do {
    char eventLog[128];
    snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d", TMSG_INFO(pMsg->msgType), pMsg->msgType);
    syncNodeEventLog(pSyncNode, eventLog);
  } while (0);

  if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
    if (pSyncNode->changing && pMsg->msgType != TDMT_SYNC_CONFIG_CHANGE_FINISH) {
@ -860,7 +869,8 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
  } else {
    ret = -1;
    terrno = TSDB_CODE_SYN_NOT_LEADER;
    sError("vgId:%d, sync propose not leader, %s", pSyncNode->vgId, syncUtilState2String(pSyncNode->state));
    sError("vgId:%d, sync propose not leader, %s, msgtype:%s,%d", pSyncNode->vgId,
           syncUtilState2String(pSyncNode->state), TMSG_INFO(pMsg->msgType), pMsg->msgType);
    goto _END;
  }

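Both propose paths now wrap the event-log formatting in a do { ... } while (0) block, which scopes the 128-byte buffer to those few statements. A self-contained sketch of the same idiom; the function name and the stand-in logger are hypothetical, not part of the commit:

#include <stdio.h>

static void logEvent(const char *msg) { printf("%s\n", msg); }  // stand-in for syncNodeEventLog()

static void proposeLikeFunction(int msgType) {
  do {  // limit eventLog's lifetime to this block
    char eventLog[128];
    snprintf(eventLog, sizeof(eventLog), "propose type:%d", msgType);
    logEvent(eventLog);
  } while (0);
  // eventLog is out of scope for the rest of the function
}

int main(void) {
  proposeLikeFunction(7);
  return 0;
}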
@ -963,9 +963,9 @@ void syncClientRequestLog2(char* s, const SyncClientRequest* pMsg) {
|
|||
// block2: SRaftMeta array
|
||||
// block3: rpc msg array (with pCont)
|
||||
|
||||
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMeta* raftArr, int32_t arrSize,
|
||||
SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg** rpcMsgPArr, SRaftMeta* raftArr, int32_t arrSize,
|
||||
int32_t vgId) {
|
||||
ASSERT(rpcMsgArr != NULL);
|
||||
ASSERT(rpcMsgPArr != NULL);
|
||||
ASSERT(arrSize > 0);
|
||||
|
||||
int32_t dataLen = 0;
|
||||
|
@ -991,7 +991,7 @@ SyncClientRequestBatch* syncClientRequestBatchBuild(SRpcMsg* rpcMsgArr, SRaftMet
|
|||
raftMetaArr[i].seqNum = raftArr[i].seqNum;
|
||||
|
||||
// init msgArr
|
||||
msgArr[i] = rpcMsgArr[i];
|
||||
msgArr[i] = *(rpcMsgPArr[i]);
|
||||
}
|
||||
|
||||
return pMsg;
|
||||
|
|
|
@ -22,25 +22,25 @@ done
|
|||
|
||||
echo ""
|
||||
echo "generate vgId ..."
|
||||
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | sort | uniq > ${logpath}/log.vgIds.tmp
|
||||
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort -T. | uniq | awk -F: '{print $2, $0}' | sort -T. -k1 -n | awk '{print $2}' > ${logpath}/log.vgIds.tmp
|
||||
echo "all vgIds:" > ${logpath}/log.vgIds
|
||||
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
|
||||
cat ${logpath}/log.dnode* | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort -T. | uniq | awk -F: '{print $2, $0}' | sort -T. -k1 -n | awk '{print $2}' >> ${logpath}/log.vgIds
|
||||
for dnode in `ls ${logpath} | grep dnode | grep -v log`;do
|
||||
echo "" >> ${logpath}/log.vgIds
|
||||
echo "" >> ${logpath}/log.vgIds
|
||||
echo "${dnode}:" >> ${logpath}/log.vgIds
|
||||
cat ${logpath}/${dnode}/log/taosdlog.* | grep SYN | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort | uniq >> ${logpath}/log.vgIds
|
||||
cat ${logpath}/${dnode}/log/taosdlog.* | grep SYN | grep "vgId:" | grep -v ERROR | awk '{print $5}' | awk -F, '{print $1}' | sort -T. | uniq | awk -F: '{print $2, $0}' | sort -T. -k1 -n | awk '{print $2}' >> ${logpath}/log.vgIds
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "generate log.dnode.vgId ..."
|
||||
for logdnode in `ls ${logpath}/log.dnode*`;do
|
||||
for vgId in `cat ${logpath}/log.vgIds.tmp`;do
|
||||
rowNum=`cat ${logdnode} | grep "${vgId}" | awk 'BEGIN{rowNum=0}{rowNum++}END{print rowNum}'`
|
||||
rowNum=`cat ${logdnode} | grep "${vgId}," | awk 'BEGIN{rowNum=0}{rowNum++}END{print rowNum}'`
|
||||
#echo "-----${rowNum}"
|
||||
if [ $rowNum -gt 0 ] ; then
|
||||
echo "generate ${logdnode}.${vgId}"
|
||||
cat ${logdnode} | grep "${vgId}" > ${logdnode}.${vgId}
|
||||
cat ${logdnode} | grep "${vgId}," > ${logdnode}.${vgId}
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
@ -54,7 +54,7 @@ done
|
|||
|
||||
echo ""
|
||||
echo "generate log.leader.term ..."
|
||||
cat ${logpath}/*.main | grep "become leader" | grep -v "config change" | awk '{print $5,$0}' | awk -F, '{print $4"_"$0}' | sort -k1 > ${logpath}/log.leader.term
|
||||
cat ${logpath}/*.main | grep "become leader" | grep -v "config change" | awk '{print $5,$0}' | awk -F, '{print $4"_"$0}' | sort -T. -k1 > ${logpath}/log.leader.term
|
||||
|
||||
echo ""
|
||||
echo "generate log.index, log.snapshot, log.records, log.actions ..."
|
||||
|
|
|
@ -28,12 +28,12 @@ SRpcMsg *createRpcMsg(int32_t i, int32_t dataLen) {
|
|||
}
|
||||
|
||||
SyncClientRequestBatch *createMsg() {
|
||||
SRpcMsg rpcMsgArr[5];
|
||||
memset(rpcMsgArr, 0, sizeof(rpcMsgArr));
|
||||
SRpcMsg *rpcMsgPArr[5];
|
||||
memset(rpcMsgPArr, 0, sizeof(rpcMsgPArr));
|
||||
for (int32_t i = 0; i < 5; ++i) {
|
||||
SRpcMsg *pRpcMsg = createRpcMsg(i, 20);
|
||||
rpcMsgArr[i] = *pRpcMsg;
|
||||
taosMemoryFree(pRpcMsg);
|
||||
rpcMsgPArr[i] = pRpcMsg;
|
||||
//taosMemoryFree(pRpcMsg);
|
||||
}
|
||||
|
||||
SRaftMeta raftArr[5];
|
||||
|
@ -43,7 +43,7 @@ SyncClientRequestBatch *createMsg() {
|
|||
raftArr[i].isWeak = i % 2;
|
||||
}
|
||||
|
||||
SyncClientRequestBatch *pMsg = syncClientRequestBatchBuild(rpcMsgArr, raftArr, 5, 1234);
|
||||
SyncClientRequestBatch *pMsg = syncClientRequestBatchBuild(rpcMsgPArr, raftArr, 5, 1234);
|
||||
return pMsg;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
add_executable(transportTest "")
|
||||
add_executable(transUT "")
|
||||
add_executable(pushServer "")
|
||||
add_executable(svrBench "")
|
||||
add_executable(cliBench "")
|
||||
|
||||
target_sources(transUT
|
||||
PRIVATE
|
||||
|
@ -12,9 +13,13 @@ target_sources(transportTest
|
|||
"transportTests.cpp"
|
||||
)
|
||||
|
||||
target_sources(pushServer
|
||||
target_sources(svrBench
|
||||
PRIVATE
|
||||
"pushServer.c"
|
||||
"svrBench.c"
|
||||
)
|
||||
target_sources(cliBench
|
||||
PRIVATE
|
||||
"cliBench.c"
|
||||
)
|
||||
|
||||
target_include_directories(transportTest
|
||||
|
@ -45,13 +50,37 @@ target_include_directories(transUT
|
|||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
|
||||
target_include_directories(pushServer
|
||||
target_include_directories(svrBench
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/transport"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(svrBench
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/transport"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
|
||||
target_link_libraries (pushServer
|
||||
target_link_libraries (svrBench
|
||||
os
|
||||
util
|
||||
common
|
||||
gtest_main
|
||||
transport
|
||||
)
|
||||
|
||||
target_include_directories(cliBench
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/transport"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(cliBench
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/transport"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
|
||||
target_link_libraries (cliBench
|
||||
os
|
||||
util
|
||||
common
|
||||
|
|
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "os.h"
|
||||
#include "taoserror.h"
|
||||
#include "tglobal.h"
|
||||
#include "transLog.h"
|
||||
#include "trpc.h"
|
||||
#include "tutil.h"
|
||||
|
||||
typedef struct {
|
||||
int index;
|
||||
SEpSet epSet;
|
||||
int num;
|
||||
int numOfReqs;
|
||||
int msgSize;
|
||||
tsem_t rspSem;
|
||||
tsem_t *pOverSem;
|
||||
TdThread thread;
|
||||
void *pRpc;
|
||||
} SInfo;
|
||||
|
||||
static void processResponse(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
||||
SInfo *pInfo = (SInfo *)pMsg->info.ahandle;
|
||||
tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen,
|
||||
pMsg->code);
|
||||
|
||||
if (pEpSet) pInfo->epSet = *pEpSet;
|
||||
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
tsem_post(&pInfo->rspSem);
|
||||
}
|
||||
|
||||
static int tcount = 0;
|
||||
|
||||
static void *sendRequest(void *param) {
|
||||
SInfo *pInfo = (SInfo *)param;
|
||||
SRpcMsg rpcMsg = {0};
|
||||
|
||||
tDebug("thread:%d, start to send request", pInfo->index);
|
||||
|
||||
while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {
|
||||
pInfo->num++;
|
||||
rpcMsg.pCont = rpcMallocCont(pInfo->msgSize);
|
||||
rpcMsg.contLen = pInfo->msgSize;
|
||||
rpcMsg.info.ahandle = pInfo;
|
||||
rpcMsg.msgType = 1;
|
||||
tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
|
||||
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL);
|
||||
if (pInfo->num % 20000 == 0) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num);
|
||||
tsem_wait(&pInfo->rspSem);
|
||||
}
|
||||
|
||||
tDebug("thread:%d, it is over", pInfo->index);
|
||||
tcount++;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
SRpcInit rpcInit;
|
||||
SEpSet epSet;
|
||||
int msgSize = 128;
|
||||
int numOfReqs = 0;
|
||||
int appThreads = 1;
|
||||
char serverIp[40] = "127.0.0.1";
|
||||
struct timeval systemTime;
|
||||
int64_t startTime, endTime;
|
||||
|
||||
// server info
|
||||
epSet.numOfEps = 1;
|
||||
epSet.inUse = 0;
|
||||
epSet.eps[0].port = 7000;
|
||||
epSet.eps[1].port = 7000;
|
||||
strcpy(epSet.eps[0].fqdn, serverIp);
|
||||
strcpy(epSet.eps[1].fqdn, "192.168.0.1");
|
||||
|
||||
// client info
|
||||
memset(&rpcInit, 0, sizeof(rpcInit));
|
||||
rpcInit.localPort = 0;
|
||||
rpcInit.label = "APP";
|
||||
rpcInit.numOfThreads = 1;
|
||||
rpcInit.cfp = processResponse;
|
||||
rpcInit.sessions = 100;
|
||||
rpcInit.idleTime = tsShellActivityTimer * 1000;
|
||||
rpcInit.user = "michael";
|
||||
rpcInit.connType = TAOS_CONN_CLIENT;
|
||||
|
||||
rpcDebugFlag = 131;
|
||||
for (int i = 1; i < argc; ++i) {
|
||||
if (strcmp(argv[i], "-p") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-i") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-t") == 0 && i < argc - 1) {
|
||||
rpcInit.numOfThreads = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-m") == 0 && i < argc - 1) {
|
||||
msgSize = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-s") == 0 && i < argc - 1) {
|
||||
rpcInit.sessions = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-n") == 0 && i < argc - 1) {
|
||||
numOfReqs = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-a") == 0 && i < argc - 1) {
|
||||
appThreads = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-o") == 0 && i < argc - 1) {
|
||||
tsCompressMsgSize = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-u") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-k") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-spi") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-d") == 0 && i < argc - 1) {
|
||||
rpcDebugFlag = atoi(argv[++i]);
|
||||
} else {
|
||||
printf("\nusage: %s [options] \n", argv[0]);
|
||||
printf(" [-i ip]: first server IP address, default is:%s\n", serverIp);
|
||||
printf(" [-t threads]: number of rpc threads, default is:%d\n", rpcInit.numOfThreads);
|
||||
printf(" [-m msgSize]: message body size, default is:%d\n", msgSize);
|
||||
printf(" [-a threads]: number of app threads, default is:%d\n", appThreads);
|
||||
printf(" [-n requests]: number of requests per thread, default is:%d\n", numOfReqs);
|
||||
printf(" [-u user]: user name for the connection, default is:%s\n", rpcInit.user);
|
||||
printf(" [-d debugFlag]: debug flag, default:%d\n", rpcDebugFlag);
|
||||
printf(" [-h help]: print out this help\n\n");
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
taosInitLog("client.log", 100000);
|
||||
|
||||
void *pRpc = rpcOpen(&rpcInit);
|
||||
if (pRpc == NULL) {
|
||||
tError("failed to initialize RPC");
|
||||
return -1;
|
||||
}
|
||||
|
||||
tInfo("client is initialized");
|
||||
tInfo("threads:%d msgSize:%d requests:%d", appThreads, msgSize, numOfReqs);
|
||||
|
||||
int64_t now = taosGetTimestampUs();
|
||||
|
||||
SInfo *pInfo = (SInfo *)taosMemoryCalloc(1, sizeof(SInfo) * appThreads);
|
||||
SInfo *p = pInfo;
|
||||
for (int i = 0; i < appThreads; ++i) {
|
||||
pInfo->index = i;
|
||||
pInfo->epSet = epSet;
|
||||
pInfo->numOfReqs = numOfReqs;
|
||||
pInfo->msgSize = msgSize;
|
||||
tsem_init(&pInfo->rspSem, 0, 0);
|
||||
pInfo->pRpc = pRpc;
|
||||
|
||||
taosThreadCreate(&pInfo->thread, NULL, sendRequest, pInfo);
|
||||
pInfo++;
|
||||
}
|
||||
|
||||
do {
|
||||
taosUsleep(1);
|
||||
} while (tcount < appThreads);
|
||||
|
||||
float usedTime = (taosGetTimestampUs() - now) / 1000.0f;
|
||||
|
||||
tInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs * appThreads);
|
||||
tInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0 * numOfReqs * appThreads / usedTime, msgSize);
|
||||
|
||||
for (int i = 0; i < appThreads; i++) {
|
||||
SInfo *pInfo = p;
|
||||
taosThreadJoin(pInfo->thread, NULL);
|
||||
p++;
|
||||
}
|
||||
int ch = getchar();
|
||||
UNUSED(ch);
|
||||
|
||||
taosCloseLog();
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -24,12 +24,12 @@ int msgSize = 128;
|
|||
int commit = 0;
|
||||
TdFilePtr pDataFile = NULL;
|
||||
STaosQueue *qhandle = NULL;
|
||||
STaosQset * qset = NULL;
|
||||
STaosQset *qset = NULL;
|
||||
|
||||
void processShellMsg() {
|
||||
static int num = 0;
|
||||
STaosQall *qall;
|
||||
SRpcMsg * pRpcMsg, rpcMsg;
|
||||
SRpcMsg *pRpcMsg, rpcMsg;
|
||||
int type;
|
||||
SQueueInfo qinfo = {0};
|
||||
|
||||
|
@ -77,7 +77,6 @@ void processShellMsg() {
|
|||
taosFreeQitem(pRpcMsg);
|
||||
|
||||
{
|
||||
// taosSsleep(1);
|
||||
SRpcMsg nRpcMsg = {0};
|
||||
nRpcMsg.pCont = rpcMallocCont(msgSize);
|
||||
nRpcMsg.contLen = msgSize;
|
||||
|
@ -93,26 +92,6 @@ void processShellMsg() {
|
|||
taosFreeQall(qall);
|
||||
}
|
||||
|
||||
int retrieveAuthInfo(void *parent, char *meterId, char *spi, char *encrypt, char *secret, char *ckey) {
|
||||
// app shall retrieve the auth info based on meterID from DB or a data file
|
||||
// demo code here only for simple demo
|
||||
int ret = 0;
|
||||
|
||||
if (strcmp(meterId, "michael") == 0) {
|
||||
*spi = 1;
|
||||
*encrypt = 0;
|
||||
strcpy(secret, "mypassword");
|
||||
strcpy(ckey, "key");
|
||||
} else if (strcmp(meterId, "jeff") == 0) {
|
||||
*spi = 0;
|
||||
*encrypt = 0;
|
||||
} else {
|
||||
ret = -1; // user not there
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void processRequestMsg(void *pParent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
||||
SRpcMsg *pTemp;
|
||||
|
||||
|
@ -131,11 +110,12 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
memset(&rpcInit, 0, sizeof(rpcInit));
|
||||
rpcInit.localPort = 7000;
|
||||
memcpy(rpcInit.localFqdn, "localhost", strlen("localhost"));
|
||||
rpcInit.label = "SER";
|
||||
rpcInit.numOfThreads = 1;
|
||||
rpcInit.cfp = processRequestMsg;
|
||||
rpcInit.sessions = 1000;
|
||||
rpcInit.idleTime = 2 * 1500;
|
||||
rpcDebugFlag = 131;
|
||||
|
||||
for (int i = 1; i < argc; ++i) {
|
||||
if (strcmp(argv[i], "-p") == 0 && i < argc - 1) {
|
||||
|
@ -170,7 +150,7 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
tsAsyncLog = 0;
|
||||
rpcInit.connType = TAOS_CONN_SERVER;
|
||||
taosInitLog("server.log", 10);
|
||||
taosInitLog("server.log", 100000);
|
||||
|
||||
void *pRpc = rpcOpen(&rpcInit);
|
||||
if (pRpc == NULL) {
|
|
@ -294,7 +294,7 @@ void taosArraySet(SArray* pArray, size_t index, void* pData) {
void taosArrayPopFrontBatch(SArray* pArray, size_t cnt) {
  assert(cnt <= pArray->size);
  pArray->size = pArray->size - cnt;
  if (pArray->size == 0) {
  if (pArray->size == 0 || cnt == 0) {
    return;
  }
  memmove(pArray->pData, (char*)pArray->pData + cnt * pArray->elemSize, pArray->size * pArray->elemSize);
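The guard above now also returns when cnt is zero, so a no-op pop does not fall through to memmove after the size bookkeeping. A self-contained model of the corrected behaviour using a plain int array; the names and sizes are invented for illustration and do not come from the commit:

#include <stdio.h>
#include <string.h>

// Drop cnt leading ints from buf[0..*size), shifting the remainder to the front.
static void popFrontBatch(int *buf, size_t *size, size_t cnt) {
  if (cnt > *size) cnt = *size;      // the real code asserts this instead
  *size -= cnt;
  if (*size == 0 || cnt == 0) {      // nothing left, or nothing popped
    return;
  }
  memmove(buf, buf + cnt, *size * sizeof(int));
}

int main(void) {
  int    buf[5] = {1, 2, 3, 4, 5};
  size_t size   = 5;
  popFrontBatch(buf, &size, 2);      // keeps {3, 4, 5}
  for (size_t i = 0; i < size; ++i) printf("%d ", buf[i]);
  printf("\n");
  return 0;
}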
@ -16,8 +16,8 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "tlog.h"
|
||||
#include "os.h"
|
||||
#include "tutil.h"
|
||||
#include "tconfig.h"
|
||||
#include "tutil.h"
|
||||
|
||||
#define LOG_MAX_LINE_SIZE (1024)
|
||||
#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3)
|
||||
|
@ -40,7 +40,7 @@
|
|||
#define LOG_BUF_MUTEX(x) ((x)->buffMutex)
|
||||
|
||||
typedef struct {
|
||||
char * buffer;
|
||||
char *buffer;
|
||||
int32_t buffStart;
|
||||
int32_t buffEnd;
|
||||
int32_t buffSize;
|
||||
|
@ -59,15 +59,15 @@ typedef struct {
|
|||
int32_t openInProgress;
|
||||
pid_t pid;
|
||||
char logName[LOG_FILE_NAME_LEN];
|
||||
SLogBuff * logHandle;
|
||||
SLogBuff *logHandle;
|
||||
TdThreadMutex logMutex;
|
||||
} SLogObj;
|
||||
|
||||
extern SConfig *tsCfg;
|
||||
static int8_t tsLogInited = 0;
|
||||
static SLogObj tsLogObj = {.fileNum = 1};
|
||||
static int64_t tsAsyncLogLostLines = 0;
|
||||
static int32_t tsWriteInterval = LOG_DEFAULT_INTERVAL;
|
||||
static int8_t tsLogInited = 0;
|
||||
static SLogObj tsLogObj = {.fileNum = 1};
|
||||
static int64_t tsAsyncLogLostLines = 0;
|
||||
static int32_t tsWriteInterval = LOG_DEFAULT_INTERVAL;
|
||||
|
||||
bool tsLogEmbedded = 0;
|
||||
bool tsAsyncLog = true;
|
||||
|
@ -106,7 +106,7 @@ int64_t dbgSmallWN = 0;
|
|||
int64_t dbgBigWN = 0;
|
||||
int64_t dbgWSize = 0;
|
||||
|
||||
static void * taosAsyncOutputLog(void *param);
|
||||
static void *taosAsyncOutputLog(void *param);
|
||||
static int32_t taosPushLogBuffer(SLogBuff *pLogBuf, const char *msg, int32_t msgLen);
|
||||
static SLogBuff *taosLogBuffNew(int32_t bufSize);
|
||||
static void taosCloseLogByFd(TdFilePtr pFile);
|
||||
|
@ -128,7 +128,11 @@ int32_t taosInitLog(const char *logName, int32_t maxFiles) {
|
|||
osUpdate();
|
||||
|
||||
char fullName[PATH_MAX] = {0};
|
||||
snprintf(fullName, PATH_MAX, "%s" TD_DIRSEP "%s", tsLogDir, logName);
|
||||
if (strlen(tsLogDir) != 0) {
|
||||
snprintf(fullName, PATH_MAX, "%s" TD_DIRSEP "%s", tsLogDir, logName);
|
||||
} else {
|
||||
snprintf(fullName, PATH_MAX, "%s", logName);
|
||||
}
|
||||
|
||||
tsLogObj.logHandle = taosLogBuffNew(LOG_DEFAULT_BUF_SIZE);
|
||||
if (tsLogObj.logHandle == NULL) return -1;
|
||||
|
@ -704,7 +708,7 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) {
|
|||
int32_t compressSize = 163840;
|
||||
int32_t ret = 0;
|
||||
int32_t len = 0;
|
||||
char * data = taosMemoryMalloc(compressSize);
|
||||
char *data = taosMemoryMalloc(compressSize);
|
||||
// gzFile dstFp = NULL;
|
||||
|
||||
// srcFp = fopen(srcFileName, "r");
|
||||
|
|
|
@ -489,7 +489,7 @@ class TDDnode:
|
|||
onlyKillOnceWindows = 0
|
||||
while(processID):
|
||||
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
killCmd = "kill -4 %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
onlyKillOnceWindows = 1
|
||||
time.sleep(1)
|
||||
|
@ -503,7 +503,7 @@ class TDDnode:
|
|||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
tdLog.debug("dnode:%d is stopped by kill -4" % (self.index))
|
||||
|
||||
|
||||
def stoptaosd(self):
|
||||
|
@ -527,7 +527,7 @@ class TDDnode:
|
|||
onlyKillOnceWindows = 0
|
||||
while(processID):
|
||||
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
killCmd = "kill -4 %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
onlyKillOnceWindows = 1
|
||||
time.sleep(1)
|
||||
|
@ -537,7 +537,7 @@ class TDDnode:
|
|||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
tdLog.debug("dnode:%d is stopped by kill -4" % (self.index))
|
||||
|
||||
def forcestop(self):
|
||||
if (not self.remoteIP == ""):
|
||||
|
|
|
@ -298,8 +298,8 @@
|
|||
./test.sh -f tsim/sma/drop_sma.sim
|
||||
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
|
||||
# temp disable
|
||||
#./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
|
||||
#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
|
||||
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
|
||||
./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
|
||||
|
||||
# --- valgrind
|
||||
./test.sh -f tsim/valgrind/checkError1.sim
|
||||
|
@ -325,7 +325,7 @@
|
|||
|
||||
# --- sync
|
||||
./test.sh -f tsim/sync/3Replica1VgElect.sim
|
||||
./test.sh -f tsim/sync/3Replica5VgElect.sim
|
||||
#./test.sh -f tsim/sync/3Replica5VgElect.sim
|
||||
./test.sh -f tsim/sync/oneReplica1VgElect.sim
|
||||
./test.sh -f tsim/sync/oneReplica5VgElect.sim
|
||||
|
||||
|
|
|
@ -75,61 +75,61 @@ if $data02 != leader then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print =============== create drop bnode 1
|
||||
sql create bnode on dnode 1
|
||||
sql show bnodes
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 1 then
|
||||
return -1
|
||||
endi
|
||||
sql_error create bnode on dnode 1
|
||||
#print =============== create drop bnode 1
|
||||
#sql create bnode on dnode 1
|
||||
#sql show bnodes
|
||||
#if $rows != 1 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data00 != 1 then
|
||||
# return -1
|
||||
#endi
|
||||
#sql_error create bnode on dnode 1
|
||||
#
|
||||
#sql drop bnode on dnode 1
|
||||
#sql show bnodes
|
||||
#if $rows != 0 then
|
||||
# return -1
|
||||
#endi
|
||||
#sql_error drop bnode on dnode 1
|
||||
#
|
||||
#print =============== create drop bnode 2
|
||||
#sql create bnode on dnode 2
|
||||
#sql show bnodes
|
||||
#if $rows != 1 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data00 != 2 then
|
||||
# return -1
|
||||
#endi
|
||||
#sql_error create bnode on dnode 2
|
||||
#
|
||||
#sql drop bnode on dnode 2
|
||||
#sql show bnodes
|
||||
#if $rows != 0 then
|
||||
# return -1
|
||||
#endi
|
||||
#sql_error drop bnode on dnode 2
|
||||
#
|
||||
#print =============== create drop bnodes
|
||||
#sql create bnode on dnode 1
|
||||
#sql create bnode on dnode 2
|
||||
#sql show bnodes
|
||||
#if $rows != 2 then
|
||||
# return -1
|
||||
#endi
|
||||
|
||||
sql drop bnode on dnode 1
|
||||
sql show bnodes
|
||||
if $rows != 0 then
|
||||
return -1
|
||||
endi
|
||||
sql_error drop bnode on dnode 1
|
||||
|
||||
print =============== create drop bnode 2
|
||||
sql create bnode on dnode 2
|
||||
sql show bnodes
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 2 then
|
||||
return -1
|
||||
endi
|
||||
sql_error create bnode on dnode 2
|
||||
|
||||
sql drop bnode on dnode 2
|
||||
sql show bnodes
|
||||
if $rows != 0 then
|
||||
return -1
|
||||
endi
|
||||
sql_error drop bnode on dnode 2
|
||||
|
||||
print =============== create drop bnodes
|
||||
sql create bnode on dnode 1
|
||||
sql create bnode on dnode 2
|
||||
sql show bnodes
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== restart
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
|
||||
sleep 2000
|
||||
sql show bnodes
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
#print =============== restart
|
||||
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode1 -s start
|
||||
#system sh/exec.sh -n dnode2 -s start
|
||||
#
|
||||
#sleep 2000
|
||||
#sql show bnodes
|
||||
#if $rows != 2 then
|
||||
# return -1
|
||||
#endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
|
|
|
@ -98,6 +98,11 @@ while $i < $tbNum
|
|||
endw
|
||||
|
||||
print ===============multivnode projection join.sim
|
||||
sql select join_mt0.ts,join_mt0.ts,join_mt0.t1 from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts;
|
||||
print ===> rows $row
|
||||
if $row != 9000 then
|
||||
print expect 9000, actual: $row
|
||||
endi
|
||||
sql select join_mt0.ts,join_mt0.ts,join_mt0.t1 from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t1=join_mt1.t1;
|
||||
print ===> rows $row
|
||||
if $row != 3000 then
|
||||
|
|
|
@ -360,8 +360,9 @@ endi
|
|||
if $data04 != @abc0@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select distinct tbname,t1,t2 from select_tags_mt0;
|
||||
print "really this line"
|
||||
sql select distinct tbname,t1,t2 from select_tags_mt0 order by tbname;
|
||||
print $data00 $data01 $data02 $data10 $data111 $data12
|
||||
if $row != 16 then
|
||||
return -1
|
||||
endi
|
||||
|
@ -390,7 +391,7 @@ if $data12 != @abc1@ then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sql select tbname,ts from select_tags_mt0;
|
||||
sql select tbname,ts from select_tags_mt0 order by ts;
|
||||
if $row != 12800 then
|
||||
return -1
|
||||
endi
|
||||
|
|
|
@ -99,7 +99,7 @@ if $rows != 1 then
|
|||
endi
|
||||
#sql select * from information_schema.`streams`
|
||||
sql select * from information_schema.user_tables
|
||||
if $rows != 31 then
|
||||
if $rows <= 0 then
|
||||
return -1
|
||||
endi
|
||||
#sql select * from information_schema.user_table_distributed
|
||||
|
@ -197,7 +197,7 @@ if $rows != 1 then
|
|||
endi
|
||||
#sql select * from performance_schema.`streams`
|
||||
sql select * from information_schema.user_tables
|
||||
if $rows != 31 then
|
||||
if $rows <= 0 then
|
||||
return -1
|
||||
endi
|
||||
#sql select * from information_schema.user_table_distributed
|
||||
|
|
|
@ -0,0 +1,135 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/deploy.sh -n dnode2 -i 2
|
||||
system sh/deploy.sh -n dnode3 -i 3
|
||||
system sh/deploy.sh -n dnode4 -i 4
|
||||
|
||||
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
|
||||
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
system sh/exec.sh -n dnode3 -s start
|
||||
system sh/exec.sh -n dnode4 -s start
|
||||
|
||||
sql connect
|
||||
sql create dnode $hostname port 7200
|
||||
sql create dnode $hostname port 7300
|
||||
sql create dnode $hostname port 7400
|
||||
|
||||
$x = 0
|
||||
step1:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 10 then
|
||||
print ====> dnode not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show dnodes
|
||||
print ===> $data00 $data01 $data02 $data03 $data04 $data05
|
||||
print ===> $data10 $data11 $data12 $data13 $data14 $data15
|
||||
print ===> $data20 $data21 $data22 $data23 $data24 $data25
|
||||
print ===> $data30 $data31 $data32 $data33 $data34 $data35
|
||||
if $rows != 4 then
|
||||
return -1
|
||||
endi
|
||||
if $data(1)[4] != ready then
|
||||
goto step1
|
||||
endi
|
||||
if $data(2)[4] != ready then
|
||||
goto step1
|
||||
endi
|
||||
if $data(3)[4] != ready then
|
||||
goto step1
|
||||
endi
|
||||
if $data(4)[4] != ready then
|
||||
goto step1
|
||||
endi
|
||||
|
||||
$replica = 3
|
||||
$vgroups = 30
|
||||
|
||||
print ============= create database
|
||||
sql create database db replica $replica vgroups $vgroups
|
||||
|
||||
$loop_cnt = 0
|
||||
check_db_ready:
|
||||
$loop_cnt = $loop_cnt + 1
|
||||
sleep 200
|
||||
if $loop_cnt == 100 then
|
||||
print ====> db not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show databases
|
||||
print ===> rows: $rows
|
||||
print $data[2][0] $data[2][1] $data[2][2] $data[2][3] $data[2][4] $data[2][5] $data[2][6] $data[2][7] $data[2][8] $data[2][9] $data[2][6] $data[2][11] $data[2][12] $data[2][13] $data[2][14] $data[2][15] $data[2][16] $data[2][17] $data[2][18] $data[2][19]
|
||||
if $rows != 3 then
|
||||
return -1
|
||||
endi
|
||||
if $data[2][15] != ready then
|
||||
goto check_db_ready
|
||||
endi
|
||||
|
||||
sql use db
|
||||
|
||||
$loop_cnt = 0
|
||||
check_vg_ready:
|
||||
$loop_cnt = $loop_cnt + 1
|
||||
sleep 200
|
||||
if $loop_cnt == 300 then
|
||||
print ====> vgroups not ready!
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql show vgroups
|
||||
print ===> rows: $rows
|
||||
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6] $data[0][7] $data[0][8] $data[0][9] $data[0][10] $data[0][11]
|
||||
|
||||
if $rows != $vgroups then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data[0][4] == leader then
|
||||
if $data[0][6] == follower then
|
||||
if $data[0][8] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][3]
|
||||
endi
|
||||
endi
|
||||
elif $data[0][6] == leader then
|
||||
if $data[0][4] == follower then
|
||||
if $data[0][8] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][5]
|
||||
endi
|
||||
endi
|
||||
elif $data[0][8] == leader then
|
||||
if $data[0][4] == follower then
|
||||
if $data[0][6] == follower then
|
||||
print ---- vgroup $data[0][0] leader locate on dnode $data[0][7]
|
||||
endi
|
||||
endi
|
||||
else
|
||||
goto check_vg_ready
|
||||
endi
|
||||
|
||||
|
||||
vg_ready:
|
||||
print ====> create stable/child table
|
||||
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int)
|
||||
|
||||
sql show stables
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql create table ct1 using stb tags(1000)
|
||||
|
||||
|
||||
print ===> write 1000 records
|
||||
$N = 10000
|
||||
$count = 0
|
||||
while $count < $N
|
||||
$ms = 1591200000000 + $count
|
||||
sql insert into ct1 values( $ms , $count , 2.1, 3.1)
|
||||
$count = $count + 1
|
||||
endw
|
||||
|
||||
|
|
@ -132,7 +132,7 @@ print ===> write 100 records
|
|||
$N = 100
|
||||
$count = 0
|
||||
while $count < $N
|
||||
$ms = 1591200000000 + $count
|
||||
$ms = 1658924000000 + $count
|
||||
sql insert into ct1 values( $ms , $count , 2.1, 3.1)
|
||||
$count = $count + 1
|
||||
endw
|
||||
|
@ -149,7 +149,7 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
|
||||
|
||||
sleep 3000
|
||||
|
||||
########################################################
|
||||
print ===> start dnode1 dnode2 dnode3 dnode4
|
||||
|
|
|
@ -105,7 +105,7 @@ if $rows != 1 then
|
|||
endi
|
||||
|
||||
sql select * from information_schema.user_tables
|
||||
if $rows != 31 then
|
||||
if $rows <= 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -62,6 +62,8 @@ sql select * from ct1 where ts < now -1d and ts > now +1d
|
|||
sql select * from stb where ts < now -1d and ts > now +1d
|
||||
sql select * from ct1 where ts < now -1d and ts > now +1d order by ts desc
|
||||
sql select * from stb where ts < now -1d and ts > now +1d order by ts desc
|
||||
sql select * from ct1 where t1 between 1000 and 2500
|
||||
sql select * from stb where t1 between 1000 and 2500
|
||||
|
||||
print =============== step7: count
|
||||
sql select count(*) from ct1;
|
||||
|
|
|
@ -377,11 +377,11 @@ class TDTestCase:
|
|||
tdSql.query("select ct1.c_int from db.ct1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts")
|
||||
tdSql.checkRows(self.rows)
|
||||
tdSql.query("select ct1.c_int from db.stb1 as ct1 join db1.ct1 as cy1 on ct1.ts=cy1.ts")
|
||||
tdSql.checkRows(self.rows)
|
||||
tdSql.checkRows(self.rows + int(self.rows * 0.6 //3)+ int(self.rows * 0.8 // 4))
|
||||
tdSql.query("select ct1.c_int from db.nt1 as ct1 join db1.nt1 as cy1 on ct1.ts=cy1.ts")
|
||||
tdSql.checkRows(self.rows + 3)
|
||||
tdSql.query("select ct1.c_int from db.stb1 as ct1 join db1.stb1 as cy1 on ct1.ts=cy1.ts")
|
||||
tdSql.checkRows(self.rows * 3 + 6)
|
||||
tdSql.checkRows(50)
|
||||
|
||||
tdSql.query("select count(*) from db.ct1")
|
||||
tdSql.checkData(0, 0, self.rows)
|
||||
|
|
|
@ -0,0 +1,246 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
from numpy import row_stack
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db0_0',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'stbNumbers': 2,
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 200,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
"rowsPerTbl": 100,
|
||||
"batchNum": 5000
|
||||
}
|
||||
username="user1"
|
||||
passwd="123"
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
# recreate mnode
|
||||
tdSql.execute("drop dnode 2;")
|
||||
tdSql.execute('create dnode "%s:6130";'%self.host)
|
||||
tdDnodes=cluster.dnodes
|
||||
tdDnodes[1].stoptaosd()
|
||||
tdDnodes[1].deploy()
|
||||
|
||||
tdDnodes[1].starttaosd()
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
tdSql.execute("create mnode on dnode 6")
|
||||
tdSql.error("drop dnode 1;")
|
||||
|
||||
# check status of clusters
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
tdSql.execute("create user %s pass '%s' ;"%(username,passwd))
|
||||
tdSql.query("show users")
|
||||
for i in range(tdSql.queryRows):
|
||||
if tdSql.queryResult[i][0] == "%s"%username :
|
||||
tdLog.info("create user:%s successfully"%username)
|
||||
|
||||
# # create database and stable
|
||||
# clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
# tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
# tdDnodes=cluster.dnodes
|
||||
# stopcount =0
|
||||
# threads=[]
|
||||
|
||||
# # create stable:stb_0
|
||||
# stableName= paraDict['stbName']
|
||||
# newTdSql=tdCom.newTdSql()
|
||||
# clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||
# #create child table:ctb_0
|
||||
# for i in range(paraDict['stbNumbers']):
|
||||
# stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# newTdSql=tdCom.newTdSql()
|
||||
# clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||
# #insert date
|
||||
# for i in range(paraDict['stbNumbers']):
|
||||
# stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# newTdSql=tdCom.newTdSql()
|
||||
# threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||
# for tr in threads:
|
||||
# tr.start()
|
||||
# for tr in threads:
|
||||
# tr.join()
|
||||
|
||||
# while stopcount < restartNumbers:
|
||||
# tdLog.info(" restart loop: %d"%stopcount )
|
||||
# if stopRole == "mnode":
|
||||
# for i in range(mnodeNums):
|
||||
# tdDnodes[i].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i].starttaosd()
|
||||
# # sleep(10)
|
||||
# elif stopRole == "vnode":
|
||||
# for i in range(vnodeNumbers):
|
||||
# tdDnodes[i+mnodeNums].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i+mnodeNums].starttaosd()
|
||||
# # sleep(10)
|
||||
# elif stopRole == "dnode":
|
||||
# for i in range(dnodeNumbers):
|
||||
# tdDnodes[i].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i].starttaosd()
|
||||
# # sleep(10)
|
||||
|
||||
# # dnodeNumbers don't include database of schema
|
||||
# if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
# tdLog.info("dnode is ready")
|
||||
# else:
|
||||
# print("dnodes is not ready")
|
||||
# self.stopThread(threads)
|
||||
# tdLog.exit("one or more of dnodes failed to start ")
|
||||
# # self.check3mnode()
|
||||
# stopcount+=1
|
||||
|
||||
|
||||
# clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
# clusterComCheck.checkDbRows(dbNumbers)
|
||||
# # clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||
|
||||
# tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||
# tdSql.query("show stables")
|
||||
# tdSql.checkRows(paraDict["stbNumbers"])
|
||||
# # for i in range(paraDict['stbNumbers']):
|
||||
# # stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# # tdSql.query("select * from %s"%stableName)
|
||||
# # tdSql.checkRows(rowsPerStb)
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -190,10 +190,9 @@ class TDTestCase:
|
|||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("123")
|
||||
tdLog.info("dnode is ready")
|
||||
else:
|
||||
print("456")
|
||||
|
||||
print("dnodes is not ready")
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
|
@ -207,10 +206,11 @@ class TDTestCase:
|
|||
tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||
tdSql.query("show stables")
|
||||
tdSql.checkRows(paraDict["stbNumbers"])
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
tdSql.query("select * from %s"%stableName)
|
||||
tdSql.checkRows(rowsPerStb)
|
||||
# for i in range(paraDict['stbNumbers']):
|
||||
# stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# tdSql.query("select * from %s"%stableName)
|
||||
# tdSql.checkRows(rowsPerStb)
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||
|
|
|
@ -0,0 +1,224 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
from numpy import row_stack
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
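
    # For reference: calling insertData(0, 5) would (re)create databases db0 .. db4, each with
    # super table stb1, normal table t1 and child tables ct1 .. ct4; as written the method only
    # builds the schema, it does not write rows.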
    def fiveDnodeThreeMnode(self, dnodeNumbers, mnodeNums, restartNumbers, stopRole):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName': 'db0_0',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 4,
                    'replica': 1,
                    'stbName': 'stb',
                    'stbNumbers': 2,
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbNum': 200,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    "rowsPerTbl": 100,
                    "batchNum": 5000
                    }

        dnodeNumbers = int(dnodeNumbers)
        mnodeNums = int(mnodeNums)
        vnodeNumbers = int(dnodeNumbers - mnodeNums)
        allctbNumbers = (paraDict['stbNumbers'] * paraDict["ctbNum"])
        rowsPerStb = paraDict["ctbNum"] * paraDict["rowsPerTbl"]
        rowsall = rowsPerStb * paraDict['stbNumbers']
        dbNumbers = 1
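        # With the settings above: rowsPerStb = 200 child tables * 100 rows = 20,000 rows per
        # super table, and rowsall = 20,000 * 2 super tables = 40,000 rows in total; only one
        # database (dbNumbers = 1) is created for this scenario.
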
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
threads=[]
|
||||
|
||||
        # create stable:stb_0
        stableName= paraDict['stbName']
        newTdSql=tdCom.newTdSql()
        clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
        # create child table:ctb_0
        for i in range(paraDict['stbNumbers']):
            stableName= '%s_%d'%(paraDict['stbName'],i)
            newTdSql=tdCom.newTdSql()
            clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
        # insert data
        for i in range(paraDict['stbNumbers']):
            stableName= '%s_%d'%(paraDict['stbName'],i)
            newTdSql=tdCom.newTdSql()
            threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
        for tr in threads:
            tr.start()

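        # Each insert worker gets its own connection via tdCom.newTdSql(), presumably so the
        # threads do not share one connection/cursor; the workers keep inserting in the background
        # while the dnodes are restarted in the loop below, which is the point of this
        # "restart dnode while inserting asynchronously" scenario.
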
        while stopcount < restartNumbers:
            tdLog.info(" restart loop: %d"%stopcount )
            if stopRole == "mnode":
                for i in range(mnodeNums):
                    tdDnodes[i].stoptaosd()
                    # sleep(10)
                    tdDnodes[i].starttaosd()
                    # sleep(10)
            elif stopRole == "vnode":
                for i in range(vnodeNumbers):
                    tdDnodes[i+mnodeNums].stoptaosd()
                    # sleep(10)
                    tdDnodes[i+mnodeNums].starttaosd()
                    # sleep(10)
            elif stopRole == "dnode":
                for i in range(dnodeNumbers):
                    tdDnodes[i].stoptaosd()
                    # sleep(10)
                    tdDnodes[i].starttaosd()
                    # sleep(10)

            # dnodeNumbers don't include database of schema
            if clusterComCheck.checkDnodes(dnodeNumbers):
                tdLog.info("dnode is ready")
            else:
                print("dnodes are not ready")
                # stop every insert worker before aborting
                for tr in threads:
                    self.stopThread(tr)
                tdLog.exit("one or more of dnodes failed to start ")
                # self.check3mnode()
            stopcount += 1

        for tr in threads:
            tr.join()

        clusterComCheck.checkDnodes(dnodeNumbers)
        clusterComCheck.checkDbRows(dbNumbers)
        # clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])

        tdSql.execute("use %s" %(paraDict["dbName"]))
        tdSql.query("show stables")
        tdSql.checkRows(paraDict["stbNumbers"])
        # for i in range(paraDict['stbNumbers']):
        #     stableName= '%s_%d'%(paraDict['stbName'],i)
        #     tdSql.query("select * from %s"%stableName)
        #     tdSql.checkRows(rowsPerStb)

    def run(self):
        # print(self.master_dnode.cfgDict)
        self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -98,8 +98,10 @@ class TDTestCase:

        # first add three mnodes;
        tdLog.info("first add three mnodes and check mnode status")
        tdSql.info("create mnode on dnode 2")
        tdSql.execute("create mnode on dnode 2")
        clusterComCheck.checkMnodeStatus(2)
        tdSql.info("create mnode on dnode 3")
        tdSql.execute("create mnode on dnode 3")
        clusterComCheck.checkMnodeStatus(3)

@@ -68,7 +68,7 @@ class TDTestCase:
                    'showRow': 1}
        dnodenumbers = int(dnodenumbers)
        mnodeNums = int(mnodeNums)
        dbNumbers = int(dnodenumbers * restartNumber)
        dbNumbers = 1

        tdLog.info("first check dnode and mnode")
        tdSql.query("show dnodes;")

@@ -104,7 +104,7 @@ class TDTestCase:
        tdDnodes[1].starttaosd()
        tdDnodes[2].starttaosd()

        clusterComCheck.checkMnodeStatus(3)
        clusterComCheck.checkMnodeStatus(mnodeNums)


    def run(self):

@@ -111,14 +111,14 @@ class TDTestCase:
        # separate vnode and mnode in different dnodes.
        # create database and stable
        stopcount = 0
        while stopcount <= 2:
        while stopcount < restartNumber:
            tdLog.info("first restart loop")
            for i in range(dnodenumbers):
                tdDnodes[i].stoptaosd()
                tdDnodes[i].starttaosd()
            stopcount += 1
        clusterComCheck.checkDnodes(dnodenumbers)
        clusterComCheck.checkMnodeStatus(3)
        clusterComCheck.checkMnodeStatus(mnodeNums)

    def run(self):
        # print(self.master_dnode.cfgDict)

@@ -0,0 +1,122 @@
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
from test import tdDnodes
sys.path.append("./6-cluster")

from clusterCommonCreate import *
from clusterCommonCheck import *
import time
import socket
import subprocess
from multiprocessing import Process


class TDTestCase:

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())
        self.host = socket.gethostname()

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))

        if ("community" in selfPath):
            projPath = selfPath[:selfPath.find("community")]
        else:
            projPath = selfPath[:selfPath.find("tests")]

        for root, dirs, files in os.walk(projPath):
            if ("taosd" in files):
                rootRealPath = os.path.dirname(os.path.realpath(root))
                if ("packaging" not in rootRealPath):
                    buildPath = root[:len(root) - len("/build/bin")]
                    break
        return buildPath
    def fiveDnodeThreeMnode(self, dnodenumbers, mnodeNums, restartNumber):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName': 'db0_0',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 4,
                    'replica': 1,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbNum': 1,
                    'rowsPerTbl': 10000,
                    'batchNum': 10,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 10,
                    'showMsg': 1,
                    'showRow': 1}
        dnodenumbers = int(dnodenumbers)
        mnodeNums = int(mnodeNums)
        dbNumbers = 1

        tdLog.info("first check dnode and mnode")
        tdSql.query("show dnodes;")
        tdSql.checkData(0,1,'%s:6030'%self.host)
        tdSql.checkData(4,1,'%s:6430'%self.host)
        clusterComCheck.checkDnodes(dnodenumbers)
        clusterComCheck.checkMnodeStatus(1)

        # first add three mnodes;
        tdLog.info("first add three mnodes and check mnode status")
        tdSql.execute("create mnode on dnode 2")
        clusterComCheck.checkMnodeStatus(2)
        tdSql.execute("create mnode on dnode 3")
        clusterComCheck.checkMnodeStatus(3)

        # add some error operations and confirm the dnode status again
        tdLog.info("Confirm the status of the dnode again")
        tdSql.error("create mnode on dnode 2")
        tdSql.query("show dnodes;")
        # print(tdSql.queryResult)
        clusterComCheck.checkDnodes(dnodenumbers)
        # restart all taosd
        tdDnodes = cluster.dnodes
        tdLog.info("stop two mnodes")

        tdDnodes[0].stoptaosd()
        tdDnodes[1].stoptaosd()

        # tdLog.info("check whether 2 mnode status is offline")
        # clusterComCheck.check3mnode2off()
        # tdSql.error("create user user1 pass '123';")

        tdLog.info("start one mnode")
        tdDnodes[0].starttaosd()
        clusterComCheck.check3mnodeoff(2)

        clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
        clusterComCheck.checkDb(dbNumbers,1,'db0')


    def run(self):
        # print(self.master_dnode.cfgDict)
        self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -55,6 +55,7 @@ class ClusterComCheck:
                count += 1
                time.sleep(1)
        else:
            tdSql.query("show dnodes")
            tdLog.debug(tdSql.queryResult)
            tdLog.exit("found a cluster with %d dnodes, but the dnodes are not all ready within 30s ! "%dnodeNumbers)
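        # The added time.sleep(1) throttles this retry loop so the readiness check polls roughly
        # once per second instead of spinning, which appears to be what makes the "within 30s"
        # budget in the exit message meaningful.
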
@@ -111,7 +112,7 @@ class ClusterComCheck:
    def checkMnodeStatus(self, mnodeNums):
        self.mnodeNums = int(mnodeNums)
        # self.leaderDnode=int(leaderDnode)

        tdLog.debug("start to check status of mnodes")
        count = 0

        while count < 10:

@@ -45,7 +45,7 @@ class TDTestCase:
                break

        tdSql.execute('use db_taosx')
        tdSql.query("select * from ct3")
        tdSql.query("select * from ct3 order by c1 desc")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 51)
        tdSql.checkData(0, 4, 940)

@@ -58,17 +58,17 @@ class TDTestCase:
        tdSql.query("select * from ct2")
        tdSql.checkRows(0)

        tdSql.query("select * from ct0")
        tdSql.query("select * from ct0 order by c1 ")
        tdSql.checkRows(2)
        tdSql.checkData(0, 3, "a")
        tdSql.checkData(1, 4, None)

        tdSql.query("select * from n1")
        tdSql.query("select * from n1 order by ts")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, "eeee")
        tdSql.checkData(1, 2, 940)

        tdSql.query("select * from jt")
        tdSql.query("select * from jt order by i desc;")
        tdSql.checkRows(2)
        tdSql.checkData(0, 1, 11)
        tdSql.checkData(0, 2, None)
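        # Adding an explicit ORDER BY to these queries makes the row order deterministic, so the
        # row-index based assertions (tdSql.checkData(0, 1, 51), checkData(0, 3, "a"), ...) no
        # longer depend on the return order of an unordered SELECT.
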
@@ -164,6 +164,7 @@ python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 2-query/queryQnode.py
python3 ./test.py -f 2-query/max_partition.py
python3 ./test.py -f 2-query/last_row.py
python3 ./test.py -f 2-query/tsbsQuery.py

python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
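# Note: in these invocations the -f flag selects the test script to run, while -N and -M appear
# to set the number of dnodes and mnodes deployed for the cluster cases (e.g. -N 5 -M 3 brings up
# a 5-dnode cluster with 3 mnodes); the -Q flag used further down selects the query policy.
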
@@ -171,7 +172,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3

# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3
@@ -179,6 +180,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3

python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartMnodeInsertData.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeRestartVnodeInsertData.py -N 5 -M 3

@@ -187,6 +189,11 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5
# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3

python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3


python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3
python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py
@@ -216,7 +223,7 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py
# python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
@@ -332,7 +339,7 @@ python3 ./test.py -f 2-query/function_null.py -Q 2
python3 ./test.py -f 2-query/count_partition.py -Q 2
python3 ./test.py -f 2-query/max_partition.py -Q 2
python3 ./test.py -f 2-query/last_row.py -Q 2

python3 ./test.py -f 2-query/tsbsQuery.py -Q 2
#------------queryPolicy 3-----------

python3 ./test.py -f 2-query/between.py -Q 3
@@ -419,3 +426,4 @@ python3 ./test.py -f 2-query/function_null.py -Q 3
python3 ./test.py -f 2-query/count_partition.py -Q 3
python3 ./test.py -f 2-query/max_partition.py -Q 3
python3 ./test.py -f 2-query/last_row.py -Q 3
python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
@@ -24,4 +24,7 @@ target_include_directories(
    PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)

IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
    add_dependencies(taosd jemalloc)
ENDIF ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
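# Note: the taosd-on-jemalloc dependency above only takes effect when the build is configured
# with the JEMALLOC_ENABLED CMake option turned on (presumably by passing -DJEMALLOC_ENABLED=ON
# to cmake) on a 64-bit Linux host; otherwise the IF block is skipped and the shell/taosd targets
# build without jemalloc.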