Merge branch '3.0' into test/cluster_case
This commit is contained in:
commit
facf2d5718
|
@ -50,6 +50,10 @@ pysim/
|
|||
tests/script/api/batchprepare
|
||||
taosadapter
|
||||
taosadapter-debug
|
||||
tools/taos-tools/*
|
||||
tools/taosws-rs/*
|
||||
tools/taosadapter/*
|
||||
tools/upx*
|
||||
|
||||
# Doxygen Generated files
|
||||
html/
|
||||
|
|
|
@ -1,12 +0,0 @@
|
|||
[submodule "src/connector/go"]
|
||||
path = src/connector/go
|
||||
url = git@github.com:taosdata/driver-go.git
|
||||
[submodule "src/connector/hivemq-tdengine-extension"]
|
||||
path = src/connector/hivemq-tdengine-extension
|
||||
url = git@github.com:taosdata/hivemq-tdengine-extension.git
|
||||
[submodule "deps/TSZ"]
|
||||
path = deps/TSZ
|
||||
url = https://github.com/taosdata/TSZ.git
|
||||
[submodule "examples/rust"]
|
||||
path = examples/rust
|
||||
url = https://github.com/songtianyi/tdengine-rust-bindings.git
|
|
@ -118,6 +118,7 @@ def pre_test(){
|
|||
git rm --cached tools/taos-tools 2>/dev/null || :
|
||||
git rm --cached tools/taosadapter 2>/dev/null || :
|
||||
git rm --cached tools/taosws-rs 2>/dev/null || :
|
||||
git rm --cached examples/rust 2>/dev/null || :
|
||||
'''
|
||||
sh '''
|
||||
cd ${WKC}
|
||||
|
@ -269,6 +270,7 @@ def pre_test_win(){
|
|||
git rm --cached tools/taos-tools 2>nul
|
||||
git rm --cached tools/taosadapter 2>nul
|
||||
git rm --cached tools/taosws-rs 2>nul
|
||||
git rm --cached examples/rust 2>nul
|
||||
exit 0
|
||||
'''
|
||||
bat '''
|
||||
|
|
|
@ -90,6 +90,12 @@ ELSE ()
|
|||
ENDIF ()
|
||||
ENDIF ()
|
||||
|
||||
option(
|
||||
RUST_BINDINGS
|
||||
"If build with rust-bindings"
|
||||
ON
|
||||
)
|
||||
|
||||
option(
|
||||
JEMALLOC_ENABLED
|
||||
"If build with jemalloc"
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
|
||||
# rust-bindings
|
||||
ExternalProject_Add(rust-bindings
|
||||
GIT_REPOSITORY https://github.com/songtianyi/tdengine-rust-bindings.git
|
||||
GIT_TAG 7ed7a97
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/examples/rust"
|
||||
BINARY_DIR "${TD_SOURCE_DIR}/examples/rust"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -1,5 +1,5 @@
|
|||
|
||||
# zlib
|
||||
# taosadapter
|
||||
ExternalProject_Add(taosadapter
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
|
||||
GIT_TAG df8678f
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
|
||||
# zlib
|
||||
# taos-tools
|
||||
ExternalProject_Add(taos-tools
|
||||
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
|
||||
GIT_TAG c529299
|
||||
GIT_TAG 9dc2fec
|
||||
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
|
||||
# zlib
|
||||
# taosws-rs
|
||||
ExternalProject_Add(taosws-rs
|
||||
GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git
|
||||
GIT_TAG 9de599d
|
||||
|
|
|
@ -105,6 +105,11 @@ if(${BUILD_WITH_SQLITE})
|
|||
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(${BUILD_WITH_SQLITE})
|
||||
|
||||
# rust-bindings
|
||||
if(${RUST_BINDINGS})
|
||||
cat("${TD_SUPPORT_DIR}/rust-bindings_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(${RUST_BINDINGS})
|
||||
|
||||
# lucene
|
||||
if(${BUILD_WITH_LUCENE})
|
||||
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
@ -135,6 +140,24 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
|||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
|
||||
# clear submodule
|
||||
execute_process(COMMAND git submodule deinit -f tools/taos-tools
|
||||
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
|
||||
execute_process(COMMAND git rm --cached tools/taos-tools
|
||||
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
|
||||
execute_process(COMMAND git submodule deinit -f tools/taosadapter
|
||||
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
|
||||
execute_process(COMMAND git rm --cached tools/taosadapter
|
||||
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
|
||||
execute_process(COMMAND git submodule deinit -f tools/taosws-rs
|
||||
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
|
||||
execute_process(COMMAND git rm --cached tools/taosws-rs
|
||||
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
|
||||
execute_process(COMMAND git submodule deinit -f examples/rust
|
||||
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
|
||||
execute_process(COMMAND git rm --cached examples/rust
|
||||
WORKING_DIRECTORY "${TD_SOURCE_DIR}")
|
||||
|
||||
# ================================================================================================
|
||||
# Build
|
||||
|
|
|
@ -401,7 +401,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**返回结果类型**:CAST 中指定的类型(type_name)。
|
||||
|
||||
**适用数据类型**:输入参数 expression 的类型可以是BLOB、MEDIUMBLOB和JSON外的所有类型。
|
||||
**适用数据类型**:输入参数 expression 的类型可以是除JSON外的所有类型。
|
||||
|
||||
**嵌套子查询支持**:适用于内层查询和外层查询。
|
||||
|
||||
|
@ -410,10 +410,10 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
**使用说明**:
|
||||
|
||||
- 对于不能支持的类型转换会直接报错。
|
||||
- 对于类型支持但某些值无法正确转换的情况对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况:
|
||||
- 对于类型支持但某些值无法正确转换的情况,对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况:
|
||||
1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。
|
||||
2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。
|
||||
3)转换到字符串类型时,如果转换后长度超过type_name的长度,则会截断,但不会报错。
|
||||
3)转换到字符串类型时,如果转换后长度超过type_name中指定的长度,则会截断,但不会报错。
|
||||
|
||||
#### TO_ISO8601
|
||||
|
||||
|
@ -421,7 +421,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
|
||||
```
|
||||
|
||||
**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果附带当前客户端的系统时区信息。
|
||||
**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。
|
||||
|
||||
**返回结果数据类型**:VARCHAR 类型。
|
||||
|
||||
|
@ -435,7 +435,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
|
|||
|
||||
- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。
|
||||
- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定;
|
||||
- 如果输入是 TIMSTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。
|
||||
- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。
|
||||
|
||||
|
||||
#### TO_JSON
|
||||
|
@ -516,7 +516,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM
|
|||
|
||||
**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。
|
||||
|
||||
**返回结果数据类型**:BIGINT。输入包含不符合时间日期格式字符串则返回 NULL。
|
||||
**返回结果数据类型**:BIGINT。
|
||||
|
||||
**应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。
|
||||
|
||||
|
@ -528,6 +528,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM
|
|||
- 支持的时间单位 time_unit 如下:
|
||||
1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
|
||||
- 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。
|
||||
- 输入包含不符合时间日期格式的字符串则返回 NULL。
|
||||
|
||||
|
||||
#### TIMETRUNCATE
|
||||
|
@ -548,6 +549,7 @@ SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name
|
|||
- 支持的时间单位 time_unit 如下:
|
||||
1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
|
||||
- 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。
|
||||
- 输入包含不符合时间日期格式的字符串则返回 NULL。
|
||||
|
||||
|
||||
#### TIMEZONE
|
||||
|
@ -659,8 +661,6 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
|
|||
|
||||
**适用数据类型**:TIMESTAMP。
|
||||
|
||||
**支持的版本**:2.6.0.0 及以后的版本。
|
||||
|
||||
**适用于**: 表,超级表,嵌套查询的外层查询
|
||||
|
||||
**说明**:
|
||||
|
@ -773,11 +773,11 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
|
|||
用户指定 bin 的具体数值。
|
||||
|
||||
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
|
||||
"start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点,
|
||||
"start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点,
|
||||
生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。
|
||||
|
||||
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
|
||||
"start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点,
|
||||
"start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点,
|
||||
生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。
|
||||
3. normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。
|
||||
|
||||
|
@ -958,20 +958,20 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
|
|||
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。
|
||||
**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。
|
||||
|
||||
**返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。
|
||||
**返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。
|
||||
|
||||
**适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。
|
||||
**适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。
|
||||
|
||||
**嵌套子查询支持**: 适用于内层查询和外层查询。
|
||||
**嵌套子查询支持**: 适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
|
||||
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
|
||||
**使用说明**:
|
||||
|
||||
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
|
||||
|
||||
|
||||
### TAIL
|
||||
|
@ -1048,9 +1048,9 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
|
||||
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
|
||||
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||
|
||||
|
||||
### DERIVATIVE
|
||||
|
@ -1069,8 +1069,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
|
||||
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
|
||||
|
||||
### DIFF
|
||||
|
||||
|
@ -1088,8 +1088,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
|
|||
|
||||
**使用说明**:
|
||||
|
||||
- 输出结果行数是范围内总行数减一,第一行没有结果输出。
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。
|
||||
- 输出结果行数是范围内总行数减一,第一行没有结果输出。
|
||||
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。
|
||||
|
||||
|
||||
### IRATE
|
||||
|
@ -1113,21 +1113,21 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
|
|||
SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
|
||||
```
|
||||
|
||||
**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。
|
||||
**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。
|
||||
|
||||
**返回结果类型**: DOUBLE。
|
||||
**返回结果类型**: DOUBLE。
|
||||
|
||||
**适用数据类型**: 数值类型。
|
||||
**适用数据类型**: 数值类型。
|
||||
|
||||
**嵌套子查询支持**: 适用于内层查询和外层查询。
|
||||
**嵌套子查询支持**: 适用于内层查询和外层查询。
|
||||
|
||||
**适用于**:表和超级表。
|
||||
**适用于**:表和超级表。
|
||||
|
||||
**使用说明**:
|
||||
**使用说明**:
|
||||
|
||||
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
|
||||
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
|
||||
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
|
||||
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
|
||||
|
||||
|
||||
### STATECOUNT
|
||||
|
|
|
@ -29,7 +29,7 @@ static void msg_process(TAOS_RES* msg) {
|
|||
printf("vg: %d\n", tmq_get_vgroup_id(msg));
|
||||
if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
|
||||
tmq_raw_data raw = {0};
|
||||
int32_t code = tmq_get_raw(msg, &raw);
|
||||
int32_t code = tmq_get_raw(msg, &raw);
|
||||
if (code == 0) {
|
||||
TAOS* pConn = taos_connect("192.168.1.86", "root", "taosdata", NULL, 0);
|
||||
if (pConn == NULL) {
|
||||
|
@ -302,7 +302,7 @@ int32_t create_topic() {
|
|||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
// pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
|
||||
// pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
|
||||
pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
Subproject commit 7ed7a97715388fa144718764d6bf20f9bfc29a12
|
|
@ -146,6 +146,7 @@ struct SConfig *taosGetCfg();
|
|||
void taosSetAllDebugFlag(int32_t flag);
|
||||
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
|
||||
int32_t taosSetCfg(SConfig *pCfg, char *name);
|
||||
void taosLocalCfgForbiddenToChange(char* name, bool* forbidden);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -155,7 +155,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t
|
|||
|
||||
void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
|
||||
|
||||
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes);
|
||||
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/);
|
||||
|
||||
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
|
||||
|
||||
|
|
|
@ -418,8 +418,6 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
|
|||
|
||||
char* nodesGetFillModeString(EFillMode mode);
|
||||
int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);
|
||||
int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
|
||||
SNode** pOtherCond);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -46,6 +46,10 @@ extern int32_t filterFreeNcharColumns(SFilterInfo *pFilterInfo);
|
|||
extern void filterFreeInfo(SFilterInfo *info);
|
||||
extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t numOfCols, int32_t numOfRows);
|
||||
|
||||
/* condition split interface */
|
||||
int32_t filterPartitionCond(SNode **pCondition, SNode **pPrimaryKeyCond, SNode **pTagIndexCond, SNode **pTagCond,
|
||||
SNode **pOtherCond);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -25,7 +25,7 @@ extern "C" {
|
|||
|
||||
typedef struct SFilterInfo SFilterInfo;
|
||||
|
||||
int32_t scalarGetOperatorResultType(SOperatorNode* pOp);
|
||||
int32_t scalarGetOperatorResultType(SOperatorNode *pOp);
|
||||
|
||||
/*
|
||||
pNode will be freed in API;
|
||||
|
@ -43,7 +43,7 @@ int32_t scalarGetOperatorParamNum(EOperatorType type);
|
|||
int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type);
|
||||
|
||||
int32_t vectorGetConvertType(int32_t type1, int32_t type2);
|
||||
int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut, int32_t* overflow);
|
||||
int32_t vectorConvertImpl(const SScalarParam *pIn, SScalarParam *pOut, int32_t *overflow);
|
||||
|
||||
/* Math functions */
|
||||
int32_t absFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
|
@ -86,7 +86,7 @@ int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutpu
|
|||
int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
|
||||
bool getTimePseudoFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
|
||||
bool getTimePseudoFuncEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv);
|
||||
|
||||
int32_t winStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
int32_t winEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
|
||||
|
|
|
@ -62,6 +62,8 @@ typedef int32_t TdUcs4;
|
|||
int32_t taosUcs4len(TdUcs4 *ucs4);
|
||||
int64_t taosStr2int64(const char *str);
|
||||
|
||||
void taosConvInit(void);
|
||||
void taosConvDestroy();
|
||||
int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs);
|
||||
bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len);
|
||||
int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes);
|
||||
|
|
|
@ -361,6 +361,8 @@ void taos_init_imp(void) {
|
|||
|
||||
initQueryModuleMsgHandle();
|
||||
|
||||
taosConvInit();
|
||||
|
||||
rpcInit();
|
||||
|
||||
SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100};
|
||||
|
|
|
@ -353,6 +353,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) {
|
|||
desc.subDesc = NULL;
|
||||
desc.subPlanNum = 0;
|
||||
}
|
||||
desc.subPlanNum = taosArrayGetSize(desc.subDesc);
|
||||
ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc));
|
||||
} else {
|
||||
desc.subDesc = NULL;
|
||||
|
|
|
@ -75,6 +75,8 @@ void taos_cleanup(void) {
|
|||
|
||||
cleanupTaskQueue();
|
||||
|
||||
taosConvDestroy();
|
||||
|
||||
tscInfo("all local resources released");
|
||||
taosCleanupCfg();
|
||||
taosCloseLog();
|
||||
|
|
|
@ -16,65 +16,9 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "tdatablock.h"
|
||||
#include "tcompare.h"
|
||||
#include "tglobal.h"
|
||||
#include "tlog.h"
|
||||
#include "tname.h"
|
||||
|
||||
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) {
|
||||
pEp->port = 0;
|
||||
strcpy(pEp->fqdn, ep);
|
||||
|
||||
char* temp = strchr(pEp->fqdn, ':');
|
||||
if (temp) {
|
||||
*temp = 0;
|
||||
pEp->port = atoi(temp + 1);
|
||||
}
|
||||
|
||||
if (pEp->port == 0) {
|
||||
pEp->port = tsServerPort;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port) {
|
||||
if (pEpSet == NULL || fqdn == NULL || strlen(fqdn) == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t index = pEpSet->numOfEps;
|
||||
tstrncpy(pEpSet->eps[index].fqdn, fqdn, tListLen(pEpSet->eps[index].fqdn));
|
||||
pEpSet->eps[index].port = port;
|
||||
pEpSet->numOfEps += 1;
|
||||
}
|
||||
|
||||
bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2) {
|
||||
if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < s1->numOfEps; i++) {
|
||||
if (s1->eps[i].port != s2->eps[i].port || strncmp(s1->eps[i].fqdn, s2->eps[i].fqdn, TSDB_FQDN_LEN) != 0)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) {
|
||||
taosCorBeginWrite(&pEpSet->version);
|
||||
pEpSet->epSet = *pNewEpSet;
|
||||
taosCorEndWrite(&pEpSet->version);
|
||||
}
|
||||
|
||||
SEpSet getEpSet_s(SCorEpSet* pEpSet) {
|
||||
SEpSet ep = {0};
|
||||
taosCorBeginRead(&pEpSet->version);
|
||||
ep = pEpSet->epSet;
|
||||
taosCorEndRead(&pEpSet->version);
|
||||
|
||||
return ep;
|
||||
}
|
||||
|
||||
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
|
||||
ASSERT(pColumnInfoData != NULL);
|
||||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
|
@ -1713,8 +1657,9 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) {
|
|||
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
|
||||
int32_t rows = pDataBlock->info.rows;
|
||||
printf("%s |block type %d |child id %d|group id %" PRIu64 "\n", flag, (int32_t)pDataBlock->info.type,
|
||||
pDataBlock->info.childId, pDataBlock->info.groupId);
|
||||
printf("%s |block ver %" PRIi64 " |block type %d |child id %d|group id %" PRIu64 "\n", flag,
|
||||
pDataBlock->info.version, (int32_t)pDataBlock->info.type, pDataBlock->info.childId,
|
||||
pDataBlock->info.groupId);
|
||||
for (int32_t j = 0; j < rows; j++) {
|
||||
printf("%s |", flag);
|
||||
for (int32_t k = 0; k < numOfCols; k++) {
|
||||
|
|
|
@ -49,7 +49,7 @@ int32_t tsNumOfShmThreads = 1;
|
|||
// queue & threads
|
||||
int32_t tsNumOfRpcThreads = 1;
|
||||
int32_t tsNumOfCommitThreads = 2;
|
||||
int32_t tsNumOfTaskQueueThreads = 1;
|
||||
int32_t tsNumOfTaskQueueThreads = 4;
|
||||
int32_t tsNumOfMnodeQueryThreads = 4;
|
||||
int32_t tsNumOfMnodeFetchThreads = 1;
|
||||
int32_t tsNumOfMnodeReadThreads = 1;
|
||||
|
@ -317,9 +317,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
|
|||
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
|
||||
if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
|
||||
|
||||
tsNumOfTaskQueueThreads = tsNumOfCores / 4;
|
||||
tsNumOfTaskQueueThreads = TRANGE(tsNumOfTaskQueueThreads, 1, 2);
|
||||
if (cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 1, 1024, 0) != 0) return -1;
|
||||
tsNumOfTaskQueueThreads = tsNumOfCores / 2;
|
||||
tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4);
|
||||
if (cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 4, 1024, 0) != 0) return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -594,6 +594,20 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
void taosLocalCfgForbiddenToChange(char* name, bool* forbidden) {
|
||||
int32_t len = strlen(name);
|
||||
char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0};
|
||||
strntolower(lowcaseName, name, TMIN(CFG_NAME_MAX_LEN, len));
|
||||
|
||||
if (strcasecmp("charset", name) == 0) {
|
||||
*forbidden = true;
|
||||
return;
|
||||
}
|
||||
|
||||
*forbidden = false;
|
||||
}
|
||||
|
||||
|
||||
int32_t taosSetCfg(SConfig *pCfg, char *name) {
|
||||
int32_t len = strlen(name);
|
||||
char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0};
|
||||
|
|
|
@ -0,0 +1,77 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#define _DEFAULT_SOURCE
|
||||
#include "tdatablock.h"
|
||||
#include "tglobal.h"
|
||||
#include "tlog.h"
|
||||
#include "tname.h"
|
||||
|
||||
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp) {
|
||||
pEp->port = 0;
|
||||
strcpy(pEp->fqdn, ep);
|
||||
|
||||
char* temp = strchr(pEp->fqdn, ':');
|
||||
if (temp) {
|
||||
*temp = 0;
|
||||
pEp->port = atoi(temp + 1);
|
||||
}
|
||||
|
||||
if (pEp->port == 0) {
|
||||
pEp->port = tsServerPort;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port) {
|
||||
if (pEpSet == NULL || fqdn == NULL || strlen(fqdn) == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
int32_t index = pEpSet->numOfEps;
|
||||
tstrncpy(pEpSet->eps[index].fqdn, fqdn, tListLen(pEpSet->eps[index].fqdn));
|
||||
pEpSet->eps[index].port = port;
|
||||
pEpSet->numOfEps += 1;
|
||||
}
|
||||
|
||||
bool isEpsetEqual(const SEpSet* s1, const SEpSet* s2) {
|
||||
if (s1->numOfEps != s2->numOfEps || s1->inUse != s2->inUse) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < s1->numOfEps; i++) {
|
||||
if (s1->eps[i].port != s2->eps[i].port || strncmp(s1->eps[i].fqdn, s2->eps[i].fqdn, TSDB_FQDN_LEN) != 0)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void updateEpSet_s(SCorEpSet* pEpSet, SEpSet* pNewEpSet) {
|
||||
taosCorBeginWrite(&pEpSet->version);
|
||||
pEpSet->epSet = *pNewEpSet;
|
||||
taosCorEndWrite(&pEpSet->version);
|
||||
}
|
||||
|
||||
SEpSet getEpSet_s(SCorEpSet* pEpSet) {
|
||||
SEpSet ep = {0};
|
||||
taosCorBeginRead(&pEpSet->version);
|
||||
ep = pEpSet->epSet;
|
||||
taosCorEndRead(&pEpSet->version);
|
||||
|
||||
return ep;
|
||||
}
|
||||
|
||||
|
|
@ -20,34 +20,6 @@
|
|||
|
||||
#define VALID_NAME_TYPE(x) ((x) == TSDB_DB_NAME_T || (x) == TSDB_TABLE_NAME_T)
|
||||
|
||||
bool tscValidateTableNameLength(size_t len) { return len < TSDB_TABLE_NAME_LEN; }
|
||||
|
||||
#if 0
|
||||
// TODO refactor
|
||||
SColumnFilterInfo* tFilterInfoDup(const SColumnFilterInfo* src, int32_t numOfFilters) {
|
||||
if (numOfFilters == 0 || src == NULL) {
|
||||
assert(src == NULL);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SColumnFilterInfo* pFilter = taosMemoryCalloc(1, numOfFilters * sizeof(SColumnFilterInfo));
|
||||
|
||||
memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
|
||||
for (int32_t j = 0; j < numOfFilters; ++j) {
|
||||
if (pFilter[j].filterstr) {
|
||||
size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
|
||||
pFilter[j].pz = (int64_t) taosMemoryCalloc(1, len);
|
||||
|
||||
memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t) pFilter[j].len);
|
||||
}
|
||||
}
|
||||
|
||||
assert(src->filterstr == 0 || src->filterstr == 1);
|
||||
assert(!(src->lowerRelOptr == 0 && src->upperRelOptr == 0));
|
||||
|
||||
return pFilter;
|
||||
}
|
||||
#endif
|
||||
#if 0
|
||||
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
|
||||
if (slidingTime == 0) {
|
||||
|
|
|
@ -218,6 +218,8 @@ int mainWindows(int argc,char** argv) {
|
|||
taosCleanupArgs();
|
||||
return -1;
|
||||
}
|
||||
|
||||
taosConvInit();
|
||||
|
||||
if (global.dumpConfig) {
|
||||
dmDumpCfg();
|
||||
|
|
|
@ -215,6 +215,7 @@ void dmCleanupDnode(SDnode *pDnode) {
|
|||
dmClearVars(pDnode);
|
||||
rpcCleanup();
|
||||
indexCleanup();
|
||||
taosConvDestroy();
|
||||
dDebug("dnode is closed, ptr:%p", pDnode);
|
||||
}
|
||||
|
||||
|
|
|
@ -31,6 +31,7 @@ target_sources(
|
|||
"src/sma/smaOpen.c"
|
||||
"src/sma/smaCommit.c"
|
||||
"src/sma/smaRollup.c"
|
||||
"src/sma/smaSnapshot.c"
|
||||
"src/sma/smaTimeRange.c"
|
||||
|
||||
# tsdb
|
||||
|
|
|
@ -209,6 +209,9 @@ int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen,
|
|||
|
||||
// smaFileUtil ================
|
||||
|
||||
typedef struct SQTaskFReader SQTaskFReader;
|
||||
typedef struct SQTaskFWriter SQTaskFWriter;
|
||||
|
||||
#define TD_FILE_HEAD_SIZE 512
|
||||
|
||||
typedef struct STFInfo STFInfo;
|
||||
|
|
|
@ -90,6 +90,9 @@ int32_t tsdbRowCmprFn(const void *p1, const void *p2);
|
|||
void tRowIterInit(SRowIter *pIter, TSDBROW *pRow, STSchema *pTSchema);
|
||||
SColVal *tRowIterNext(SRowIter *pIter);
|
||||
// SRowMerger
|
||||
int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema);
|
||||
int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
|
||||
|
||||
int32_t tRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema);
|
||||
void tRowMergerClear(SRowMerger *pMerger);
|
||||
int32_t tRowMerge(SRowMerger *pMerger, TSDBROW *pRow);
|
||||
|
|
|
@ -62,6 +62,8 @@ typedef struct SMetaSnapReader SMetaSnapReader;
|
|||
typedef struct SMetaSnapWriter SMetaSnapWriter;
|
||||
typedef struct STsdbSnapReader STsdbSnapReader;
|
||||
typedef struct STsdbSnapWriter STsdbSnapWriter;
|
||||
typedef struct SRsmaSnapReader SRsmaSnapReader;
|
||||
typedef struct SRsmaSnapWriter SRsmaSnapWriter;
|
||||
typedef struct SSnapDataHdr SSnapDataHdr;
|
||||
|
||||
#define VNODE_META_DIR "meta"
|
||||
|
@ -196,13 +198,21 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr
|
|||
int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
|
||||
int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, int8_t rollback);
|
||||
// STsdbSnapReader ========================================
|
||||
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapReader** ppReader);
|
||||
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader);
|
||||
int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader);
|
||||
int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData);
|
||||
// STsdbSnapWriter ========================================
|
||||
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter);
|
||||
int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
|
||||
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback);
|
||||
// SRsmaSnapReader ========================================
|
||||
int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader);
|
||||
int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader);
|
||||
int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData);
|
||||
// SRsmaSnapWriter ========================================
|
||||
int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter);
|
||||
int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
|
||||
int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback);
|
||||
|
||||
typedef struct {
|
||||
int8_t streamType; // sma or other
|
||||
|
@ -314,6 +324,15 @@ struct SSma {
|
|||
// sma
|
||||
void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data);
|
||||
|
||||
enum {
|
||||
SNAP_DATA_META = 0,
|
||||
SNAP_DATA_TSDB = 1,
|
||||
SNAP_DATA_DEL = 2,
|
||||
SNAP_DATA_RSMA1 = 3,
|
||||
SNAP_DATA_RSMA2 = 4,
|
||||
SNAP_DATA_QTASK = 5,
|
||||
};
|
||||
|
||||
struct SSnapDataHdr {
|
||||
int8_t type;
|
||||
int64_t index;
|
||||
|
|
|
@ -49,7 +49,8 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapRead
|
|||
|
||||
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
|
||||
if (pSma->pRSmaTsdb[i]) {
|
||||
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, &pReader->pDataReader[i]);
|
||||
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, i == 0 ? SNAP_DATA_RSMA1 : SNAP_DATA_RSMA2,
|
||||
&pReader->pDataReader[i]);
|
||||
if (code < 0) {
|
||||
goto _err;
|
||||
}
|
||||
|
@ -221,10 +222,9 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
|
|||
}
|
||||
}
|
||||
|
||||
smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
|
||||
taosMemoryFree(pWriter);
|
||||
*ppWriter = NULL;
|
||||
|
||||
smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma));
|
||||
return code;
|
||||
|
||||
_err:
|
||||
|
@ -245,15 +245,17 @@ int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
|
|||
code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData);
|
||||
} else if (pHdr->type == SNAP_DATA_QTASK) {
|
||||
code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData);
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
if (code < 0) goto _err;
|
||||
|
||||
_exit:
|
||||
smaInfo("vgId:%d rsma snapshot write for data %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
|
||||
smaInfo("vgId:%d rsma snapshot write for data type %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
smaError("vgId:%d rsma snapshot write for data %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
|
||||
smaError("vgId:%d rsma snapshot write for data type %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -684,6 +684,9 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) {
|
|||
|
||||
taosHashPut(pTq->pStreamTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*));
|
||||
|
||||
/*SMeta* pMeta = pTq->pVnode->pMeta;*/
|
||||
/*tdbTbUpsert(pMeta->pTaskIdx, &pTask->taskId, sizeof(int32_t), msg, msgLen, &pMeta->txn);*/
|
||||
|
||||
return 0;
|
||||
FAIL:
|
||||
if (pTask->inputQueue) streamQueueClose(pTask->inputQueue);
|
||||
|
|
|
@ -311,6 +311,8 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
|
|||
|
||||
tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
|
||||
|
||||
tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
|
||||
|
||||
wSet.diskId = did;
|
||||
wSet.fid = pCommitter->commitFid;
|
||||
fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0};
|
||||
|
|
|
@ -80,15 +80,15 @@ typedef struct SFilesetIter {
|
|||
typedef struct SFileDataBlockInfo {
|
||||
// index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it
|
||||
uint64_t uid;
|
||||
int32_t tbBlockIdx;
|
||||
int32_t tbBlockIdx;
|
||||
} SFileDataBlockInfo;
|
||||
|
||||
typedef struct SDataBlockIter {
|
||||
int32_t numOfBlocks;
|
||||
int32_t index;
|
||||
SArray* blockList; // SArray<SFileDataBlockInfo>
|
||||
int32_t order;
|
||||
SBlock block; // current SBlock data
|
||||
int32_t numOfBlocks;
|
||||
int32_t index;
|
||||
SArray* blockList; // SArray<SFileDataBlockInfo>
|
||||
int32_t order;
|
||||
SBlock block; // current SBlock data
|
||||
SHashObj* pTableMap;
|
||||
} SDataBlockIter;
|
||||
|
||||
|
@ -124,8 +124,8 @@ struct STsdbReader {
|
|||
SSDataBlock* pResBlock;
|
||||
int32_t capacity;
|
||||
SReaderStatus status;
|
||||
char* idStr; // query info handle, for debug purpose
|
||||
int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows
|
||||
char* idStr; // query info handle, for debug purpose
|
||||
int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows
|
||||
SBlockLoadSuppInfo suppInfo;
|
||||
STsdbReadSnap* pReadSnap;
|
||||
SIOCostSummary cost;
|
||||
|
@ -133,8 +133,8 @@ struct STsdbReader {
|
|||
SDataFReader* pFileReader;
|
||||
SVersionRange verRange;
|
||||
|
||||
int32_t step;
|
||||
STsdbReader* innerReader[2];
|
||||
int32_t step;
|
||||
STsdbReader* innerReader[2];
|
||||
};
|
||||
|
||||
static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter);
|
||||
|
@ -143,7 +143,7 @@ static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, i
|
|||
static TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader);
|
||||
static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader,
|
||||
SRowMerger* pMerger);
|
||||
static int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
|
||||
static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
|
||||
STsdbReader* pReader);
|
||||
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow);
|
||||
static int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex);
|
||||
|
@ -212,8 +212,8 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
|
|||
pTsdbReader->idStr);
|
||||
}
|
||||
|
||||
tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, %s", pTsdbReader, numOfTables, (sizeof(STableBlockScanInfo)*numOfTables)/1024.0,
|
||||
pTsdbReader->idStr);
|
||||
tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, %s", pTsdbReader, numOfTables,
|
||||
(sizeof(STableBlockScanInfo) * numOfTables) / 1024.0, pTsdbReader->idStr);
|
||||
|
||||
return pTableMap;
|
||||
}
|
||||
|
@ -397,7 +397,8 @@ static SSDataBlock* createResBlock(SQueryTableDataCond* pCond, int32_t capacity)
|
|||
return pResBlock;
|
||||
}
|
||||
|
||||
static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity, const char* idstr) {
|
||||
static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity,
|
||||
const char* idstr) {
|
||||
int32_t code = 0;
|
||||
int8_t level = 0;
|
||||
STsdbReader* pReader = (STsdbReader*)taosMemoryCalloc(1, sizeof(*pReader));
|
||||
|
@ -577,7 +578,7 @@ static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader,
|
|||
|
||||
int64_t et2 = taosGetTimestampUs();
|
||||
tsdbDebug("load block index for %d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s",
|
||||
(int32_t)num, (et1 - st)/1000.0, (et2-et1)/1000.0, num * sizeof(SBlockIdx)/1024.0, pReader->idStr);
|
||||
(int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, pReader->idStr);
|
||||
|
||||
pReader->cost.headFileLoadTime += (et1 - st) / 1000.0;
|
||||
|
||||
|
@ -592,7 +593,7 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_
|
|||
*numOfValidTables = 0;
|
||||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
size_t size = 0;
|
||||
size_t size = 0;
|
||||
|
||||
STableBlockScanInfo* px = NULL;
|
||||
while (1) {
|
||||
|
@ -642,9 +643,9 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_
|
|||
}
|
||||
}
|
||||
|
||||
double el = (taosGetTimestampUs() - st)/1000.0;
|
||||
double el = (taosGetTimestampUs() - st) / 1000.0;
|
||||
tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, size:%.2f Kb, elapsed time:%.2f ms %s",
|
||||
numOfTables, *numOfBlocks, *numOfValidTables, size/1000.0, el, pReader->idStr);
|
||||
numOfTables, *numOfBlocks, *numOfValidTables, size / 1000.0, el, pReader->idStr);
|
||||
|
||||
pReader->cost.numOfBlocks += (*numOfBlocks);
|
||||
pReader->cost.headFileLoadTime += el;
|
||||
|
@ -680,9 +681,7 @@ static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter) {
|
|||
return pFBlockInfo;
|
||||
}
|
||||
|
||||
static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) {
|
||||
return &pBlockIter->block;
|
||||
}
|
||||
static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; }
|
||||
|
||||
static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) {
|
||||
SReaderStatus* pStatus = &pReader->status;
|
||||
|
@ -690,7 +689,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
|
|||
|
||||
SBlockData* pBlockData = &pStatus->fileBlockData;
|
||||
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
|
||||
SBlock* pBlock = getCurrentBlock(pBlockIter);
|
||||
SBlock* pBlock = getCurrentBlock(pBlockIter);
|
||||
SSDataBlock* pResBlock = pReader->pResBlock;
|
||||
int32_t numOfOutputCols = blockDataGetNumOfCols(pResBlock);
|
||||
|
||||
|
@ -772,17 +771,17 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI
|
|||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
|
||||
SBlock* pBlock = getCurrentBlock(pBlockIter);
|
||||
SBlock* pBlock = getCurrentBlock(pBlockIter);
|
||||
|
||||
SSDataBlock* pResBlock = pReader->pResBlock;
|
||||
int32_t numOfCols = blockDataGetNumOfCols(pResBlock);
|
||||
SSDataBlock* pResBlock = pReader->pResBlock;
|
||||
int32_t numOfCols = blockDataGetNumOfCols(pResBlock);
|
||||
|
||||
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
|
||||
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
|
||||
|
||||
SBlockIdx blockIdx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid};
|
||||
int32_t code = tsdbReadColData(pReader->pFileReader, &blockIdx, pBlock, pSupInfo->colIds, numOfCols,
|
||||
pBlockData, NULL, NULL);
|
||||
SBlockIdx blockIdx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid};
|
||||
int32_t code =
|
||||
tsdbReadColData(pReader->pFileReader, &blockIdx, pBlock, pSupInfo->colIds, numOfCols, pBlockData, NULL, NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
@ -857,7 +856,7 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v
|
|||
}
|
||||
|
||||
static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) {
|
||||
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
|
||||
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
|
||||
STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
|
||||
|
||||
int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx);
|
||||
|
@ -882,7 +881,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
|
|||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
SBlockOrderSupporter sup = {0};
|
||||
int32_t code = initBlockOrderSupporter(&sup, numOfTables);
|
||||
int32_t code = initBlockOrderSupporter(&sup, numOfTables);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
}
|
||||
|
@ -937,8 +936,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
|
|||
}
|
||||
|
||||
int64_t et = taosGetTimestampUs();
|
||||
tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s", pReader, cnt,
|
||||
(et - st)/1000.0, pReader->idStr);
|
||||
tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s",
|
||||
pReader, cnt, (et - st) / 1000.0, pReader->idStr);
|
||||
|
||||
pBlockIter->index = asc ? 0 : (numOfBlocks - 1);
|
||||
cleanupBlockOrderSupporter(&sup);
|
||||
|
@ -976,7 +975,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
|
|||
}
|
||||
|
||||
int64_t et = taosGetTimestampUs();
|
||||
tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et-st)/1000.0, pReader->idStr);
|
||||
tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et - st) / 1000.0,
|
||||
pReader->idStr);
|
||||
cleanupBlockOrderSupporter(&sup);
|
||||
taosMemoryFree(pTree);
|
||||
|
||||
|
@ -1024,7 +1024,7 @@ static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STab
|
|||
int32_t step = asc ? 1 : -1;
|
||||
*nextIndex = pFBlockInfo->tbBlockIdx + step;
|
||||
|
||||
SBlock *pBlock = taosMemoryCalloc(1, sizeof(SBlock));
|
||||
SBlock* pBlock = taosMemoryCalloc(1, sizeof(SBlock));
|
||||
int32_t* indexInMapdata = taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex);
|
||||
|
||||
tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, *indexInMapdata, pBlock, tGetBlock);
|
||||
|
@ -1196,10 +1196,10 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
|
|||
setComposedBlockFlag(pReader, true);
|
||||
|
||||
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
|
||||
tsdbDebug(
|
||||
"%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange: %" PRId64
|
||||
" - %" PRId64 " %s",
|
||||
pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, pReader->idStr);
|
||||
tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange: %" PRId64
|
||||
" - %" PRId64 " %s",
|
||||
pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey,
|
||||
pReader->idStr);
|
||||
|
||||
pReader->cost.buildmemBlock += elapsedTime;
|
||||
return code;
|
||||
|
@ -1230,7 +1230,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
|
||||
tRowMerge(&merge, pRow);
|
||||
doMergeRowsInBuf(pIter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
}
|
||||
|
@ -1246,7 +1246,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
updateSchema(pRow, pBlockScanInfo->uid, pReader);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
doMergeRowsInBuf(pIter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMerge(&merge, &fRow);
|
||||
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
|
||||
|
@ -1291,12 +1291,12 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
|
||||
if (ik.ts == key) {
|
||||
tRowMerge(&merge, piRow);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
}
|
||||
|
||||
if (k.ts == key) {
|
||||
tRowMerge(&merge, pRow);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
}
|
||||
|
||||
tRowMergerGetRow(&merge, &pTSRow);
|
||||
|
@ -1336,11 +1336,11 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
|
|||
updateSchema(pRow, uid, pReader);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
if (ik.ts == k.ts) {
|
||||
tRowMerge(&merge, piRow);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
}
|
||||
|
||||
if (k.ts == key) {
|
||||
|
@ -1514,9 +1514,10 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo*
|
|||
setComposedBlockFlag(pReader, true);
|
||||
int64_t et = taosGetTimestampUs();
|
||||
|
||||
tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, elapsed time:%.2f ms %s", pReader,
|
||||
pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows,
|
||||
(et - st)/1000.0, pReader->idStr);
|
||||
tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
|
||||
" rows:%d, elapsed time:%.2f ms %s",
|
||||
pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
|
||||
pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -1694,7 +1695,7 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead
|
|||
static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) {
|
||||
SReaderStatus* pStatus = &pReader->status;
|
||||
|
||||
size_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
|
||||
size_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
|
||||
SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx));
|
||||
|
||||
while (1) {
|
||||
|
@ -2111,7 +2112,8 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea
|
|||
}
|
||||
}
|
||||
|
||||
int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMerger* pMerger, STsdbReader* pReader) {
|
||||
int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
|
||||
STsdbReader* pReader) {
|
||||
while (1) {
|
||||
pIter->hasVal = tsdbTbDataIterNext(pIter->iter);
|
||||
if (!pIter->hasVal) {
|
||||
|
@ -2130,7 +2132,19 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMer
|
|||
break;
|
||||
}
|
||||
|
||||
tRowMerge(pMerger, pRow);
|
||||
int32_t sversion = TSDBROW_SVERSION(pRow);
|
||||
STSchema* pTSchema = NULL;
|
||||
if (sversion != pReader->pSchema->version) {
|
||||
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema);
|
||||
} else {
|
||||
pTSchema = pReader->pSchema;
|
||||
}
|
||||
|
||||
tRowMergerAdd(pMerger, pRow, pTSchema);
|
||||
|
||||
if (sversion != pReader->pSchema->version) {
|
||||
taosMemoryFree(pTSchema);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -2227,7 +2241,7 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
|
|||
CHECK_FILEBLOCK_STATE st;
|
||||
|
||||
SFileDataBlockInfo* pFileBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
|
||||
SBlock* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter);
|
||||
SBlock* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter);
|
||||
checkForNeighborFileBlock(pReader, pScanInfo, pCurrentBlock, pFileBlockInfo, pMerger, key, &st);
|
||||
if (st == CHECK_FILEBLOCK_QUIT) {
|
||||
break;
|
||||
|
@ -2254,12 +2268,23 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe
|
|||
SRowMerger merge = {0};
|
||||
|
||||
TSDBKEY k = TSDBROW_KEY(pRow);
|
||||
updateSchema(pRow, uid, pReader);
|
||||
// updateSchema(pRow, uid, pReader);
|
||||
int32_t sversion = TSDBROW_SVERSION(pRow);
|
||||
STSchema* pTSchema = NULL;
|
||||
if (sversion != pReader->pSchema->version) {
|
||||
metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema);
|
||||
} else {
|
||||
pTSchema = pReader->pSchema;
|
||||
}
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
doMergeRowsInBuf(pIter, k.ts, pDelList, &merge, pReader);
|
||||
tRowMergerInit2(&merge, pReader->pSchema, pRow, pTSchema);
|
||||
doMergeRowsInBuf(pIter, uid, k.ts, pDelList, &merge, pReader);
|
||||
tRowMergerGetRow(&merge, pTSRow);
|
||||
tRowMergerClear(&merge);
|
||||
|
||||
if (sversion != pReader->pSchema->version) {
|
||||
taosMemoryFree(pTSchema);
|
||||
}
|
||||
}
|
||||
|
||||
void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
|
||||
|
@ -2273,18 +2298,18 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
|
|||
updateSchema(piRow, pBlockScanInfo->uid, pReader);
|
||||
|
||||
tRowMergerInit(&merge, piRow, pReader->pSchema);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMerge(&merge, pRow);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
} else {
|
||||
updateSchema(pRow, pBlockScanInfo->uid, pReader);
|
||||
|
||||
tRowMergerInit(&merge, pRow, pReader->pSchema);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
|
||||
tRowMerge(&merge, piRow);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
|
||||
}
|
||||
|
||||
tRowMergerGetRow(&merge, pTSRow);
|
||||
|
@ -2522,7 +2547,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
|
|||
if (pCond->type == TIMEWINDOW_RANGE_EXTERNAL) {
|
||||
// update the SQueryTableDataCond to create inner reader
|
||||
STimeWindow w = pCond->twindows;
|
||||
int32_t order = pCond->order;
|
||||
int32_t order = pCond->order;
|
||||
if (order == TSDB_ORDER_ASC) {
|
||||
pCond->twindows.ekey = pCond->twindows.skey;
|
||||
pCond->twindows.skey = INT64_MIN;
|
||||
|
@ -2541,7 +2566,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
|
|||
if (order == TSDB_ORDER_ASC) {
|
||||
pCond->twindows.skey = w.ekey;
|
||||
pCond->twindows.ekey = INT64_MAX;
|
||||
} else {
|
||||
} else {
|
||||
pCond->twindows.skey = INT64_MIN;
|
||||
pCond->twindows.ekey = w.ekey;
|
||||
}
|
||||
|
@ -2589,10 +2614,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
|
|||
}
|
||||
}
|
||||
} else {
|
||||
STsdbReader* pPrevReader = pReader->innerReader[0];
|
||||
STsdbReader* pPrevReader = pReader->innerReader[0];
|
||||
SDataBlockIter* pBlockIter = &pPrevReader->status.blockIter;
|
||||
|
||||
initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order, pPrevReader->idStr);
|
||||
initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order,
|
||||
pPrevReader->idStr);
|
||||
resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap);
|
||||
|
||||
// no data in files, let's try buffer in memory
|
||||
|
@ -2647,12 +2673,14 @@ void tsdbReaderClose(STsdbReader* pReader) {
|
|||
|
||||
SIOCostSummary* pCost = &pReader->cost;
|
||||
|
||||
tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%"PRId64" SMA-time:%.2f ms, "
|
||||
"fileBlocks:%"PRId64", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo "
|
||||
"size:%.2f Kb %s",
|
||||
tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
|
||||
" SMA-time:%.2f ms, "
|
||||
"fileBlocks:%" PRId64
|
||||
", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo "
|
||||
"size:%.2f Kb %s",
|
||||
pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime,
|
||||
pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock,
|
||||
numOfTables * sizeof(STableBlockScanInfo) /1000.0, pReader->idStr);
|
||||
numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr);
|
||||
|
||||
taosMemoryFree(pReader->idStr);
|
||||
taosMemoryFree(pReader->pSchema);
|
||||
|
@ -2731,9 +2759,9 @@ static void setBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) {
|
|||
|
||||
void tsdbRetrieveDataBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) {
|
||||
if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
|
||||
if (pReader->step == EXTERNAL_ROWS_MAIN) {
|
||||
if (pReader->step == EXTERNAL_ROWS_MAIN) {
|
||||
setBlockInfo(pReader, pDataBlockInfo);
|
||||
} else if (pReader->step == EXTERNAL_ROWS_PREV) {
|
||||
} else if (pReader->step == EXTERNAL_ROWS_PREV) {
|
||||
setBlockInfo(pReader->innerReader[0], pDataBlockInfo);
|
||||
} else {
|
||||
setBlockInfo(pReader->innerReader[1], pDataBlockInfo);
|
||||
|
@ -2747,7 +2775,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
|
|||
int32_t code = 0;
|
||||
*allHave = false;
|
||||
|
||||
if(pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
|
||||
if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) {
|
||||
*pBlockStatis = NULL;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -2758,7 +2786,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter);
|
||||
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter);
|
||||
|
||||
SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
|
||||
int64_t stime = taosGetTimestampUs();
|
||||
|
@@ -2863,7 +2891,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
  }

  pReader->order = pCond->order;
  pReader->type = TIMEWINDOW_RANGE_CONTAINED;
  pReader->status.loadFromFile = true;
  pReader->status.pTableIter = NULL;
  pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows);

@@ -2882,7 +2910,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
  resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
  resetDataBlockScanInfo(pReader->status.pTableMap);

  int32_t code = 0;
  SDataBlockIter* pBlockIter = &pReader->status.blockIter;

  // no data in files, let's try buffer in memory

@@ -2891,8 +2919,8 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
  } else {
    code = initForFirstBlockInFile(pReader, pBlockIter);
    if (code != TSDB_CODE_SUCCESS) {
-     tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s",
-               pReader, numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr);
+     tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s", pReader,
+               numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr);
      return code;
    }
  }

@ -21,6 +21,7 @@ struct STsdbSnapReader {
|
|||
int64_t sver;
|
||||
int64_t ever;
|
||||
STsdbFS fs;
|
||||
int8_t type;
|
||||
// for data file
|
||||
int8_t dataDone;
|
||||
int32_t fid;
|
||||
|
@ -62,7 +63,8 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
|
|||
pReader->iBlockIdx = 0;
|
||||
pReader->pBlockIdx = NULL;
|
||||
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb open data file to read, fid:%d", TD_VID(pTsdb->pVnode), pReader->fid);
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path,
|
||||
pReader->fid);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
|
@ -130,7 +132,7 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
|
|||
}
|
||||
|
||||
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
|
||||
pHdr->type = 1;
|
||||
pHdr->type = pReader->type;
|
||||
pHdr->size = size;
|
||||
|
||||
TABLEID* pId = (TABLEID*)(&pHdr[1]);
|
||||
|
@ -139,9 +141,9 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
|
|||
|
||||
tPutBlockData((uint8_t*)(&pId[1]), &pReader->nBlockData);
|
||||
|
||||
tsdbInfo("vgId:%d vnode snapshot read data, fid:%d suid:%" PRId64 " uid:%" PRId64
|
||||
tsdbInfo("vgId:%d vnode snapshot read data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64
|
||||
" iBlock:%d minVersion:%d maxVersion:%d nRow:%d out of %d size:%d",
|
||||
TD_VID(pTsdb->pVnode), pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid,
|
||||
TD_VID(pTsdb->pVnode), pTsdb->path, pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid,
|
||||
pReader->iBlock - 1, pBlock->minVersion, pBlock->maxVersion, pReader->nBlockData.nRow, pBlock->nRow,
|
||||
size);
|
||||
|
||||
|
@ -154,7 +156,8 @@ _exit:
|
|||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb read data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb read data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -212,7 +215,7 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
|
|||
}
|
||||
|
||||
SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData);
|
||||
pHdr->type = 2;
|
||||
pHdr->type = SNAP_DATA_DEL;
|
||||
pHdr->size = size;
|
||||
|
||||
TABLEID* pId = (TABLEID*)(&pHdr[1]);
|
||||
|
@@ -228,8 +231,8 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
      n += tPutDelData((*ppData) + n, pDelData);
    }

-   tsdbInfo("vgId:%d vnode snapshot tsdb read del data, suid:%" PRId64 " uid:%d" PRId64 " size:%d",
-            TD_VID(pTsdb->pVnode), pDelIdx->suid, pDelIdx->uid, size);
+   tsdbInfo("vgId:%d vnode snapshot tsdb read del data for %s, suid:%" PRId64 " uid:%" PRId64 " size:%d",
+            TD_VID(pTsdb->pVnode), pTsdb->path, pDelIdx->suid, pDelIdx->uid, size);

    break;
  }

@@ -238,11 +241,12 @@ _exit:
  return code;

_err:
- tsdbError("vgId:%d vnode snapshot tsdb read del failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb read del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+           tstrerror(code));
  return code;
}

int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapReader** ppReader) {
|
||||
int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader) {
|
||||
int32_t code = 0;
|
||||
STsdbSnapReader* pReader = NULL;
|
||||
|
||||
|
@ -255,6 +259,7 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapRe
|
|||
pReader->pTsdb = pTsdb;
|
||||
pReader->sver = sver;
|
||||
pReader->ever = ever;
|
||||
pReader->type = type;
|
||||
|
||||
code = taosThreadRwlockRdlock(&pTsdb->rwLock);
|
||||
if (code) {
|
||||
|
@ -297,12 +302,13 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapRe
|
|||
goto _err;
|
||||
}
|
||||
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb reader opened", TD_VID(pTsdb->pVnode));
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb reader opened for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
|
||||
*ppReader = pReader;
|
||||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb reader open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
|
||||
tstrerror(code));
|
||||
*ppReader = NULL;
|
||||
return code;
|
||||
}
|
||||
|
@ -327,7 +333,7 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) {
|
|||
|
||||
tsdbFSUnref(pReader->pTsdb, &pReader->fs);
|
||||
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb reader closed", TD_VID(pReader->pTsdb->pVnode));
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb reader closed for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path);
|
||||
|
||||
taosMemoryFree(pReader);
|
||||
*ppReader = NULL;
|
||||
|
@ -368,10 +374,12 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData) {
|
|||
}
|
||||
|
||||
_exit:
|
||||
tsdbDebug("vgId:%d vnode snapshot tsdb read for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb read failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb read for %s failed since %s", TD_VID(pReader->pTsdb->pVnode),
|
||||
pReader->pTsdb->path, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -436,7 +444,8 @@ static int32_t tsdbSnapWriteAppendData(STsdbSnapWriter* pWriter, uint8_t* pData,
|
|||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d tsdb snapshot write append data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
|
||||
tsdbError("vgId:%d tsdb snapshot write append data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
pWriter->pTsdb->path, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -522,9 +531,12 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
|
|||
}
|
||||
|
||||
_exit:
|
||||
tsdbInfo("vgId:%d tsdb snapshot write table data end for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d tsdb snapshot write table data end for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
pWriter->pTsdb->path, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -570,6 +582,8 @@ _exit:
|
|||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d tsdb snapshot move write table data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
pWriter->pTsdb->path, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -708,8 +722,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
|
|||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb write table data impl failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb write table data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
pWriter->pTsdb->path, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -794,11 +808,12 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) {
|
|||
if (code) goto _err;
|
||||
|
||||
_exit:
|
||||
tsdbDebug("vgId:%d vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb write data impl failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb write data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
pWriter->pTsdb->path, tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -833,11 +848,12 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) {
|
|||
}
|
||||
|
||||
_exit:
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb writer data end", TD_VID(pTsdb->pVnode));
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb writer data end for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb writer data end failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb writer data end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@@ -920,12 +936,13 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
  code = tsdbSnapWriteTableData(pWriter, id);
  if (code) goto _err;

- tsdbInfo("vgId:%d vnode snapshot tsdb write data, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d",
-          TD_VID(pTsdb->pVnode), fid, id.suid, id.suid, pBlockData->nRow);
+ tsdbInfo("vgId:%d vnode snapshot tsdb write data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d",
+          TD_VID(pTsdb->pVnode), pTsdb->path, fid, id.suid, id.uid, pBlockData->nRow);
  return code;

_err:
- tsdbError("vgId:%d vnode snapshot tsdb write data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d vnode snapshot tsdb write data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
+           tstrerror(code));
  return code;
}

@ -1015,7 +1032,8 @@ _exit:
|
|||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb write del failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb write del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1056,11 +1074,12 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) {
|
|||
}
|
||||
|
||||
_exit:
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb write del end", TD_VID(pTsdb->pVnode));
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb write del for %s end", TD_VID(pTsdb->pVnode), pTsdb->path);
|
||||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb write del end failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb write del end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
|
||||
tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1127,10 +1146,12 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
|
|||
}
|
||||
|
||||
*ppWriter = pWriter;
|
||||
return code;
|
||||
|
||||
tsdbInfo("vgId:%d tsdb snapshot writer open for %s succeed", TD_VID(pTsdb->pVnode), pTsdb->path);
|
||||
return code;
|
||||
_err:
|
||||
tsdbError("vgId:%d tsdb snapshot writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
|
||||
tsdbError("vgId:%d tsdb snapshot writer open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
|
||||
tstrerror(code));
|
||||
*ppWriter = NULL;
|
||||
return code;
|
||||
}
|
||||
|
@ -1157,14 +1178,16 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
|
|||
if (code) goto _err;
|
||||
}
|
||||
|
||||
tsdbInfo("vgId:%d vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
|
||||
taosMemoryFree(pWriter);
|
||||
*ppWriter = NULL;
|
||||
|
||||
return code;
|
||||
|
||||
_err:
|
||||
tsdbError("vgId:%d vnode snapshot tsdb writer close failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
tstrerror(code));
|
||||
tsdbError("vgId:%d vnode snapshot tsdb writer close for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
|
||||
pWriter->pTsdb->path, tstrerror(code));
|
||||
taosMemoryFree(pWriter);
|
||||
*ppWriter = NULL;
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@@ -1173,7 +1196,7 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
  SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;

  // ts data
- if (pHdr->type == 1) {
+ if (pHdr->type == SNAP_DATA_TSDB) {
    code = tsdbSnapWriteData(pWriter, pData, nData);
    if (code) goto _err;

@@ -1186,15 +1209,17 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
  }

  // del data
- if (pHdr->type == 2) {
+ if (pHdr->type == SNAP_DATA_DEL) {
    code = tsdbSnapWriteDel(pWriter, pData, nData);
    if (code) goto _err;
  }

_exit:
+ tsdbDebug("vgId:%d tsdb snapshot write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
  return code;

_err:
- tsdbError("vgId:%d tsdb snapshow write failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb snapshot write for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path,
+           tstrerror(code));
  return code;
}

@ -13,6 +13,7 @@
|
|||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "tdataformat.h"
|
||||
#include "tsdb.h"
|
||||
|
||||
// SMapData =======================================================================
|
||||
|
@ -548,6 +549,103 @@ SColVal *tRowIterNext(SRowIter *pIter) {
|
|||
}
|
||||
|
||||
// SRowMerger ======================================================
|
||||
|
||||
int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema) {
|
||||
int32_t code = 0;
|
||||
TSDBKEY key = TSDBROW_KEY(pRow);
|
||||
SColVal *pColVal = &(SColVal){0};
|
||||
STColumn *pTColumn;
|
||||
int32_t iCol, jCol = 0;
|
||||
|
||||
pMerger->pTSchema = pResTSchema;
|
||||
pMerger->version = key.version;
|
||||
|
||||
pMerger->pArray = taosArrayInit(pResTSchema->numOfCols, sizeof(SColVal));
|
||||
if (pMerger->pArray == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
// ts
|
||||
pTColumn = &pTSchema->columns[jCol++];
|
||||
|
||||
ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP);
|
||||
|
||||
*pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.ts = key.ts});
|
||||
if (taosArrayPush(pMerger->pArray, pColVal) == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
// other
|
||||
for (iCol = 1; jCol < pTSchema->numOfCols && iCol < pResTSchema->numOfCols; ++iCol) {
|
||||
pTColumn = &pResTSchema->columns[iCol];
|
||||
if (pTSchema->columns[jCol].colId < pTColumn->colId) {
|
||||
++jCol;
|
||||
--iCol;
|
||||
continue;
|
||||
} else if (pTSchema->columns[jCol].colId > pTColumn->colId) {
|
||||
taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type));
|
||||
continue;
|
||||
}
|
||||
|
||||
tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal);
|
||||
if (taosArrayPush(pMerger->pArray, pColVal) == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
}
|
||||
|
||||
for (; iCol < pResTSchema->numOfCols; ++iCol) {
|
||||
pTColumn = &pResTSchema->columns[iCol];
|
||||
taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type));
|
||||
}
|
||||
|
||||
_exit:
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) {
|
||||
int32_t code = 0;
|
||||
TSDBKEY key = TSDBROW_KEY(pRow);
|
||||
SColVal *pColVal = &(SColVal){0};
|
||||
STColumn *pTColumn;
|
||||
int32_t iCol, jCol = 1;
|
||||
|
||||
ASSERT(((SColVal *)pMerger->pArray->pData)->value.ts == key.ts);
|
||||
|
||||
for (iCol = 1; iCol < pMerger->pTSchema->numOfCols && jCol < pTSchema->numOfCols; ++iCol) {
|
||||
pTColumn = &pMerger->pTSchema->columns[iCol];
|
||||
if (pTSchema->columns[jCol].colId < pTColumn->colId) {
|
||||
++jCol;
|
||||
--iCol;
|
||||
continue;
|
||||
} else if (pTSchema->columns[jCol].colId > pTColumn->colId) {
|
||||
continue;
|
||||
}
|
||||
|
||||
tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal);
|
||||
|
||||
if (key.version > pMerger->version) {
|
||||
if (!pColVal->isNone) {
|
||||
taosArraySet(pMerger->pArray, iCol, pColVal);
|
||||
}
|
||||
} else if (key.version < pMerger->version) {
|
||||
SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol);
|
||||
if (tColVal->isNone && !pColVal->isNone) {
|
||||
taosArraySet(pMerger->pArray, iCol, pColVal);
|
||||
}
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
pMerger->version = key.version;
|
||||
|
||||
_exit:
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t tRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) {
|
||||
int32_t code = 0;
|
||||
TSDBKEY key = TSDBROW_KEY(pRow);
|
||||
|
|
|
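A minimal usage sketch of the row-merger API introduced above, merging several versions of a row that share one timestamp; the surrounding names (rows, nRows, pResultSchema, schemaOfRow) are assumptions for illustration and are not part of this commit:

    // Hypothetical driver: rows[0..nRows) share a timestamp but may differ in version/schema.
    SRowMerger merger = {0};
    int32_t    code = tRowMergerInit2(&merger, pResultSchema, &rows[0], schemaOfRow(&rows[0]));
    for (int32_t i = 1; i < nRows && code == TSDB_CODE_SUCCESS; ++i) {
      // Keeps the value from the newest version; older versions only fill columns that are still NONE.
      code = tRowMergerAdd(&merger, &rows[i], schemaOfRow(&rows[i]));
    }
    // merger.pArray now holds one SColVal per column of pResultSchema; emit it, then release the merger.
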
@@ -28,7 +28,8 @@ struct SVSnapReader {
  int8_t tsdbDone;
  STsdbSnapReader *pTsdbReader;
  // rsma
- int8_t rsmaDone[TSDB_RETENTION_L2];
+ int8_t rsmaDone;
+ SRsmaSnapReader *pRsmaReader;
};

int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {

@@ -57,6 +58,10 @@ _err:
int32_t vnodeSnapReaderClose(SVSnapReader *pReader) {
  int32_t code = 0;

+ if (pReader->pRsmaReader) {
+   rsmaSnapReaderClose(&pReader->pRsmaReader);
+ }
+
  if (pReader->pTsdbReader) {
    tsdbSnapReaderClose(&pReader->pTsdbReader);
  }

@ -99,7 +104,7 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
|
|||
if (!pReader->tsdbDone) {
|
||||
// open if not
|
||||
if (pReader->pTsdbReader == NULL) {
|
||||
code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, &pReader->pTsdbReader);
|
||||
code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, SNAP_DATA_TSDB, &pReader->pTsdbReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
|
@ -118,40 +123,26 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData)
|
|||
}
|
||||
|
||||
// RSMA ==============
|
||||
#if 0
|
||||
if (VND_IS_RSMA(pReader->pVnode)) {
|
||||
// RSMA1/RSMA2
|
||||
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
|
||||
if (!pReader->rsmaDone[i]) {
|
||||
if (!pReader->pVnode->pSma->pRSmaTsdb[i]) {
|
||||
// no valid tsdb
|
||||
pReader->rsmaDone[i] = 1;
|
||||
continue;
|
||||
}
|
||||
if (pReader->pTsdbReader == NULL) {
|
||||
code = tsdbSnapReaderOpen(pReader->pVnode->pSma->pRSmaTsdb[i], pReader->sver, pReader->ever,
|
||||
&pReader->pTsdbReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
if (VND_IS_RSMA(pReader->pVnode) && !pReader->rsmaDone) {
|
||||
// open if not
|
||||
if (pReader->pRsmaReader == NULL) {
|
||||
code = rsmaSnapReaderOpen(pReader->pVnode->pSma, pReader->sver, pReader->ever, &pReader->pRsmaReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = tsdbSnapRead(pReader->pTsdbReader, ppData);
|
||||
if (code) {
|
||||
goto _err;
|
||||
} else {
|
||||
if (*ppData) {
|
||||
goto _exit;
|
||||
} else {
|
||||
pReader->tsdbDone = 1;
|
||||
code = tsdbSnapReaderClose(&pReader->pTsdbReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
}
|
||||
code = rsmaSnapRead(pReader->pRsmaReader, ppData);
|
||||
if (code) {
|
||||
goto _err;
|
||||
} else {
|
||||
if (*ppData) {
|
||||
goto _exit;
|
||||
} else {
|
||||
pReader->tsdbDone = 1;
|
||||
code = rsmaSnapReaderClose(&pReader->pRsmaReader);
|
||||
if (code) goto _err;
|
||||
}
|
||||
}
|
||||
// QTaskInfoFile
|
||||
// TODO ...
|
||||
}
|
||||
#endif
|
||||
|
||||
*ppData = NULL;
|
||||
*nData = 0;
|
||||
|
@ -186,6 +177,8 @@ struct SVSnapWriter {
|
|||
SMetaSnapWriter *pMetaSnapWriter;
|
||||
// tsdb
|
||||
STsdbSnapWriter *pTsdbSnapWriter;
|
||||
// rsma
|
||||
SRsmaSnapWriter *pRsmaSnapWriter;
|
||||
};
|
||||
|
||||
int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) {
|
||||
|
@ -235,6 +228,11 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *
|
|||
if (code) goto _err;
|
||||
}
|
||||
|
||||
if (pWriter->pRsmaSnapWriter) {
|
||||
code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
if (!rollback) {
|
||||
SVnodeInfo info = {0};
|
||||
char dir[TSDB_FILENAME_LEN];
|
||||
|
@ -282,28 +280,51 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
|
|||
vInfo("vgId:%d vnode snapshot write data, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode), pHdr->index,
|
||||
pHdr->type, nData);
|
||||
|
||||
if (pHdr->type == 0) {
|
||||
// meta
|
||||
switch (pHdr->type) {
|
||||
case SNAP_DATA_META: {
|
||||
// meta
|
||||
if (pWriter->pMetaSnapWriter == NULL) {
|
||||
code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
if (pWriter->pMetaSnapWriter == NULL) {
|
||||
code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter);
|
||||
code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
|
||||
if (code) goto _err;
|
||||
}
|
||||
} break;
|
||||
case SNAP_DATA_TSDB: {
|
||||
// tsdb
|
||||
if (pWriter->pTsdbSnapWriter == NULL) {
|
||||
code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
|
||||
if (code) goto _err;
|
||||
} else {
|
||||
// tsdb
|
||||
|
||||
if (pWriter->pTsdbSnapWriter == NULL) {
|
||||
code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
|
||||
code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
|
||||
if (code) goto _err;
|
||||
}
|
||||
} break;
|
||||
case SNAP_DATA_RSMA1:
|
||||
case SNAP_DATA_RSMA2: {
|
||||
// rsma1/rsma2
|
||||
if (pWriter->pRsmaSnapWriter == NULL) {
|
||||
code = rsmaSnapWriterOpen(pVnode->pSma, pWriter->sver, pWriter->ever, &pWriter->pRsmaSnapWriter);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData);
|
||||
if (code) goto _err;
|
||||
code = rsmaSnapWrite(pWriter->pRsmaSnapWriter, pData, nData);
|
||||
if (code) goto _err;
|
||||
} break;
|
||||
case SNAP_DATA_QTASK: {
|
||||
// qtask for rsma
|
||||
if (pWriter->pRsmaSnapWriter == NULL) {
|
||||
code = rsmaSnapWriterOpen(pVnode->pSma, pWriter->sver, pWriter->ever, &pWriter->pRsmaSnapWriter);
|
||||
if (code) goto _err;
|
||||
}
|
||||
|
||||
code = rsmaSnapWrite(pWriter->pRsmaSnapWriter, pData, nData);
|
||||
if (code) goto _err;
|
||||
} break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
_exit:
|
||||
return code;
|
||||
|
||||
|
|
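For orientation, the dispatch above assumes every snapshot payload is prefixed by an SSnapDataHdr carrying a type tag that routes it to the matching meta/tsdb/rsma writer. A simplified, illustrative framing (field order and exact types are an assumption, not taken from this diff) looks like:

    // Illustrative only -- the real definition lives in the vnode headers.
    typedef struct {
      int8_t  type;    // SNAP_DATA_META, SNAP_DATA_TSDB, SNAP_DATA_DEL, SNAP_DATA_RSMA1/2, SNAP_DATA_QTASK, ...
      int64_t index;   // replication index, logged by vnodeSnapWrite above
      int64_t size;    // number of payload bytes that follow
      uint8_t data[];  // payload handed to the selected snapshot writer
    } SSnapDataHdrSketch;
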
|
@ -447,6 +447,7 @@ _err:
|
|||
|
||||
static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
|
||||
SDecoder decoder = {0};
|
||||
SEncoder encoder = {0};
|
||||
int32_t rcode = 0;
|
||||
SVCreateTbBatchReq req = {0};
|
||||
SVCreateTbReq *pCreateReq;
|
||||
|
@ -515,7 +516,6 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
|
|||
tdUidStoreFree(pStore);
|
||||
|
||||
// prepare rsp
|
||||
SEncoder encoder = {0};
|
||||
int32_t ret = 0;
|
||||
tEncodeSize(tEncodeSVCreateTbBatchRsp, &rsp, pRsp->contLen, ret);
|
||||
pRsp->pCont = rpcMallocCont(pRsp->contLen);
|
||||
|
|
|
@ -679,6 +679,8 @@ void ctgClearSubTaskRes(SCtgSubRes *pRes);
|
|||
void ctgFreeQNode(SCtgQNode *node);
|
||||
void ctgClearHandle(SCatalog* pCtg);
|
||||
void ctgFreeTbCacheImpl(SCtgTbCache *pCache);
|
||||
int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName);
|
||||
int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup);
|
||||
|
||||
|
||||
extern SCatalogMgmt gCtgMgmt;
|
||||
|
|
|
@ -92,7 +92,7 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx*
|
|||
int32_t code = 0;
|
||||
|
||||
if (!CTG_FLAG_IS_SYS_DB(ctx->flag)) {
|
||||
CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo));
|
||||
CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo));
|
||||
}
|
||||
|
||||
STableMetaOutput moutput = {0};
|
||||
|
@ -337,7 +337,10 @@ int32_t ctgGetTbType(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName,
|
|||
}
|
||||
|
||||
STableMeta* pMeta = NULL;
|
||||
CTG_ERR_RET(catalogGetTableMeta(pCtg, pConn, pTableName, &pMeta));
|
||||
SCtgTbMetaCtx ctx = {0};
|
||||
ctx.pName = (SName*)pTableName;
|
||||
ctx.flag = CTG_FLAG_UNKNOWN_STB;
|
||||
CTG_ERR_RET(ctgGetTbMeta(pCtg, pConn, &ctx, &pMeta));
|
||||
|
||||
*tbType = pMeta->tableType;
|
||||
taosMemoryFree(pMeta);
|
||||
|
@ -391,7 +394,7 @@ int32_t ctgGetTbCfg(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName,
|
|||
CTG_ERR_RET(ctgGetTableCfgFromMnode(pCtg, pConn, pTableName, pCfg, NULL));
|
||||
} else {
|
||||
SVgroupInfo vgroupInfo = {0};
|
||||
CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, pTableName, &vgroupInfo));
|
||||
CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, pTableName, &vgroupInfo));
|
||||
CTG_ERR_RET(ctgGetTableCfgFromVnode(pCtg, pConn, pTableName, &vgroupInfo, pCfg, NULL));
|
||||
}
|
||||
|
||||
|
@ -477,6 +480,57 @@ _return:
|
|||
CTG_RET(code);
|
||||
}
|
||||
|
||||
|
||||
int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) {
|
||||
if (IS_SYS_DBNAME(pTableName->dbname)) {
|
||||
ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
|
||||
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
|
||||
}
|
||||
|
||||
SCtgDBCache* dbCache = NULL;
|
||||
int32_t code = 0;
|
||||
char db[TSDB_DB_FNAME_LEN] = {0};
|
||||
tNameGetFullDbName(pTableName, db);
|
||||
|
||||
SDBVgInfo *vgInfo = NULL;
|
||||
CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo));
|
||||
|
||||
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
|
||||
|
||||
_return:
|
||||
|
||||
if (dbCache) {
|
||||
ctgRUnlockVgInfo(dbCache);
|
||||
ctgReleaseDBCache(pCtg, dbCache);
|
||||
}
|
||||
|
||||
if (vgInfo) {
|
||||
taosHashCleanup(vgInfo->vgHash);
|
||||
taosMemoryFreeClear(vgInfo);
|
||||
}
|
||||
|
||||
CTG_RET(code);
|
||||
}
|
||||
|
||||
int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName) {
|
||||
int32_t code = 0;
|
||||
|
||||
if (NULL == pCtg || NULL == pTableName) {
|
||||
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
|
||||
}
|
||||
|
||||
if (NULL == pCtg->dbCache) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true));
|
||||
|
||||
_return:
|
||||
|
||||
CTG_RET(code);
|
||||
}
|
||||
|
||||
|
||||
int32_t catalogInit(SCatalogCfg* cfg) {
|
||||
if (gCtgMgmt.pCluster) {
|
||||
qError("catalog already initialized");
|
||||
|
@ -772,21 +826,7 @@ _return:
|
|||
int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) {
|
||||
CTG_API_ENTER();
|
||||
|
||||
int32_t code = 0;
|
||||
|
||||
if (NULL == pCtg || NULL == pTableName) {
|
||||
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
|
||||
}
|
||||
|
||||
if (NULL == pCtg->dbCache) {
|
||||
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true));
|
||||
|
||||
_return:
|
||||
|
||||
CTG_API_LEAVE(code);
|
||||
CTG_API_LEAVE(ctgRemoveTbMeta(pCtg, pTableName));
|
||||
}
|
||||
|
||||
int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* stbName, uint64_t suid) {
|
||||
|
@ -878,12 +918,12 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray*
|
|||
case TSDB_CHILD_TABLE: {
|
||||
SName stb = name;
|
||||
strcpy(stb.tname, stbName);
|
||||
catalogRemoveTableMeta(pCtg, &stb);
|
||||
ctgRemoveTbMeta(pCtg, &stb);
|
||||
break;
|
||||
}
|
||||
case TSDB_SUPER_TABLE:
|
||||
case TSDB_NORMAL_TABLE:
|
||||
catalogRemoveTableMeta(pCtg, &name);
|
||||
ctgRemoveTbMeta(pCtg, &name);
|
||||
break;
|
||||
default:
|
||||
ctgError("ignore table type %d", tbType);
|
||||
|
@ -947,34 +987,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const
|
|||
int32_t catalogGetTableHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) {
|
||||
CTG_API_ENTER();
|
||||
|
||||
if (IS_SYS_DBNAME(pTableName->dbname)) {
|
||||
ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname);
|
||||
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
|
||||
}
|
||||
|
||||
SCtgDBCache* dbCache = NULL;
|
||||
int32_t code = 0;
|
||||
char db[TSDB_DB_FNAME_LEN] = {0};
|
||||
tNameGetFullDbName(pTableName, db);
|
||||
|
||||
SDBVgInfo *vgInfo = NULL;
|
||||
CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo));
|
||||
|
||||
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup));
|
||||
|
||||
_return:
|
||||
|
||||
if (dbCache) {
|
||||
ctgRUnlockVgInfo(dbCache);
|
||||
ctgReleaseDBCache(pCtg, dbCache);
|
||||
}
|
||||
|
||||
if (vgInfo) {
|
||||
taosHashCleanup(vgInfo->vgHash);
|
||||
taosMemoryFreeClear(vgInfo);
|
||||
}
|
||||
|
||||
CTG_API_LEAVE(code);
|
||||
CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup));
|
||||
}
|
||||
|
||||
int32_t catalogGetAllMeta(SCatalog* pCtg, SRequestConnInfo *pConn, const SCatalogReq* pReq, SMetaData* pRsp) {
|
||||
|
@ -1200,7 +1213,7 @@ int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo *pConn, const
|
|||
}
|
||||
|
||||
int32_t code = 0;
|
||||
CTG_ERR_JRET(catalogRemoveTableMeta(pCtg, (SName*)pTableName));
|
||||
CTG_ERR_JRET(ctgRemoveTbMeta(pCtg, (SName*)pTableName));
|
||||
|
||||
CTG_ERR_JRET(ctgGetTbCfg(pCtg, pConn, (SName*)pTableName, pCfg));
|
||||
|
||||
|
|
|
@ -398,7 +398,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con
|
|||
|
||||
SName* name = taosHashIterate(pTb, NULL);
|
||||
while (name) {
|
||||
catalogRemoveTableMeta(pCtg, name);
|
||||
ctgRemoveTbMeta(pCtg, name);
|
||||
name = taosHashIterate(pTb, name);
|
||||
}
|
||||
|
||||
|
|
|
@ -517,6 +517,13 @@ static int32_t execAlterLocal(SAlterLocalStmt* pStmt) {
|
|||
goto _return;
|
||||
}
|
||||
|
||||
bool forbidden = false;
|
||||
taosLocalCfgForbiddenToChange(pStmt->config, &forbidden);
|
||||
if (forbidden) {
|
||||
terrno = TSDB_CODE_OPS_NOT_SUPPORT;
|
||||
return terrno;
|
||||
}
|
||||
|
||||
if (cfgSetItem(tsCfg, pStmt->config, pStmt->value, CFG_STYPE_ALTER_CMD)) {
|
||||
return terrno;
|
||||
}
|
||||
|
|
|
@ -855,7 +855,6 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, STimeWin
|
|||
int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, SLoadRemoteDataInfo* pLoadInfo, int32_t numOfRows, char* pData,
|
||||
int32_t compLen, int32_t numOfOutput, int64_t startTs, uint64_t* total,
|
||||
SArray* pColList);
|
||||
STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
|
||||
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
|
||||
|
||||
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
|
||||
|
@ -986,9 +985,8 @@ int32_t decodeOperator(SOperatorInfo* ops, const char* data, int32_t length);
|
|||
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
|
||||
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
|
||||
const char* sql, EOPTR_EXEC_MODEL model);
|
||||
int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
|
||||
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
|
||||
int32_t* resNum);
|
||||
int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
|
||||
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);
|
||||
|
||||
int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result);
|
||||
int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length);
|
||||
|
|
|
@ -48,7 +48,6 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
|
|||
pOperator->status = OP_NOT_OPENED;
|
||||
|
||||
SStreamScanInfo* pInfo = pOperator->info;
|
||||
/*pInfo->assignBlockUid = assignUid;*/
|
||||
|
||||
// TODO: if a block was set but not consumed,
|
||||
// prevent setting a different type of block
|
||||
|
@ -496,11 +495,9 @@ void qDestroyTask(qTaskInfo_t qTaskHandle) {
|
|||
doDestroyTask(pTaskInfo);
|
||||
}
|
||||
|
||||
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, int32_t* resNum, SExplainExecInfo** pRes) {
|
||||
int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList) {
|
||||
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
|
||||
int32_t capacity = 0;
|
||||
|
||||
return getOperatorExplainExecInfo(pTaskInfo->pRoot, pRes, &capacity, resNum);
|
||||
return getOperatorExplainExecInfo(pTaskInfo->pRoot, pExecInfoList);
|
||||
}
|
||||
|
||||
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len) {
|
||||
|
|
|
@ -4613,42 +4613,29 @@ void releaseQueryBuf(size_t numOfTables) {
|
|||
atomic_add_fetch_64(&tsQueryBufferSizeBytes, t);
|
||||
}
|
||||
|
||||
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SExplainExecInfo** pRes, int32_t* capacity,
|
||||
int32_t* resNum) {
|
||||
if (*resNum >= *capacity) {
|
||||
*capacity += 10;
|
||||
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList) {
|
||||
SExplainExecInfo execInfo = {0};
|
||||
SExplainExecInfo* pExplainInfo = taosArrayPush(pExecInfoList, &execInfo);
|
||||
|
||||
*pRes = taosMemoryRealloc(*pRes, (*capacity) * sizeof(SExplainExecInfo));
|
||||
if (NULL == *pRes) {
|
||||
qError("malloc %d failed", (*capacity) * (int32_t)sizeof(SExplainExecInfo));
|
||||
return TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
||||
SExplainExecInfo* pInfo = &(*pRes)[*resNum];
|
||||
|
||||
pInfo->numOfRows = operatorInfo->resultInfo.totalRows;
|
||||
pInfo->startupCost = operatorInfo->cost.openCost;
|
||||
pInfo->totalCost = operatorInfo->cost.totalCost;
|
||||
pExplainInfo->numOfRows = operatorInfo->resultInfo.totalRows;
|
||||
pExplainInfo->startupCost = operatorInfo->cost.openCost;
|
||||
pExplainInfo->totalCost = operatorInfo->cost.totalCost;
|
||||
pExplainInfo->verboseLen = 0;
|
||||
pExplainInfo->verboseInfo = NULL;
|
||||
|
||||
if (operatorInfo->fpSet.getExplainFn) {
|
||||
int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pInfo->verboseInfo, &pInfo->verboseLen);
|
||||
int32_t code = operatorInfo->fpSet.getExplainFn(operatorInfo, &pExplainInfo->verboseInfo, &pExplainInfo->verboseLen);
|
||||
if (code) {
|
||||
qError("%s operator getExplainFn failed, code:%s", GET_TASKID(operatorInfo->pTaskInfo), tstrerror(code));
|
||||
return code;
|
||||
}
|
||||
} else {
|
||||
pInfo->verboseLen = 0;
|
||||
pInfo->verboseInfo = NULL;
|
||||
}
|
||||
|
||||
++(*resNum);
|
||||
|
||||
int32_t code = 0;
|
||||
for (int32_t i = 0; i < operatorInfo->numOfDownstream; ++i) {
|
||||
code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pRes, capacity, resNum);
|
||||
if (code) {
|
||||
taosMemoryFreeClear(*pRes);
|
||||
code = getOperatorExplainExecInfo(operatorInfo->pDownstream[i], pExecInfoList);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
// taosMemoryFreeClear(*pRes);
|
||||
return TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
|
|
|
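A small sketch of the new calling convention for collecting explain info; the caller name and cleanup details are assumptions, only the SArray-based signature comes from the change above:

    // Hypothetical caller of the reworked API.
    SArray* pList = taosArrayInit(4, sizeof(SExplainExecInfo));
    int32_t code = getOperatorExplainExecInfo(pTaskInfo->pRoot, pList);
    if (TSDB_CODE_SUCCESS == code) {
      for (size_t i = 0; i < taosArrayGetSize(pList); ++i) {
        SExplainExecInfo* pInfo = taosArrayGet(pList, i);
        // numOfRows, startupCost and totalCost are filled per operator, parent before its downstreams.
      }
    }
    taosArrayDestroy(pList);  // verboseInfo buffers, when present, need their own release in real code
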
@ -31,14 +31,21 @@ static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity
|
|||
static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
|
||||
uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
|
||||
|
||||
static void freeGroupKey(void* param) {
|
||||
SGroupKeys* pKey = (SGroupKeys*) param;
|
||||
taosMemoryFree(pKey->pData);
|
||||
}
|
||||
|
||||
static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) {
|
||||
SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param;
|
||||
cleanupBasicInfo(&pInfo->binfo);
|
||||
taosMemoryFreeClear(pInfo->keyBuf);
|
||||
taosArrayDestroy(pInfo->pGroupCols);
|
||||
taosArrayDestroy(pInfo->pGroupColVals);
|
||||
taosArrayDestroyEx(pInfo->pGroupColVals, freeGroupKey);
|
||||
cleanupExprSupp(&pInfo->scalarSup);
|
||||
|
||||
|
||||
cleanupGroupResInfo(&pInfo->groupResInfo);
|
||||
cleanupAggSup(&pInfo->aggSup);
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
|
@ -414,8 +421,6 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
|
|||
pOperator->blocking = true;
|
||||
pOperator->status = OP_NOT_OPENED;
|
||||
// pOperator->operatorType = OP_Groupby;
|
||||
pOperator->exprSupp.pExprInfo = pExprInfo;
|
||||
pOperator->exprSupp.numOfExprs = numOfCols;
|
||||
pOperator->info = pInfo;
|
||||
pOperator->pTaskInfo = pTaskInfo;
|
||||
|
||||
|
|
|
@ -481,10 +481,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
|
|||
pBlock->info.groupId = *groupId;
|
||||
}
|
||||
|
||||
if (pTableScanInfo->assignBlockUid) {
|
||||
pBlock->info.groupId = pBlock->info.uid;
|
||||
}
|
||||
|
||||
pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
|
||||
pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
|
||||
|
||||
|
@ -1174,12 +1170,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
|
|||
pInfo->pRes->info.groupId = 0;
|
||||
}
|
||||
|
||||
// for generating rollup SMA result, each time is an independent time serie.
|
||||
// TODO temporarily used, when the statement of "partition by tbname" is ready, remove this
|
||||
if (pInfo->assignBlockUid) {
|
||||
pInfo->pRes->info.groupId = pBlock->info.uid;
|
||||
}
|
||||
|
||||
// todo extract method
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) {
|
||||
SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i);
|
||||
|
|
|
@ -2236,7 +2236,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "interp",
|
||||
.type = FUNCTION_TYPE_INTERP,
|
||||
.classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
|
||||
.classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
|
||||
.translateFunc = translateFirstLast,
|
||||
.getEnvFunc = getSelectivityFuncEnv,
|
||||
.initFunc = functionSetup,
|
||||
|
@ -2246,7 +2246,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "derivative",
|
||||
.type = FUNCTION_TYPE_DERIVATIVE,
|
||||
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_CUMULATIVE_FUNC,
|
||||
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
|
||||
.translateFunc = translateDerivative,
|
||||
.getEnvFunc = getDerivativeFuncEnv,
|
||||
.initFunc = derivativeFuncSetup,
|
||||
|
@ -2280,7 +2280,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "_cache_last_row",
|
||||
.type = FUNCTION_TYPE_CACHE_LAST_ROW,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
|
||||
.translateFunc = translateFirstLast,
|
||||
.getEnvFunc = getFirstLastFuncEnv,
|
||||
.initFunc = functionSetup,
|
||||
|
@ -2375,7 +2375,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "histogram",
|
||||
.type = FUNCTION_TYPE_HISTOGRAM,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
|
||||
.translateFunc = translateHistogram,
|
||||
.getEnvFunc = getHistogramFuncEnv,
|
||||
.initFunc = histogramFunctionSetup,
|
||||
|
@ -2414,7 +2414,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "hyperloglog",
|
||||
.type = FUNCTION_TYPE_HYPERLOGLOG,
|
||||
.classification = FUNC_MGT_AGG_FUNC,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
|
||||
.translateFunc = translateHLL,
|
||||
.getEnvFunc = getHLLFuncEnv,
|
||||
.initFunc = functionSetup,
|
||||
|
@ -2428,7 +2428,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
},
|
||||
{
|
||||
.name = "_hyperloglog_partial",
|
||||
.type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL,
|
||||
.type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL | FUNC_MGT_TIMELINE_FUNC,
|
||||
.classification = FUNC_MGT_AGG_FUNC,
|
||||
.translateFunc = translateHLLPartial,
|
||||
.getEnvFunc = getHLLFuncEnv,
|
||||
|
@ -2440,7 +2440,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
},
|
||||
{
|
||||
.name = "_hyperloglog_merge",
|
||||
.type = FUNCTION_TYPE_HYPERLOGLOG_MERGE,
|
||||
.type = FUNCTION_TYPE_HYPERLOGLOG_MERGE | FUNC_MGT_TIMELINE_FUNC,
|
||||
.classification = FUNC_MGT_AGG_FUNC,
|
||||
.translateFunc = translateHLLMerge,
|
||||
.getEnvFunc = getHLLFuncEnv,
|
||||
|
@ -2460,7 +2460,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
.processFunc = diffFunction,
|
||||
.sprocessFunc = diffScalarFunction,
|
||||
.finalizeFunc = functionFinalize,
|
||||
.estimateReturnRowsFunc = diffEstReturnRows
|
||||
.estimateReturnRowsFunc = diffEstReturnRows,
|
||||
},
|
||||
{
|
||||
.name = "statecount",
|
||||
|
@ -2521,8 +2521,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "tail",
|
||||
.type = FUNCTION_TYPE_TAIL,
|
||||
.classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC |
|
||||
FUNC_MGT_IMPLICIT_TS_FUNC,
|
||||
.classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC |
|
||||
FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
|
||||
.translateFunc = translateTail,
|
||||
.getEnvFunc = getTailFuncEnv,
|
||||
.initFunc = tailFunctionSetup,
|
||||
|
|
|
@ -2684,6 +2684,7 @@ static int32_t jsonToDataType(const SJson* pJson, void* pObj) {
|
|||
|
||||
static const char* jkExprDataType = "DataType";
|
||||
static const char* jkExprAliasName = "AliasName";
|
||||
static const char* jkExprUserAlias = "UserAlias";
|
||||
|
||||
static int32_t exprNodeToJson(const void* pObj, SJson* pJson) {
|
||||
const SExprNode* pNode = (const SExprNode*)pObj;
|
||||
|
@ -2692,6 +2693,9 @@ static int32_t exprNodeToJson(const void* pObj, SJson* pJson) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddStringToObject(pJson, jkExprAliasName, pNode->aliasName);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonAddStringToObject(pJson, jkExprUserAlias, pNode->userAlias);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
@ -2703,6 +2707,9 @@ static int32_t jsonToExprNode(const SJson* pJson, void* pObj) {
|
|||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetStringValue(pJson, jkExprAliasName, pNode->aliasName);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = tjsonGetStringValue(pJson, jkExprUserAlias, pNode->userAlias);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -1502,7 +1502,7 @@ static EDealRes collectColumns(SNode* pNode, void* pContext) {
|
|||
SCollectColumnsCxt* pCxt = (SCollectColumnsCxt*)pContext;
|
||||
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
|
||||
SColumnNode* pCol = (SColumnNode*)pNode;
|
||||
if (isCollectType(pCxt->collectType, pCol->colType) &&
|
||||
if (isCollectType(pCxt->collectType, pCol->colType) && 0 != strcmp(pCol->colName, "*") &&
|
||||
(NULL == pCxt->pTableAlias || 0 == strcmp(pCxt->pTableAlias, pCol->tableAlias))) {
|
||||
return doCollect(pCxt, pCol, pNode);
|
||||
}
|
||||
|
@ -1816,187 +1816,3 @@ int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc) {
|
|||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
typedef struct SClassifyConditionCxt {
|
||||
bool hasPrimaryKey;
|
||||
bool hasTagIndexCol;
|
||||
bool hasTagCol;
|
||||
bool hasOtherCol;
|
||||
} SClassifyConditionCxt;
|
||||
|
||||
static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) {
|
||||
SClassifyConditionCxt* pCxt = (SClassifyConditionCxt*)pContext;
|
||||
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
|
||||
SColumnNode* pCol = (SColumnNode*)pNode;
|
||||
if (PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && TSDB_SYSTEM_TABLE != pCol->tableType) {
|
||||
pCxt->hasPrimaryKey = true;
|
||||
} else if (pCol->hasIndex) {
|
||||
pCxt->hasTagIndexCol = true;
|
||||
pCxt->hasTagCol = true;
|
||||
} else if (COLUMN_TYPE_TAG == pCol->colType || COLUMN_TYPE_TBNAME == pCol->colType) {
|
||||
pCxt->hasTagCol = true;
|
||||
} else {
|
||||
pCxt->hasOtherCol = true;
|
||||
}
|
||||
}
|
||||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
||||
typedef enum EConditionType {
|
||||
COND_TYPE_PRIMARY_KEY = 1,
|
||||
COND_TYPE_TAG_INDEX,
|
||||
COND_TYPE_TAG,
|
||||
COND_TYPE_NORMAL
|
||||
} EConditionType;
|
||||
|
||||
static EConditionType classifyCondition(SNode* pNode) {
|
||||
SClassifyConditionCxt cxt = {.hasPrimaryKey = false, .hasTagIndexCol = false, .hasOtherCol = false};
|
||||
nodesWalkExpr(pNode, classifyConditionImpl, &cxt);
|
||||
return cxt.hasOtherCol ? COND_TYPE_NORMAL
|
||||
: (cxt.hasPrimaryKey && cxt.hasTagCol
|
||||
? COND_TYPE_NORMAL
|
||||
: (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY
|
||||
: (cxt.hasTagIndexCol ? COND_TYPE_TAG_INDEX : COND_TYPE_TAG)));
|
||||
}
|
||||
|
||||
static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
|
||||
SNode** pOtherCond) {
|
||||
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*pCondition);
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
SNodeList* pPrimaryKeyConds = NULL;
|
||||
SNodeList* pTagIndexConds = NULL;
|
||||
SNodeList* pTagConds = NULL;
|
||||
SNodeList* pOtherConds = NULL;
|
||||
SNode* pCond = NULL;
|
||||
FOREACH(pCond, pLogicCond->pParameterList) {
|
||||
switch (classifyCondition(pCond)) {
|
||||
case COND_TYPE_PRIMARY_KEY:
|
||||
if (NULL != pPrimaryKeyCond) {
|
||||
code = nodesListMakeAppend(&pPrimaryKeyConds, nodesCloneNode(pCond));
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_TAG_INDEX:
|
||||
if (NULL != pTagIndexCond) {
|
||||
code = nodesListMakeAppend(&pTagIndexConds, nodesCloneNode(pCond));
|
||||
}
|
||||
if (NULL != pTagCond) {
|
||||
code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond));
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_TAG:
|
||||
if (NULL != pTagCond) {
|
||||
code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond));
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_NORMAL:
|
||||
default:
|
||||
if (NULL != pOtherCond) {
|
||||
code = nodesListMakeAppend(&pOtherConds, nodesCloneNode(pCond));
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
SNode* pTempPrimaryKeyCond = NULL;
|
||||
SNode* pTempTagIndexCond = NULL;
|
||||
SNode* pTempTagCond = NULL;
|
||||
SNode* pTempOtherCond = NULL;
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesMergeConds(&pTempTagIndexCond, &pTagIndexConds);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesMergeConds(&pTempTagCond, &pTagConds);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesMergeConds(&pTempOtherCond, &pOtherConds);
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
if (NULL != pPrimaryKeyCond) {
|
||||
*pPrimaryKeyCond = pTempPrimaryKeyCond;
|
||||
}
|
||||
if (NULL != pTagIndexCond) {
|
||||
*pTagIndexCond = pTempTagIndexCond;
|
||||
}
|
||||
if (NULL != pTagCond) {
|
||||
*pTagCond = pTempTagCond;
|
||||
}
|
||||
if (NULL != pOtherCond) {
|
||||
*pOtherCond = pTempOtherCond;
|
||||
}
|
||||
nodesDestroyNode(*pCondition);
|
||||
*pCondition = NULL;
|
||||
} else {
|
||||
nodesDestroyList(pPrimaryKeyConds);
|
||||
nodesDestroyList(pTagIndexConds);
|
||||
nodesDestroyList(pTagConds);
|
||||
nodesDestroyList(pOtherConds);
|
||||
nodesDestroyNode(pTempPrimaryKeyCond);
|
||||
nodesDestroyNode(pTempTagIndexCond);
|
||||
nodesDestroyNode(pTempTagCond);
|
||||
nodesDestroyNode(pTempOtherCond);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
|
||||
SNode** pOtherCond) {
|
||||
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCondition) &&
|
||||
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCondition)->condType) {
|
||||
return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagIndexCond, pTagCond, pOtherCond);
|
||||
}
|
||||
|
||||
bool needOutput = false;
|
||||
switch (classifyCondition(*pCondition)) {
|
||||
case COND_TYPE_PRIMARY_KEY:
|
||||
if (NULL != pPrimaryKeyCond) {
|
||||
*pPrimaryKeyCond = *pCondition;
|
||||
needOutput = true;
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_TAG_INDEX:
|
||||
if (NULL != pTagIndexCond) {
|
||||
*pTagIndexCond = *pCondition;
|
||||
needOutput = true;
|
||||
}
|
||||
if (NULL != pTagCond) {
|
||||
SNode* pTempCond = *pCondition;
|
||||
if (NULL != pTagIndexCond) {
|
||||
pTempCond = nodesCloneNode(*pCondition);
|
||||
if (NULL == pTempCond) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
*pTagCond = pTempCond;
|
||||
needOutput = true;
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_TAG:
|
||||
if (NULL != pTagCond) {
|
||||
*pTagCond = *pCondition;
|
||||
needOutput = true;
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_NORMAL:
|
||||
default:
|
||||
if (NULL != pOtherCond) {
|
||||
*pOtherCond = *pCondition;
|
||||
needOutput = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (needOutput) {
|
||||
*pCondition = NULL;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
@ -233,18 +233,22 @@ SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const
|
|||
SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
SRawExprNode* pRawExpr = (SRawExprNode*)pNode;
|
||||
SNode* pExpr = pRawExpr->pNode;
|
||||
if (nodesIsExprNode(pExpr)) {
|
||||
SNode* pRealizedExpr = pRawExpr->pNode;
|
||||
if (nodesIsExprNode(pRealizedExpr)) {
|
||||
SExprNode* pExpr = (SExprNode*)pRealizedExpr;
|
||||
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
|
||||
strcpy(((SExprNode*)pExpr)->aliasName, ((SColumnNode*)pExpr)->colName);
|
||||
strcpy(pExpr->aliasName, ((SColumnNode*)pExpr)->colName);
|
||||
strcpy(pExpr->userAlias, ((SColumnNode*)pExpr)->colName);
|
||||
} else {
|
||||
int32_t len = TMIN(sizeof(((SExprNode*)pExpr)->aliasName) - 1, pRawExpr->n);
|
||||
strncpy(((SExprNode*)pExpr)->aliasName, pRawExpr->p, len);
|
||||
((SExprNode*)pExpr)->aliasName[len] = '\0';
|
||||
int32_t len = TMIN(sizeof(pExpr->aliasName) - 1, pRawExpr->n);
|
||||
strncpy(pExpr->aliasName, pRawExpr->p, len);
|
||||
pExpr->aliasName[len] = '\0';
|
||||
strncpy(pExpr->userAlias, pRawExpr->p, len);
|
||||
pExpr->userAlias[len] = '\0';
|
||||
}
|
||||
}
|
||||
taosMemoryFreeClear(pNode);
|
||||
return pExpr;
|
||||
return pRealizedExpr;
|
||||
}
|
||||
|
||||
SToken getTokenFromRawExprNode(SAstCreateContext* pCxt, SNode* pNode) {
|
||||
|
@ -641,11 +645,12 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd
|
|||
SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, SToken* pAlias) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
trimEscape(pAlias);
|
||||
int32_t len = TMIN(sizeof(((SExprNode*)pNode)->aliasName) - 1, pAlias->n);
|
||||
strncpy(((SExprNode*)pNode)->aliasName, pAlias->z, len);
|
||||
((SExprNode*)pNode)->aliasName[len] = '\0';
|
||||
strncpy(((SExprNode*)pNode)->userAlias, pAlias->z, len);
|
||||
((SExprNode*)pNode)->userAlias[len] = '\0';
|
||||
SExprNode* pExpr = (SExprNode*)pNode;
|
||||
int32_t len = TMIN(sizeof(pExpr->aliasName) - 1, pAlias->n);
|
||||
strncpy(pExpr->aliasName, pAlias->z, len);
|
||||
pExpr->aliasName[len] = '\0';
|
||||
strncpy(pExpr->userAlias, pAlias->z, len);
|
||||
pExpr->userAlias[len] = '\0';
|
||||
return pNode;
|
||||
}
|
||||
|
||||
|
@ -766,13 +771,21 @@ SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pPr
|
|||
return (SNode*)select;
|
||||
}
|
||||
|
||||
static void setSubquery(SNode* pStmt) {
|
||||
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
|
||||
((SSelectStmt*)pStmt)->isSubquery = true;
|
||||
}
|
||||
}
|
||||
|
||||
SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode* pLeft, SNode* pRight) {
|
||||
CHECK_PARSER_STATUS(pCxt);
|
||||
SSetOperator* setOp = (SSetOperator*)nodesMakeNode(QUERY_NODE_SET_OPERATOR);
|
||||
CHECK_OUT_OF_MEM(setOp);
|
||||
setOp->opType = type;
|
||||
setOp->pLeft = pLeft;
|
||||
setSubquery(setOp->pLeft);
|
||||
setOp->pRight = pRight;
|
||||
setSubquery(setOp->pRight);
|
||||
sprintf(setOp->stmtName, "%p", setOp);
|
||||
return (SNode*)setOp;
|
||||
}
|
||||
|
|
|
@ -523,6 +523,9 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p
|
|||
if ('\0' == pCol->node.aliasName[0]) {
|
||||
strcpy(pCol->node.aliasName, pColSchema->name);
|
||||
}
|
||||
if ('\0' == pCol->node.userAlias[0]) {
|
||||
strcpy(pCol->node.userAlias, pColSchema->name);
|
||||
}
|
||||
pCol->tableId = pTable->pMeta->uid;
|
||||
pCol->tableType = pTable->pMeta->tableType;
|
||||
pCol->colId = pColSchema->colId;
|
||||
|
@ -549,6 +552,9 @@ static void setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColum
|
|||
if ('\0' == pCol->node.aliasName[0]) {
|
||||
strcpy(pCol->node.aliasName, pCol->colName);
|
||||
}
|
||||
if ('\0' == pCol->node.userAlias[0]) {
|
||||
strcpy(pCol->node.userAlias, pCol->colName);
|
||||
}
|
||||
pCol->node.resType = pExpr->resType;
|
||||
}
|
||||
|
||||
|
@@ -691,7 +697,7 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p
  SNode* pNode;
  FOREACH(pNode, pProjectionList) {
    SExprNode* pExpr = (SExprNode*)pNode;
-   if (0 == strcmp((*pCol)->colName, pExpr->aliasName)) {
+   if (0 == strcmp((*pCol)->colName, pExpr->userAlias)) {
      SColumnRefNode* pColRef = (SColumnRefNode*)nodesMakeNode(QUERY_NODE_COLUMN_REF);
      if (NULL == pColRef) {
        pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;

@ -1535,6 +1541,7 @@ static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode
|
|||
}
|
||||
strcpy(pFunc->functionName, "_select_value");
|
||||
strcpy(pFunc->node.aliasName, ((SExprNode*)*pNode)->aliasName);
|
||||
strcpy(pFunc->node.userAlias, ((SExprNode*)*pNode)->userAlias);
|
||||
pCxt->errCode = nodesListMakeAppend(&pFunc->pParameterList, *pNode);
|
||||
if (TSDB_CODE_SUCCESS == pCxt->errCode) {
|
||||
pCxt->errCode = getFuncInfo(pCxt, pFunc);
|
||||
|
@ -2171,12 +2178,51 @@ static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t rewriteProjectAlias(SNodeList* pProjectionList) {
|
||||
int32_t no = 1;
|
||||
SNode* pProject = NULL;
|
||||
FOREACH(pProject, pProjectionList) {
|
||||
SExprNode* pExpr = (SExprNode*)pProject;
|
||||
if ('\0' == pExpr->userAlias[0]) {
|
||||
strcpy(pExpr->userAlias, pExpr->aliasName);
|
||||
}
|
||||
sprintf(pExpr->aliasName, "#expr_%d", no++);
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t checkProjectAlias(STranslateContext* pCxt, SNodeList* pProjectionList) {
|
||||
SHashObj* pUserAliasSet = taosHashInit(LIST_LENGTH(pProjectionList),
|
||||
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
|
||||
SNode* pProject = NULL;
|
||||
FOREACH(pProject, pProjectionList) {
|
||||
SExprNode* pExpr = (SExprNode*)pProject;
|
||||
if (NULL != taosHashGet(pUserAliasSet, pExpr->userAlias, strlen(pExpr->userAlias))) {
|
||||
taosHashCleanup(pUserAliasSet);
|
||||
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pExpr->userAlias);
|
||||
}
|
||||
taosHashPut(pUserAliasSet, pExpr->userAlias, strlen(pExpr->userAlias), &pExpr, POINTER_BYTES);
|
||||
}
|
||||
taosHashCleanup(pUserAliasSet);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t translateProjectionList(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||
if (pSelect->isSubquery) {
|
||||
return checkProjectAlias(pCxt, pSelect->pProjectionList);
|
||||
}
|
||||
return rewriteProjectAlias(pSelect->pProjectionList);
|
||||
}
|
||||
|
||||
static int32_t translateSelectList(STranslateContext* pCxt, SSelectStmt* pSelect) {
|
||||
pCxt->currClause = SQL_CLAUSE_SELECT;
|
||||
int32_t code = translateExprList(pCxt, pSelect->pProjectionList);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = translateStar(pCxt, pSelect);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = translateProjectionList(pCxt, pSelect);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList);
|
||||
}
|
||||
|
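The new translateProjectionList above chooses between two alias policies: subqueries only get a duplicate-alias check (checkProjectAlias, backed by a hash set), while top-level queries get their internal alias rewritten to a unique "#expr_N" string with the user-visible alias preserved (rewriteProjectAlias). A standalone sketch of both policies follows, assuming simplified types and a plain linear scan instead of the hash table; all names here are illustrative only.

/*
 * Standalone sketch (illustrative only) of the two alias policies shown above:
 * top-level projections get an internal "#expr_N" alias while the user-visible
 * alias is preserved, and subquery projections are checked for duplicate user
 * aliases. The real code uses a hash table; a linear scan is enough here.
 */
#include <stdio.h>
#include <string.h>

enum { NAME_LEN = 32 };

typedef struct { char aliasName[NAME_LEN]; char userAlias[NAME_LEN]; } SExprSketch;

static void rewriteProjectAlias(SExprSketch *p, int n) {
  for (int i = 0; i < n; ++i) {
    if (p[i].userAlias[0] == '\0') strcpy(p[i].userAlias, p[i].aliasName);
    snprintf(p[i].aliasName, NAME_LEN, "#expr_%d", i + 1);  /* internal, unique */
  }
}

static int checkProjectAlias(const SExprSketch *p, int n) {
  for (int i = 0; i < n; ++i)
    for (int j = i + 1; j < n; ++j)
      if (strcmp(p[i].userAlias, p[j].userAlias) == 0) return -1;  /* ambiguous column */
  return 0;
}

int main(void) {
  SExprSketch proj[2] = {{"c1", "speed"}, {"c2", "speed"}};
  printf("subquery duplicate check: %d\n", checkProjectAlias(proj, 2));  /* -1: same user alias twice */
  rewriteProjectAlias(proj, 2);
  printf("top-level aliases: %s / %s\n", proj[0].aliasName, proj[1].aliasName);
  return 0;
}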
@ -2232,7 +2278,7 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi
|
|||
}
|
||||
|
||||
SNode* pPrimaryKeyCond = NULL;
|
||||
nodesPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL, NULL);
|
||||
filterPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL, NULL);
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
if (NULL != pPrimaryKeyCond) {
|
||||
|
@ -2699,6 +2745,7 @@ static SNode* createSetOperProject(const char* pTableAlias, SNode* pNode) {
|
|||
strcpy(pCol->tableAlias, pTableAlias);
|
||||
strcpy(pCol->colName, ((SExprNode*)pNode)->aliasName);
|
||||
strcpy(pCol->node.aliasName, pCol->colName);
|
||||
strcpy(pCol->node.userAlias, ((SExprNode*)pNode)->userAlias);
|
||||
return (SNode*)pCol;
|
||||
}
|
||||
|
||||
|
@ -2810,7 +2857,7 @@ static int32_t partitionDeleteWhere(STranslateContext* pCxt, SDeleteStmt* pDelet
|
|||
|
||||
SNode* pPrimaryKeyCond = NULL;
|
||||
SNode* pOtherCond = NULL;
|
||||
int32_t code = nodesPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, NULL, &pDelete->pTagCond, &pOtherCond);
|
||||
int32_t code = filterPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, NULL, &pDelete->pTagCond, &pOtherCond);
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pOtherCond) {
|
||||
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DELETE_WHERE);
|
||||
}
|
||||
|
@ -4983,7 +5030,7 @@ static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode) {
|
|||
SNode* pCurrStmt = pCxt->pCurrStmt;
|
||||
int32_t currLevel = pCxt->currLevel;
|
||||
pCxt->currLevel = ++(pCxt->levelNo);
|
||||
int32_t code = translateQuery(pCxt, pNode);
|
||||
int32_t code = translateQuery(pCxt, pNode);
|
||||
pCxt->currClause = currClause;
|
||||
pCxt->pCurrStmt = pCurrStmt;
|
||||
pCxt->currLevel = currLevel;
|
||||
|
|
|
@ -70,6 +70,8 @@ TEST_F(ParserSelectTest, condition) {
|
|||
run("SELECT c1 FROM t1 WHERE NOT ts in (true, false)");
|
||||
|
||||
run("SELECT * FROM t1 WHERE c1 > 10 and c1 is not null");
|
||||
|
||||
run("SELECT * FROM t1 WHERE TBNAME like 'fda%' or TS > '2021-05-05 18:19:01.000'");
|
||||
}
|
||||
|
||||
TEST_F(ParserSelectTest, pseudoColumn) {
|
||||
|
|
|
@ -436,7 +436,7 @@ static int32_t pushDownCondOptDealScan(SOptimizeContext* pCxt, SScanLogicNode* p
|
|||
|
||||
SNode* pPrimaryKeyCond = NULL;
|
||||
SNode* pOtherCond = NULL;
|
||||
int32_t code = nodesPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pScan->pTagIndexCond, &pScan->pTagCond,
|
||||
int32_t code = filterPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pScan->pTagIndexCond, &pScan->pTagCond,
|
||||
&pOtherCond);
|
||||
if (TSDB_CODE_SUCCESS == code && NULL != pScan->pTagCond) {
|
||||
code = pushDownCondOptRebuildTbanme(&pScan->pTagCond);
|
||||
|
|
|
@ -248,12 +248,25 @@ static bool stbSplNeedSplitWindow(bool streamQuery, SLogicNode* pNode) {
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool stbSplNeedSplitJoin(bool streamQuery, SJoinLogicNode* pJoin) {
|
||||
if (pJoin->isSingleTableJoin) {
|
||||
return false;
|
||||
}
|
||||
SNode* pChild = NULL;
|
||||
FOREACH(pChild, pJoin->node.pChildren) {
|
||||
if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild) && QUERY_NODE_LOGIC_PLAN_JOIN != nodeType(pChild)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) {
|
||||
switch (nodeType(pNode)) {
|
||||
case QUERY_NODE_LOGIC_PLAN_SCAN:
|
||||
return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode);
|
||||
case QUERY_NODE_LOGIC_PLAN_JOIN:
|
||||
return !(((SJoinLogicNode*)pNode)->isSingleTableJoin);
|
||||
return stbSplNeedSplitJoin(streamQuery, (SJoinLogicNode*)pNode);
|
||||
case QUERY_NODE_LOGIC_PLAN_PARTITION:
|
||||
return stbSplIsMultiTbScanChild(streamQuery, pNode);
|
||||
case QUERY_NODE_LOGIC_PLAN_AGG:
|
||||
|
@ -763,6 +776,8 @@ static SNode* stbSplCreateColumnNode(SExprNode* pExpr) {
|
|||
return NULL;
|
||||
}
|
||||
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
|
||||
strcpy(pCol->dbName, ((SColumnNode*)pExpr)->dbName);
|
||||
strcpy(pCol->tableName, ((SColumnNode*)pExpr)->tableName);
|
||||
strcpy(pCol->tableAlias, ((SColumnNode*)pExpr)->tableAlias);
|
||||
}
|
||||
strcpy(pCol->colName, pExpr->aliasName);
|
||||
|
|
|
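stbSplNeedSplitJoin above restricts splitting to joins that are not single-table joins and whose children are all scans or joins. A self-contained sketch of that check over a toy plan tree follows; the plan node types and names here are assumptions for illustration, not the planner's real structures.

/*
 * Minimal sketch of the join-split check introduced above, outside of the real
 * planner types: a join is considered splittable only when it is not a
 * single-table join and every child is either a scan or another join.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { PLAN_SCAN, PLAN_JOIN, PLAN_AGG } EPlanType;

typedef struct SPlanSketch {
  EPlanType type;
  bool      singleTableJoin;            /* only meaningful for PLAN_JOIN */
  int       numOfChildren;
  struct SPlanSketch *children[2];
} SPlanSketch;

static bool needSplitJoin(const SPlanSketch *pJoin) {
  if (pJoin->singleTableJoin) return false;
  for (int i = 0; i < pJoin->numOfChildren; ++i) {
    EPlanType t = pJoin->children[i]->type;
    if (t != PLAN_SCAN && t != PLAN_JOIN) return false;   /* e.g. an AGG child blocks the split */
  }
  return true;
}

int main(void) {
  SPlanSketch scan = {.type = PLAN_SCAN}, agg = {.type = PLAN_AGG};
  SPlanSketch joinOk  = {.type = PLAN_JOIN, .numOfChildren = 2, .children = {&scan, &scan}};
  SPlanSketch joinBad = {.type = PLAN_JOIN, .numOfChildren = 2, .children = {&scan, &agg}};
  printf("scan-scan join split=%d, scan-agg join split=%d\n",
         needSplitJoin(&joinOk), needSplitJoin(&joinBad));
  return 0;
}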
@ -39,6 +39,8 @@ TEST_F(PlanOrderByTest, expr) {
|
|||
useDb("root", "test");
|
||||
|
||||
run("SELECT * FROM t1 ORDER BY c1 + 10, c2");
|
||||
|
||||
run("SELECT c1 FROM st1 ORDER BY ts, _C0");
|
||||
}
|
||||
|
||||
TEST_F(PlanOrderByTest, nullsOrder) {
|
||||
|
|
|
@ -73,3 +73,9 @@ TEST_F(PlanSubqeuryTest, outerInterval) {
|
|||
|
||||
run("SELECT COUNT(*) FROM (SELECT ts, TOP(c1, 10) FROM st1s1) INTERVAL(5s)");
|
||||
}
|
||||
|
||||
TEST_F(PlanSubqeuryTest, outerPartition) {
|
||||
useDb("root", "test");
|
||||
|
||||
run("SELECT c1, COUNT(*) FROM (SELECT ts, c1 FROM st1) PARTITION BY c1");
|
||||
}
|
||||
|
|
|
@ -39,7 +39,7 @@ int32_t qwBuildAndSendFetchRsp(int32_t rspType, SRpcHandleInfo *pConn, SRetrieve
|
|||
void qwBuildFetchRsp(void *msg, SOutputData *input, int32_t len, bool qComplete);
|
||||
int32_t qwBuildAndSendCQueryMsg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
|
||||
int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code, SQWTaskCtx *ctx);
|
||||
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num);
|
||||
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SArray* pExecList);
|
||||
int32_t qwBuildAndSendErrorRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code);
|
||||
void qwFreeFetchRsp(void *msg);
|
||||
int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp);
|
||||
|
|
|
@ -82,8 +82,9 @@ int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t c
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execInfo, int32_t num) {
|
||||
SExplainRsp rsp = {.numOfPlans = num, .subplanInfo = execInfo};
|
||||
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SArray* pExecList) {
|
||||
SExplainExecInfo* pInfo = taosArrayGet(pExecList, 0);
|
||||
SExplainRsp rsp = {.numOfPlans = taosArrayGetSize(pExecList), .subplanInfo = pInfo};
|
||||
|
||||
int32_t contLen = tSerializeSExplainRsp(NULL, 0, &rsp);
|
||||
void * pRsp = rpcMallocCont(contLen);
|
||||
|
@ -96,10 +97,9 @@ int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SExplainExecInfo *execIn
|
|||
.code = 0,
|
||||
.info = *pConn,
|
||||
};
|
||||
|
||||
rpcRsp.info.ahandle = NULL;
|
||||
|
||||
tmsgSendRsp(&rpcRsp);
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
|
@ -44,18 +44,24 @@ int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *re
|
|||
QW_RET(TSDB_CODE_SUCCESS);
|
||||
}
|
||||
|
||||
static void freeItem(void* param) {
|
||||
SExplainExecInfo* pInfo = param;
|
||||
taosMemoryFree(pInfo->verboseInfo);
|
||||
}
|
||||
|
||||
int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
|
||||
qTaskInfo_t taskHandle = ctx->taskHandle;
|
||||
|
||||
if (TASK_TYPE_TEMP == ctx->taskType && taskHandle) {
|
||||
if (ctx->explain) {
|
||||
SExplainExecInfo *execInfo = NULL;
|
||||
int32_t resNum = 0;
|
||||
QW_ERR_RET(qGetExplainExecInfo(taskHandle, &resNum, &execInfo));
|
||||
SArray* execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo));
|
||||
QW_ERR_RET(qGetExplainExecInfo(taskHandle, execInfoList));
|
||||
|
||||
SRpcHandleInfo connInfo = ctx->ctrlConnInfo;
|
||||
connInfo.ahandle = NULL;
|
||||
QW_ERR_RET(qwBuildAndSendExplainRsp(&connInfo, execInfo, resNum));
|
||||
int32_t code = qwBuildAndSendExplainRsp(&connInfo, execInfoList);
|
||||
taosArrayDestroyEx(execInfoList, freeItem);
|
||||
QW_ERR_RET(code);
|
||||
}
|
||||
|
||||
if (!ctx->needFetch) {
|
||||
|
|
|
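The explain path above switches from passing a raw (pointer, count) pair to collecting SExplainExecInfo items in an SArray that is released in one place with taosArrayDestroyEx and a per-element destructor (freeItem). The sketch below models that ownership pattern with a minimal growable array; the array type and helper names are stand-ins, not the taosArray API.

/*
 * Illustrative sketch of the ownership pattern used above: execution-info items
 * are collected into a growable array and released in one place through a
 * per-element destructor, instead of passing a raw pointer plus a count around.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { void *data; size_t elemSize, size, cap; } SArraySketch;

static SArraySketch *arrayInit(size_t cap, size_t elemSize) {
  SArraySketch *a = calloc(1, sizeof(*a));
  a->elemSize = elemSize;
  a->cap = cap ? cap : 4;
  a->data = calloc(a->cap, elemSize);
  return a;
}

static void arrayPush(SArraySketch *a, const void *elem) {
  if (a->size == a->cap) { a->cap *= 2; a->data = realloc(a->data, a->cap * a->elemSize); }
  memcpy((char *)a->data + a->size * a->elemSize, elem, a->elemSize);
  a->size++;
}

static void arrayDestroyEx(SArraySketch *a, void (*freeItem)(void *)) {
  for (size_t i = 0; i < a->size; ++i) freeItem((char *)a->data + i * a->elemSize);
  free(a->data);
  free(a);
}

typedef struct { int numOfRows; char *verboseInfo; } SExecInfoSketch;

static void freeExecInfo(void *param) { free(((SExecInfoSketch *)param)->verboseInfo); }

int main(void) {
  SArraySketch *list = arrayInit(4, sizeof(SExecInfoSketch));
  char *msg = malloc(16);
  strcpy(msg, "scan cost=1ms");
  SExecInfoSketch info = {.numOfRows = 10, .verboseInfo = msg};
  arrayPush(list, &info);
  printf("collected %zu exec info item(s)\n", list->size);
  arrayDestroyEx(list, freeExecInfo);   /* frees verboseInfo for every element */
  return 0;
}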
@ -22,6 +22,7 @@
|
|||
#include "tcompare.h"
|
||||
#include "tdatablock.h"
|
||||
#include "ttime.h"
|
||||
#include "functionMgt.h"
|
||||
|
||||
OptrStr gOptrStr[] = {
|
||||
{0, "invalid"},
|
||||
|
@ -3877,4 +3878,195 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, int8_t** p, SColumnData
|
|||
}
|
||||
|
||||
|
||||
typedef struct SClassifyConditionCxt {
|
||||
bool hasPrimaryKey;
|
||||
bool hasTagIndexCol;
|
||||
bool hasTagCol;
|
||||
bool hasOtherCol;
|
||||
} SClassifyConditionCxt;
|
||||
|
||||
static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) {
|
||||
SClassifyConditionCxt* pCxt = (SClassifyConditionCxt*)pContext;
|
||||
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
|
||||
SColumnNode* pCol = (SColumnNode*)pNode;
|
||||
if (PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && TSDB_SYSTEM_TABLE != pCol->tableType) {
|
||||
pCxt->hasPrimaryKey = true;
|
||||
} else if (pCol->hasIndex) {
|
||||
pCxt->hasTagIndexCol = true;
|
||||
pCxt->hasTagCol = true;
|
||||
} else if (COLUMN_TYPE_TAG == pCol->colType || COLUMN_TYPE_TBNAME == pCol->colType) {
|
||||
pCxt->hasTagCol = true;
|
||||
} else {
|
||||
pCxt->hasOtherCol = true;
|
||||
}
|
||||
} else if (QUERY_NODE_FUNCTION == nodeType(pNode)) {
|
||||
SFunctionNode* pFunc = (SFunctionNode*)pNode;
|
||||
if (fmIsPseudoColumnFunc(pFunc->funcId)) {
|
||||
if (FUNCTION_TYPE_TBNAME==pFunc->funcType) {
|
||||
pCxt->hasTagCol = true;
|
||||
} else {
|
||||
pCxt->hasOtherCol = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return DEAL_RES_CONTINUE;
|
||||
}
|
||||
|
||||
typedef enum EConditionType {
|
||||
COND_TYPE_PRIMARY_KEY = 1,
|
||||
COND_TYPE_TAG_INDEX,
|
||||
COND_TYPE_TAG,
|
||||
COND_TYPE_NORMAL
|
||||
} EConditionType;
|
||||
|
||||
static EConditionType classifyCondition(SNode* pNode) {
|
||||
SClassifyConditionCxt cxt = {.hasPrimaryKey = false, .hasTagIndexCol = false, .hasOtherCol = false};
|
||||
nodesWalkExpr(pNode, classifyConditionImpl, &cxt);
|
||||
return cxt.hasOtherCol ? COND_TYPE_NORMAL
|
||||
: (cxt.hasPrimaryKey && cxt.hasTagCol
|
||||
? COND_TYPE_NORMAL
|
||||
: (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY
|
||||
: (cxt.hasTagIndexCol ? COND_TYPE_TAG_INDEX : COND_TYPE_TAG)));
|
||||
}
|
||||
|
||||
static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
|
||||
SNode** pOtherCond) {
|
||||
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*pCondition);
|
||||
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
SNodeList* pPrimaryKeyConds = NULL;
|
||||
SNodeList* pTagIndexConds = NULL;
|
||||
SNodeList* pTagConds = NULL;
|
||||
SNodeList* pOtherConds = NULL;
|
||||
SNode* pCond = NULL;
|
||||
FOREACH(pCond, pLogicCond->pParameterList) {
|
||||
switch (classifyCondition(pCond)) {
|
||||
case COND_TYPE_PRIMARY_KEY:
|
||||
if (NULL != pPrimaryKeyCond) {
|
||||
code = nodesListMakeAppend(&pPrimaryKeyConds, nodesCloneNode(pCond));
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_TAG_INDEX:
|
||||
if (NULL != pTagIndexCond) {
|
||||
code = nodesListMakeAppend(&pTagIndexConds, nodesCloneNode(pCond));
|
||||
}
|
||||
if (NULL != pTagCond) {
|
||||
code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond));
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_TAG:
|
||||
if (NULL != pTagCond) {
|
||||
code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond));
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_NORMAL:
|
||||
default:
|
||||
if (NULL != pOtherCond) {
|
||||
code = nodesListMakeAppend(&pOtherConds, nodesCloneNode(pCond));
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
SNode* pTempPrimaryKeyCond = NULL;
|
||||
SNode* pTempTagIndexCond = NULL;
|
||||
SNode* pTempTagCond = NULL;
|
||||
SNode* pTempOtherCond = NULL;
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesMergeConds(&pTempTagIndexCond, &pTagIndexConds);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesMergeConds(&pTempTagCond, &pTagConds);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = nodesMergeConds(&pTempOtherCond, &pOtherConds);
|
||||
}
|
||||
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
if (NULL != pPrimaryKeyCond) {
|
||||
*pPrimaryKeyCond = pTempPrimaryKeyCond;
|
||||
}
|
||||
if (NULL != pTagIndexCond) {
|
||||
*pTagIndexCond = pTempTagIndexCond;
|
||||
}
|
||||
if (NULL != pTagCond) {
|
||||
*pTagCond = pTempTagCond;
|
||||
}
|
||||
if (NULL != pOtherCond) {
|
||||
*pOtherCond = pTempOtherCond;
|
||||
}
|
||||
nodesDestroyNode(*pCondition);
|
||||
*pCondition = NULL;
|
||||
} else {
|
||||
nodesDestroyList(pPrimaryKeyConds);
|
||||
nodesDestroyList(pTagIndexConds);
|
||||
nodesDestroyList(pTagConds);
|
||||
nodesDestroyList(pOtherConds);
|
||||
nodesDestroyNode(pTempPrimaryKeyCond);
|
||||
nodesDestroyNode(pTempTagIndexCond);
|
||||
nodesDestroyNode(pTempTagCond);
|
||||
nodesDestroyNode(pTempOtherCond);
|
||||
}
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
int32_t filterPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
|
||||
SNode** pOtherCond) {
|
||||
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCondition) &&
|
||||
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCondition)->condType) {
|
||||
return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagIndexCond, pTagCond, pOtherCond);
|
||||
}
|
||||
|
||||
bool needOutput = false;
|
||||
switch (classifyCondition(*pCondition)) {
|
||||
case COND_TYPE_PRIMARY_KEY:
|
||||
if (NULL != pPrimaryKeyCond) {
|
||||
*pPrimaryKeyCond = *pCondition;
|
||||
needOutput = true;
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_TAG_INDEX:
|
||||
if (NULL != pTagIndexCond) {
|
||||
*pTagIndexCond = *pCondition;
|
||||
needOutput = true;
|
||||
}
|
||||
if (NULL != pTagCond) {
|
||||
SNode* pTempCond = *pCondition;
|
||||
if (NULL != pTagIndexCond) {
|
||||
pTempCond = nodesCloneNode(*pCondition);
|
||||
if (NULL == pTempCond) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
}
|
||||
*pTagCond = pTempCond;
|
||||
needOutput = true;
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_TAG:
|
||||
if (NULL != pTagCond) {
|
||||
*pTagCond = *pCondition;
|
||||
needOutput = true;
|
||||
}
|
||||
break;
|
||||
case COND_TYPE_NORMAL:
|
||||
default:
|
||||
if (NULL != pOtherCond) {
|
||||
*pOtherCond = *pCondition;
|
||||
needOutput = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (needOutput) {
|
||||
*pCondition = NULL;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
|
|
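filterPartitionCond above classifies each conjunct of an AND condition by the kinds of columns it references (primary-key timestamp, indexed tag, tag/tbname, normal column) and routes it into the matching output bucket; a conjunct that touches a data column, or mixes the timestamp with a tag, stays a normal filter. The standalone sketch below reproduces only that precedence rule, summarising a condition as a small struct instead of walking an expression tree; the names are illustrative.

/*
 * Standalone sketch of the classification rule used by filterPartitionCond above.
 * A condition is summarised by which kinds of columns it references and then
 * mapped to exactly one bucket.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  bool hasPrimaryKey;   /* references the timestamp column        */
  bool hasTagIndexCol;  /* references an indexed tag column       */
  bool hasTagCol;       /* references any tag column (or tbname)  */
  bool hasOtherCol;     /* references a normal data column        */
} SCondSummary;

typedef enum { COND_PRIMARY_KEY = 1, COND_TAG_INDEX, COND_TAG, COND_NORMAL } ECondType;

static ECondType classify(SCondSummary c) {
  if (c.hasOtherCol) return COND_NORMAL;                  /* a data column forces a normal filter */
  if (c.hasPrimaryKey && c.hasTagCol) return COND_NORMAL; /* mixed ts + tag cannot be pushed apart */
  if (c.hasPrimaryKey) return COND_PRIMARY_KEY;           /* pure time-range condition             */
  return c.hasTagIndexCol ? COND_TAG_INDEX : COND_TAG;    /* tag condition, indexed or not         */
}

int main(void) {
  SCondSummary tsOnly   = {.hasPrimaryKey = true};
  SCondSummary tsAndTag = {.hasPrimaryKey = true, .hasTagCol = true};
  SCondSummary idxTag   = {.hasTagIndexCol = true, .hasTagCol = true};
  printf("ts only -> %d, ts+tag -> %d, indexed tag -> %d\n",
         classify(tsOnly), classify(tsAndTag), classify(idxTag));
  return 0;
}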
@ -278,7 +278,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) {
|
|||
}
|
||||
|
||||
SHashObj *planToTask = taosHashInit(
|
||||
SCHEDULE_DEFAULT_MAX_TASK_NUM,
|
||||
pDag->numOfSubplans,
|
||||
taosGetDefaultHashFunction(POINTER_BYTES == sizeof(int64_t) ? TSDB_DATA_TYPE_BIGINT : TSDB_DATA_TYPE_INT), false,
|
||||
HASH_NO_LOCK);
|
||||
if (NULL == planToTask) {
|
||||
|
|
|
@ -462,7 +462,8 @@ int32_t streamDispatch(SStreamTask* pTask) {
|
|||
if (streamDispatchAllBlocks(pTask, pBlock) < 0) {
|
||||
ASSERT(0);
|
||||
code = -1;
|
||||
// TODO set status fail
|
||||
streamQueueProcessFail(pTask->outputQueue);
|
||||
atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);
|
||||
goto FREE;
|
||||
}
|
||||
/*atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);*/
|
||||
|
|
|
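The streamDispatch change above replaces a hard ASSERT on dispatch failure with error propagation: record a failure code, mark the output queue and status accordingly, and jump to a single cleanup label. A minimal sketch of that control flow, with purely illustrative names, follows.

/*
 * Small sketch of the error-handling change above: instead of asserting when a
 * dispatch fails, record an error code and jump to one cleanup label.
 */
#include <stdio.h>

static int dispatchBlock(int block) { return block < 0 ? -1 : 0; }  /* stand-in for the real dispatch */

static int dispatchAll(const int *blocks, int n) {
  int code = 0;
  for (int i = 0; i < n; ++i) {
    if (dispatchBlock(blocks[i]) < 0) {
      code = -1;                 /* remember the failure instead of asserting */
      goto FREE;                 /* one place to release resources            */
    }
  }
FREE:
  /* release buffers, reset queue/output status here */
  return code;
}

int main(void) {
  int good[] = {1, 2, 3}, bad[] = {1, -1, 3};
  printf("good=%d bad=%d\n", dispatchAll(good, 3), dispatchAll(bad, 3));
  return 0;
}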
@ -790,65 +790,6 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs
|
|||
}
|
||||
} while (0);
|
||||
|
||||
#if 0
|
||||
// fake match
|
||||
//
|
||||
// condition1:
|
||||
// I have snapshot, no log, preIndex > myLastIndex
|
||||
//
|
||||
// condition2:
|
||||
// I have snapshot, have log, log <= snapshot, preIndex > myLastIndex
|
||||
//
|
||||
// condition3:
|
||||
// I have snapshot, preIndex < snapshot.lastApplyIndex
|
||||
//
|
||||
// condition4:
|
||||
// I have snapshot, preIndex == snapshot.lastApplyIndex, no data
|
||||
//
|
||||
// operation:
|
||||
// match snapshot.lastApplyIndex - 1;
|
||||
// no operation on log
|
||||
do {
|
||||
SyncIndex myLastIndex = syncNodeGetLastIndex(ths);
|
||||
SSnapshot snapshot;
|
||||
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
|
||||
|
||||
bool condition0 = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) &&
|
||||
syncNodeHasSnapshot(ths);
|
||||
bool condition1 =
|
||||
condition0 && (ths->pLogStore->syncLogEntryCount(ths->pLogStore) == 0) && (pMsg->prevLogIndex > myLastIndex); // donot use syncLogEntryCount!!! use isEmpty
|
||||
bool condition2 = condition0 && (ths->pLogStore->syncLogLastIndex(ths->pLogStore) <= snapshot.lastApplyIndex) &&
|
||||
(pMsg->prevLogIndex > myLastIndex);
|
||||
bool condition3 = condition0 && (pMsg->prevLogIndex < snapshot.lastApplyIndex);
|
||||
bool condition4 = condition0 && (pMsg->prevLogIndex == snapshot.lastApplyIndex) && (pMsg->dataLen == 0);
|
||||
bool condition = condition1 || condition2 || condition3 || condition4;
|
||||
|
||||
if (condition) {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, fake match, pre-index:%" PRId64 ", pre-term:%" PRIu64,
|
||||
pMsg->prevLogIndex, pMsg->prevLogTerm);
|
||||
syncNodeEventLog(ths, logBuf);
|
||||
|
||||
// prepare response msg
|
||||
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
|
||||
pReply->srcId = ths->myRaftId;
|
||||
pReply->destId = pMsg->srcId;
|
||||
pReply->term = ths->pRaftStore->currentTerm;
|
||||
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
|
||||
pReply->success = true;
|
||||
pReply->matchIndex = snapshot.lastApplyIndex;
|
||||
|
||||
// send response
|
||||
SRpcMsg rpcMsg;
|
||||
syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg);
|
||||
syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
|
||||
syncAppendEntriesReplyDestroy(pReply);
|
||||
|
||||
return ret;
|
||||
}
|
||||
} while (0);
|
||||
#endif
|
||||
|
||||
// fake match
|
||||
//
|
||||
// condition1:
|
||||
|
|
|
@ -441,9 +441,12 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
taosThreadMutexLock(&pReader->mutex);
|
||||
|
||||
if (pReader->curInvalid || pReader->curVersion != ver) {
|
||||
if (walReadSeekVer(pReader, ver) < 0) {
|
||||
wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since %s", pReader->pWal->cfg.vgId, ver, terrstr());
|
||||
taosThreadMutexUnlock(&pReader->mutex);
|
||||
return -1;
|
||||
}
|
||||
seeked = true;
|
||||
|
@ -464,6 +467,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {
|
|||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
}
|
||||
ASSERT(0);
|
||||
taosThreadMutexUnlock(&pReader->mutex);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
@ -473,6 +477,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {
|
|||
wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since head checksum not passed", pReader->pWal->cfg.vgId,
|
||||
ver);
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
taosThreadMutexUnlock(&pReader->mutex);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -480,6 +485,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {
|
|||
void *ptr = taosMemoryRealloc(pReader->pHead, sizeof(SWalCkHead) + pReader->pHead->head.bodyLen);
|
||||
if (ptr == NULL) {
|
||||
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
|
||||
taosThreadMutexUnlock(&pReader->mutex);
|
||||
return -1;
|
||||
}
|
||||
pReader->pHead = ptr;
|
||||
|
@ -494,6 +500,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {
|
|||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
ASSERT(0);
|
||||
}
|
||||
taosThreadMutexUnlock(&pReader->mutex);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -503,6 +510,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {
|
|||
pReader->curInvalid = 1;
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
ASSERT(0);
|
||||
taosThreadMutexUnlock(&pReader->mutex);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -516,9 +524,12 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {
|
|||
pReader->curInvalid = 1;
|
||||
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
|
||||
ASSERT(0);
|
||||
taosThreadMutexUnlock(&pReader->mutex);
|
||||
return -1;
|
||||
}
|
||||
pReader->curVersion++;
|
||||
|
||||
taosThreadMutexUnlock(&pReader->mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
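The walReadVer hunks above wrap the read path in pReader->mutex and make every early-return error path unlock before returning. The sketch below shows that locking discipline on a toy reader; the struct and checks are assumptions for illustration only.

/*
 * Sketch of the locking discipline added to walReadVer above: the reader takes a
 * mutex before seeking/reading and every error path releases it before returning.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mutex;
  long            curVersion;
} SReaderSketch;

static int readVer(SReaderSketch *r, long ver) {
  pthread_mutex_lock(&r->mutex);

  if (ver < 0) {                          /* e.g. seek failure */
    pthread_mutex_unlock(&r->mutex);      /* unlock on every early return */
    return -1;
  }
  if (ver < r->curVersion) {              /* e.g. stale or corrupted entry */
    pthread_mutex_unlock(&r->mutex);
    return -1;
  }

  r->curVersion = ver + 1;                /* a successful read advances the cursor */
  pthread_mutex_unlock(&r->mutex);
  return 0;
}

int main(void) {
  SReaderSketch r = {.mutex = PTHREAD_MUTEX_INITIALIZER, .curVersion = 0};
  printf("read 5 -> %d, read 3 -> %d\n", readVer(&r, 5), readVer(&r, 3));
  return 0;
}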
@ -408,7 +408,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy
|
|||
pWal->writeHead.head.version = index;
|
||||
pWal->writeHead.head.bodyLen = bodyLen;
|
||||
pWal->writeHead.head.msgType = msgType;
|
||||
pWal->writeHead.head.ingestTs = taosGetTimestampMs();
|
||||
pWal->writeHead.head.ingestTs = 0;
|
||||
|
||||
// sync info for sync module
|
||||
pWal->writeHead.head.syncMeta = syncMeta;
|
||||
|
|
|
@ -134,21 +134,95 @@ int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) {
|
|||
#endif
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
iconv_t conv;
|
||||
int8_t inUse;
|
||||
} SConv;
|
||||
|
||||
SConv *gConv = NULL;
|
||||
int32_t convUsed = 0;
|
||||
int32_t gConvMaxNum = 0;
|
||||
|
||||
void taosConvInit(void) {
|
||||
gConvMaxNum = 512;
|
||||
gConv = taosMemoryCalloc(gConvMaxNum, sizeof(SConv));
|
||||
for (int32_t i = 0; i < gConvMaxNum; ++i) {
|
||||
gConv[i].conv = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset);
|
||||
if ((iconv_t)-1 == gConv[i].conv || (iconv_t)0 == gConv[i].conv) {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void taosConvDestroy() {
|
||||
for (int32_t i = 0; i < gConvMaxNum; ++i) {
|
||||
iconv_close(gConv[i].conv);
|
||||
}
|
||||
taosMemoryFreeClear(gConv);
|
||||
gConvMaxNum = -1;
|
||||
}
|
||||
|
||||
iconv_t taosAcquireConv(int32_t *idx) {
|
||||
if (gConvMaxNum <= 0) {
|
||||
*idx = -1;
|
||||
return iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
int32_t used = atomic_add_fetch_32(&convUsed, 1);
|
||||
if (used > gConvMaxNum) {
|
||||
used = atomic_sub_fetch_32(&convUsed, 1);
|
||||
sched_yield();
|
||||
continue;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
int32_t startId = taosGetSelfPthreadId() % gConvMaxNum;
|
||||
while (true) {
|
||||
if (gConv[startId].inUse) {
|
||||
startId = (startId + 1) % gConvMaxNum;
|
||||
continue;
|
||||
}
|
||||
|
||||
int8_t old = atomic_val_compare_exchange_8(&gConv[startId].inUse, 0, 1);
|
||||
if (0 == old) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
*idx = startId;
|
||||
return gConv[startId].conv;
|
||||
}
|
||||
|
||||
void taosReleaseConv(int32_t idx, iconv_t conv) {
|
||||
if (idx < 0) {
|
||||
iconv_close(conv);
|
||||
return;
|
||||
}
|
||||
|
||||
atomic_store_8(&gConv[idx].inUse, 0);
|
||||
atomic_sub_fetch_32(&convUsed, 1);
|
||||
}
|
||||
|
||||
bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len) {
|
||||
#ifdef DISALLOW_NCHAR_WITHOUT_ICONV
|
||||
printf("Nchar cannot be read and written without iconv, please install iconv library and recompile TDengine.\n");
|
||||
return -1;
|
||||
#else
|
||||
memset(ucs4, 0, ucs4_max_len);
|
||||
iconv_t cd = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset);
|
||||
|
||||
int32_t idx = -1;
|
||||
iconv_t conv = taosAcquireConv(&idx);
|
||||
size_t ucs4_input_len = mbsLength;
|
||||
size_t outLeft = ucs4_max_len;
|
||||
if (iconv(cd, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) {
|
||||
iconv_close(cd);
|
||||
if (iconv(conv, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) {
|
||||
taosReleaseConv(idx, conv);
|
||||
return false;
|
||||
}
|
||||
|
||||
iconv_close(cd);
|
||||
taosReleaseConv(idx, conv);
|
||||
if (len != NULL) {
|
||||
*len = (int32_t)(ucs4_max_len - outLeft);
|
||||
if (*len < 0) {
|
||||
|
|
|
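taosAcquireConv/taosReleaseConv above keep a fixed pool of iconv_t descriptors: a global counter bounds the number of concurrent users (yielding while the pool is full), and each slot is claimed with an atomic compare-and-swap starting from a slot derived from the calling thread. The self-contained model below uses an int handle in place of iconv_t and C11 atomics; sched_yield is POSIX, and all names are illustrative rather than the real API.

/*
 * Simplified model of the conversion-descriptor pool added above: a fixed array
 * of handles, a global usage counter, and an atomic in-use flag per slot that is
 * claimed with compare-and-swap starting from a hinted slot.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define POOL_SIZE 4

typedef struct { int handle; atomic_char inUse; } SSlot;

static SSlot      gPool[POOL_SIZE];
static atomic_int gUsed;

static int acquireSlot(unsigned hint) {
  /* back off while every slot is busy */
  while (atomic_fetch_add(&gUsed, 1) + 1 > POOL_SIZE) {
    atomic_fetch_sub(&gUsed, 1);
    sched_yield();
  }
  int idx = (int)(hint % POOL_SIZE);
  for (;;) {                              /* scan from the hinted slot until a CAS wins */
    char expected = 0;
    if (atomic_compare_exchange_strong(&gPool[idx].inUse, &expected, 1)) return idx;
    idx = (idx + 1) % POOL_SIZE;
  }
}

static void releaseSlot(int idx) {
  atomic_store(&gPool[idx].inUse, 0);
  atomic_fetch_sub(&gUsed, 1);
}

int main(void) {
  for (int i = 0; i < POOL_SIZE; ++i) gPool[i].handle = i + 100;  /* pretend these are iconv_t */
  int idx = acquireSlot(7);
  printf("acquired slot %d (handle %d)\n", idx, gPool[idx].handle);
  releaseSlot(idx);
  return 0;
}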
@ -37,7 +37,7 @@ sql select * from stb1 where c8 < 'nuLl';
|
|||
sql select * from stb1 where c9 > 'nuLl';
|
||||
sql_error select * from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b;
|
||||
sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 or b.c1 < 60;
|
||||
sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60));
|
||||
sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60));
|
||||
sql select * from stb1 where 'c2' is null;
|
||||
sql select * from stb1 where 'c2' is not null;
|
||||
|
||||
|
@ -1475,66 +1475,66 @@ if $data20 != @21-05-05 18:19:25.000@ then
|
|||
return -1
|
||||
endi
|
||||
|
||||
#sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts;
|
||||
#sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50;
|
||||
#if $rows != 4 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data00 != @21-05-05 18:19:20.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data10 != @21-05-05 18:19:21.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data20 != @21-05-05 18:19:24.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data30 != @21-05-05 18:19:25.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts;
|
||||
sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50;
|
||||
if $rows != 4 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != @21-05-05 18:19:20.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != @21-05-05 18:19:21.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != @21-05-05 18:19:24.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != @21-05-05 18:19:25.000@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
#sql select a.ts,a.c1,a.c8,a.c9 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts;
|
||||
#sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts;
|
||||
#if $rows != 2 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data00 != @21-05-05 18:19:20.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data10 != @21-05-05 18:19:21.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#
|
||||
#sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts;
|
||||
#if $rows != 4 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data00 != @21-05-05 18:19:00.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data10 != @21-05-05 18:19:02.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data20 != @21-05-05 18:19:12.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data30 != @21-05-05 18:19:14.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#
|
||||
#sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts;
|
||||
#if $rows != 3 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data00 != @21-05-05 18:19:04.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data10 != @21-05-05 18:19:06.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data20 != @21-05-05 18:19:10.000@ then
|
||||
# return -1
|
||||
#endi
|
||||
sql select a.ts,a.c1,a.c8,a.c9 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts;
|
||||
sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts;
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != @21-05-05 18:19:20.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != @21-05-05 18:19:21.000@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts;
|
||||
if $rows != 4 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != @21-05-05 18:19:00.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != @21-05-05 18:19:02.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != @21-05-05 18:19:12.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data30 != @21-05-05 18:19:14.000@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts;
|
||||
if $rows != 3 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != @21-05-05 18:19:04.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data10 != @21-05-05 18:19:06.000@ then
|
||||
return -1
|
||||
endi
|
||||
if $data20 != @21-05-05 18:19:10.000@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select * from stb1 where c1 is null and c1 is not null;
|
||||
if $rows != 0 then
|
||||
|
@ -2574,7 +2574,7 @@ sql select * from stb1 where c1 > 0 or t1 > 0 and c1 > 1
|
|||
sql select * from stb1 where t1 > 0 or t1 > 0 and c1 > 1
|
||||
sql select * from stb1 where (c1 > 0 and t1 > 0 ) or (t1 > 1 and c1 > 3)
|
||||
sql select * from stb1 where (c1 > 0 and t1 > 0 ) or t1 > 1
|
||||
sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.t1=b.t1;
|
||||
sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.t1=b.t1;
|
||||
|
||||
sql select * from stb1 where c1 < 63 and t1 > 5
|
||||
if $rows != 2 then
|
||||
|
@ -2631,7 +2631,7 @@ sql_error select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts or
|
|||
|
||||
|
||||
print "ts&tbname test"
|
||||
sql_error select count(*) from stb1 where ts > 0 or tbname like 'tb%';
|
||||
sql select count(*) from stb1 where ts > 0 or tbname like 'tb%';
|
||||
|
||||
print "ts&tag test"
|
||||
sql select count(*) from stb1 where ts > 0 or t1 > 0;
|
||||
|
@ -2717,9 +2717,9 @@ print "tbname&tag&join test"
|
|||
|
||||
|
||||
print "column&ts&tbname&tag test"
|
||||
sql_error select * from stb1 where (tbname like 'tb%' or ts > '2021-05-05 18:19:01.000') and (t1 > 5 or t1 < 4) and c1 > 0;
|
||||
sql select * from stb1 where (tbname like 'tb%' or ts > '2021-05-05 18:19:01.000') and (t1 > 5 or t1 < 4) and c1 > 0;
|
||||
sql select * from stb1 where (ts > '2021-05-05 18:19:01.000') and (ts > '2021-05-05 18:19:02.000' or t1 > 3) and (t1 > 5 or t1 < 4) and c1 > 0;
|
||||
sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:20.000' and col > 0 and t1 > 0;
|
||||
sql select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:20.000' and c1 > 0 and t1 > 0;
|
||||
|
||||
|
||||
print "column&ts&tbname&join test"
|
||||
|
|
|
@ -283,12 +283,14 @@ print ================== server restart completed
|
|||
sql connect
|
||||
sql use first_db0;
|
||||
|
||||
sql select last(*), tbname from m1 group by tbname;
|
||||
sql select last(*), tbname from m1 group by tbname order by tbname;
|
||||
print $data00 $data01 $data02 $data10 $data11 $data12
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
if $data00 != @20-03-01 01:01:01.000@ then
|
||||
print data00 $data00 != 20-03-01 01:01:01.000@
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
@ -312,4 +314,4 @@ if $data12 != @tm1@ then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sql drop table m1
|
||||
sql drop table m1
|
||||
|
|
|
@ -47,7 +47,7 @@ endi
|
|||
|
||||
$replica = 3
|
||||
$vgroups = 1
|
||||
$retentions = 5s:7d,15s:21d
|
||||
$retentions = 5s:7d,15s:21d,1m:365d
|
||||
|
||||
print ============= create database
|
||||
sql create database db replica $replica vgroups $vgroups retentions $retentions
|
||||
|
@ -114,7 +114,7 @@ endi
|
|||
|
||||
vg_ready:
|
||||
print ====> create stable/child table
|
||||
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum)
|
||||
sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum) watermark 3s,3s max_delay 3s,3s
|
||||
|
||||
sql show stables
|
||||
if $rows != 1 then
|
||||
|
@ -129,20 +129,28 @@ system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
|||
sleep 3000
|
||||
|
||||
|
||||
print ===> write 100 records
|
||||
$N = 100
|
||||
$count = 0
|
||||
while $count < $N
|
||||
$ms = 1659000000000 + $count
|
||||
sql insert into ct1 values( $ms , $count , 2.1, 3.1)
|
||||
$count = $count + 1
|
||||
endw
|
||||
print ===> write 0-50 records
|
||||
$ms = 0
|
||||
$cnt = 0
|
||||
while $cnt < 50
|
||||
$ms = $cnt . m
|
||||
sql insert into ct1 values (now + $ms , $cnt , 2.1, 3.1)
|
||||
$cnt = $cnt + 1
|
||||
endw
|
||||
print ===> flush database db
|
||||
sql flush database db;
|
||||
sleep 5000
|
||||
|
||||
print ===> write 51-100 records
|
||||
while $cnt < 100
|
||||
$ms = $cnt . m
|
||||
sql insert into ct1 values (now + $ms , $cnt , 2.1, 3.1)
|
||||
$cnt = $cnt + 1
|
||||
endw
|
||||
|
||||
#sql flush database db;
|
||||
|
||||
|
||||
sleep 3000
|
||||
print ===> flush database db
|
||||
sql flush database db;
|
||||
sleep 5000
|
||||
|
||||
|
||||
print ===> stop dnode1 dnode2 dnode3
|
||||
|
@ -150,8 +158,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
|||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
|
||||
sleep 10000
|
||||
|
||||
########################################################
|
||||
print ===> start dnode1 dnode2 dnode3 dnode4
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
|
@ -164,7 +170,7 @@ sleep 3000
|
|||
print =============== query data
|
||||
sql connect
|
||||
sql use db
|
||||
sql select * from ct1
|
||||
sql select * from ct1 where ts > now - 1d
|
||||
print rows: $rows
|
||||
print $data00 $data01 $data02
|
||||
if $rows != 100 then
|
||||
|
|
|
@ -11,7 +11,7 @@ from util.dnodes import *
|
|||
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
|
||||
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
|
||||
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
|
||||
def init(self, conn, logSql):
|
||||
|
@ -37,7 +37,7 @@ class TDTestCase:
|
|||
def illegal_params(self):
|
||||
|
||||
illegal_params = ["1","0","NULL","False","True" ,"keep","now" ,"*" , "," ,"_" , "abc" ,"keep"]
|
||||
|
||||
|
||||
for value in illegal_params:
|
||||
|
||||
tdSql.error("create database testdb replica 1 cachemodel '%s' " %value)
|
||||
|
@ -80,9 +80,9 @@ class TDTestCase:
|
|||
tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) )
|
||||
|
||||
def check_cachemodel_sets(self):
|
||||
|
||||
|
||||
# check cache_last value for database
|
||||
|
||||
|
||||
# check cache_last value for database
|
||||
|
||||
tdSql.query(" show databases ")
|
||||
databases_infos = tdSql.queryResult
|
||||
|
@ -96,10 +96,10 @@ class TDTestCase:
|
|||
continue
|
||||
cache_lasts[dbname]=self.getCacheModelNum(cache_last_value)
|
||||
|
||||
|
||||
# cache_last_set value
|
||||
|
||||
# cache_last_set value
|
||||
for k , v in cache_lasts.items():
|
||||
|
||||
|
||||
if k=="testdb_"+str(self.getCacheModelStr(v)):
|
||||
tdLog.info(" database %s cache_last value check pass, value is %s "%(k,self.getCacheModelStr(v)) )
|
||||
else:
|
||||
|
@ -116,7 +116,7 @@ class TDTestCase:
|
|||
dataPath = buildPath + "/../sim/dnode1/data"
|
||||
abs_vnodePath = os.path.abspath(dataPath)+"/vnode/"
|
||||
tdLog.info("abs_vnodePath: %s" % abs_vnodePath)
|
||||
|
||||
|
||||
tdSql.query(" show dnodes ")
|
||||
dnode_id = tdSql.queryResult[0][0]
|
||||
|
||||
|
@ -127,7 +127,7 @@ class TDTestCase:
|
|||
vgroups_infos = tdSql.queryResult
|
||||
for vgroup_info in vgroups_infos:
|
||||
vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json"
|
||||
vnode_info_of_db = f"cat {vnode_json}"
|
||||
vnode_info_of_db = f"cat {vnode_json}"
|
||||
vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8")
|
||||
infoDict = json.loads(vnode_info)
|
||||
vnode_json_of_dbname = f"{dnode_id}."+ dbname
|
||||
|
@ -142,7 +142,7 @@ class TDTestCase:
|
|||
tdLog.exit("cacheLast not found in vnode.json of vnode%d "%(vgroup_info[0]))
|
||||
|
||||
def restart_check_cachemodel_sets(self):
|
||||
|
||||
|
||||
for i in range(3):
|
||||
tdSql.query("show dnodes")
|
||||
index = tdSql.getData(0, 0)
|
||||
|
@ -157,7 +157,7 @@ class TDTestCase:
|
|||
self.prepare_datas()
|
||||
self.check_cachemodel_sets()
|
||||
self.restart_check_cachemodel_sets()
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
|
|
@ -33,14 +33,14 @@ class TDTestCase:
|
|||
tdSql.query('select database()')
|
||||
tdSql.checkData(0,0,self.dbname)
|
||||
tdSql.execute(f'drop database {self.dbname}')
|
||||
|
||||
|
||||
def check_version(self):
|
||||
taos_list = ['server','client']
|
||||
for i in taos_list:
|
||||
tdSql.query(f'select {i}_version()')
|
||||
version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char version"', shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1]
|
||||
tdSql.checkData(0,0,version_info)
|
||||
|
||||
|
||||
def get_server_status(self):
|
||||
sleep(self.delaytime)
|
||||
tdSql.query('select server_status()')
|
||||
|
@ -51,7 +51,7 @@ class TDTestCase:
|
|||
if platform.system().lower() == 'windows':
|
||||
sleep(10)
|
||||
tdSql.error('select server_status()')
|
||||
|
||||
|
||||
def run(self):
|
||||
self.get_database_info()
|
||||
self.check_version()
|
||||
|
@ -61,4 +61,4 @@ class TDTestCase:
|
|||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
|
|
|
@ -18,7 +18,7 @@ from util.dnodes import *
|
|||
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
|
||||
if len(key) == 0:
|
||||
tdLog.exit("taos test key is null!")
|
||||
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
taosCmd = buildPath + '\\build\\bin\\taos.exe '
|
||||
taosCmd = taosCmd.replace('\\','\\\\')
|
||||
|
@ -214,7 +214,7 @@ class TDTestCase:
|
|||
retCode, retVal = taos_command(buildPath, "p", keyDict['p'], "taos>", keyDict['c'], '', "A", '')
|
||||
if retCode != "TAOS_OK":
|
||||
tdLog.exit("taos -A fail")
|
||||
|
||||
|
||||
sqlString = 'create database ' + newDbName + ';'
|
||||
retCode = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', retVal)
|
||||
if retCode != "TAOS_OK":
|
||||
|
@ -237,7 +237,7 @@ class TDTestCase:
|
|||
tdLog.exit("taos -s fail")
|
||||
|
||||
print ("========== check new db ==========")
|
||||
tdSql.query("show databases")
|
||||
tdSql.query("show databases")
|
||||
for i in range(tdSql.queryRows):
|
||||
if tdSql.getData(i, 0) == newDbName:
|
||||
break
|
||||
|
@ -259,24 +259,24 @@ class TDTestCase:
|
|||
if retCode != "TAOS_OK":
|
||||
tdLog.exit("taos -s insert data fail")
|
||||
|
||||
sqlString = "select * from " + newDbName + ".ctb0"
|
||||
sqlString = "select * from " + newDbName + ".ctb0"
|
||||
tdSql.query(sqlString)
|
||||
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
|
||||
tdSql.checkData(0, 1, 10)
|
||||
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
|
||||
tdSql.checkData(1, 1, 20)
|
||||
sqlString = "select * from " + newDbName + ".ctb1"
|
||||
sqlString = "select * from " + newDbName + ".ctb1"
|
||||
tdSql.query(sqlString)
|
||||
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
|
||||
tdSql.checkData(0, 1, 11)
|
||||
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
|
||||
tdSql.checkData(1, 1, 21)
|
||||
|
||||
|
||||
keyDict['s'] = "\"select * from " + newDbName + ".ctb0\""
|
||||
retCode = taos_command(buildPath, "s", keyDict['s'], "2021-04-01 08:00:01.000", keyDict['c'], '', '', '')
|
||||
if retCode != "TAOS_OK":
|
||||
tdLog.exit("taos -r show fail")
|
||||
|
||||
|
||||
tdLog.printNoPrefix("================================ parameter: -r")
|
||||
keyDict['s'] = "\"select * from " + newDbName + ".ctb0\""
|
||||
retCode = taos_command(buildPath, "s", keyDict['s'], "1617235200000", keyDict['c'], '', 'r', '')
|
||||
|
@ -287,9 +287,9 @@ class TDTestCase:
|
|||
retCode = taos_command(buildPath, "s", keyDict['s'], "1617235201000", keyDict['c'], '', 'r', '')
|
||||
if retCode != "TAOS_OK":
|
||||
tdLog.exit("taos -r show fail")
|
||||
|
||||
|
||||
tdSql.query('drop database %s'%newDbName)
|
||||
|
||||
|
||||
tdLog.printNoPrefix("================================ parameter: -f")
|
||||
pwd=os.getcwd()
|
||||
newDbName="dbf"
|
||||
|
@ -298,15 +298,15 @@ class TDTestCase:
|
|||
sql2 = "echo use " + newDbName + " >> " + sqlFile
|
||||
if platform.system().lower() == 'windows':
|
||||
sql3 = "echo create table ntbf (ts timestamp, c binary(40)) >> " + sqlFile
|
||||
sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
|
||||
sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
|
||||
else:
|
||||
sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile
|
||||
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
|
||||
sql5 = "echo show databases >> " + sqlFile
|
||||
os.system(sql1)
|
||||
os.system(sql2)
|
||||
os.system(sql3)
|
||||
os.system(sql4)
|
||||
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
|
||||
sql5 = "echo show databases >> " + sqlFile
|
||||
os.system(sql1)
|
||||
os.system(sql2)
|
||||
os.system(sql3)
|
||||
os.system(sql4)
|
||||
os.system(sql5)
|
||||
|
||||
keyDict['f'] = pwd + "/0-others/sql.txt"
|
||||
|
@ -316,7 +316,7 @@ class TDTestCase:
|
|||
tdLog.exit("taos -f fail")
|
||||
|
||||
print ("========== check new db ==========")
|
||||
tdSql.query("show databases")
|
||||
tdSql.query("show databases")
|
||||
for i in range(tdSql.queryRows):
|
||||
#print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0)))
|
||||
if tdSql.getData(i, 0) == newDbName:
|
||||
|
@ -324,13 +324,13 @@ class TDTestCase:
|
|||
else:
|
||||
tdLog.exit("create db fail after taos -f fail")
|
||||
|
||||
sqlString = "select * from " + newDbName + ".ntbf"
|
||||
sqlString = "select * from " + newDbName + ".ntbf"
|
||||
tdSql.query(sqlString)
|
||||
tdSql.checkData(0, 0, '2021-04-01 08:00:00.000')
|
||||
tdSql.checkData(0, 1, 'test taos -f1')
|
||||
tdSql.checkData(1, 0, '2021-04-01 08:00:01.000')
|
||||
tdSql.checkData(1, 1, 'test taos -f2')
|
||||
|
||||
|
||||
shellCmd = "rm -f " + sqlFile
|
||||
os.system(shellCmd)
|
||||
tdSql.query('drop database %s'%newDbName)
|
||||
|
@ -345,9 +345,9 @@ class TDTestCase:
|
|||
#print ("-C return content:\n ", retVal)
|
||||
totalCfgItem = {"firstEp":['', '', ''], }
|
||||
for line in retVal.splitlines():
|
||||
strList = line.split()
|
||||
strList = line.split()
|
||||
if (len(strList) > 2):
|
||||
totalCfgItem[strList[1]] = strList
|
||||
totalCfgItem[strList[1]] = strList
|
||||
|
||||
#print ("dict content:\n ", totalCfgItem)
|
||||
firstEp = keyDict["h"] + ':' + keyDict['P']
|
||||
|
@ -356,8 +356,8 @@ class TDTestCase:
|
|||
|
||||
if (totalCfgItem["rpcDebugFlag"][2] != self.rpcDebugFlagVal) and (totalCfgItem["rpcDebugFlag"][0] != 'cfg_file'):
|
||||
tdLog.exit("taos -C return rpcDebugFlag error!")
|
||||
|
||||
count = os.cpu_count()
|
||||
|
||||
count = os.cpu_count()
|
||||
if (totalCfgItem["numOfCores"][2] != count) and (totalCfgItem["numOfCores"][0] != 'default'):
|
||||
tdLog.exit("taos -C return numOfCores error!")
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ from util.dnodes import *
|
|||
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
|
||||
if len(key) == 0:
|
||||
tdLog.exit("taos test key is null!")
|
||||
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
taosCmd = buildPath + '\\build\\bin\\taos.exe '
|
||||
taosCmd = taosCmd.replace('\\','\\\\')
|
||||
|
@ -231,7 +231,7 @@ class TDTestCase:
|
|||
tdLog.info("taos -P %s test success"%keyDict['P'])
|
||||
else:
|
||||
tdLog.exit("taos -P %s fail"%keyDict['P'])
|
||||
|
||||
|
||||
tdLog.printNoPrefix("================================ parameter: -f with error sql ")
|
||||
pwd=os.getcwd()
|
||||
newDbName="dbf"
|
||||
|
@ -240,15 +240,15 @@ class TDTestCase:
|
|||
sql2 = "echo use " + newDbName + " >> " + sqlFile
|
||||
if platform.system().lower() == 'windows':
|
||||
sql3 = "echo create table ntbf (ts timestamp, c binary(40)) no this item >> " + sqlFile
|
||||
sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
|
||||
sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile
|
||||
else:
|
||||
sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile
|
||||
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
|
||||
sql5 = "echo show databases >> " + sqlFile
|
||||
os.system(sql1)
|
||||
os.system(sql2)
|
||||
os.system(sql3)
|
||||
os.system(sql4)
|
||||
sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile
|
||||
sql5 = "echo show databases >> " + sqlFile
|
||||
os.system(sql1)
|
||||
os.system(sql2)
|
||||
os.system(sql3)
|
||||
os.system(sql4)
|
||||
os.system(sql5)
|
||||
|
||||
keyDict['f'] = pwd + "/0-others/sql.txt"
|
||||
|
@ -258,7 +258,7 @@ class TDTestCase:
|
|||
tdLog.exit("taos -f fail")
|
||||
|
||||
print ("========== check new db ==========")
|
||||
tdSql.query("show databases")
|
||||
tdSql.query("show databases")
|
||||
for i in range(tdSql.queryRows):
|
||||
#print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0)))
|
||||
if tdSql.getData(i, 0) == newDbName:
|
||||
|
@ -266,9 +266,9 @@ class TDTestCase:
|
|||
else:
|
||||
tdLog.exit("create db fail after taos -f fail")
|
||||
|
||||
sqlString = "select * from " + newDbName + ".ntbf"
|
||||
sqlString = "select * from " + newDbName + ".ntbf"
|
||||
tdSql.error(sqlString)
|
||||
|
||||
|
||||
shellCmd = "rm -f " + sqlFile
|
||||
os.system(shellCmd)
|
||||
|
||||
|
@ -281,16 +281,16 @@ class TDTestCase:
|
|||
tdSql.query('drop database %s'%newDbName)
|
||||
|
||||
tdLog.printNoPrefix("================================ parameter: -a with error value")
|
||||
#newDbName="dba"
|
||||
errorPassword = 'errorPassword'
|
||||
#newDbName="dba"
|
||||
errorPassword = 'errorPassword'
|
||||
sqlString = 'create database ' + newDbName + ';'
|
||||
retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', errorPassword)
|
||||
if retCode != "TAOS_FAIL":
|
||||
tdLog.exit("taos -u %s -a %s"%(keyDict['u'], errorPassword))
|
||||
|
||||
tdLog.printNoPrefix("================================ parameter: -p with error value")
|
||||
#newDbName="dba"
|
||||
keyDict['p'] = 'errorPassword'
|
||||
#newDbName="dba"
|
||||
keyDict['p'] = 'errorPassword'
|
||||
retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'p', keyDict['p'])
|
||||
if retCode == "TAOS_FAIL" and "Authentication failure" in retVal:
|
||||
tdLog.info("taos -p %s test success"%keyDict['p'])
|
||||
|
|
|
@ -18,7 +18,7 @@ from util.dnodes import *
|
|||
def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''):
|
||||
if len(key) == 0:
|
||||
tdLog.exit("taos test key is null!")
|
||||
|
||||
|
||||
if platform.system().lower() == 'windows':
|
||||
taosCmd = buildPath + '\\build\\bin\\taos.exe '
|
||||
taosCmd = taosCmd.replace('\\','\\\\')
|
||||
|
@ -158,34 +158,34 @@ class TDTestCase:
|
|||
if "2: service ok" in retVal:
|
||||
tdLog.info("taos -k success")
|
||||
else:
|
||||
tdLog.info(retVal)
|
||||
tdLog.info(retVal)
|
||||
tdLog.exit("taos -k fail 1")
|
||||
|
||||
# stop taosd
|
||||
tdDnodes.stop(1)
|
||||
#sleep(10)
|
||||
#tdDnodes.start(1)
|
||||
#sleep(5)
|
||||
#sleep(5)
|
||||
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
|
||||
if "0: unavailable" in retVal:
|
||||
tdLog.info("taos -k success")
|
||||
else:
|
||||
tdLog.info(retVal)
|
||||
tdLog.info(retVal)
|
||||
tdLog.exit("taos -k fail 2")
|
||||
|
||||
# restart taosd
|
||||
tdDnodes.start(1)
|
||||
#sleep(5)
|
||||
#sleep(5)
|
||||
retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString)
|
||||
if "2: service ok" in retVal:
|
||||
tdLog.info("taos -k success")
|
||||
else:
|
||||
tdLog.info(retVal)
|
||||
tdLog.info(retVal)
|
||||
tdLog.exit("taos -k fail 3")
|
||||
|
||||
tdLog.printNoPrefix("================================ parameter: -n")
|
||||
# stop taosd
|
||||
tdDnodes.stop(1)
|
||||
tdDnodes.stop(1)
|
||||
|
||||
try:
|
||||
role = 'server'
|
||||
|
@ -220,7 +220,7 @@ class TDTestCase:
|
|||
#print(child.after.decode())
|
||||
if i == 0:
|
||||
tdLog.exit('taos -n server fail!')
|
||||
|
||||
|
||||
expectString1 = 'response is received, size:' + pktLen
|
||||
expectSTring2 = pktNum + '/' + pktNum
|
||||
if expectString1 in retResult and expectSTring2 in retResult:
|
||||
|
|
|
@ -51,7 +51,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
|
|||
|
||||
if "version" not in infoDict["cluster_info"] or infoDict["cluster_info"]["version"] == None:
|
||||
tdLog.exit("first_ep_dnode_id is null!")
|
||||
|
||||
|
||||
if "master_uptime" not in infoDict["cluster_info"] or infoDict["cluster_info"]["master_uptime"] == None:
|
||||
tdLog.exit("master_uptime is null!")
|
||||
|
||||
|
@ -69,13 +69,13 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
|
|||
|
||||
if "dnodes" not in infoDict["cluster_info"] or infoDict["cluster_info"]["dnodes"] == None :
|
||||
tdLog.exit("dnodes is null!")
|
||||
|
||||
|
||||
dnodes_info = { "dnode_id": 1,"dnode_ep": self.hostPort,"status":"ready"}
|
||||
|
||||
|
||||
for k ,v in dnodes_info.items():
|
||||
if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k] :
|
||||
tdLog.exit("dnodes info is null!")
|
||||
|
||||
|
||||
mnodes_info = { "mnode_id":1, "mnode_ep": self.hostPort,"role": "leader" }
|
||||
|
||||
for k ,v in mnodes_info.items():
|
||||
|
@ -86,7 +86,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
|
|||
|
||||
if "vgroup_infos" not in infoDict or infoDict["vgroup_infos"]== None:
|
||||
tdLog.exit("vgroup_infos is null!")
|
||||
|
||||
|
||||
vgroup_infos_nums = len(infoDict["vgroup_infos"])
|
||||
|
||||
for index in range(vgroup_infos_nums):
|
||||
|
@ -116,14 +116,14 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
|
|||
|
||||
if "timeseries_total" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_total"] > 0:
|
||||
tdLog.exit("timeseries_total is null!")
|
||||
|
||||
|
||||
# dnode_info ====================================
|
||||
|
||||
if "dnode_info" not in infoDict or infoDict["dnode_info"]== None:
|
||||
tdLog.exit("dnode_info is null!")
|
||||
|
||||
dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine',
|
||||
'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select',
|
||||
dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine',
|
||||
'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select',
|
||||
'req_select_rate', 'req_insert', 'req_insert_success', 'req_insert_rate', 'req_insert_batch', 'req_insert_batch_success',
|
||||
'req_insert_batch_rate', 'errors', 'vnodes_num', 'masters', 'has_mnode', 'has_qnode', 'has_snode', 'has_bnode']
|
||||
for elem in dnode_infos:
|
||||
|
@ -134,7 +134,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
|
|||
|
||||
if "disk_infos" not in infoDict or infoDict["disk_infos"]== None:
|
||||
tdLog.exit("disk_infos is null!")
|
||||
|
||||
|
||||
# bug for data_dir
|
||||
if "datadir" not in infoDict["disk_infos"] or len(infoDict["disk_infos"]["datadir"]) <=0 :
|
||||
tdLog.exit("datadir is null!")
|
||||
|
@ -187,7 +187,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
|
|||
|
||||
|
||||
# log_infos ====================================
|
||||
|
||||
|
||||
if "log_infos" not in infoDict or infoDict["log_infos"]== None:
|
||||
tdLog.exit("log_infos is null!")
|
||||
|
||||
|
@ -206,13 +206,13 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
|
|||
if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"])!= 4:
|
||||
tdLog.exit("summary is null!")
|
||||
|
||||
|
||||
|
||||
if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0 :
|
||||
tdLog.exit("total is null!")
|
||||
|
||||
if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]:
|
||||
tdLog.exit("level is null!")
|
||||
|
||||
|
||||
def do_GET(self):
|
||||
"""
|
||||
process GET request
|
||||
|
@ -227,25 +227,25 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
|
|||
if contentEncoding == 'gzip':
|
||||
req_body = self.rfile.read(int(self.headers["Content-Length"]))
|
||||
plainText = gzip.decompress(req_body).decode()
|
||||
else:
|
||||
else:
|
||||
plainText = self.rfile.read(int(self.headers["Content-Length"])).decode()
|
||||
|
||||
|
||||
print(plainText)
|
||||
# 1. send response code and header
|
||||
self.send_response(200)
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "text/html; charset=utf-8")
|
||||
self.end_headers()
|
||||
|
||||
|
||||
# 2. send response content
|
||||
#self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8"))
|
||||
|
||||
|
||||
# 3. check request body info
|
||||
infoDict = json.loads(plainText)
|
||||
#print("================")
|
||||
# print(infoDict)
|
||||
self.telemetryInfoCheck(infoDict)
|
||||
|
||||
# 4. shutdown the server and exit case
|
||||
# 4. shutdown the server and exit case
|
||||
assassin = threading.Thread(target=self.server.shutdown)
|
||||
assassin.daemon = True
|
||||
assassin.start()
|
||||
|
@ -287,7 +287,7 @@ class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()
        # time.sleep(2)
@ -100,9 +100,9 @@ def telemetryInfoCheck(infoDict=''):
    if "compStorage" not in infoDict or infoDict["compStorage"] < 0:
        tdLog.exit("compStorage is null!")


class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        """
        process GET request
@ -117,26 +117,26 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler):
        if contentEncoding == 'gzip':
            req_body = self.rfile.read(int(self.headers["Content-Length"]))
            plainText = gzip.decompress(req_body).decode()
        else:
            plainText = self.rfile.read(int(self.headers["Content-Length"])).decode()

        print("monitor info:\n%s" % plainText)

        # 1. send response code and header
        self.send_response(200)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.end_headers()

        # 2. send response content
        #self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8"))

        # 3. check request body info
        infoDict = json.loads(plainText)
        #print("================")
        #print(infoDict)
        telemetryInfoCheck(infoDict)

        # 4. shutdown the server and exit case
        assassin = threading.Thread(target=self.server.shutdown)
        assassin.daemon = True
        assassin.start()
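# Both handlers stop the HTTP server from inside a request: server.shutdown() blocks
# until serve_forever() returns, so it must run on another thread or the request would
# deadlock. A standalone sketch of that pattern (host and port are placeholders):
class OneShotHandler(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
        self.rfile.read(int(self.headers.get("Content-Length", 0)))
        self.send_response(200)
        self.end_headers()
        threading.Thread(target=self.server.shutdown, daemon=True).start()

# server = http.server.HTTPServer(("127.0.0.1", 7043), OneShotHandler)  # placeholder port
# server.serve_forever()   # returns after the first POST triggers shutdown()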
@ -176,7 +176,7 @@ class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def run(self):  # sourcery skip: extract-duplicate-method, remove-redundant-fstring
        tdSql.prepare()
        # time.sleep(2)
@ -512,7 +512,7 @@ class TDTestCase:
            "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
            "select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
            "select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
            "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
        ]
        udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
            "select udf2(c1) from stb1 group by 1-udf1(c1)" ,
@ -190,7 +190,7 @@ class TDTestCase:
        tdSql.execute("use db ")
        tdSql.error("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb")
        tdSql.error("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1")

        # aggregate functions
        tdSql.error("select udf2(num1) ,udf2(num2), udf2(num3) from tb")
@ -514,7 +514,7 @@ class TDTestCase:
            "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
            "select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
            "select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
            "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
        ]
        udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
            "select udf2(c1) from stb1 group by 1-udf1(c1)" ,
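# The udf1_sqls / udf2_sqls lists above are driven through tdSql; outside the framework
# the same statements can be run with the plain Python connector. A sketch - the host and
# database name are assumptions based on the test's "use db" setup:
def run_udf_queries():
    import taos
    conn = taos.connect(host="localhost", database="db")
    cursor = conn.cursor()
    for sql in ("select num1,num2,num3,udf1(num1,num2,num3) from tb",
                "select udf2(c1) from stb1 group by 1-udf1(c1)"):
        cursor.execute(sql)
        print(sql, "->", cursor.fetchall())   # each row carries the UDF output column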
@ -1,7 +1,7 @@
import taos
import sys
import time
import os

from util.log import *
from util.sql import *
@ -16,7 +16,7 @@ class MyDnodes(TDDnodes):
        super(MyDnodes, self).__init__()
        self.dnodes = dnodes_lists  # dnode must be a TDDnode instance
        self.simDeployed = False


class TDTestCase:

    def init(self, conn, logSql):
@ -26,7 +26,7 @@ class TDTestCase:
        self.master_dnode = self.TDDnodes.dnodes[0]
        conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"], config=self.master_dnode.cfgDir)
        tdSql.init(conn1.cursor())

    def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
@ -43,7 +43,7 @@ class TDTestCase:
                buildPath = root[:len(root) - len("/build/bin")]
                break
        return buildPath

    def prepare_udf_so(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
@ -61,7 +61,7 @@ class TDTestCase:
    def prepare_data(self):

        tdSql.execute("drop database if exists db")
        tdSql.execute("create database if not exists db replica 1 duration 300")
        tdSql.execute("use db")
@ -71,7 +71,7 @@ class TDTestCase:
            tags (t1 int)
            '''
        )

        tdSql.execute(
            '''
            create table t1
@ -142,7 +142,7 @@ class TDTestCase:
        # create aggregate functions
        tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")

        # functions = tdSql.getResult("show functions")
        # function_nums = len(functions)
        # if function_nums == 2:
@ -167,14 +167,14 @@ class TDTestCase:
        # create aggregate functions
        tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;")

        functions = tdSql.getResult("show functions")
        function_nums = len(functions)
        if function_nums == 2:
            tdLog.info("create two udf functions success ")

    def basic_udf_query(self, dnode):

        mytdSql = self.getConnection(dnode)
        # scalar functions
@ -229,7 +229,7 @@ class TDTestCase:
        else:
            tdLog.info(" UDF query check failed at :dnode_index %s" % dnode.index)
            tdLog.exit("query check failed at :dnode_index %s" % dnode.index)

    def check_UDF_query(self):
@ -238,10 +238,10 @@ class TDTestCase:
            self.basic_udf_query(dnode)

    def depoly_cluster(self, dnodes_nums):

        testCluster = False
        valgrind = 0
        hostname = socket.gethostname()
        dnodes = []
        start_port = 6030
@ -253,7 +253,7 @@ class TDTestCase:
            dnode.addExtraCfg("monitorFqdn", hostname)
            dnode.addExtraCfg("monitorPort", 7043)
            dnodes.append(dnode)

        self.TDDnodes = MyDnodes(dnodes)
        self.TDDnodes.init("")
        self.TDDnodes.setTestCluster(testCluster)
@ -261,11 +261,11 @@ class TDTestCase:
        self.TDDnodes.stopAll()
        for dnode in self.TDDnodes.dnodes:
            self.TDDnodes.deploy(dnode.index, {})

        for dnode in self.TDDnodes.dnodes:
            self.TDDnodes.start(dnode.index)

        # create cluster
        for dnode in self.TDDnodes.dnodes:
            print(dnode.cfgDict)
@ -275,12 +275,12 @@ class TDTestCase:
            cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
            print(cmd)
            os.system(cmd)

        time.sleep(2)
        tdLog.info(" create cluster done! ")

    def getConnection(self, dnode):
        host = dnode.cfgDict["fqdn"]
        port = dnode.cfgDict["serverPort"]
@ -288,23 +288,23 @@ class TDTestCase:
        return taos.connect(host=host, port=int(port), config=config_dir)

    def restart_udfd(self, dnode):

        buildPath = self.getBuildPath()

        if (buildPath == ""):
            tdLog.exit("taosd not found!")
        else:
            tdLog.info("taosd found in %s" % buildPath)

        cfgPath = dnode.cfgDir

        udfdPath = buildPath + '/build/bin/udfd'

        for i in range(5):
            tdLog.info(" loop restart udfd %d_th at dnode_index : %s" % (i, dnode.index))
            self.basic_udf_query(dnode)
            # stop udfd cmds
            get_processID = "ps -ef | grep -w udfd | grep %s | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'" % cfgPath
            processID = subprocess.check_output(get_processID, shell=True).decode("utf-8")
            stop_udfd = " kill -9 %s" % processID
@ -327,12 +327,12 @@ class TDTestCase:
|
|||
# self.check_UDF_query()
|
||||
self.restart_udfd(self.master_dnode)
|
||||
# self.test_restart_udfd_All_dnodes()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -514,7 +514,7 @@ class TDTestCase:
|
|||
"select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" ,
|
||||
"select num1,num2,num3,udf1(num1,num2,num3) from tb" ,
|
||||
"select c1,c6,udf1(c1,c6) from stb1 order by ts" ,
|
||||
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
|
||||
"select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;"
|
||||
]
|
||||
udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" ,
|
||||
"select udf2(c1) from stb1 group by 1-udf1(c1)" ,
|
||||
|
|
|
@ -213,7 +213,7 @@ class TDTestCase:
|
|||
tdSql.error("select irate(c1), abs(c1) from ct4 ")
|
||||
|
||||
# agg functions mix with agg functions
|
||||
tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname ")
|
||||
tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname order by tbname")
|
||||
tdSql.checkData(0, 0, 0.000000000)
|
||||
tdSql.checkData(1, 0, 0.000000000)
|
||||
tdSql.checkData(0, 1, 13)
|
||||
|
|
|
@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -18,7 +18,7 @@ class MyDnodes(TDDnodes):
|
|||
super(MyDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
noConn = True
|
||||
def init(self,conn ,logSql):
|
||||
|
@ -29,7 +29,7 @@ class TDTestCase:
|
|||
self.host=self.master_dnode.cfgDict["fqdn"]
|
||||
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
|
||||
tdSql.init(conn1.cursor())
|
||||
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
@ -46,12 +46,12 @@ class TDTestCase:
|
|||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
dnodes = []
|
||||
start_port = 6030
|
||||
|
@ -63,7 +63,7 @@ class TDTestCase:
|
|||
dnode.addExtraCfg("monitorFqdn", hostname)
|
||||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnodes.append(dnode)
|
||||
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
|
@ -71,11 +71,11 @@ class TDTestCase:
|
|||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
|
@ -84,7 +84,7 @@ class TDTestCase:
|
|||
cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\""
|
||||
print(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster done! ")
|
||||
|
||||
|
@ -94,7 +94,7 @@ class TDTestCase:
|
|||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -120,7 +120,7 @@ class TDTestCase:
|
|||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
tdSql.query('show databases;')
|
||||
tdSql.checkData(2,5,'off')
|
||||
tdSql.error("alter database db strict 'off'")
|
||||
|
@ -135,7 +135,7 @@ class TDTestCase:
|
|||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.five_dnode_one_mnode()
|
||||
|
||||
|
@ -145,4 +145,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -17,8 +17,8 @@ import subprocess
|
|||
sys.path.append("./6-cluster")
|
||||
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import *
|
||||
|
||||
from clusterCommonCheck import *
|
||||
|
||||
class TDTestCase:
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
|
@ -48,7 +48,7 @@ class TDTestCase:
|
|||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -63,7 +63,7 @@ class TDTestCase:
|
|||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkRows(2)
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
print("two mnodes is ready")
|
||||
|
@ -73,7 +73,7 @@ class TDTestCase:
|
|||
print("two mnodes is not ready in 10s ")
|
||||
|
||||
# first check status ready
|
||||
|
||||
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -106,7 +106,7 @@ class TDTestCase:
|
|||
clusterComCheck.checkDnodes(5)
|
||||
# restart all taosd
|
||||
tdDnodes=cluster.dnodes
|
||||
|
||||
|
||||
# stop follower
|
||||
tdLog.info("stop follower")
|
||||
tdDnodes[1].stoptaosd()
|
||||
|
@ -118,7 +118,7 @@ class TDTestCase:
|
|||
tdDnodes[1].starttaosd()
|
||||
if clusterComCheck.checkMnodeStatus(2) :
|
||||
print("both mnodes are ready")
|
||||
|
||||
|
||||
# stop leader
|
||||
tdLog.info("stop leader")
|
||||
tdDnodes[0].stoptaosd()
|
||||
|
@ -133,7 +133,7 @@ class TDTestCase:
|
|||
if clusterComCheck.checkMnodeStatus(2) :
|
||||
print("both mnodes are ready")
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
self.five_dnode_two_mnode()
|
||||
|
||||
|
||||
|
@ -142,4 +142,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -3,7 +3,7 @@ from numpy import row_stack
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
|
|||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
@ -57,7 +57,7 @@ class TDTestCase:
        if res == 0:
            raise ValueError("invalid thread id")
        elif res != 1:
            # """if it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect"""
            ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")
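    # The branch above is the tail of the usual ctypes recipe for raising an exception
    # inside another thread; a self-contained sketch of the whole helper (the name
    # _async_raise is conventional, the test may call it something else):
    def _async_raise(self, tid, exctype):
        if not inspect.isclass(exctype):
            exctype = type(exctype)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(exctype))
        if res == 0:
            raise ValueError("invalid thread id")
        elif res != 1:
            # undo if more than one thread was affected
            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
            raise SystemError("PyThreadState_SetAsyncExc failed")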
@ -68,7 +68,7 @@ class TDTestCase:
|
|||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data: db\stable\childtable\general table
|
||||
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
|
@ -118,7 +118,7 @@ class TDTestCase:
|
|||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
|
@ -133,7 +133,7 @@ class TDTestCase:
|
|||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
|
@ -142,7 +142,7 @@ class TDTestCase:
|
|||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
# dnode6=cluster.addDnode(6)
|
||||
|
@ -166,7 +166,7 @@ class TDTestCase:
|
|||
newTdSql=tdCom.newTdSql()
|
||||
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||
for tr in threads:
|
||||
tr.start()
|
||||
tr.start()
|
||||
dnode6Port=int(6030+5*100)
|
||||
tdSql.execute("create dnode '%s:%d'"%(hostname,dnode6Port))
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
@ -179,7 +179,7 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
|
@ -191,20 +191,20 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("123")
|
||||
else:
|
||||
print("456")
|
||||
|
||||
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
|
||||
|
||||
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
|
@ -217,7 +217,7 @@ class TDTestCase:
|
|||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
tdSql.query("select * from %s"%stableName)
|
||||
tdSql.checkRows(rowsPerStb)
|
||||
def run(self):
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
|
||||
|
||||
|
@ -226,4 +226,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -3,7 +3,7 @@ from paramiko import HostKeys
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -19,7 +19,7 @@ class MyDnodes(TDDnodes):
|
|||
super(MyDnodes,self).__init__()
|
||||
self.dnodes = dnodes_lists # dnode must be TDDnode instance
|
||||
self.simDeployed = False
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
|
@ -48,7 +48,7 @@ class TDTestCase:
|
|||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
|
||||
def insert_data(self,count):
|
||||
# first add data: db\stable\childtable\general table
|
||||
for couti in count:
|
||||
|
@ -70,10 +70,10 @@ class TDTestCase:
|
|||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
tdLog.debug(hostname)
|
||||
dnodes = []
|
||||
|
@ -88,7 +88,7 @@ class TDTestCase:
|
|||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
|
||||
dnodes.append(dnode)
|
||||
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
|
@ -96,11 +96,11 @@ class TDTestCase:
|
|||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# tdLog.debug(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
|
@ -109,7 +109,7 @@ class TDTestCase:
|
|||
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
|
||||
tdLog.debug(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
|
||||
|
||||
|
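        # The registration above shells out to the taos CLI; the same SQL can also be
        # issued through the connector against the first dnode. A sketch reusing the
        # dnode_first_host / dnode_first_port / dnode_id values already computed above:
        first = taos.connect(host=dnode_first_host, port=int(dnode_first_port))
        first.cursor().execute('create dnode "%s"' % dnode_id)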
@ -118,8 +118,8 @@ class TDTestCase:
|
|||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
tdLog.debug("mnode is three nodes")
|
||||
if tdSql.checkRows(3) :
|
||||
tdLog.debug("mnode is three nodes")
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
|
@ -129,20 +129,20 @@ class TDTestCase:
|
|||
if tdSql.queryResult[1][2]=='leader':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
tdLog.debug("three mnodes is ready in 10s")
|
||||
break
|
||||
break
|
||||
elif tdSql.queryResult[0][2]=='follower' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='leader':
|
||||
tdLog.debug("three mnodes is ready in 10s")
|
||||
break
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
tdLog.debug("three mnodes is not ready in 10s ")
|
||||
return -1
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
|
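    # The polling above re-runs "show mnodes;" until one leader and two followers are
    # reported; a compressed sketch of that wait (the helper name and timeout are
    # illustrative, the tdSql.queryResult layout is the one used above):
    def wait_for_mnode_roles(self, timeout=10):
        for _ in range(timeout):
            tdSql.query("show mnodes;")
            roles = sorted(row[2] for row in tdSql.queryResult)
            if roles == ["follower", "follower", "leader"]:
                return True
            time.sleep(1)
        return False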
@ -169,11 +169,11 @@ class TDTestCase:
|
|||
count+=1
|
||||
else:
|
||||
tdLog.debug("stop mnodes on dnode 2 failed in 10s ")
|
||||
return -1
|
||||
return -1
|
||||
tdSql.error("drop mnode on dnode 1;")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'offline')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -200,8 +200,8 @@ class TDTestCase:
|
|||
return -1
|
||||
tdSql.error("drop mnode on dnode 2;")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -229,8 +229,8 @@ class TDTestCase:
|
|||
tdLog.debug("stop mnodes on dnode 3 failed in 10s")
|
||||
return -1
|
||||
tdSql.error("drop mnode on dnode 3;")
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -249,8 +249,8 @@ class TDTestCase:
|
|||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -270,8 +270,8 @@ class TDTestCase:
|
|||
tdSql.query("show dnodes;")
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
|
||||
# drop follower of mnode
|
||||
dropcount =0
|
||||
# drop follower of mnode
|
||||
dropcount =0
|
||||
while dropcount <= 10:
|
||||
for i in range(1,3):
|
||||
tdLog.debug("drop mnode on dnode %d"%(i+1))
|
||||
|
@ -306,7 +306,7 @@ class TDTestCase:
|
|||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
# tdLog.debug(self.master_dnode.cfgDict)
|
||||
self.buildcluster(5)
|
||||
self.five_dnode_three_mnode()
|
||||
|
@ -316,4 +316,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -13,7 +13,7 @@ import time
|
|||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
@ -37,7 +37,7 @@ class TDTestCase:
|
|||
self.host=self.master_dnode.cfgDict["fqdn"]
|
||||
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
|
||||
tdSql.init(conn1.cursor())
|
||||
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
@ -63,7 +63,7 @@ class TDTestCase:
|
|||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
@ -74,7 +74,7 @@ class TDTestCase:
|
|||
|
||||
def createDbTbale(self,dbcountStart,dbcountStop,stbname,chilCount):
|
||||
# first add data: db\stable\childtable\general table
|
||||
|
||||
|
||||
for couti in range(dbcountStart,dbcountStop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
|
@ -98,7 +98,7 @@ class TDTestCase:
|
|||
|
||||
def insertTabaleData(self,dbcountStart,dbcountStop,stbname,chilCount,ts_start,rowCount):
|
||||
# insert data : create childtable and data
|
||||
|
||||
|
||||
for couti in range(dbcountStart,dbcountStop):
|
||||
tdSql.execute("use db%d" %couti)
|
||||
pre_insert = "insert into "
|
||||
|
@ -115,7 +115,7 @@ class TDTestCase:
|
|||
# print(sql)
|
||||
tdSql.execute(sql)
|
||||
sql = "insert into %s_%d values " %(stbname,i)
|
||||
# end sql
|
||||
# end sql
|
||||
if sql != pre_insert:
|
||||
# print(sql)
|
||||
print(len(sql))
|
||||
|
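    # insertTabaleData above grows one multi-row "insert into ... values" statement and
    # flushes it per batch; the same batching idea in a compact form (table name and row
    # layout are placeholders):
    def batched_insert(self, table, rows, batch=5000):
        sql = ""
        for i, (ts, val) in enumerate(rows, 1):
            if not sql:
                sql = "insert into %s values " % table
            sql += "(%d,%d) " % (ts, val)
            if i % batch == 0:
                tdSql.execute(sql)
                sql = ""
        if sql:
            tdSql.execute(sql)   # flush the trailing partial batch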
@ -134,13 +134,13 @@ class TDTestCase:
|
|||
for i in range(stableCount):
|
||||
tdSql.query("select count(*) from %s%d"%(stbname,i))
|
||||
tdSql.checkData(0,0,rowsPerSTable)
|
||||
return
|
||||
|
||||
return
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
def depoly_cluster(self ,dnodes_nums):
|
||||
|
||||
testCluster = False
|
||||
valgrind = 0
|
||||
valgrind = 0
|
||||
hostname = socket.gethostname()
|
||||
dnodes = []
|
||||
start_port = 6030
|
||||
|
@ -154,7 +154,7 @@ class TDTestCase:
|
|||
dnode.addExtraCfg("monitorPort", 7043)
|
||||
dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}")
|
||||
dnodes.append(dnode)
|
||||
|
||||
|
||||
self.TDDnodes = MyDnodes(dnodes)
|
||||
self.TDDnodes.init("")
|
||||
self.TDDnodes.setTestCluster(testCluster)
|
||||
|
@ -162,11 +162,11 @@ class TDTestCase:
|
|||
self.TDDnodes.stopAll()
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.deploy(dnode.index,{})
|
||||
|
||||
|
||||
for dnode in self.TDDnodes.dnodes:
|
||||
self.TDDnodes.starttaosd(dnode.index)
|
||||
|
||||
# create cluster
|
||||
# create cluster
|
||||
for dnode in self.TDDnodes.dnodes[1:]:
|
||||
# print(dnode.cfgDict)
|
||||
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
|
||||
|
@ -175,7 +175,7 @@ class TDTestCase:
|
|||
cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;"
|
||||
print(cmd)
|
||||
os.system(cmd)
|
||||
|
||||
|
||||
time.sleep(2)
|
||||
tdLog.info(" create cluster with %d dnode done! " %dnodes_nums)
|
||||
|
||||
|
@ -185,8 +185,8 @@ class TDTestCase:
|
|||
time.sleep(1)
|
||||
statusReadyBumber=0
|
||||
tdSql.query("show dnodes;")
|
||||
if tdSql.checkRows(dnodenumber) :
|
||||
print("dnode is %d nodes"%dnodenumber)
|
||||
if tdSql.checkRows(dnodenumber) :
|
||||
print("dnode is %d nodes"%dnodenumber)
|
||||
for i in range(dnodenumber):
|
||||
if tdSql.queryResult[i][4] !='ready' :
|
||||
status=tdSql.queryResult[i][4]
|
||||
|
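    # The loop above checks "show dnodes;" once per second until every row reports
    # 'ready' in column 4; the same wait as a small helper (name and timeout are
    # illustrative):
    def wait_for_dnodes_ready(self, dnode_count, timeout=10):
        for _ in range(timeout):
            tdSql.query("show dnodes;")
            rows = tdSql.queryResult
            if len(rows) == dnode_count and all(r[4] == 'ready' for r in rows):
                return True
            time.sleep(1)
        return False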
@ -203,15 +203,15 @@ class TDTestCase:
|
|||
else:
|
||||
print("%d mnodes is not ready in 10s "%dnodenumber)
|
||||
return False
|
||||
|
||||
|
||||
|
||||
def check3mnode(self):
|
||||
count=0
|
||||
while count < 10:
|
||||
time.sleep(1)
|
||||
tdSql.query("show mnodes;")
|
||||
if tdSql.checkRows(3) :
|
||||
print("mnode is three nodes")
|
||||
if tdSql.checkRows(3) :
|
||||
print("mnode is three nodes")
|
||||
if tdSql.queryResult[0][2]=='leader' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
|
@ -221,19 +221,19 @@ class TDTestCase:
|
|||
if tdSql.queryResult[1][2]=='leader':
|
||||
if tdSql.queryResult[2][2]=='follower':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
break
|
||||
elif tdSql.queryResult[0][2]=='follower' :
|
||||
if tdSql.queryResult[1][2]=='follower':
|
||||
if tdSql.queryResult[2][2]=='leader':
|
||||
print("three mnodes is ready in 10s")
|
||||
break
|
||||
break
|
||||
count+=1
|
||||
else:
|
||||
print("three mnodes is not ready in 10s ")
|
||||
return -1
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,3,'ready')
|
||||
tdSql.checkData(1,1,'%s:6130'%self.host)
|
||||
|
@ -263,8 +263,8 @@ class TDTestCase:
|
|||
return -1
|
||||
tdSql.error("drop mnode on dnode 1;")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'offline')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -291,8 +291,8 @@ class TDTestCase:
|
|||
return -1
|
||||
tdSql.error("drop mnode on dnode 2;")
|
||||
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -320,8 +320,8 @@ class TDTestCase:
|
|||
print("stop mnodes on dnode 3 failed in 10s")
|
||||
return -1
|
||||
tdSql.error("drop mnode on dnode 3;")
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -348,8 +348,8 @@ class TDTestCase:
|
|||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
tdSql.checkData(0,4,'ready')
|
||||
tdSql.checkData(4,4,'ready')
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("show mnodes;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(0,2,'leader')
|
||||
tdSql.checkData(0,3,'ready')
|
||||
|
@ -364,7 +364,7 @@ class TDTestCase:
|
|||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
tdLog.debug("stop all of mnode ")
|
||||
tdLog.debug("stop all of mnode ")
|
||||
|
||||
# drop follower of mnode and insert data
|
||||
self.createDbTbale(dbcountStart, dbcountStop,stbname,tablesPerStb)
|
||||
|
@ -378,7 +378,7 @@ class TDTestCase:
|
|||
rowsPerTable))
|
||||
|
||||
threads.start()
|
||||
dropcount =0
|
||||
dropcount =0
|
||||
while dropcount <= 10:
|
||||
for i in range(1,3):
|
||||
tdLog.debug("drop mnode on dnode %d"%(i+1))
|
||||
|
@ -415,7 +415,7 @@ class TDTestCase:
|
|||
return taos.connect(host=host, port=int(port), config=config_dir)
|
||||
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.buildcluster(5)
|
||||
self.five_dnode_three_mnode(5)
|
||||
|
@ -425,4 +425,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -3,7 +3,7 @@ from numpy import row_stack
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
|
|||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
@ -57,7 +57,7 @@ class TDTestCase:
|
|||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
@ -68,7 +68,7 @@ class TDTestCase:
|
|||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data: db\stable\childtable\general table
|
||||
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
|
@ -112,7 +112,7 @@ class TDTestCase:
|
|||
}
|
||||
username="user1"
|
||||
passwd="123"
|
||||
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
|
@ -120,7 +120,7 @@ class TDTestCase:
|
|||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
|
@ -135,7 +135,7 @@ class TDTestCase:
|
|||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
|
@ -162,10 +162,10 @@ class TDTestCase:
|
|||
for i in range(tdSql.queryRows):
|
||||
if tdSql.queryResult[i][0] == "%s"%username :
|
||||
tdLog.info("create user:%s successfully"%username)
|
||||
|
||||
|
||||
# # create database and stable
|
||||
# clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
# tdLog.info("Take turns stopping Mnodes ")
|
||||
# tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
# tdDnodes=cluster.dnodes
|
||||
# stopcount =0
|
||||
|
@ -197,7 +197,7 @@ class TDTestCase:
|
|||
# tdDnodes[i].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i].starttaosd()
|
||||
# # sleep(10)
|
||||
# # sleep(10)
|
||||
# elif stopRole == "vnode":
|
||||
# for i in range(vnodeNumbers):
|
||||
# tdDnodes[i+mnodeNums].stoptaosd()
|
||||
|
@ -209,7 +209,7 @@ class TDTestCase:
|
|||
# tdDnodes[i].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i].starttaosd()
|
||||
# # sleep(10)
|
||||
# # sleep(10)
|
||||
|
||||
# # dnodeNumbers don't include database of schema
|
||||
# if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
|
@ -220,7 +220,7 @@ class TDTestCase:
|
|||
# tdLog.exit("one or more of dnodes failed to start ")
|
||||
# # self.check3mnode()
|
||||
# stopcount+=1
|
||||
|
||||
|
||||
|
||||
# clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
# clusterComCheck.checkDbRows(dbNumbers)
|
||||
|
@ -234,7 +234,7 @@ class TDTestCase:
|
|||
# # tdSql.query("select * from %s"%stableName)
|
||||
# # tdSql.checkRows(rowsPerStb)
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||
|
||||
|
@ -243,4 +243,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -3,7 +3,7 @@ from numpy import row_stack
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
|
|||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
@ -57,7 +57,7 @@ class TDTestCase:
|
|||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
@ -68,7 +68,7 @@ class TDTestCase:
|
|||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data: db\stable\childtable\general table
|
||||
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
|
@ -110,7 +110,7 @@ class TDTestCase:
|
|||
"rowsPerTbl": 100,
|
||||
"batchNum": 5000
|
||||
}
|
||||
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
|
@ -118,7 +118,7 @@ class TDTestCase:
|
|||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
|
@ -133,7 +133,7 @@ class TDTestCase:
|
|||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
|
@ -142,7 +142,7 @@ class TDTestCase:
|
|||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
|
@ -174,7 +174,7 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
|
@ -186,7 +186,7 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
|
@ -197,7 +197,7 @@ class TDTestCase:
|
|||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
|
||||
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
|
@ -211,7 +211,7 @@ class TDTestCase:
|
|||
# tdSql.query("select * from %s"%stableName)
|
||||
# tdSql.checkRows(rowsPerStb)
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||
|
||||
|
@ -220,4 +220,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -3,7 +3,7 @@ from numpy import row_stack
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -13,13 +13,13 @@ from util.dnodes import TDDnode
|
|||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
@ -57,7 +57,7 @@ class TDTestCase:
|
|||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
@ -68,7 +68,7 @@ class TDTestCase:
|
|||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data: db\stable\childtable\general table
|
||||
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
|
@ -110,7 +110,7 @@ class TDTestCase:
|
|||
"rowsPerTbl": 100,
|
||||
"batchNum": 5000
|
||||
}
|
||||
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
|
@ -118,7 +118,7 @@ class TDTestCase:
|
|||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
|
@ -133,7 +133,7 @@ class TDTestCase:
|
|||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
|
@ -142,7 +142,7 @@ class TDTestCase:
|
|||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
|
@ -173,7 +173,7 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
|
@ -185,7 +185,7 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
|
@ -196,10 +196,10 @@ class TDTestCase:
|
|||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
|
||||
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||
|
@ -212,7 +212,7 @@ class TDTestCase:
|
|||
# tdSql.query("select * from %s"%stableName)
|
||||
# tdSql.checkRows(rowsPerStb)
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||
|
||||
|
@ -221,4 +221,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
|
|||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
@ -56,7 +56,7 @@ class TDTestCase:
|
|||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
@ -67,7 +67,7 @@ class TDTestCase:
|
|||
|
||||
def insertData(self,countstart,countstop):
|
||||
# first add data: db\stable\childtable\general table
|
||||
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
|
@ -107,13 +107,13 @@ class TDTestCase:
|
|||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 1,
|
||||
}
|
||||
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
|
||||
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
|
||||
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
|
@ -128,7 +128,7 @@ class TDTestCase:
|
|||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
|
@ -149,7 +149,7 @@ class TDTestCase:
|
|||
for tr in threads:
|
||||
tr.start()
|
||||
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
while stopcount < restartNumbers:
|
||||
tdLog.info(" restart loop: %d"%stopcount )
|
||||
if stopRole == "mnode":
|
||||
|
@ -157,7 +157,7 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
|
@ -169,7 +169,7 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
|
@ -180,7 +180,7 @@ class TDTestCase:
|
|||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
tdLog.info("check dnode number:")
|
||||
|
@ -196,7 +196,7 @@ class TDTestCase:
|
|||
|
||||
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='dnode')
|
||||
|
||||
|
@ -205,4 +205,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
|||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
|
@ -12,13 +12,13 @@ from util.dnodes import TDDnode
|
|||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
@ -56,7 +56,7 @@ class TDTestCase:
|
|||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
@ -81,13 +81,13 @@ class TDTestCase:
|
|||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 1,
|
||||
}
|
||||
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
|
||||
dbNumbers = 1
|
||||
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
|
@ -102,7 +102,7 @@ class TDTestCase:
|
|||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
|
@ -111,7 +111,7 @@ class TDTestCase:
|
|||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
|
@ -130,7 +130,7 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
|
@ -142,19 +142,19 @@ class TDTestCase:
|
|||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("123")
|
||||
else:
|
||||
print("456")
|
||||
|
||||
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
@ -169,7 +169,7 @@ class TDTestCase:
|
|||
|
||||
|
||||
|
||||
def run(self):
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
|
||||
|
||||
|
@ -178,4 +178,4 @@ class TDTestCase:
|
|||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
|
|
|
@@ -3,7 +3,7 @@ from numpy import row_stack
import taos
import sys
import time
import os

from util.log import *
from util.sql import *

@@ -13,13 +13,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes

@@ -57,7 +57,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")

@@ -68,7 +68,7 @@ class TDTestCase:

def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table

for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)

@@ -110,7 +110,7 @@ class TDTestCase:
"rowsPerTbl": 10000,
"batchNum": 5000
}

dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)

@@ -118,7 +118,7 @@ class TDTestCase:
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
rowsall=rowsPerStb*paraDict['stbNumbers']
dbNumbers = 1

tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)

@@ -133,7 +133,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")

@@ -142,7 +142,7 @@ class TDTestCase:

# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")

tdDnodes=cluster.dnodes
stopcount =0

@@ -171,7 +171,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()

@@ -183,19 +183,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)

# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")

self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1

for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)

@@ -209,7 +209,7 @@ class TDTestCase:
stableName= '%s_%d'%(paraDict['stbName'],i)
tdSql.query("select * from %s"%stableName)
tdSql.checkRows(rowsPerStb)
def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')

@@ -218,4 +218,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
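
The row check in the hunk at -209 above follows directly from the parameter dictionary: every super table is expected to end up with ctbNum * rowsPerTbl rows. A quick sketch of that arithmetic, using the values visible in the hunk at -110 (the stbNumbers value is assumed here, since it sits outside the visible hunks):

    paraDict = {"ctbNum": 1, "rowsPerTbl": 10000, "batchNum": 5000,
                "stbNumbers": 2}                    # stbNumbers assumed for illustration

    rowsPerStb = paraDict["ctbNum"] * paraDict["rowsPerTbl"]   # 1 * 10000 = 10000
    rowsall = rowsPerStb * paraDict["stbNumbers"]              # 10000 * 2 = 20000

    # each "select * from <stable>" in the verification loop should therefore
    # return rowsPerStb rows, which is what tdSql.checkRows(rowsPerStb) asserts
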
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *

@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes

@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")

@@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}

dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)

tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)

@@ -105,14 +105,14 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)

tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]

@@ -124,7 +124,7 @@ class TDTestCase:
for tr in threads:
tr.start()

tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":

@@ -132,7 +132,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()

@@ -144,7 +144,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)

# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):

@@ -155,7 +155,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1

for tr in threads:
tr.join()
tdLog.info("check dnode number:")

@@ -170,7 +170,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))

def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='mnode')

@@ -179,4 +179,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
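
Every case in this group drives the same rolling-restart loop that these hunks touch: one taosd is stopped and immediately restarted per target dnode, and the whole pass repeats until restartNumbers passes are done. Stripped of the test-framework plumbing, the control flow is roughly the following (the function and variable names are assumed for the sketch; only the index arithmetic is taken from the diff):

    def rolling_restart(dnodes, mnodeNums, vnodeNumbers, restartNumbers, stopRole):
        """Stop and restart taosd instances in turn, one full pass per loop."""
        stopcount = 0
        while stopcount < restartNumbers:
            if stopRole == "mnode":
                targets = range(mnodeNums)                        # dnodes hosting mnodes
            elif stopRole == "vnode":
                targets = range(mnodeNums, mnodeNums + vnodeNumbers)
            else:                                                 # "dnode": restart every dnode
                targets = range(mnodeNums + vnodeNumbers)
            for i in targets:
                dnodes[i].stoptaosd()
                dnodes[i].starttaosd()
            stopcount += 1

With dnodeNumbers=5 and mnodeNums=3, the mnode pass in this case presumably touches the first three dnodes and repeats ten times (restartNumbers=10, stopRole='mnode').
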
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *

@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes

@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")

@@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}

dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)

tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)

@@ -103,14 +103,14 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")
print(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)

tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]

@@ -122,7 +122,7 @@ class TDTestCase:
for tr in threads:
tr.start()

tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":

@@ -130,7 +130,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()

@@ -142,7 +142,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)

# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):

@@ -153,14 +153,14 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1

for tr in threads:
tr.join()
tdLog.info("check dnode number:")
clusterComCheck.checkDnodes(dnodeNumbers)
tdSql.query("show databases")
tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers-2))

# tdLog.info("check DB Rows:")
# clusterComCheck.checkDbRows(allDbNumbers)
# tdLog.info("check DB Status on by on")

@@ -168,7 +168,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))

def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode')

@@ -177,4 +177,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
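
The database check above compares what "show databases" returns against allDbNumbers = dbNumbers * restartNumbers, the number of databases the worker threads were asked to create across all restart passes. A small sketch of that bookkeeping (the dbNumbers value is assumed; it is defined outside the visible hunks):

    restartNumbers = 2
    dbNumbers = 8                                   # assumed paraDict['dbNumbers']
    allDbNumbers = dbNumbers * restartNumbers       # 16 databases expected in total

    # "show databases" in TDengine 3.0 also lists information_schema and
    # performance_schema, which is presumably why the debug line subtracts 2
    # from both the observed row count and the expected total before comparing
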
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *

@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes

@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")

@@ -67,7 +67,7 @@ class TDTestCase:

def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table

for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)

@@ -106,13 +106,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}

dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1

tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)

@@ -127,7 +127,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")

@@ -148,7 +148,7 @@ class TDTestCase:
for tr in threads:
tr.start()

tdLog.info("Take turns stopping Mnodes ")

while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )

@@ -157,7 +157,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()

@@ -169,19 +169,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)

# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")

self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1

for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)

@@ -195,7 +195,7 @@ class TDTestCase:
# tdSql.checkRows(allStbNumbers)

def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode')

@@ -204,4 +204,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
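
The failure path in the hunk at -169 shows how these cases combine background insert threads with the restart loop: the workers are started first, the dnodes are cycled, and if the cluster does not report all dnodes ready again the workers are killed with the async-raise helper before the case exits. A compressed sketch of that flow (the function names are placeholders for pieces that live outside the visible hunks):

    import threading

    def run_with_workers(worker_fns, rolling_restart, check_dnodes, stop_thread):
        threads = [threading.Thread(target=fn) for fn in worker_fns]
        for tr in threads:
            tr.start()

        rolling_restart()                    # stop/start taosd in turns

        if not check_dnodes():               # cluster did not come back
            for tr in threads:
                stop_thread(tr)              # async-raise helper shown earlier
            raise SystemExit("one or more of dnodes failed to start")

        for tr in threads:
            tr.join()                        # otherwise wait for the inserts to finish
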
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *

@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes

@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")

@@ -82,13 +82,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}

dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allDbNumbers=(paraDict['dbNumbers']*restartNumbers)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)

tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")
tdSql.checkData(0,1,'%s:6030'%self.host)

@@ -103,7 +103,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")

@@ -124,7 +124,7 @@ class TDTestCase:
for tr in threads:
tr.start()

tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":

@@ -132,7 +132,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()

@@ -144,7 +144,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)

# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):

@@ -155,7 +155,7 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1

for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)

@@ -169,7 +169,7 @@ class TDTestCase:
# clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i))

def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='vnode')

@@ -178,4 +178,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
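
With the parameters this case passes in run(), the vnode branch of the restart loop resolves to a concrete slice of the cluster. A quick check of that index arithmetic:

    dnodeNumbers, mnodeNums = 5, 3
    vnodeNumbers = dnodeNumbers - mnodeNums          # 2 dnodes left for vnodes

    # stopRole == "vnode" restarts tdDnodes[i + mnodeNums] for i in range(vnodeNumbers),
    # i.e. dnode indices 3 and 4, the two dnodes on which the test creates no mnode
    vnode_indices = [i + mnodeNums for i in range(vnodeNumbers)]
    assert vnode_indices == [3, 4]
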
@@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os

from util.log import *
from util.sql import *

@@ -12,13 +12,13 @@ from util.dnodes import TDDnode
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

import time
import socket
import subprocess
from multiprocessing import Process
import threading
import time
import inspect
import ctypes

@@ -31,7 +31,7 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.host = socket.gethostname()
print(tdSql)

def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))

@@ -56,7 +56,7 @@ class TDTestCase:
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")

@@ -67,7 +67,7 @@ class TDTestCase:

def insertData(self,countstart,countstop):
# fisrt add data : db\stable\childtable\general table

for couti in range(countstart,countstop):
tdLog.debug("drop database if exists db%d" %couti)
tdSql.execute("drop database if exists db%d" %couti)

@@ -106,13 +106,13 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}

dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
allStbNumbers=(paraDict['stbNumbers']*restartNumbers)
dbNumbers = 1

print(tdSql)
tdLog.info("first check dnode and mnode")
tdSql.query("show dnodes;")

@@ -128,7 +128,7 @@ class TDTestCase:
tdSql.execute("create mnode on dnode 3")
clusterComCheck.checkMnodeStatus(3)

# add some error operations and
tdLog.info("Confirm the status of the dnode again")
tdSql.error("create mnode on dnode 2")
tdSql.query("show dnodes;")

@@ -137,12 +137,12 @@ class TDTestCase:

# create database and stable
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
tdLog.info("Take turns stopping Mnodes ")

tdDnodes=cluster.dnodes
stopcount =0
threads=[]

for i in range(restartNumbers):
stableName= '%s%d'%(paraDict['stbName'],i)
newTdSql=tdCom.newTdSql()

@@ -151,7 +151,7 @@ class TDTestCase:
for tr in threads:
tr.start()

tdLog.info("Take turns stopping Mnodes ")
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":

@@ -159,7 +159,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()

@@ -171,19 +171,19 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)

# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
tdLog.info("123")
else:
print("456")

self.stopThread(threads)
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1

for tr in threads:
tr.join()
clusterComCheck.checkDnodes(dnodeNumbers)

@@ -197,7 +197,7 @@ class TDTestCase:
tdSql.checkRows(allStbNumbers)

def run(self):
# print(self.master_dnode.cfgDict)
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='vnode')

@@ -206,4 +206,4 @@ class TDTestCase:
tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
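
One detail worth noting in the hunk at -137 above: each create/insert worker is handed its own connection via tdCom.newTdSql() instead of sharing the global tdSql cursor, presumably because that cursor is not safe to use from several threads at once. A self-contained sketch of the thread setup implied by those lines (the worker function, its argument list, and the stand-in connection factory are hypothetical; only stableName and the one-connection-per-worker idea come from the diff):

    import threading

    def new_connection():
        """Stand-in for tdCom.newTdSql(): one fresh connection per worker."""
        return object()

    def create_stable_worker(conn, db_name, stable_name):
        """Hypothetical worker body; the real target sits outside the visible hunks."""
        pass

    paraDict = {'stbName': 'stb', 'dbName': 'db0'}   # illustrative values
    restartNumbers = 2

    threads = []
    for i in range(restartNumbers):
        stableName = '%s%d' % (paraDict['stbName'], i)
        conn = new_connection()                      # mirrors newTdSql=tdCom.newTdSql()
        threads.append(threading.Thread(target=create_stable_worker,
                                        args=(conn, paraDict['dbName'], stableName)))
    for tr in threads:
        tr.start()
    for tr in threads:
        tr.join()
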
Some files were not shown because too many files have changed in this diff.