diff --git a/cmake/cmake.version b/cmake/cmake.version
index 03598519ed..5c5abe79bb 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.1.6")
+ SET(TD_VER_NUMBER "3.0.1.7")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in
index 7d48eb9d8a..e79c6b799f 100644
--- a/cmake/taosws_CMakeLists.txt.in
+++ b/cmake/taosws_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
- GIT_TAG 9843872
+ GIT_TAG f406d51
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md
index 253f1270f5..699b3ebe5f 100644
--- a/docs/en/07-develop/09-udf.md
+++ b/docs/en/07-develop/09-udf.md
@@ -205,13 +205,13 @@ Additional functions are defined in `taosudf.h` to make it easier to work with t
To use your user-defined function in TDengine, first compile it to a dynamically linked library (DLL).
-For example, the sample UDF `add_one.c` can be compiled into a DLL as follows:
+For example, the sample UDF `bit_and.c` can be compiled into a DLL as follows:
```bash
-gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
+gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
```
-The generated DLL file `add_one.so` can now be used to implement your function. Note: GCC 7.5 or later is required.
+The generated DLL file `libbitand.so` can now be used to implement your function. Note: GCC 7.5 or later is required.
## Manage and Use User-Defined Functions
After compiling your function into a DLL, you add it to TDengine. For more information, see [User-Defined Functions](../12-taos-sql/26-udf.md).
diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md
index 678c38a22e..f00ec90f57 100644
--- a/docs/en/12-taos-sql/19-limit.md
+++ b/docs/en/12-taos-sql/19-limit.md
@@ -23,7 +23,7 @@ The following characters cannot occur in a password: single quotation marks ('),
## General Limits
-- Maximum length of database name is 32 bytes
+- Maximum length of database name is 64 bytes
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
- The maximum length of a column name is 64 bytes.
@@ -35,7 +35,7 @@ The following characters cannot occur in a password: single quotation marks ('),
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
- The number of replicas can only be 1 or 3.
- The maximum length of a username is 23 bytes.
-- The maximum length of a password is 15 bytes.
+- The maximum length of a password is 128 bytes.
- The maximum number of rows depends on system resources.
- The maximum number of vnodes in a database is 1024.
diff --git a/docs/en/12-taos-sql/26-udf.md b/docs/en/12-taos-sql/26-udf.md
index e6199e8b31..977f3bcc08 100644
--- a/docs/en/12-taos-sql/26-udf.md
+++ b/docs/en/12-taos-sql/26-udf.md
@@ -62,7 +62,7 @@ SHOW FUNCTIONS;
The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. For example:
```sql
-SELECT X(c1,c2) FROM table/stable;
+SELECT bit_and(c1,c2) FROM table;
```
-The above SQL statement invokes function X for column c1 and c2. You can use query keywords like WHERE with user-defined functions.
+The above SQL statement invokes the function bit_and for columns c1 and c2 on the table. You can use query keywords like WHERE with user-defined functions.
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index eee2f94ee1..8bfdf72cc7 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
+## 3.0.1.7
+
+
+
## 3.0.1.6
diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md
index 6e8a040f8b..2bc22a4450 100644
--- a/docs/en/28-releases/02-tools.md
+++ b/docs/en/28-releases/02-tools.md
@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
+## 2.2.9
+
+
+
## 2.2.7
diff --git a/docs/examples/rust/nativeexample/Cargo.toml b/docs/examples/rust/nativeexample/Cargo.toml
index cdf739d357..5ecc407854 100644
--- a/docs/examples/rust/nativeexample/Cargo.toml
+++ b/docs/examples/rust/nativeexample/Cargo.toml
@@ -10,4 +10,4 @@ chrono = "0.4"
serde = { version = "1", features = ["derive"] }
tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }
-taos = { version = "0.*" }
+taos = { version = "0.4.8" }
diff --git a/docs/examples/rust/nativeexample/examples/stmt_example.rs b/docs/examples/rust/nativeexample/examples/stmt_example.rs
index 9cf8e8e1fc..7d5a7c0f2b 100644
--- a/docs/examples/rust/nativeexample/examples/stmt_example.rs
+++ b/docs/examples/rust/nativeexample/examples/stmt_example.rs
@@ -12,7 +12,10 @@ async fn main() -> anyhow::Result<()> {
// bind table name and tags
stmt.set_tbname_tags(
"d1001",
- &[Value::VarChar("California.SanFransico".into()), Value::Int(2)],
+ &[
+ Value::VarChar("California.SanFransico".into()),
+ Value::Int(2),
+ ],
)?;
// bind values.
let values = vec![
@@ -30,9 +33,9 @@ async fn main() -> anyhow::Result<()> {
ColumnView::from_floats(vec![0.33]),
];
stmt.bind(&values2)?;
-
+
stmt.add_batch()?;
-
+
// execute.
let rows = stmt.execute()?;
assert_eq!(rows, 2);
diff --git a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
index 11d6d4e004..7551ad46b1 100644
--- a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
+++ b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
@@ -50,7 +50,7 @@ async fn main() -> anyhow::Result<()> {
// create super table
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"),
// create topic for subscription
- format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
+ format!("CREATE TOPIC tmq_meters AS SELECT * FROM `meters`")
])
.await?;
@@ -64,13 +64,9 @@ async fn main() -> anyhow::Result<()> {
let mut consumer = tmq.build()?;
consumer.subscribe(["tmq_meters"]).await?;
- {
- let mut stream = consumer.stream();
-
- while let Some((offset, message)) = stream.try_next().await? {
- // get information from offset
-
- // the topic
+ consumer
+ .stream()
+ .try_for_each(|(offset, message)| async {
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();
@@ -78,20 +74,14 @@ async fn main() -> anyhow::Result<()> {
if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
- // one block for one table, get table name if needed
- let name = block.table_name();
let records: Vec = block.deserialize().try_collect()?;
- println!(
- "** table: {}, got {} records: {:#?}\n",
- name.unwrap(),
- records.len(),
- records
- );
+ println!("** read {} records: {:#?}\n", records.len(), records);
}
}
consumer.commit(offset).await?;
- }
- }
+ Ok(())
+ })
+ .await?;
consumer.unsubscribe().await;
diff --git a/docs/examples/rust/restexample/examples/insert_example.rs b/docs/examples/rust/restexample/examples/insert_example.rs
index 11a84f1661..4953a09b35 100644
--- a/docs/examples/rust/restexample/examples/insert_example.rs
+++ b/docs/examples/rust/restexample/examples/insert_example.rs
@@ -5,7 +5,6 @@ async fn main() -> anyhow::Result<()> {
let dsn = "ws://";
let taos = TaosBuilder::from_dsn(dsn)?.build()?;
-
taos.exec_many([
"DROP DATABASE IF EXISTS power",
"CREATE DATABASE power",
diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md
index 6faa7fed01..832310aa7c 100644
--- a/docs/zh/05-get-started/index.md
+++ b/docs/zh/05-get-started/index.md
@@ -18,7 +18,18 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
```
-### 加入 TDengine 官方社区
+## 学习 TDengine 知识地图
+
+TDengine 知识地图中涵盖了 TDengine 的各种知识点,揭示了各概念实体之间的调用关系和数据流向。学习和了解 TDengine 知识地图有助于你快速掌握 TDengine 的知识体系。
+
+
+
+
+图 1. TDengine 知识地图
+
+
+
+## 加入 TDengine 官方社区
微信扫描以下二维码,学习了解 TDengine 的最新技术,与大家共同交流物联网大数据技术应用、TDengine 使用问题和技巧等话题。
diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md
index 3239eae49b..7a3a3822d5 100644
--- a/docs/zh/07-develop/09-udf.md
+++ b/docs/zh/07-develop/09-udf.md
@@ -205,13 +205,13 @@ typedef struct SUdfInterBuf {
用户定义函数的 C 语言源代码无法直接被 TDengine 系统使用,而是需要先编译为 动态链接库,之后才能载入 TDengine 系统。
-例如,按照上一章节描述的规则准备好了用户定义函数的源代码 add_one.c,以 Linux 为例可以执行如下指令编译得到动态链接库文件:
+例如,按照上一章节描述的规则准备好了用户定义函数的源代码 bit_and.c,以 Linux 为例可以执行如下指令编译得到动态链接库文件:
```bash
-gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
+gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
```
-这样就准备好了动态链接库 add_one.so 文件,可以供后文创建 UDF 时使用了。为了保证可靠的系统运行,编译器 GCC 推荐使用 7.5 及以上版本。
+这样就准备好了动态链接库 libbitand.so 文件,可以供后文创建 UDF 时使用了。为了保证可靠的系统运行,编译器 GCC 推荐使用 7.5 及以上版本。
## 管理和使用UDF
编译好的UDF,还需要将其加入到系统才能被正常的SQL调用。关于如何管理和使用UDF,参见[UDF使用说明](../12-taos-sql/26-udf.md)
diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md
index a9743addda..7b6692f1b7 100644
--- a/docs/zh/12-taos-sql/19-limit.md
+++ b/docs/zh/12-taos-sql/19-limit.md
@@ -24,19 +24,19 @@ description: 合法字符集和命名中的限制规则
## 一般限制
-- 数据库名最大长度为 32
-- 表名最大长度为 192,不包括数据库名前缀和分隔符
+- 数据库名最大长度为 64 字节
+- 表名最大长度为 192 字节,不包括数据库名前缀和分隔符
- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
-- 列名最大长度为 64
+- 列名最大长度为 64 字节
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
-- 标签名最大长度为 64
+- 标签名最大长度为 64 字节
- 最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB
- SQL 语句最大长度 1048576 个字符
- SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
- 数据库的副本数只能设置为 1 或 3
-- 用户名的最大长度是 23 个字节
-- 用户密码的最大长度是 15 个字节
+- 用户名的最大长度是 23 字节
+- 用户密码的最大长度是 128 字节
- 总数据行数取决于可用资源
- 单个数据库的虚拟结点数上限为 1024
diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md
index 6dc1b6eb5f..cb349109a7 100644
--- a/docs/zh/12-taos-sql/26-udf.md
+++ b/docs/zh/12-taos-sql/26-udf.md
@@ -63,7 +63,7 @@ SHOW FUNCTIONS;
在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如:
```sql
-SELECT X(c1,c2) FROM table/stable;
+SELECT bit_and(c1,c2) FROM table;
```
-表示对名为 c1, c2 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
+表示对表 table 上名为 c1, c2 的数据列调用名为 bit_and 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index 4108212c55..fd2be899eb 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
+## 3.0.1.7
+
+
+
## 3.0.1.6
diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md
index 28e0d4bca9..3f73b53fab 100644
--- a/docs/zh/28-releases/02-tools.md
+++ b/docs/zh/28-releases/02-tools.md
@@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3";
+## 2.2.9
+
+
+
## 2.2.7
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 69d272386e..02d4c2279c 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -3199,6 +3199,14 @@ typedef struct {
int32_t tSerializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq);
int32_t tDeserializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq);
+static FORCE_INLINE void tFreeSBatchReqMsg(void* msg) {
+ if (NULL == msg) {
+ return;
+ }
+ SBatchMsg* pMsg = (SBatchMsg*)msg;
+ taosMemoryFree(pMsg->msg);
+}
+
int32_t tSerializeSBatchRsp(void *buf, int32_t bufLen, SBatchRsp *pRsp);
int32_t tDeserializeSBatchRsp(void *buf, int32_t bufLen, SBatchRsp *pRsp);
diff --git a/include/common/tvariant.h b/include/common/tvariant.h
index 0507934e6a..130945cce5 100644
--- a/include/common/tvariant.h
+++ b/include/common/tvariant.h
@@ -30,6 +30,7 @@ typedef struct SVariant {
int64_t i;
uint64_t u;
double d;
+ float f;
char *pz;
TdUcs4 *ucs4;
SArray *arr; // only for 'in' query to hold value list, not value for a field
@@ -47,7 +48,7 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc);
int32_t taosVariantCompare(const SVariant *p1, const SVariant *p2);
-char *taosVariantGet(SVariant *pVar, int32_t type);
+char *taosVariantGet(SVariant *pVar, int32_t type);
#ifdef __cplusplus
}
diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h
index 9443df5e14..59f030a60c 100644
--- a/include/libs/stream/streamState.h
+++ b/include/libs/stream/streamState.h
@@ -27,16 +27,21 @@ typedef struct SStreamTask SStreamTask;
typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);
-// incremental state storage
-typedef struct {
+typedef struct STdbState {
SStreamTask* pOwner;
TDB* db;
TTB* pStateDb;
TTB* pFuncStateDb;
TTB* pFillStateDb; // todo refactor
TTB* pSessionStateDb;
+ TTB* pParNameDb;
TXN txn;
- int32_t number;
+} STdbState;
+
+// incremental state storage
+typedef struct {
+ STdbState* pTdbState;
+ int32_t number;
} SStreamState;
SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages);
@@ -44,6 +49,7 @@ void streamStateClose(SStreamState* pState);
int32_t streamStateBegin(SStreamState* pState);
int32_t streamStateCommit(SStreamState* pState);
int32_t streamStateAbort(SStreamState* pState);
+void streamStateDestroy(SStreamState* pState);
typedef struct {
TBC* pCur;
@@ -99,6 +105,9 @@ int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
+int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
+int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal);
+
#if 0
char* streamStateSessionDump(SStreamState* pState);
#endif
diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h
index 1c490852f9..ab328c6ad5 100644
--- a/include/libs/stream/tstreamUpdate.h
+++ b/include/libs/stream/tstreamUpdate.h
@@ -47,7 +47,7 @@ typedef struct SUpdateInfo {
SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
-void updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol);
+TSKEY updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol);
bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid);
void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 636decc60b..e5d0bcb249 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -352,6 +352,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_STB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x061A)
#define TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER TAOS_DEF_ERROR_CODE(0, 0x061B)
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR TAOS_DEF_ERROR_CODE(0, 0x061C)
+#define TSDB_CODE_TDB_TABLE_IN_OTHER_STABLE TAOS_DEF_ERROR_CODE(0, 0x061D)
// query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700)
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 556ad6b680..48dedd3e3e 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -489,6 +489,9 @@ enum {
#define MAX_META_MSG_IN_BATCH 1048576
#define MAX_META_BATCH_RSP_SIZE (1 * 1048576 * 1024)
+// sort page size by default
+#define DEFAULT_PAGESIZE 4096
+
#ifdef __cplusplus
}
#endif
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index eeb089d1ab..e22aa85c97 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -73,10 +73,10 @@
# compressColData -1
# system time zone
-# timezone Asia/Shanghai (CST, +0800)
+# timezone UTC-8
# system time zone (for windows 10)
-# timezone UTC-8
+# timezone Asia/Shanghai (CST, +0800)
# system locale
# locale en_US.UTF-8
@@ -179,4 +179,4 @@
# metaDebugFlag 131
# generate core file when service crash
-# enableCoreFile 1
\ No newline at end of file
+# enableCoreFile 1
diff --git a/packaging/docker/DockerfileCloud b/packaging/docker/DockerfileCloud
index 2b060c1b91..21e387bab3 100644
--- a/packaging/docker/DockerfileCloud
+++ b/packaging/docker/DockerfileCloud
@@ -7,6 +7,9 @@ ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}
+RUN apt update
+RUN apt install -y curl
+
COPY ${pkgFile} /root/
ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
diff --git a/packaging/docker/run.sh b/packaging/docker/run.sh
old mode 100644
new mode 100755
index 2700b0b060..b02b3cbd8a
--- a/packaging/docker/run.sh
+++ b/packaging/docker/run.sh
@@ -1,16 +1,160 @@
#!/bin/bash
+
TAOS_RUN_TAOSBENCHMARK_TEST_ONCE=0
+#ADMIN_URL=${ADMIN_URL:-http://172.26.10.84:10001}
+TAOSD_STARTUP_TIMEOUT_SECOND=${TAOSD_STARTUP_TIMEOUT_SECOND:-160}
+TAOS_TIMEOUT_SECOND=${TAOS_TIMEOUT_SECOND:-5}
+BACKUP_CORE_FOLDER=/var/log/corefile
+ALERT_URL=app/system/alert/add
+
+echo "ADMIN_URL: ${ADMIN_URL}"
+echo "TAOS_TIMEOUT_SECOND: ${TAOS_TIMEOUT_SECOND}"
+
+function set_service_state() {
+ #echo "set service state: $1, $2"
+ service_state="$1"
+ service_msg="$2"
+}
+set_service_state "init" "ok"
+app_name=`hostname |cut -d\- -f1`
+
+function check_taosd() {
+ timeout $TAOS_TIMEOUT_SECOND taos -s "show databases;" >/dev/null
+ local ret=$?
+ if [ $ret -ne 0 ]; then
+ echo "`date` check taosd error $ret"
+ if [ "x$1" != "xignore" ]; then
+ set_service_state "error" "taos check failed $ret"
+ fi
+ else
+ set_service_state "ready" "ok"
+ fi
+}
+function post_error_msg() {
+ if [ ! -z "${ADMIN_URL}" ]; then
+ taos_version=`taos --version`
+ echo "app_name: ${app_name}"
+ echo "service_state: ${service_state}"
+ echo "`date` service_msg: ${service_msg}"
+ echo "${taos_version}"
+ curl --connect-timeout 10 --max-time 20 -X POST -H "Content-Type: application/json" \
+ -d"{\"appName\":\"${app_name}\",\
+ \"alertLevel\":\"${service_state}\",\
+ \"taosVersion\":\"${taos_version}\",\
+ \"alertMsg\":\"${service_msg}\"}" \
+ ${ADMIN_URL}/${ALERT_URL}
+ fi
+}
+function check_taosd_exit_type() {
+ local core_pattern=`cat /proc/sys/kernel/core_pattern`
+ echo "$core_pattern" | grep -q "^/"
+ if [ $? -eq 0 ]; then
+ core_folder=`dirname $core_pattern`
+ core_prefix=`basename $core_pattern | sed "s/%.*//"`
+ else
+ core_folder=`pwd`
+ core_prefix="$core_pattern"
+ fi
+ local core_files=`ls $core_folder | grep "^${core_prefix}"`
+ if [ ! -z "$core_files" ]; then
+ # move core files to another folder
+ mkdir -p ${BACKUP_CORE_FOLDER}
+ mv ${core_folder}/${core_prefix}* ${BACKUP_CORE_FOLDER}/
+ set_service_state "error" "taosd exit with core file"
+ else
+ set_service_state "error" "taosd exit without core file"
+ fi
+}
+disk_usage_level=(60 80 99)
+current_disk_level=0
+disk_state="ok"
+disk_msg="ok"
+get_usage_ok="yes"
+function post_disk_error_msg() {
+ if [ ! -z "${ADMIN_URL}" ]; then
+ taos_version=`taos --version`
+ echo "app_name: ${app_name}"
+ echo "disk_state: ${disk_state}"
+ echo "`date` disk_msg: ${disk_msg}"
+ echo "${taos_version}"
+ curl --connect-timeout 10 --max-time 20 -X POST -H "Content-Type: application/json" \
+ -d"{\"appName\":\"${app_name}\",\
+ \"alertLevel\":\"${disk_state}\",\
+ \"taosVersion\":\"${taos_version}\",\
+ \"alertMsg\":\"${disk_msg}\"}" \
+ ${ADMIN_URL}/${ALERT_URL}
+ fi
+}
+function check_disk() {
+ local folder=`cat /etc/taos/taos.cfg|grep -v "^#"|grep dataDir|awk '{print $NF}'`
+ if [ -z "$folder" ]; then
+ folder="/var/lib/taos"
+ fi
+ local mount_point="$folder"
+ local usage=""
+ while [ -z "$usage" ]; do
+ usage=`df -h|grep -w "${mount_point}"|awk '{print $5}'|grep -v Use|sed "s/%$//"`
+ if [ "x${mount_point}" = "x/" ]; then
+ break
+ fi
+ mount_point=`dirname ${mount_point}`
+ done
+ if [ -z "$usage" ]; then
+ disk_state="error"
+ disk_msg="cannot get disk usage"
+ if [ "$get_usage_ok" = "yes" ]; then
+ post_disk_error_msg
+ get_usage_ok="no"
+ fi
+ else
+ get_usage_ok="yes"
+ local current_level=0
+ for level in ${disk_usage_level[*]}; do
+ if [ ${usage} -ge ${level} ]; then
+ disk_state="error"
+ disk_msg="disk usage over ${level}%"
+ current_level=${level}
+ fi
+ done
+ if [ ${current_level} -gt ${current_disk_level} ]; then
+ post_disk_error_msg
+ elif [ ${current_level} -lt ${current_disk_level} ]; then
+ echo "disk usage reduced from ${current_disk_level} to ${current_level}"
+ fi
+ current_disk_level=${current_level}
+ fi
+}
+function run_taosd() {
+ taosd
+ set_service_state "error" "taosd exit"
+ # post error msg
+ # check crash or OOM
+ check_taosd_exit_type
+ post_error_msg
+}
+function print_service_state_change() {
+ if [ "x$1" != "x${service_state}" ]; then
+ echo "`date` service state: ${service_state}, ${service_msg}"
+ fi
+}
+taosd_start_time=`date +%s`
while ((1))
do
+ check_disk
# echo "outer loop: $a"
- sleep 10
- output=`taos -k`
- status=${output:0:1}
+ output=`timeout $TAOS_TIMEOUT_SECOND taos -k`
+ if [ -z "${output}" ]; then
+ echo "`date` taos -k error"
+ status=""
+ else
+ status=${output:0:1}
+ fi
# echo $output
# echo $status
if [ "$status"x = "0"x ]
then
- taosd &
+ # taosd_start_time=`date +%s`
+ run_taosd &
fi
# echo "$status"x "$TAOS_RUN_TAOSBENCHMARK_TEST"x "$TAOS_RUN_TAOSBENCHMARK_TEST_ONCE"x
if [ "$status"x = "2"x ] && [ "$TAOS_RUN_TAOSBENCHMARK_TEST"x = "1"x ] && [ "$TAOS_RUN_TAOSBENCHMARK_TEST_ONCE"x = "0"x ]
@@ -24,13 +168,37 @@ do
taos -s "select stable_name from information_schema.ins_stables where db_name = 'test';"|grep -q -w meters
if [ $? -ne 0 ]; then
taosBenchmark -y -t 1000 -n 1000 -S 900000
- taos -s "create user admin_user pass 'NDS65R6t' sysinfo 0;"
- taos -s "GRANT ALL on test.* to admin_user;"
+ taos -s "create user admin_user pass 'NDS65R6t' sysinfo 0;"
+ taos -s "GRANT ALL on test.* to admin_user;"
fi
fi
+ # check taosd status
+ if [ "$service_state" = "ready" ]; then
+ # check taosd status
+ check_taosd
+ print_service_state_change "ready"
+ if [ "$service_state" = "error" ]; then
+ post_error_msg
+ fi
+ elif [ "$service_state" = "init" ]; then
+ check_taosd "ignore"
+ # check timeout
+ current_time=`date +%s`
+ time_elapsed=$(( current_time - taosd_start_time ))
+ if [ ${time_elapsed} -gt ${TAOSD_STARTUP_TIMEOUT_SECOND} ]; then
+ set_service_state "error" "taosd startup timeout"
+ post_error_msg
+ fi
+ print_service_state_change "init"
+ elif [ "$service_state" = "error" ]; then
+ # check taosd status
+ check_taosd
+ print_service_state_change "error"
+ fi
# check taosadapter
nc -z localhost 6041
if [ $? -ne 0 ]; then
- taosadapter &
+ taosadapter &
fi
+ sleep 10
done
diff --git a/packaging/tools/tdengine.iss b/packaging/tools/tdengine.iss
index 1c0c105179..2e9c46d06b 100644
--- a/packaging/tools/tdengine.iss
+++ b/packaging/tools/tdengine.iss
@@ -63,7 +63,7 @@ Source: {#MyAppSourceDir}{#MyAppTaosdemoExeName}; DestDir: "{app}"; Flags: igNor
[run]
Filename: {sys}\sc.exe; Parameters: "create taosd start= DEMAND binPath= ""C:\\TDengine\\taosd.exe --win_service""" ; Flags: runhidden
-Filename: {sys}\sc.exe; Parameters: "create taosadapter start= DEMAND binPath= ""C:\\TDengine\\taosadapter.exe --win_service""" ; Flags: runhidden
+Filename: {sys}\sc.exe; Parameters: "create taosadapter start= DEMAND binPath= ""C:\\TDengine\\taosadapter.exe""" ; Flags: runhidden
[UninstallRun]
RunOnceId: "stoptaosd"; Filename: {sys}\sc.exe; Parameters: "stop taosd" ; Flags: runhidden
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index f7b1196248..3c2a5377e3 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1403,6 +1403,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
pBlock->info = pDataBlock->info;
pBlock->info.rows = 0;
pBlock->info.capacity = 0;
+ pBlock->info.rowSize = 0;
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 1164c88477..5dfcb62fad 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -4510,7 +4510,7 @@ int32_t tDeserializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq) {
if (tDecodeI32(&decoder, &msg.msgIdx) < 0) return -1;
if (tDecodeI32(&decoder, &msg.msgType) < 0) return -1;
if (tDecodeI32(&decoder, &msg.msgLen) < 0) return -1;
- if (tDecodeBinary(&decoder, (uint8_t**)&msg.msg, NULL) < 0) return -1;
+ if (tDecodeBinaryAlloc(&decoder, &msg.msg, NULL) < 0) return -1;
if (NULL == taosArrayPush(pReq->pMsgs, &msg)) return -1;
}
diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c
index 1b140e6c04..a106a09a69 100644
--- a/source/common/src/ttime.c
+++ b/source/common/src/ttime.c
@@ -436,21 +436,24 @@ int64_t convertTimePrecision(int64_t utime, int32_t fromPrecision, int32_t toPre
ASSERT(toPrecision == TSDB_TIME_PRECISION_MILLI || toPrecision == TSDB_TIME_PRECISION_MICRO ||
toPrecision == TSDB_TIME_PRECISION_NANO);
- double tempResult = (double)utime;
-
switch (fromPrecision) {
case TSDB_TIME_PRECISION_MILLI: {
switch (toPrecision) {
case TSDB_TIME_PRECISION_MILLI:
return utime;
case TSDB_TIME_PRECISION_MICRO:
- tempResult *= 1000;
- utime *= 1000;
- goto end_;
+ if (utime > INT64_MAX / 1000) {
+ return INT64_MAX;
+ }
+ return utime * 1000;
case TSDB_TIME_PRECISION_NANO:
- tempResult *= 1000000;
- utime *= 1000000;
- goto end_;
+ if (utime > INT64_MAX / 1000000) {
+ return INT64_MAX;
+ }
+ return utime * 1000000;
+ default:
+ ASSERT(0);
+ return utime;
}
} // end from milli
case TSDB_TIME_PRECISION_MICRO: {
@@ -460,9 +463,13 @@ int64_t convertTimePrecision(int64_t utime, int32_t fromPrecision, int32_t toPre
case TSDB_TIME_PRECISION_MICRO:
return utime;
case TSDB_TIME_PRECISION_NANO:
- tempResult *= 1000;
- utime *= 1000;
- goto end_;
+ if (utime > INT64_MAX / 1000) {
+ return INT64_MAX;
+ }
+ return utime * 1000;
+ default:
+ ASSERT(0);
+ return utime;
}
} // end from micro
case TSDB_TIME_PRECISION_NANO: {
@@ -473,17 +480,17 @@ int64_t convertTimePrecision(int64_t utime, int32_t fromPrecision, int32_t toPre
return utime / 1000;
case TSDB_TIME_PRECISION_NANO:
return utime;
+ default:
+ ASSERT(0);
+ return utime;
}
} // end from nano
default: {
- assert(0);
+ ASSERT(0);
return utime; // only to pass windows compilation
}
} // end switch fromPrecision
-end_:
- if (tempResult >= (double)INT64_MAX) return INT64_MAX;
- if (tempResult <= (double)INT64_MIN) return INT64_MIN; // INT64_MIN means NULL
return utime;
}
@@ -599,18 +606,33 @@ int32_t convertStringToTimestamp(int16_t type, char* inputData, int64_t timePrec
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
switch (unit) {
case 's':
+ if (val > INT64_MAX / MILLISECOND_PER_SECOND) {
+ return -1;
+ }
(*result) = convertTimePrecision(val * MILLISECOND_PER_SECOND, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'm':
+ if (val > INT64_MAX / MILLISECOND_PER_MINUTE) {
+ return -1;
+ }
(*result) = convertTimePrecision(val * MILLISECOND_PER_MINUTE, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'h':
+ if (val > INT64_MAX / MILLISECOND_PER_MINUTE) {
+ return -1;
+ }
(*result) = convertTimePrecision(val * MILLISECOND_PER_HOUR, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'd':
+ if (val > INT64_MAX / MILLISECOND_PER_DAY) {
+ return -1;
+ }
(*result) = convertTimePrecision(val * MILLISECOND_PER_DAY, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'w':
+ if (val > INT64_MAX / MILLISECOND_PER_WEEK) {
+ return -1;
+ }
(*result) = convertTimePrecision(val * MILLISECOND_PER_WEEK, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'a':
@@ -650,7 +672,7 @@ int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* dura
/* get the basic numeric value */
int64_t timestamp = taosStr2Int64(token, &endPtr, 10);
- if (errno != 0) {
+ if (timestamp < 0 || errno != 0) {
return -1;
}
@@ -668,7 +690,7 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
/* get the basic numeric value */
*duration = taosStr2Int64(token, NULL, 10);
- if (errno != 0) {
+ if (*duration < 0 || errno != 0) {
return -1;
}
diff --git a/source/common/src/tvariant.c b/source/common/src/tvariant.c
index 8d4c17a821..65e9a767f5 100644
--- a/source/common/src/tvariant.c
+++ b/source/common/src/tvariant.c
@@ -109,7 +109,7 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
}
case TSDB_DATA_TYPE_FLOAT: {
pVar->nLen = tDataTypes[type].bytes;
- pVar->d = GET_FLOAT_VAL(pz);
+ pVar->f = GET_FLOAT_VAL(pz);
break;
}
case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw binary bits length
@@ -223,12 +223,18 @@ int32_t taosVariantCompare(const SVariant *p1, const SVariant *p2) {
} else {
return p1->nLen > p2->nLen ? 1 : -1;
}
- } else if (p1->nType == TSDB_DATA_TYPE_FLOAT || p1->nType == TSDB_DATA_TYPE_DOUBLE) {
+ } else if (p1->nType == TSDB_DATA_TYPE_DOUBLE) {
if (p1->d == p2->d) {
return 0;
} else {
return p1->d > p2->d ? 1 : -1;
}
+ } else if (p1->nType == TSDB_DATA_TYPE_FLOAT) {
+ if (p1->f == p2->f) {
+ return 0;
+ } else {
+ return p1->f > p2->f ? 1 : -1;
+ }
} else if (IS_UNSIGNED_NUMERIC_TYPE(p1->nType)) {
if (p1->u == p2->u) {
return 0;
@@ -259,8 +265,9 @@ char *taosVariantGet(SVariant *pVar, int32_t type) {
case TSDB_DATA_TYPE_UBIGINT:
return (char *)&pVar->u;
case TSDB_DATA_TYPE_DOUBLE:
- case TSDB_DATA_TYPE_FLOAT:
return (char *)&pVar->d;
+ case TSDB_DATA_TYPE_FLOAT:
+ return (char *)&pVar->f;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_JSON:
return (char *)pVar->pz;
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
index e50b527bac..d3d92e1bbf 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c
@@ -56,6 +56,9 @@ static void mmProcessRpcMsg(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) {
if (code != 0 && terrno != 0) code = terrno;
mmSendRsp(pMsg, code);
+ } else {
+ rpcFreeCont(pMsg->info.rsp);
+ pMsg->info.rsp = NULL;
}
if (code == TSDB_CODE_RPC_REDIRECT) {
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index dcfc046a1e..b5ddd6f279 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -577,9 +577,9 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->info.rsp = rpcMallocCont(contLen);
- pMsg->info.hasEpSet = 1;
if (pMsg->info.rsp != NULL) {
tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
+ pMsg->info.hasEpSet = 1;
pMsg->info.rspLen = contLen;
terrno = TSDB_CODE_RPC_REDIRECT;
} else {
diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c
index b7a946d4e4..5278fc7761 100644
--- a/source/dnode/mnode/impl/src/mndQuery.c
+++ b/source/dnode/mnode/impl/src/mndQuery.c
@@ -157,6 +157,7 @@ _exit:
mError("mnd get batch meta failed cause of %s", tstrerror(code));
}
+ taosArrayDestroyEx(batchReq.pMsgs, tFreeSBatchReqMsg);
taosArrayDestroyEx(batchRsp.pRsps, mnodeFreeSBatchRspMsg);
return code;
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index db878d72b9..27c58dfba1 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -1277,9 +1277,11 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
pAction->id);
code = mndTransSync(pMnode, pTrans);
if (code != 0) {
+ pTrans->redoActionPos--;
pTrans->code = terrno;
mError("trans:%d, %s:%d is executed and failed to sync to other mnodes since %s", pTrans->id,
mndTransStr(pAction->stage), pAction->id, terrstr());
+ break;
}
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
mInfo("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index e00d0d955e..7901093315 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -1739,6 +1739,7 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
code = 0;
_OVER:
+ taosArrayDestroy(pArray);
mndTransDrop(pTrans);
sdbFreeRaw(pRaw);
return code;
@@ -1907,6 +1908,7 @@ static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) {
}
_OVER:
+ taosHashCleanup(pBalancedVgroups);
mndTransDrop(pTrans);
return code;
}
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 5921adfbfa..8250d68ae1 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -403,6 +403,11 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMe
// validate req
metaReaderInit(&mr, pMeta, 0);
if (metaGetTableEntryByName(&mr, pReq->name) == 0) {
+ if (pReq->type == TSDB_CHILD_TABLE && pReq->ctb.suid != mr.me.ctbEntry.suid) {
+ terrno = TSDB_CODE_TDB_TABLE_IN_OTHER_STABLE;
+ metaReaderClear(&mr);
+ return -1;
+ }
pReq->uid = mr.me.uid;
if (pReq->type == TSDB_CHILD_TABLE) {
pReq->ctb.suid = mr.me.ctbEntry.suid;
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index 45873f2744..5c5ba1205e 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -398,6 +398,35 @@ bool tqNextDataBlock(STqReader* pReader) {
return false;
}
+// Builds a reduced schema in pDst containing only the columns of pSrc whose mask[i] != 0,
+int32_t tqMaskBlock(SSchemaWrapper* pDst, SSDataBlock* pBlock, const SSchemaWrapper* pSrc, char* mask) {
+  int32_t code;
+
+  int32_t cnt = 0;  // number of columns selected by the mask
+  for (int32_t i = 0; i < pSrc->nCols; i++) {
+    cnt += mask[i];  // mask entries are 0/1 flags, so the sum is the selected-column count
+  }
+
+  pDst->nCols = cnt;
+  pDst->pSchema = taosMemoryCalloc(cnt, sizeof(SSchema));
+  if (pDst->pSchema == NULL) {
+    return -1;
+  }
+
+  int32_t j = 0;  // write cursor into the compacted pDst->pSchema
+  for (int32_t i = 0; i < pSrc->nCols; i++) {
+    if (mask[i]) {
+      pDst->pSchema[j++] = pSrc->pSchema[i];
+      SColumnInfoData colInfo =
+          createColumnInfoData(pSrc->pSchema[i].type, pSrc->pSchema[i].bytes, pSrc->pSchema[i].colId);
+      code = blockDataAppendColInfo(pBlock, &colInfo);
+      if (code != 0) {
+        taosMemoryFree(pDst->pSchema); pDst->pSchema = NULL; return -1;  // don't leak/dangle pDst->pSchema on failure
+      }
+    }
+  }
+  return 0;
+}
+
bool tqNextDataBlockFilterOut(STqReader* pHandle, SHashObj* filterOutUids) {
while (1) {
if (tGetSubmitMsgNext(&pHandle->msgIter, &pHandle->pBlock) < 0) {
@@ -527,6 +556,119 @@ FAIL:
return -1;
}
+// Splits one submit-msg block into multiple SSDataBlocks, starting a new block whenever
+int32_t tqSplitRetrieveDataBlock(STqReader* pReader, SArray* blocks, SArray* schemas) {
+  int32_t sversion = htonl(pReader->pBlock->sversion);
+
+  if (pReader->cachedSchemaSuid == 0 || pReader->cachedSchemaVer != sversion ||
+      pReader->cachedSchemaSuid != pReader->msgIter.suid) {  // cached schema stale: (re)load both schema forms
+    if (pReader->pSchema) taosMemoryFree(pReader->pSchema);
+    pReader->pSchema = metaGetTbTSchema(pReader->pVnodeMeta, pReader->msgIter.uid, sversion, 1);
+    if (pReader->pSchema == NULL) {
+      tqWarn("cannot found tsschema for table: uid:%" PRId64 " (suid:%" PRId64 "), version %d, possibly dropped table",
+             pReader->msgIter.uid, pReader->msgIter.suid, pReader->cachedSchemaVer);
+      /*ASSERT(0);*/
+      pReader->cachedSchemaSuid = 0;  // invalidate cache so the next call retries the lookup
+      terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
+      return -1;
+    }
+
+    if (pReader->pSchemaWrapper) tDeleteSSchemaWrapper(pReader->pSchemaWrapper);
+    pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, pReader->msgIter.uid, sversion, 1);
+    if (pReader->pSchemaWrapper == NULL) {
+      tqWarn("cannot found schema wrapper for table: suid:%" PRId64 ", version %d, possibly dropped table",
+             pReader->msgIter.uid, pReader->cachedSchemaVer);  // NOTE(review): uid is printed under the "suid:" label — TODO confirm intended
+      /*ASSERT(0);*/
+      pReader->cachedSchemaSuid = 0;
+      terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
+      return -1;
+    }
+    pReader->cachedSchemaVer = sversion;
+    pReader->cachedSchemaSuid = pReader->msgIter.suid;
+  }
+
+  STSchema*       pTschema = pReader->pSchema;
+  SSchemaWrapper* pSchemaWrapper = pReader->pSchemaWrapper;
+
+  int32_t colAtMost = pSchemaWrapper->nCols;
+
+  int32_t curRow = 0;
+
+  char* assigned = taosMemoryCalloc(1, pSchemaWrapper->nCols);  // per-column "has value" flags for the current block
+  if (assigned == NULL) return -1;  // fixed: was `if (assigned)`, which returned -1 on success and dereferenced NULL on OOM
+
+  tInitSubmitBlkIter(&pReader->msgIter, pReader->pBlock, &pReader->blkIter);
+  STSRowIter iter = {0};
+  tdSTSRowIterInit(&iter, pTschema);
+  STSRow* row;
+
+  while ((row = tGetSubmitBlkNext(&pReader->blkIter)) != NULL) {
+    bool buildNew = false;  // set when this row's assigned-column pattern differs from the previous row's
+    tdSTSRowIterReset(&iter, row);
+
+    for (int32_t i = 0; i < colAtMost; i++) {
+      SCellVal sVal = {0};
+      if (!tdSTSRowIterFetch(&iter, pSchemaWrapper->pSchema[i].colId, pSchemaWrapper->pSchema[i].type, &sVal)) {
+        break;
+      }
+      if (curRow == 0) {  // first row always establishes a fresh pattern and a fresh block
+        assigned[i] = sVal.valType != TD_VTYPE_NONE;
+        buildNew = true;
+      } else {
+        bool currentRowAssigned = sVal.valType != TD_VTYPE_NONE;
+        if (currentRowAssigned != assigned[i]) {
+          assigned[i] = currentRowAssigned;
+          buildNew = true;
+        }
+      }
+    }
+
+    if (buildNew) {  // column pattern changed: open a new output block with the masked schema
+      SSDataBlock    block;
+      SSchemaWrapper sw;
+      if (tqMaskBlock(&sw, &block, pSchemaWrapper, assigned) < 0) {
+        goto FAIL;
+      }
+
+      taosArrayPush(blocks, &block);
+      taosArrayPush(schemas, &sw);
+    }
+
+    SSDataBlock* pBlock = taosArrayGetLast(blocks);
+    pBlock->info.uid = pReader->msgIter.uid;
+    pBlock->info.rows = pReader->msgIter.numOfRows;  // NOTE(review): total rows of the submit block, not rows appended to THIS split block — TODO confirm
+    pBlock->info.version = pReader->pMsg->version;
+
+    if (blockDataEnsureCapacity(pBlock, pReader->msgIter.numOfRows - curRow) < 0) {
+      terrno = TSDB_CODE_OUT_OF_MEMORY;
+      goto FAIL;
+    }
+
+    tdSTSRowIterInit(&iter, pTschema);  // re-init so the second pass fetches by the block's (masked) column ids
+    for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
+      SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
+      SCellVal         sVal = {0};
+
+      if (!tdSTSRowIterFetch(&iter, pColData->info.colId, pColData->info.type, &sVal)) {
+        break;
+      }
+
+      ASSERT(sVal.valType != TD_VTYPE_NONE);  // masked columns were selected precisely because they are not NONE
+
+      if (colDataAppend(pColData, curRow, sVal.val, sVal.valType != TD_VTYPE_NORM) < 0) {
+        goto FAIL;
+      }
+    }
+    curRow++;
+  }
+
+  taosMemoryFree(assigned);
+  return 0;
+
+FAIL:
+  taosMemoryFree(assigned);
+  return -1;
+}
+
void tqReaderSetColIdList(STqReader* pReader, SArray* pColIdList) { pReader->pColIdList = pColIdList; }
int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c
index bb20a9b012..f71b5b6706 100644
--- a/source/dnode/vnode/src/tsdb/tsdbOpen.c
+++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c
@@ -49,7 +49,7 @@ int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKee
pTsdb->path = (char *)&pTsdb[1];
snprintf(pTsdb->path, TD_PATH_MAX, "%s%s%s", pVnode->path, TD_DIRSEP, dir);
- taosRealPath(pTsdb->path, NULL, slen);
+ // taosRealPath(pTsdb->path, NULL, slen);
pTsdb->pVnode = pVnode;
taosThreadRwlockInit(&pTsdb->rwLock, NULL);
if (!pKeepCfg) {
diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c
index 0aa2c6ab83..b0cf09662c 100644
--- a/source/dnode/vnode/src/tsdb/tsdbUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -1698,7 +1698,7 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
size += pBlockCol->szBitmap;
// offset
- if (IS_VAR_DATA_TYPE(pColData->type)) {
+ if (IS_VAR_DATA_TYPE(pColData->type) && pColData->flag != (HAS_NULL | HAS_NONE)) {
code = tsdbCmprData((uint8_t *)pColData->aOffset, sizeof(int32_t) * pColData->nVal, TSDB_DATA_TYPE_INT, cmprAlg,
ppOut, nOut + size, &pBlockCol->szOffset, ppBuf);
if (code) goto _exit;
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 8d6ebe5c14..8e9aab0afd 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -366,6 +366,7 @@ _exit:
qError("vnd get batch meta failed cause of %s", tstrerror(code));
}
+ taosArrayDestroyEx(batchReq.pMsgs, tFreeSBatchReqMsg);
taosArrayDestroyEx(batchRsp.pRsps, tFreeSBatchRspMsg);
tmsgSendRsp(&rspMsg);
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 0e2635459d..9b9a1ef259 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -163,7 +163,7 @@ typedef struct {
SArray* pStopInfo;
} STaskStopInfo;
-typedef struct SExecTaskInfo {
+struct SExecTaskInfo {
STaskIdInfo id;
uint32_t status;
STimeWindow window;
@@ -182,7 +182,7 @@ typedef struct SExecTaskInfo {
struct SOperatorInfo* pRoot;
SLocalFetch localFetch;
STaskStopInfo stopInfo;
-} SExecTaskInfo;
+};
enum {
OP_NOT_OPENED = 0x0,
@@ -315,37 +315,39 @@ typedef struct STableMetaCacheInfo {
uint64_t cacheHit;
} STableMetaCacheInfo;
-typedef struct STableScanInfo {
+typedef struct STableScanBase {
STsdbReader* dataReader;
- SReadHandle readHandle;
- SLimitInfo limitInfo;
SFileBlockLoadRecorder readRecorder;
+ SQueryTableDataCond cond;
+ SAggOptrPushDownInfo pdInfo;
+ SColMatchInfo matchInfo;
+ SReadHandle readHandle;
+ SExprSupp pseudoSup;
+ STableMetaCacheInfo metaCache;
+ int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
+ int32_t dataBlockLoadFlag;
+ SLimitInfo limitInfo;
+} STableScanBase;
+
+typedef struct STableScanInfo {
+ STableScanBase base;
SScanInfo scanInfo;
int32_t scanTimes;
SSDataBlock* pResBlock;
- SColMatchInfo matchInfo;
- SExprSupp pseudoSup;
- SQueryTableDataCond cond;
- int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
- int32_t dataBlockLoadFlag;
SSampleExecInfo sample; // sample execution info
int32_t currentGroupId;
int32_t currentTable;
int8_t scanMode;
- SAggOptrPushDownInfo pdInfo;
int8_t assignBlockUid;
- STableMetaCacheInfo metaCache;
} STableScanInfo;
typedef struct STableMergeScanInfo {
- STableListInfo* tableListInfo;
int32_t tableStartIndex;
int32_t tableEndIndex;
bool hasGroupId;
uint64_t groupId;
SArray* queryConds; // array of queryTableDataCond
- STsdbReader* pReader;
- SReadHandle readHandle;
+ STableScanBase base;
int32_t bufPageSize;
uint32_t sortBufSize; // max buffer size for in-memory sort
SArray* pSortInfo;
@@ -354,27 +356,12 @@ typedef struct STableMergeScanInfo {
int64_t startTs; // sort start time
SArray* sortSourceParams;
SLimitInfo limitInfo;
- SFileBlockLoadRecorder readRecorder;
int64_t numOfRows;
SScanInfo scanInfo;
int32_t scanTimes;
- SqlFunctionCtx* pCtx; // which belongs to the direct upstream operator operator query context
- SResultRowInfo* pResultRowInfo;
- int32_t* rowEntryInfoOffset;
- SExprInfo* pExpr;
SSDataBlock* pResBlock;
- SColMatchInfo matchInfo;
- int32_t numOfOutput;
- SExprSupp pseudoSup;
- SQueryTableDataCond cond;
- int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
- int32_t dataBlockLoadFlag;
-
- // if the upstream is an interval operator, the interval info is also kept here to get the time
- // window to check if current data block needs to be loaded.
- SInterval interval;
- SSampleExecInfo sample; // sample execution info
- SSortExecInfo sortExecInfo;
+ SSampleExecInfo sample; // sample execution info
+ SSortExecInfo sortExecInfo;
} STableMergeScanInfo;
typedef struct STagScanInfo {
@@ -387,17 +374,17 @@ typedef struct STagScanInfo {
} STagScanInfo;
typedef struct SLastrowScanInfo {
- SSDataBlock* pRes;
- SReadHandle readHandle;
- void* pLastrowReader;
- SColMatchInfo matchInfo;
- int32_t* pSlotIds;
- SExprSupp pseudoExprSup;
- int32_t retrieveType;
- int32_t currentGroupIndex;
- SSDataBlock* pBufferredRes;
- SArray* pUidList;
- int32_t indexOfBufferedRes;
+ SSDataBlock* pRes;
+ SReadHandle readHandle;
+ void* pLastrowReader;
+ SColMatchInfo matchInfo;
+ int32_t* pSlotIds;
+ SExprSupp pseudoExprSup;
+ int32_t retrieveType;
+ int32_t currentGroupIndex;
+ SSDataBlock* pBufferredRes;
+ SArray* pUidList;
+ int32_t indexOfBufferedRes;
} SLastrowScanInfo;
typedef enum EStreamScanMode {
@@ -414,13 +401,6 @@ enum {
PROJECT_RETRIEVE_DONE = 0x2,
};
-typedef struct SCatchSupporter {
- SHashObj* pWindowHashTable; // quick locate the window object for each window
- SDiskbasedBuf* pDataBuf; // buffer based on blocked-wised disk file
- int32_t keySize;
- int64_t* pKeyBuf;
-} SCatchSupporter;
-
typedef struct SStreamAggSupporter {
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
SSDataBlock* pScanBlock;
@@ -504,7 +484,6 @@ typedef struct SStreamScanInfo {
STimeWindow updateWin;
STimeWindowAggSupp twAggSup;
SSDataBlock* pUpdateDataRes;
- SHashObj* pGroupIdTbNameMap;
// status for tmq
SNodeList* pGroupTags;
SNode* pTagCond;
@@ -612,7 +591,6 @@ typedef struct SStreamIntervalOperatorInfo {
SArray* pChildren;
SStreamState* pState;
SWinKey delKey;
- SHashObj* pGroupIdTbNameMap; // uint64_t -> char[TSDB_TABLE_NAME_LEN]
} SStreamIntervalOperatorInfo;
typedef struct SAggOperatorInfo {
@@ -745,7 +723,6 @@ typedef struct SStreamSessionAggOperatorInfo {
SPhysiNode* pPhyNode; // create new child
bool isFinal;
bool ignoreExpiredData;
- SHashObj* pGroupIdTbNameMap;
} SStreamSessionAggOperatorInfo;
typedef struct SStreamStateAggOperatorInfo {
@@ -761,7 +738,6 @@ typedef struct SStreamStateAggOperatorInfo {
void* pDelIterator;
SArray* pChildren; // cache for children's result;
bool ignoreExpiredData;
- SHashObj* pGroupIdTbNameMap;
} SStreamStateAggOperatorInfo;
typedef struct SStreamPartitionOperatorInfo {
@@ -800,7 +776,6 @@ typedef struct STimeSliceOperatorInfo {
SArray* pPrevRow; // SArray
SArray* pNextRow; // SArray
SArray* pLinearInfo; // SArray
- bool fillLastPoint;
bool isPrevRowSet;
bool isNextRowSet;
int32_t fillType; // fill type
@@ -1046,8 +1021,8 @@ int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPos
SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode,
SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
- SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
+ SExecTaskInfo* pTaskInfo);
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex);
diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h
index ed019be767..2d8df81dbd 100644
--- a/source/libs/executor/inc/tfill.h
+++ b/source/libs/executor/inc/tfill.h
@@ -36,7 +36,8 @@ typedef struct SFillColInfo {
typedef struct SFillLinearInfo {
SPoint start;
SPoint end;
- bool hasNull;
+ bool isStartSet;
+ bool isEndSet;
int16_t type;
int32_t bytes;
} SFillLinearInfo;
diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h
index 5591299d30..51440a7f59 100644
--- a/source/libs/executor/inc/tsort.h
+++ b/source/libs/executor/inc/tsort.h
@@ -163,9 +163,10 @@ SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle);
/**
* get proper sort buffer pages according to the row size
* @param rowSize
+ * @param numOfCols columns count that be put into page
* @return
*/
-int32_t getProperSortPageSize(size_t rowSize);
+int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);
#ifdef __cplusplus
}
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index e561b6e124..7dad9245d5 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -1026,8 +1026,8 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
SStreamScanInfo* pInfo = pOperator->info;
if (pOffset->type == TMQ_OFFSET__LOG) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTSInfo->dataReader);
- pTSInfo->dataReader = NULL;
+ tsdbReaderClose(pTSInfo->base.dataReader);
+ pTSInfo->base.dataReader = NULL;
#if 0
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
@@ -1079,23 +1079,23 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
// TODO after dropping table, table may not found
ASSERT(found);
- if (pTableScanInfo->dataReader == NULL) {
+ if (pTableScanInfo->base.dataReader == NULL) {
STableKeyInfo* pList = tableListGetInfo(pTaskInfo->pTableInfoList, 0);
int32_t num = tableListGetSize(pTaskInfo->pTableInfoList);
- if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond, pList, num,
- &pTableScanInfo->dataReader, NULL) < 0 ||
- pTableScanInfo->dataReader == NULL) {
+ if (tsdbReaderOpen(pTableScanInfo->base.readHandle.vnode, &pTableScanInfo->base.cond, pList, num,
+ &pTableScanInfo->base.dataReader, NULL) < 0 ||
+ pTableScanInfo->base.dataReader == NULL) {
ASSERT(0);
}
}
STableKeyInfo tki = {.uid = uid};
- tsdbSetTableList(pTableScanInfo->dataReader, &tki, 1);
- int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
- pTableScanInfo->cond.twindows.skey = ts + 1;
- tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
- pTableScanInfo->cond.twindows.skey = oldSkey;
+ tsdbSetTableList(pTableScanInfo->base.dataReader, &tki, 1);
+ int64_t oldSkey = pTableScanInfo->base.cond.twindows.skey;
+ pTableScanInfo->base.cond.twindows.skey = ts + 1;
+ tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
+ pTableScanInfo->base.cond.twindows.skey = oldSkey;
pTableScanInfo->scanTimes = 0;
qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 39e876800f..0dd5765aa4 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -1000,12 +1000,6 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
return TSDB_CODE_SUCCESS;
}
-static void updateTableQueryInfoForReverseScan(STableQueryInfo* pTableQueryInfo) {
- if (pTableQueryInfo == NULL) {
- return;
- }
-}
-
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
if (status == TASK_NOT_COMPLETED) {
pTaskInfo->status = status;
@@ -1054,7 +1048,7 @@ void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pCol
}
SFilterColumnParam param1 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock};
- int32_t code = filterSetDataFromSlotId(pFilterInfo, ¶m1);
+ int32_t code = filterSetDataFromSlotId(pFilterInfo, ¶m1);
SColumnInfoData* p = NULL;
int32_t status = 0;
@@ -1064,7 +1058,7 @@ void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pCol
extractQualifiedTupleByFilterResult(pBlock, p, keep, status);
if (pColMatchInfo != NULL) {
- size_t size = taosArrayGetSize(pColMatchInfo->pList);
+ size_t size = taosArrayGetSize(pColMatchInfo->pList);
for (int32_t i = 0; i < size; ++i) {
SColMatchItem* pInfo = taosArrayGet(pColMatchInfo->pList, i);
if (pInfo->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
@@ -1336,27 +1330,14 @@ void doBuildStreamResBlock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGr
pBlock->info.groupId = 0;
ASSERT(!pbInfo->mergeResultBlock);
doCopyToSDataBlock(pTaskInfo, pBlock, &pOperator->exprSupp, pBuf, pGroupResInfo);
- if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
- SStreamStateAggOperatorInfo* pInfo = pOperator->info;
- char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t));
- if (tbname != NULL) {
- memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
- } else {
- pBlock->info.parTbName[0] = 0;
- }
- } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
- pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
- pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
- SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
-
- char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t));
- if (tbname != NULL) {
- memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
- } else {
- pBlock->info.parTbName[0] = 0;
- }
+ void* tbname = NULL;
+ if (streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.groupId, &tbname) < 0) {
+ pBlock->info.parTbName[0] = 0;
+ } else {
+ memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
+ tdbFree(tbname);
}
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
@@ -1665,55 +1646,6 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t
static int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey);
-static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int32_t rowIndex) {
- size_t size = taosArrayGetSize(groupInfo);
- if (size == 0) {
- return true;
- }
-
- for (int32_t i = 0; i < size; ++i) {
- int32_t* index = taosArrayGet(groupInfo, i);
-
- SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, *index);
- bool isNull = colDataIsNull(pColInfo, rowIndex, pBlock->info.rows, NULL);
-
- if ((isNull && buf[i] != NULL) || (!isNull && buf[i] == NULL)) {
- return false;
- }
-
- char* pCell = colDataGetData(pColInfo, rowIndex);
- if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
- if (varDataLen(pCell) != varDataLen(buf[i])) {
- return false;
- } else {
- if (memcmp(varDataVal(pCell), varDataVal(buf[i]), varDataLen(pCell)) != 0) {
- return false;
- }
- }
- } else {
- if (memcmp(pCell, buf[i], pColInfo->info.bytes) != 0) {
- return false;
- }
- }
- }
-
- return 0;
-}
-
-static bool saveCurrentTuple(char** rowColData, SArray* pColumnList, SSDataBlock* pBlock, int32_t rowIndex) {
- int32_t size = (int32_t)taosArrayGetSize(pColumnList);
-
- for (int32_t i = 0; i < size; ++i) {
- int32_t* index = taosArrayGet(pColumnList, i);
- SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, *index);
-
- char* data = colDataGetData(pColInfo, rowIndex);
- memcpy(rowColData[i], data, colDataGetLength(pColInfo, rowIndex));
- }
-
- return true;
-}
-
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) {
// todo add more information about exchange operation
int32_t type = pOperator->operatorType;
@@ -1725,13 +1657,13 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
return TSDB_CODE_SUCCESS;
} else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pTableScanInfo = pOperator->info;
- *order = pTableScanInfo->cond.order;
- *scanFlag = pTableScanInfo->scanFlag;
+ *order = pTableScanInfo->base.cond.order;
+ *scanFlag = pTableScanInfo->base.scanFlag;
return TSDB_CODE_SUCCESS;
} else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN) {
STableMergeScanInfo* pTableScanInfo = pOperator->info;
- *order = pTableScanInfo->cond.order;
- *scanFlag = pTableScanInfo->scanFlag;
+ *order = pTableScanInfo->base.cond.order;
+ *scanFlag = pTableScanInfo->base.scanFlag;
return TSDB_CODE_SUCCESS;
} else {
if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) {
@@ -1994,8 +1926,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp
int32_t scanFlag = MAIN_SCAN;
getTableScanInfo(pOperator, &order, &scanFlag);
- int64_t ekey =
- Q_STATUS_EQUAL(pTaskInfo->status, TASK_COMPLETED) ? pInfo->win.ekey : pInfo->existNewGroupBlock->info.window.ekey;
+ int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey;
taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo));
blockDataCleanup(pInfo->pRes);
@@ -2146,7 +2077,7 @@ static SSDataBlock* doFill(SOperatorInfo* pOperator) {
break;
}
- doFilter(fillResult, pOperator->exprSupp.pFilterInfo, &pInfo->matchInfo );
+ doFilter(fillResult, pOperator->exprSupp.pFilterInfo, &pInfo->matchInfo);
if (fillResult->info.rows > 0) {
break;
}
@@ -2373,14 +2304,14 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock;
pInfo->groupId = UINT64_MAX;
- setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, true, OP_NOT_OPENED, pInfo, pTaskInfo);
- pOperator->fpSet =
- createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, destroyAggOperatorInfo, NULL);
+ setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, true, OP_NOT_OPENED, pInfo,
+ pTaskInfo);
+ pOperator->fpSet = createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, destroyAggOperatorInfo, NULL);
if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pTableScanInfo = downstream->info;
- pTableScanInfo->pdInfo.pExprSup = &pOperator->exprSupp;
- pTableScanInfo->pdInfo.pAggSup = &pInfo->aggSup;
+ pTableScanInfo->base.pdInfo.pExprSup = &pOperator->exprSupp;
+ pTableScanInfo->base.pdInfo.pAggSup = &pInfo->aggSup;
}
code = appendDownstream(pOperator, &downstream, 1);
@@ -2745,7 +2676,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
}
STableScanInfo* pScanInfo = pOperator->info;
- pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder;
+ pTaskInfo->cost.pRecoder = &pScanInfo->base.readRecorder;
} else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) {
STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode;
@@ -2763,14 +2694,14 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return NULL;
}
- pOperator = createTableMergeScanOperatorInfo(pTableScanNode, pTableListInfo, pHandle, pTaskInfo);
+ pOperator = createTableMergeScanOperatorInfo(pTableScanNode, pHandle, pTaskInfo);
if (NULL == pOperator) {
pTaskInfo->code = terrno;
return NULL;
}
STableScanInfo* pScanInfo = pOperator->info;
- pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder;
+ pTaskInfo->cost.pRecoder = &pScanInfo->base.readRecorder;
} else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) {
pOperator = createExchangeOperatorInfo(pHandle ? pHandle->pMsgCb->clientRpc : NULL, (SExchangePhysiNode*)pPhyNode,
pTaskInfo);
@@ -3352,13 +3283,13 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat
if (pBlock->info.groupId == 0) {
pBlock->info.groupId = pPos->groupId;
- SStreamIntervalOperatorInfo* pInfo = pOperator->info;
- char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t));
- if (tbname != NULL) {
- memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
- } else {
+ void* tbname = NULL;
+ if (streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.groupId, &tbname) < 0) {
pBlock->info.parTbName[0] = 0;
+ } else {
+ memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
+ tdbFree(tbname);
} else {
// current value belongs to different group, it can't be packed into one datablock
if (pBlock->info.groupId != pPos->groupId) {
@@ -3444,30 +3375,13 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pSta
if (pBlock->info.groupId == 0) {
pBlock->info.groupId = pKey->groupId;
- if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
- SStreamStateAggOperatorInfo* pInfo = pOperator->info;
-
- char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t));
- if (tbname != NULL) {
- memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
- } else {
- pBlock->info.parTbName[0] = 0;
- }
- } else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
- pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
- pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
- SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
-
- char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t));
- if (tbname != NULL) {
- memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
- } else {
- pBlock->info.parTbName[0] = 0;
- }
+ void* tbname = NULL;
+ if (streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.groupId, &tbname) < 0) {
+ pBlock->info.parTbName[0] = 0;
} else {
- ASSERT(0);
+ memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
-
+ tdbFree(tbname);
} else {
// current value belongs to different group, it can't be packed into one datablock
if (pBlock->info.groupId != pKey->groupId) {
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 229effc96b..916b6df969 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -233,25 +233,25 @@ static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t gro
STableScanInfo* pTableScanInfo = pOperator->info;
- SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pTableScanInfo->base.pdInfo.pAggSup->pResultRowHashTable, buf,
GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
if (p1 == NULL) {
return NULL;
}
- *pPage = getBufPage(pTableScanInfo->pdInfo.pAggSup->pResultBuf, p1->pageId);
+ *pPage = getBufPage(pTableScanInfo->base.pdInfo.pAggSup->pResultBuf, p1->pageId);
return (SResultRow*)((char*)(*pPage) + p1->offset);
}
static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo* pBlockInfo, uint32_t* status) {
STableScanInfo* pTableScanInfo = pOperator->info;
- if (pTableScanInfo->pdInfo.pExprSup == NULL) {
+ if (pTableScanInfo->base.pdInfo.pExprSup == NULL) {
return TSDB_CODE_SUCCESS;
}
- SExprSupp* pSup1 = pTableScanInfo->pdInfo.pExprSup;
+ SExprSupp* pSup1 = pTableScanInfo->base.pdInfo.pExprSup;
SFilePage* pPage = NULL;
SResultRow* pRow = getTableGroupOutputBuf(pOperator, pBlockInfo->groupId, &pPage);
@@ -264,7 +264,7 @@ static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo*
for (int32_t i = 0; i < pSup1->numOfExprs; ++i) {
int32_t functionId = pSup1->pCtx[i].functionId;
- SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, pTableScanInfo->pdInfo.pExprSup->rowEntryInfoOffset);
+ SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, pTableScanInfo->base.pdInfo.pExprSup->rowEntryInfoOffset);
int32_t reqStatus = fmFuncDynDataRequired(functionId, pEntry, &pBlockInfo->window);
if (reqStatus != FUNC_DATA_REQUIRED_NOT_LOAD) {
@@ -274,7 +274,7 @@ static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo*
}
// release buffer pages
- releaseBufPage(pTableScanInfo->pdInfo.pAggSup->pResultBuf, pPage);
+ releaseBufPage(pTableScanInfo->base.pdInfo.pAggSup->pResultBuf, pPage);
if (notLoadBlock) {
*status = FUNC_DATA_REQUIRED_NOT_LOAD;
@@ -293,7 +293,7 @@ static bool doFilterByBlockSMA(SFilterInfo* pFilterInfo, SColumnDataAgg** pColsA
return keep;
}
-static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
+static bool doLoadBlockSMA(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
bool allColumnsHaveAgg = true;
SColumnDataAgg** pColAgg = NULL;
@@ -330,16 +330,20 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
return true;
}
-static void doSetTagColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo,
+static void doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo,
int32_t rows) {
if (pTableScanInfo->pseudoSup.numOfExprs > 0) {
SExprSupp* pSup = &pTableScanInfo->pseudoSup;
int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock, rows,
GET_TASKID(pTaskInfo), &pTableScanInfo->metaCache);
- if (code != TSDB_CODE_SUCCESS) {
+ // ignore the table not exists error, since this table may have been dropped during the scan procedure.
+ if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_PAR_TABLE_NOT_EXIST) {
T_LONG_JMP(pTaskInfo->env, code);
}
+
+ // reset the error code.
+ terrno = 0;
}
}
@@ -370,19 +374,16 @@ void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo
}
}
-static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
+static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- STableScanInfo* pInfo = pOperator->info;
-
SFileBlockLoadRecorder* pCost = &pTableScanInfo->readRecorder;
pCost->totalBlocks += 1;
pCost->totalRows += pBlock->info.rows;
bool loadSMA = false;
-
- *status = pInfo->dataBlockLoadFlag;
+ *status = pTableScanInfo->dataBlockLoadFlag;
if (pOperator->exprSupp.pFilterInfo != NULL ||
overlapWithTimeWindow(&pTableScanInfo->pdInfo.interval, &pBlock->info, pTableScanInfo->cond.order)) {
(*status) = FUNC_DATA_REQUIRED_DATA_LOAD;
@@ -481,14 +482,14 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
}
}
- applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo, pOperator);
+ applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo, pOperator);
pCost->totalRows += pBlock->info.rows;
- pInfo->limitInfo.numOfOutputRows = pCost->totalRows;
+ pTableScanInfo->limitInfo.numOfOutputRows = pCost->totalRows;
return TSDB_CODE_SUCCESS;
}
-static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunctionCtx* pCtx, int32_t numOfOutput) {
+static void prepareForDescendingScan(STableScanBase* pTableScanInfo, SqlFunctionCtx* pCtx, int32_t numOfOutput) {
SET_REVERSE_SCAN_FLAG(pTableScanInfo);
switchCtxOrder(pCtx, numOfOutput);
@@ -513,6 +514,21 @@ static void freeTableCachedVal(void* param) {
taosMemoryFree(pVal);
}
+static STableCachedVal* createTableCacheVal(const SMetaReader* pMetaReader) {
+ STableCachedVal* pVal = taosMemoryMalloc(sizeof(STableCachedVal));
+ pVal->pName = strdup(pMetaReader->me.name);
+ pVal->pTags = NULL;
+
+ // only child table has tag value
+ if (pMetaReader->me.type == TSDB_CHILD_TABLE) {
+ STag* pTag = (STag*)pMetaReader->me.ctbEntry.pTags;
+ pVal->pTags = taosMemoryMalloc(pTag->len);
+ memcpy(pVal->pTags, pTag, pTag->len);
+ }
+
+ return pVal;
+}
+
// const void *key, size_t keyLen, void *value
static void freeCachedMetaItem(const void* key, size_t keyLen, void* value) { freeTableCachedVal(value); }
@@ -540,7 +556,12 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
metaReaderInit(&mr, pHandle->meta, 0);
code = metaGetTableEntryByUid(&mr, pBlock->info.uid);
if (code != TSDB_CODE_SUCCESS) {
- qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
+ if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid,
+ tstrerror(terrno), idStr);
+ } else {
+ qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
+ }
metaReaderClear(&mr);
return terrno;
}
@@ -559,23 +580,20 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
metaReaderInit(&mr, pHandle->meta, 0);
code = metaGetTableEntryByUid(&mr, pBlock->info.uid);
if (code != TSDB_CODE_SUCCESS) {
- qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
+ if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s",
+ pBlock->info.uid, tstrerror(terrno), idStr);
+ } else {
+ qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno),
+ idStr);
+ }
metaReaderClear(&mr);
return terrno;
}
metaReaderReleaseLock(&mr);
- STableCachedVal* pVal = taosMemoryMalloc(sizeof(STableCachedVal));
- pVal->pName = strdup(mr.me.name);
- pVal->pTags = NULL;
-
- // only child table has tag value
- if (mr.me.type == TSDB_CHILD_TABLE) {
- STag* pTag = (STag*)mr.me.ctbEntry.pTags;
- pVal->pTags = taosMemoryMalloc(pTag->len);
- memcpy(pVal->pTags, mr.me.ctbEntry.pTags, pTag->len);
- }
+ STableCachedVal* pVal = createTableCacheVal(&mr);
val = *pVal;
freeReader = true;
@@ -590,6 +608,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
pCache->cacheHit += 1;
STableCachedVal* pVal = taosLRUCacheValue(pCache->pTableMetaEntryCache, h);
val = *pVal;
+
taosLRUCacheRelease(pCache->pTableMetaEntryCache, h, false);
}
@@ -678,7 +697,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
int64_t st = taosGetTimestampUs();
- while (tsdbNextDataBlock(pTableScanInfo->dataReader)) {
+ while (tsdbNextDataBlock(pTableScanInfo->base.dataReader)) {
if (isTaskKilled(pTaskInfo)) {
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
}
@@ -693,7 +712,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
SDataBlockInfo* pBInfo = &pBlock->info;
int32_t rows = 0;
- tsdbRetrieveDataBlockInfo(pTableScanInfo->dataReader, &rows, &pBInfo->uid, &pBInfo->window);
+ tsdbRetrieveDataBlockInfo(pTableScanInfo->base.dataReader, &rows, &pBInfo->uid, &pBInfo->window);
blockDataEnsureCapacity(pBlock, rows); // todo remove it latter
pBInfo->rows = rows;
@@ -702,7 +721,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
pBlock->info.groupId = getTableGroupId(pTaskInfo->pTableInfoList, pBlock->info.uid);
uint32_t status = 0;
- int32_t code = loadDataBlock(pOperator, pTableScanInfo, pBlock, &status);
+ int32_t code = loadDataBlock(pOperator, &pTableScanInfo->base, pBlock, &status);
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pOperator->pTaskInfo->env, code);
@@ -713,10 +732,10 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
continue;
}
- pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
- pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
+ pOperator->resultInfo.totalRows = pTableScanInfo->base.readRecorder.totalRows;
+ pTableScanInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
- pOperator->cost.totalCost = pTableScanInfo->readRecorder.elapsedTime;
+ pOperator->cost.totalCost = pTableScanInfo->base.readRecorder.elapsedTime;
// todo refactor
/*pTableScanInfo->lastStatus.uid = pBlock->info.uid;*/
@@ -736,7 +755,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
// The read handle is not initialized yet, since no qualified tables exists
- if (pTableScanInfo->dataReader == NULL || pOperator->status == OP_EXEC_DONE) {
+ if (pTableScanInfo->base.dataReader == NULL || pOperator->status == OP_EXEC_DONE) {
return NULL;
}
@@ -751,19 +770,19 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
- pTableScanInfo->scanFlag = REPEAT_SCAN;
+ pTableScanInfo->base.scanFlag = REPEAT_SCAN;
qDebug("start to repeat ascending order scan data blocks due to query func required, %s", GET_TASKID(pTaskInfo));
// do prepare for the next round table scan operation
- tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
}
}
int32_t total = pTableScanInfo->scanInfo.numOfAsc + pTableScanInfo->scanInfo.numOfDesc;
if (pTableScanInfo->scanTimes < total) {
- if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) {
- prepareForDescendingScan(pTableScanInfo, pOperator->exprSupp.pCtx, 0);
- tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ if (pTableScanInfo->base.cond.order == TSDB_ORDER_ASC) {
+ prepareForDescendingScan(&pTableScanInfo->base, pOperator->exprSupp.pCtx, 0);
+ tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
}
@@ -777,10 +796,10 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < total) {
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
- pTableScanInfo->scanFlag = REPEAT_SCAN;
+ pTableScanInfo->base.scanFlag = REPEAT_SCAN;
qDebug("%s start to repeat descending order scan data blocks", GET_TASKID(pTaskInfo));
- tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
}
}
}
@@ -809,11 +828,11 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
}
STableKeyInfo* pTableInfo = tableListGetInfo(pTaskInfo->pTableInfoList, pInfo->currentTable);
- tsdbSetTableList(pInfo->dataReader, pTableInfo, 1);
+ tsdbSetTableList(pInfo->base.dataReader, pTableInfo, 1);
qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d %s", pTableInfo->uid, numOfTables,
pInfo->currentTable, pTaskInfo->id.str);
- tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
+ tsdbReaderReset(pInfo->base.dataReader, &pInfo->base.cond);
pInfo->scanTimes = 0;
}
} else { // scan table group by group sequentially
@@ -826,10 +845,10 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
int32_t num = 0;
STableKeyInfo* pList = NULL;
tableListGetGroupList(pTaskInfo->pTableInfoList, pInfo->currentGroupId, &pList, &num);
- ASSERT(pInfo->dataReader == NULL);
+ ASSERT(pInfo->base.dataReader == NULL);
- int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, pList, num,
- (STsdbReader**)&pInfo->dataReader, GET_TASKID(pTaskInfo));
+ int32_t code = tsdbReaderOpen(pInfo->base.readHandle.vnode, &pInfo->base.cond, pList, num,
+ (STsdbReader**)&pInfo->base.dataReader, GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
@@ -848,15 +867,15 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
// reset value for the next group data output
pOperator->status = OP_OPENED;
- pInfo->limitInfo.numOfOutputRows = 0;
- pInfo->limitInfo.remainOffset = pInfo->limitInfo.limit.offset;
+ pInfo->base.limitInfo.numOfOutputRows = 0;
+ pInfo->base.limitInfo.remainOffset = pInfo->base.limitInfo.limit.offset;
int32_t num = 0;
STableKeyInfo* pList = NULL;
tableListGetGroupList(pTaskInfo->pTableInfoList, pInfo->currentGroupId, &pList, &num);
- tsdbSetTableList(pInfo->dataReader, pList, num);
- tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
+ tsdbSetTableList(pInfo->base.dataReader, pList, num);
+ tsdbReaderReset(pInfo->base.dataReader, &pInfo->base.cond);
pInfo->scanTimes = 0;
result = doGroupedTableScan(pOperator);
@@ -872,7 +891,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) {
SFileBlockLoadRecorder* pRecorder = taosMemoryCalloc(1, sizeof(SFileBlockLoadRecorder));
STableScanInfo* pTableScanInfo = pOptr->info;
- *pRecorder = pTableScanInfo->readRecorder;
+ *pRecorder = pTableScanInfo->base.readRecorder;
*pOptrExplain = pRecorder;
*len = sizeof(SFileBlockLoadRecorder);
return 0;
@@ -881,17 +900,17 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr
static void destroyTableScanOperatorInfo(void* param) {
STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
blockDataDestroy(pTableScanInfo->pResBlock);
- cleanupQueryTableDataCond(&pTableScanInfo->cond);
+ cleanupQueryTableDataCond(&pTableScanInfo->base.cond);
- tsdbReaderClose(pTableScanInfo->dataReader);
- pTableScanInfo->dataReader = NULL;
+ tsdbReaderClose(pTableScanInfo->base.dataReader);
+ pTableScanInfo->base.dataReader = NULL;
- if (pTableScanInfo->matchInfo.pList != NULL) {
- taosArrayDestroy(pTableScanInfo->matchInfo.pList);
+ if (pTableScanInfo->base.matchInfo.pList != NULL) {
+ taosArrayDestroy(pTableScanInfo->base.matchInfo.pList);
}
- taosLRUCacheCleanup(pTableScanInfo->metaCache.pTableMetaEntryCache);
- cleanupExprSupp(&pTableScanInfo->pseudoSup);
+ taosLRUCacheCleanup(pTableScanInfo->base.metaCache.pTableMetaEntryCache);
+ cleanupExprSupp(&pTableScanInfo->base.pseudoSup);
taosMemoryFreeClear(param);
}
@@ -908,30 +927,32 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
int32_t numOfCols = 0;
int32_t code =
- extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
+ extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->base.matchInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
- initLimitInfo(pScanNode->node.pLimit, pScanNode->node.pSlimit, &pInfo->limitInfo);
- code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
+ initLimitInfo(pScanNode->node.pLimit, pScanNode->node.pSlimit, &pInfo->base.limitInfo);
+ code = initQueryTableDataCond(&pInfo->base.cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
if (pScanNode->pScanPseudoCols != NULL) {
- SExprSupp* pSup = &pInfo->pseudoSup;
+ SExprSupp* pSup = &pInfo->base.pseudoSup;
pSup->pExprInfo = createExprInfo(pScanNode->pScanPseudoCols, NULL, &pSup->numOfExprs);
pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset);
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
- pInfo->pdInfo.interval = extractIntervalInfo(pTableScanNode);
- pInfo->readHandle = *readHandle;
+
+ pInfo->base.scanFlag = MAIN_SCAN;
+ pInfo->base.pdInfo.interval = extractIntervalInfo(pTableScanNode);
+ pInfo->base.readHandle = *readHandle;
pInfo->sample.sampleRatio = pTableScanNode->ratio;
pInfo->sample.seed = taosGetTimestampSec();
- pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired;
+ pInfo->base.dataBlockLoadFlag = pTableScanNode->dataRequired;
initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pResBlock = createResDataBlock(pDescNode);
@@ -942,7 +963,6 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
goto _error;
}
- pInfo->scanFlag = MAIN_SCAN;
pInfo->currentGroupId = -1;
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
@@ -950,13 +970,13 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
pTaskInfo);
pOperator->exprSupp.numOfExprs = numOfCols;
- pInfo->metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5);
- if (pInfo->metaCache.pTableMetaEntryCache == NULL) {
+ pInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5);
+ if (pInfo->base.metaCache.pTableMetaEntryCache == NULL) {
code = terrno;
goto _error;
}
- taosLRUCacheSetStrictCapacity(pInfo->metaCache.pTableMetaEntryCache, false);
+ taosLRUCacheSetStrictCapacity(pInfo->base.metaCache.pTableMetaEntryCache, false);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, destroyTableScanOperatorInfo,
getTableScannerExecInfo);
@@ -978,7 +998,7 @@ SOperatorInfo* createTableSeqScanOperatorInfo(void* pReadHandle, SExecTaskInfo*
STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
- pInfo->dataReader = pReadHandle;
+ pInfo->base.dataReader = pReadHandle;
// pInfo->prevGroupId = -1;
setOperatorInfo(pOperator, "TableSeqScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN, false, OP_NOT_OPENED,
@@ -1185,11 +1205,11 @@ static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t grou
}
void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin) {
- pTableScanInfo->cond.twindows = *pWin;
+ pTableScanInfo->base.cond.twindows = *pWin;
pTableScanInfo->scanTimes = 0;
pTableScanInfo->currentGroupId = -1;
- tsdbReaderClose(pTableScanInfo->dataReader);
- pTableScanInfo->dataReader = NULL;
+ tsdbReaderClose(pTableScanInfo->base.dataReader);
+ pTableScanInfo->base.dataReader = NULL;
}
static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbUid, TSKEY startTs, TSKEY endTs,
@@ -1197,7 +1217,7 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
STableKeyInfo tblInfo = {.uid = tbUid, .groupId = 0};
STableScanInfo* pTableScanInfo = pTableScanOp->info;
- SQueryTableDataCond cond = pTableScanInfo->cond;
+ SQueryTableDataCond cond = pTableScanInfo->base.cond;
cond.startVersion = -1;
cond.endVersion = maxVersion;
@@ -1209,7 +1229,7 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
blockDataCleanup(pBlock);
STsdbReader* pReader = NULL;
- int32_t code = tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &cond, &tblInfo, 1, (STsdbReader**)&pReader,
+ int32_t code = tsdbReaderOpen(pTableScanInfo->base.readHandle.vnode, &cond, &tblInfo, 1, (STsdbReader**)&pReader,
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
@@ -1228,8 +1248,8 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
blockDataEnsureCapacity(pBlock, rows);
pBlock->info.rows = rows;
- relocateColumnData(pBlock, pTableScanInfo->matchInfo.pList, pCols, true);
- doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, rows);
+ relocateColumnData(pBlock, pTableScanInfo->base.matchInfo.pList, pCols, true);
+ doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, rows);
pBlock->info.groupId = getTableGroupId(pTaskInfo->pTableInfoList, pBInfo->uid);
}
@@ -1356,8 +1376,8 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32
*pRowIndex = 0;
pInfo->updateWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTableScanInfo->dataReader);
- pTableScanInfo->dataReader = NULL;
+ tsdbReaderClose(pTableScanInfo->base.dataReader);
+ pTableScanInfo->base.dataReader = NULL;
return NULL;
}
@@ -1520,7 +1540,9 @@ static int32_t generateDeleteResultBlock(SStreamScanInfo* pInfo, SSDataBlock* pS
groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
}
if (pInfo->tbnameCalSup.pExprInfo) {
- char* parTbname = taosHashGet(pInfo->pGroupIdTbNameMap, &groupId, sizeof(int64_t));
+ void* parTbname = NULL;
+ streamStateGetParName(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, groupId, &parTbname);
+      // NOTE(review): streamStateGetParName()'s return value is not checked here — on failure parTbname
+
memcpy(varDataVal(tbname), parTbname, TSDB_TABLE_NAME_LEN);
varDataSetLen(tbname, strlen(varDataVal(tbname)));
}
@@ -1570,15 +1592,18 @@ static void calBlockTag(SExprSupp* pTagCalSup, SSDataBlock* pBlock, SSDataBlock*
}
void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock) {
- SExprSupp* pTbNameCalSup = &pInfo->tbnameCalSup;
+ SExprSupp* pTbNameCalSup = &pInfo->tbnameCalSup;
+ SStreamState* pState = pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState;
if (pTbNameCalSup == NULL || pTbNameCalSup->numOfExprs == 0) return;
if (pBlock == NULL || pBlock->info.rows == 0) return;
- if (pBlock->info.groupId) {
- char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t));
- if (tbname != NULL) {
- memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
- }
+
+ void* tbname = NULL;
+ if (streamStateGetParName(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, pBlock->info.groupId, &tbname) < 0) {
+ pBlock->info.parTbName[0] = 0;
+ } else {
+ memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
+  tdbFree(tbname);  // tbname was allocated by streamStateGetParName (tdb); free even on the failure path (tdbFree(NULL) is a no-op — confirm)
SSDataBlock* pSrcBlock = blockCopyOneRow(pBlock, 0);
ASSERT(pSrcBlock->info.rows == 1);
@@ -1606,9 +1631,8 @@ void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock) {
pBlock->info.parTbName[0] = 0;
}
- if (pBlock->info.groupId) {
- taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), pBlock->info.parTbName,
- TSDB_TABLE_NAME_LEN);
+ if (pBlock->info.groupId && pBlock->info.parTbName[0]) {
+ streamStatePutParName(pState, pBlock->info.groupId, pBlock->info.parTbName);
}
blockDataDestroy(pSrcBlock);
@@ -1787,8 +1811,8 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
} else {
if (!pTaskInfo->streamInfo.returned) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTSInfo->dataReader);
- pTSInfo->dataReader = NULL;
+ tsdbReaderClose(pTSInfo->base.dataReader);
+ pTSInfo->base.dataReader = NULL;
tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer);
qDebug("queue scan tsdb over, switch to wal ver %" PRId64 "", pTaskInfo->streamInfo.snapshotVer + 1);
if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) {
@@ -1954,22 +1978,22 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1 ||
pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE2) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
+ memcpy(&pTSInfo->base.cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1) {
- pTSInfo->cond.startVersion = 0;
- pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1;
- qDebug("stream recover step 1, from %" PRId64 " to %" PRId64, pTSInfo->cond.startVersion,
- pTSInfo->cond.endVersion);
+ pTSInfo->base.cond.startVersion = 0;
+ pTSInfo->base.cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1;
+ qDebug("stream recover step 1, from %" PRId64 " to %" PRId64, pTSInfo->base.cond.startVersion,
+ pTSInfo->base.cond.endVersion);
} else {
- pTSInfo->cond.startVersion = pTaskInfo->streamInfo.fillHistoryVer1 + 1;
- pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer2;
- qDebug("stream recover step 2, from %" PRId64 " to %" PRId64, pTSInfo->cond.startVersion,
- pTSInfo->cond.endVersion);
+ pTSInfo->base.cond.startVersion = pTaskInfo->streamInfo.fillHistoryVer1 + 1;
+ pTSInfo->base.cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer2;
+ qDebug("stream recover step 2, from %" PRId64 " to %" PRId64, pTSInfo->base.cond.startVersion,
+ pTSInfo->base.cond.endVersion);
}
/*resetTableScanInfo(pTSInfo, pWin);*/
- tsdbReaderClose(pTSInfo->dataReader);
- pTSInfo->dataReader = NULL;
+ tsdbReaderClose(pTSInfo->base.dataReader);
+ pTSInfo->base.dataReader = NULL;
pTSInfo->scanTimes = 0;
pTSInfo->currentGroupId = -1;
@@ -1981,18 +2005,20 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
if (pBlock != NULL) {
calBlockTbName(pInfo, pBlock);
if (pInfo->pUpdateInfo) {
- updateInfoFillBlockData(pInfo->pUpdateInfo, pBlock, pInfo->primaryTsIndex);
+ TSKEY maxTs = updateInfoFillBlockData(pInfo->pUpdateInfo, pBlock, pInfo->primaryTsIndex);
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
}
qDebug("stream recover scan get block, rows %d", pBlock->info.rows);
+ printDataBlock(pBlock, "scan recover");
return pBlock;
}
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTSInfo->dataReader);
- pTSInfo->dataReader = NULL;
+ tsdbReaderClose(pTSInfo->base.dataReader);
+ pTSInfo->base.dataReader = NULL;
- pTSInfo->cond.startVersion = -1;
- pTSInfo->cond.endVersion = -1;
+ pTSInfo->base.cond.startVersion = -1;
+ pTSInfo->base.cond.endVersion = -1;
return NULL;
}
@@ -2009,6 +2035,9 @@ FETCH_NEXT_BLOCK:
int32_t current = pInfo->validBlockIndex++;
SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current);
+ if (pBlock->info.groupId && pBlock->info.parTbName[0]) {
+ streamStatePutParName(pTaskInfo->streamInfo.pState, pBlock->info.groupId, pBlock->info.parTbName);
+ }
// TODO move into scan
pBlock->info.calWin.skey = INT64_MIN;
pBlock->info.calWin.ekey = INT64_MAX;
@@ -2097,8 +2126,8 @@ FETCH_NEXT_BLOCK:
SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader);
- updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId, version);
+ uint64_t version = getReaderMaxVersion(pTableScanInfo->base.dataReader);
+ updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->base.cond.twindows, pInfo->groupId, version);
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
// printDataBlock(pSDB, "stream scan update");
@@ -2407,7 +2436,6 @@ static void destroyStreamScanOperatorInfo(void* param) {
}
cleanupExprSupp(&pStreamScan->tbnameCalSup);
- taosHashCleanup(pStreamScan->pGroupIdTbNameMap);
updateInfoDestroy(pStreamScan->pUpdateInfo);
blockDataDestroy(pStreamScan->pRes);
@@ -2465,8 +2493,6 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (initExprSupp(&pInfo->tbnameCalSup, pSubTableExpr, 1) != 0) {
goto _error;
}
- pInfo->pGroupIdTbNameMap =
- taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
}
if (pTableScanNode->pTags != NULL) {
@@ -2492,7 +2518,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
SOperatorInfo* pTableScanOp = createTableScanOperatorInfo(pTableScanNode, pHandle, pTaskInfo);
STableScanInfo* pTSInfo = (STableScanInfo*)pTableScanOp->info;
if (pHandle->version > 0) {
- pTSInfo->cond.endVersion = pHandle->version;
+ pTSInfo->base.cond.endVersion = pHandle->version;
}
STableKeyInfo* pList = NULL;
@@ -2501,8 +2527,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (pHandle->initTableReader) {
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
- pTSInfo->dataReader = NULL;
- code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, pList, num, &pTSInfo->dataReader, NULL);
+ pTSInfo->base.dataReader = NULL;
+ code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->base.cond, pList, num, &pTSInfo->base.dataReader, NULL);
if (code != 0) {
terrno = code;
destroyTableScanOperatorInfo(pTableScanOp);
@@ -2538,7 +2564,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
goto _error;
}
taosArrayDestroy(tableIdList);
- memcpy(&pTaskInfo->streamInfo.tableCond, &pTSInfo->cond, sizeof(SQueryTableDataCond));
+ memcpy(&pTaskInfo->streamInfo.tableCond, &pTSInfo->base.cond, sizeof(SQueryTableDataCond));
} else {
taosArrayDestroy(pColIds);
}
@@ -4353,123 +4379,6 @@ _error:
return NULL;
}
-// todo refactor
-static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo,
- SSDataBlock* pBlock, uint32_t* status) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- STableMergeScanInfo* pInfo = pOperator->info;
-
- SFileBlockLoadRecorder* pCost = &pTableScanInfo->readRecorder;
-
- pCost->totalBlocks += 1;
- pCost->totalRows += pBlock->info.rows;
-
- *status = pInfo->dataBlockLoadFlag;
- if (pOperator->exprSupp.pFilterInfo != NULL ||
- overlapWithTimeWindow(&pTableScanInfo->interval, &pBlock->info, pTableScanInfo->cond.order)) {
- (*status) = FUNC_DATA_REQUIRED_DATA_LOAD;
- }
-
- SDataBlockInfo* pBlockInfo = &pBlock->info;
- taosMemoryFreeClear(pBlock->pBlockAgg);
-
- if (*status == FUNC_DATA_REQUIRED_FILTEROUT) {
- qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
- pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
- pCost->filterOutBlocks += 1;
- return TSDB_CODE_SUCCESS;
- } else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
- qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
- pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
- pCost->skipBlocks += 1;
-
- // clear all data in pBlock that are set when handing the previous block
- for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); ++i) {
- SColumnInfoData* pcol = taosArrayGet(pBlock->pDataBlock, i);
- pcol->pData = NULL;
- }
-
- return TSDB_CODE_SUCCESS;
- } else if (*status == FUNC_DATA_REQUIRED_SMA_LOAD) {
- pCost->loadBlockStatis += 1;
-
- bool allColumnsHaveAgg = true;
- SColumnDataAgg** pColAgg = NULL;
-
- if (allColumnsHaveAgg == true) {
- int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
-
- // todo create this buffer during creating operator
- if (pBlock->pBlockAgg == NULL) {
- pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
- }
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- SColMatchItem* pColMatchInfo = taosArrayGet(pTableScanInfo->matchInfo.pList, i);
- if (!pColMatchInfo->needOutput) {
- continue;
- }
- pBlock->pBlockAgg[pColMatchInfo->dstSlotId] = pColAgg[i];
- }
-
- return TSDB_CODE_SUCCESS;
- } else { // failed to load the block sma data, data block statistics does not exist, load data block instead
- *status = FUNC_DATA_REQUIRED_DATA_LOAD;
- }
- }
-
- ASSERT(*status == FUNC_DATA_REQUIRED_DATA_LOAD);
-
- // todo filter data block according to the block sma data firstly
-#if 0
- if (!doFilterByBlockSMA(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
- pCost->filterOutBlocks += 1;
- qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), pBlockInfo->window.skey,
- pBlockInfo->window.ekey, pBlockInfo->rows);
- (*status) = FUNC_DATA_REQUIRED_FILTEROUT;
- return TSDB_CODE_SUCCESS;
- }
-#endif
-
- pCost->totalCheckedRows += pBlock->info.rows;
- pCost->loadBlocks += 1;
-
- STsdbReader* reader = pTableScanInfo->pReader;
- SArray* pCols = tsdbRetrieveDataBlock(reader, NULL);
- if (pCols == NULL) {
- return terrno;
- }
-
- relocateColumnData(pBlock, pTableScanInfo->matchInfo.pList, pCols, true);
-
- // currently only the tbname pseudo column
- SExprSupp* pSup = &pTableScanInfo->pseudoSup;
-
- int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock,
- pBlock->info.rows, GET_TASKID(pTaskInfo), NULL);
- if (code != TSDB_CODE_SUCCESS) {
- T_LONG_JMP(pTaskInfo->env, code);
- }
-
- if (pOperator->exprSupp.pFilterInfo != NULL) {
- int64_t st = taosGetTimestampMs();
- doFilter(pBlock, pOperator->exprSupp.pFilterInfo, &pTableScanInfo->matchInfo);
-
- double el = (taosGetTimestampUs() - st) / 1000.0;
- pTableScanInfo->readRecorder.filterTime += el;
-
- if (pBlock->info.rows == 0) {
- pCost->filterOutBlocks += 1;
- qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d, elapsed time:%.2f ms",
- GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, el);
- } else {
- qDebug("%s data block filter applied, elapsed time:%.2f ms", GET_TASKID(pTaskInfo), el);
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-
static SSDataBlock* getTableDataBlockImpl(void* param) {
STableMergeScanSortSourceParam* source = param;
SOperatorInfo* pOperator = source->pOperator;
@@ -4484,15 +4393,15 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
int64_t st = taosGetTimestampUs();
- void* p = tableListGetInfo(pInfo->tableListInfo, readIdx + pInfo->tableStartIndex);
- SReadHandle* pHandle = &pInfo->readHandle;
+ void* p = tableListGetInfo(pTaskInfo->pTableInfoList, readIdx + pInfo->tableStartIndex);
+ SReadHandle* pHandle = &pInfo->base.readHandle;
- int32_t code = tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, &pInfo->pReader, GET_TASKID(pTaskInfo));
+ int32_t code = tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, &pInfo->base.dataReader, GET_TASKID(pTaskInfo));
if (code != 0) {
T_LONG_JMP(pTaskInfo->env, code);
}
- STsdbReader* reader = pInfo->pReader;
+ STsdbReader* reader = pInfo->base.dataReader;
while (tsdbNextDataBlock(reader)) {
if (isTaskKilled(pTaskInfo)) {
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
@@ -4518,7 +4427,8 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
}
uint32_t status = 0;
- code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, pBlock, &status);
+ loadDataBlock(pOperator, &pTableScanInfo->base, pBlock, &status);
+// code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
@@ -4531,15 +4441,15 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
pBlock->info.groupId = getTableGroupId(pTaskInfo->pTableInfoList, pBlock->info.uid);
pOperator->resultInfo.totalRows += pBlock->info.rows;
- pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
+ pTableScanInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
- tsdbReaderClose(pInfo->pReader);
- pInfo->pReader = NULL;
+ tsdbReaderClose(pInfo->base.dataReader);
+ pInfo->base.dataReader = NULL;
return pBlock;
}
- tsdbReaderClose(pInfo->pReader);
- pInfo->pReader = NULL;
+ tsdbReaderClose(pInfo->base.dataReader);
+ pInfo->base.dataReader = NULL;
return NULL;
}
@@ -4577,10 +4487,10 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
{
- size_t numOfTables = tableListGetSize(pInfo->tableListInfo);
+ size_t numOfTables = tableListGetSize(pTaskInfo->pTableInfoList);
int32_t i = pInfo->tableStartIndex + 1;
for (; i < numOfTables; ++i) {
- STableKeyInfo* tableKeyInfo = tableListGetInfo(pInfo->tableListInfo, i);
+ STableKeyInfo* tableKeyInfo = tableListGetInfo(pTaskInfo->pTableInfoList, i);
if (tableKeyInfo->groupId != pInfo->groupId) {
break;
}
@@ -4591,7 +4501,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
int32_t tableStartIdx = pInfo->tableStartIndex;
int32_t tableEndIdx = pInfo->tableEndIndex;
- pInfo->pReader = NULL;
+ pInfo->base.dataReader = NULL;
// todo the total available buffer should be determined by total capacity of buffer of this task.
// the additional one is reserved for merge result
@@ -4614,7 +4524,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
taosArrayPush(pInfo->sortSourceParams, ¶m);
SQueryTableDataCond cond;
- dumpSQueryTableCond(&pInfo->cond, &cond);
+ dumpSQueryTableCond(&pInfo->base.cond, &cond);
taosArrayPush(pInfo->queryConds, &cond);
}
@@ -4654,6 +4564,7 @@ int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) {
taosArrayClear(pInfo->sortSourceParams);
tsortDestroySortHandle(pInfo->pSortHandle);
+ pInfo->pSortHandle = NULL;
for (int32_t i = 0; i < taosArrayGetSize(pInfo->queryConds); i++) {
SQueryTableDataCond* cond = taosArrayGet(pInfo->queryConds, i);
@@ -4704,7 +4615,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
T_LONG_JMP(pTaskInfo->env, code);
}
- size_t tableListSize = tableListGetSize(pInfo->tableListInfo);
+ size_t tableListSize = tableListGetSize(pTaskInfo->pTableInfoList);
if (!pInfo->hasGroupId) {
pInfo->hasGroupId = true;
@@ -4713,7 +4624,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
return NULL;
}
pInfo->tableStartIndex = 0;
- pInfo->groupId = ((STableKeyInfo*)tableListGetInfo(pInfo->tableListInfo, pInfo->tableStartIndex))->groupId;
+ pInfo->groupId = ((STableKeyInfo*)tableListGetInfo(pTaskInfo->pTableInfoList, pInfo->tableStartIndex))->groupId;
startGroupTableMergeScan(pOperator);
}
@@ -4732,7 +4643,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
break;
}
pInfo->tableStartIndex = pInfo->tableEndIndex + 1;
- pInfo->groupId = tableListGetInfo(pInfo->tableListInfo, pInfo->tableStartIndex)->groupId;
+ pInfo->groupId = tableListGetInfo(pTaskInfo->pTableInfoList, pInfo->tableStartIndex)->groupId;
startGroupTableMergeScan(pOperator);
}
}
@@ -4742,7 +4653,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
void destroyTableMergeScanOperatorInfo(void* param) {
STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
- cleanupQueryTableDataCond(&pTableScanInfo->cond);
+ cleanupQueryTableDataCond(&pTableScanInfo->base.cond);
int32_t numOfTable = taosArrayGetSize(pTableScanInfo->queryConds);
@@ -4752,9 +4663,11 @@ void destroyTableMergeScanOperatorInfo(void* param) {
}
taosArrayDestroy(pTableScanInfo->sortSourceParams);
+ tsortDestroySortHandle(pTableScanInfo->pSortHandle);
+ pTableScanInfo->pSortHandle = NULL;
- tsdbReaderClose(pTableScanInfo->pReader);
- pTableScanInfo->pReader = NULL;
+ tsdbReaderClose(pTableScanInfo->base.dataReader);
+ pTableScanInfo->base.dataReader = NULL;
for (int i = 0; i < taosArrayGetSize(pTableScanInfo->queryConds); i++) {
SQueryTableDataCond* pCond = taosArrayGet(pTableScanInfo->queryConds, i);
@@ -4762,17 +4675,20 @@ void destroyTableMergeScanOperatorInfo(void* param) {
}
taosArrayDestroy(pTableScanInfo->queryConds);
- if (pTableScanInfo->matchInfo.pList != NULL) {
- taosArrayDestroy(pTableScanInfo->matchInfo.pList);
+ if (pTableScanInfo->base.matchInfo.pList != NULL) {
+ taosArrayDestroy(pTableScanInfo->base.matchInfo.pList);
}
pTableScanInfo->pResBlock = blockDataDestroy(pTableScanInfo->pResBlock);
pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock);
taosArrayDestroy(pTableScanInfo->pSortInfo);
- cleanupExprSupp(&pTableScanInfo->pseudoSup);
+ cleanupExprSupp(&pTableScanInfo->base.pseudoSup);
+
+ tsdbReaderClose(pTableScanInfo->base.dataReader);
+ pTableScanInfo->base.dataReader = NULL;
+ taosLRUCacheCleanup(pTableScanInfo->base.metaCache.pTableMetaEntryCache);
- taosMemoryFreeClear(pTableScanInfo->rowEntryInfoOffset);
taosMemoryFreeClear(param);
}
@@ -4781,7 +4697,7 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla
// TODO: merge these two info into one struct
STableMergeScanExecInfo* execInfo = taosMemoryCalloc(1, sizeof(STableMergeScanExecInfo));
STableMergeScanInfo* pInfo = pOptr->info;
- execInfo->blockRecorder = pInfo->readRecorder;
+ execInfo->blockRecorder = pInfo->base.readRecorder;
execInfo->sortExecInfo = pInfo->sortExecInfo;
*pOptrExplain = execInfo;
@@ -4790,8 +4706,8 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla
return TSDB_CODE_SUCCESS;
}
-SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
- SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
+ SExecTaskInfo* pTaskInfo) {
STableMergeScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableMergeScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -4802,38 +4718,46 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
int32_t numOfCols = 0;
int32_t code = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID,
- &pInfo->matchInfo);
+ &pInfo->base.matchInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
- code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
+ code = initQueryTableDataCond(&pInfo->base.cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(pInfo->matchInfo.pList);
+ taosArrayDestroy(pInfo->base.matchInfo.pList);
goto _error;
}
if (pTableScanNode->scan.pScanPseudoCols != NULL) {
- SExprSupp* pSup = &pInfo->pseudoSup;
+ SExprSupp* pSup = &pInfo->base.pseudoSup;
pSup->pExprInfo = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pSup->numOfExprs);
pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset);
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
- pInfo->readHandle = *readHandle;
- pInfo->interval = extractIntervalInfo(pTableScanNode);
+ pInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5);
+ if (pInfo->base.metaCache.pTableMetaEntryCache == NULL) {
+ code = terrno;
+ goto _error;
+ }
+
+ pInfo->base.dataBlockLoadFlag = FUNC_DATA_REQUIRED_DATA_LOAD;
+ pInfo->base.scanFlag = MAIN_SCAN;
+ pInfo->base.readHandle = *readHandle;
+
+ pInfo->base.limitInfo.limit.limit = -1;
+ pInfo->base.limitInfo.slimit.limit = -1;
+
pInfo->sample.sampleRatio = pTableScanNode->ratio;
pInfo->sample.seed = taosGetTimestampSec();
- pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired;
code = filterInitFromNode((SNode*)pTableScanNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
- pInfo->tableListInfo = pTableListInfo;
- pInfo->scanFlag = MAIN_SCAN;
initResultSizeInfo(&pOperator->resultInfo, 1024);
pInfo->pResBlock = createResDataBlock(pDescNode);
@@ -4841,12 +4765,13 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
pInfo->sortSourceParams = taosArrayInit(64, sizeof(STableMergeScanSortSourceParam));
- pInfo->pSortInfo = generateSortByTsInfo(pInfo->matchInfo.pList, pInfo->cond.order);
+ pInfo->pSortInfo = generateSortByTsInfo(pInfo->base.matchInfo.pList, pInfo->base.cond.order);
pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false);
initLimitInfo(pTableScanNode->scan.node.pLimit, pTableScanNode->scan.node.pSlimit, &pInfo->limitInfo);
- int32_t rowSize = pInfo->pResBlock->info.rowSize;
- pInfo->bufPageSize = getProperSortPageSize(rowSize);
+ int32_t rowSize = pInfo->pResBlock->info.rowSize;
+ uint32_t nCols = taosArrayGetSize(pInfo->pResBlock->pDataBlock);
+ pInfo->bufPageSize = getProperSortPageSize(rowSize, nCols);
setOperatorInfo(pOperator, "TableMergeScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, false, OP_NOT_OPENED,
pInfo, pTaskInfo);
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index fc53623d44..add580ce7c 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -760,7 +760,8 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size
pInfo->groupSort = pMergePhyNode->groupSort;
pInfo->pSortInfo = createSortInfo(pMergePhyNode->pMergeKeys);
pInfo->pInputBlock = pInputBlock;
- pInfo->bufPageSize = getProperSortPageSize(rowSize);
+ size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock);
+ pInfo->bufPageSize = getProperSortPageSize(rowSize, numOfCols);
pInfo->sortBufSize = pInfo->bufPageSize * (numStreams + 1); // one additional is reserved for merged result.
setOperatorInfo(pOperator, "MultiwayMergeOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE, false, OP_NOT_OPENED, pInfo, pTaskInfo);
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 87b4ed4c4a..fc78f8a20d 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -1450,7 +1450,6 @@ static int32_t closeStreamIntervalWindow(SSHashObj* pHashMap, STimeWindowAggSupp
}
}
tSimpleHashIterateRemove(pHashMap, pWinKey, sizeof(SWinKey), &pIte, &iter);
- /*taosHashRemove(pInfo->pGroupIdTbNameMap, &pWinKey->groupId, sizeof(int64_t));*/
}
}
return TSDB_CODE_SUCCESS;
@@ -1547,7 +1546,8 @@ static void doBuildDeleteResult(SStreamIntervalOperatorInfo* pInfo, SArray* pWin
uint64_t uid = 0;
for (int32_t i = *index; i < size; i++) {
SWinKey* pWin = taosArrayGet(pWins, i);
- char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pWin->groupId, sizeof(int64_t));
+ void* tbname = NULL;
+ streamStateGetParName(pInfo->pState, pWin->groupId, &tbname);
if (tbname == NULL) {
appendOneRowToStreamSpecialBlock(pBlock, &pWin->ts, &pWin->ts, &uid, &pWin->groupId, NULL);
} else {
@@ -1555,6 +1555,7 @@ static void doBuildDeleteResult(SStreamIntervalOperatorInfo* pInfo, SArray* pWin
STR_WITH_MAXSIZE_TO_VARSTR(parTbName, tbname, sizeof(parTbName));
appendOneRowToStreamSpecialBlock(pBlock, &pWin->ts, &pWin->ts, &uid, &pWin->groupId, parTbName);
}
+ tdbFree(tbname);
(*index)++;
}
}
@@ -1610,17 +1611,13 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i);
- destroyStreamFinalIntervalOperatorInfo(pChildOp->info);
- taosMemoryFree(pChildOp->pDownstream);
- cleanupExprSupp(&pChildOp->exprSupp);
- taosMemoryFreeClear(pChildOp);
+ destroyOperatorInfo(pChildOp);
}
taosArrayDestroy(pInfo->pChildren);
}
nodesDestroyNode((SNode*)pInfo->pPhyNode);
colDataDestroy(&pInfo->twAggSup.timeWindowData);
cleanupGroupResInfo(&pInfo->groupResInfo);
- taosHashCleanup(pInfo->pGroupIdTbNameMap);
taosMemoryFreeClear(param);
}
@@ -1945,10 +1942,8 @@ static void doKeepPrevRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
- // null data should not be kept since it can not be used to perform interpolation
- if (!colDataIsNull_s(pColInfoData, i)) {
- SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, i);
-
+ SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, i);
+ if (!colDataIsNull_s(pColInfoData, rowIndex)) {
pkey->isNull = false;
char* val = colDataGetData(pColInfoData, rowIndex);
if (!IS_VAR_DATA_TYPE(pkey->type)) {
@@ -1956,6 +1951,8 @@ static void doKeepPrevRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
} else {
memcpy(pkey->pData, val, varDataLen(val));
}
+ } else {
+ pkey->isNull = true;
}
}
@@ -1967,10 +1964,8 @@ static void doKeepNextRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
- // null data should not be kept since it can not be used to perform interpolation
- if (!colDataIsNull_s(pColInfoData, i)) {
- SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, i);
-
+ SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, i);
+ if (!colDataIsNull_s(pColInfoData, rowIndex)) {
pkey->isNull = false;
char* val = colDataGetData(pColInfoData, rowIndex);
if (!IS_VAR_DATA_TYPE(pkey->type)) {
@@ -1978,50 +1973,51 @@ static void doKeepNextRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
} else {
memcpy(pkey->pData, val, varDataLen(val));
}
+ } else {
+ pkey->isNull = true;
}
}
pSliceInfo->isNextRowSet = true;
}
-static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock* pBlock, int32_t rowIndex,
- bool isLastRow) {
+static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock* pBlock, int32_t rowIndex) {
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
- bool fillLastPoint = pSliceInfo->fillLastPoint;
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId);
SFillLinearInfo* pLinearInfo = taosArrayGet(pSliceInfo->pLinearInfo, i);
- // null data should not be kept since it can not be used to perform interpolation
- if (!colDataIsNull_s(pColInfoData, i)) {
- if (isLastRow) {
+ // null value is represented by using key = INT64_MIN for now.
+ // TODO: optimize to ignore null values for linear interpolation.
+ if (!pLinearInfo->isStartSet) {
+ if (!colDataIsNull_s(pColInfoData, rowIndex)) {
pLinearInfo->start.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
memcpy(pLinearInfo->start.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
- } else if (fillLastPoint) {
+ }
+ pLinearInfo->isStartSet = true;
+ } else if (!pLinearInfo->isEndSet) {
+ if (!colDataIsNull_s(pColInfoData, rowIndex)) {
+ pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
+ memcpy(pLinearInfo->end.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
+ }
+ pLinearInfo->isEndSet = true;
+ } else {
+ pLinearInfo->start.key = pLinearInfo->end.key;
+ memcpy(pLinearInfo->start.val, pLinearInfo->end.val, pLinearInfo->bytes);
+
+ if (!colDataIsNull_s(pColInfoData, rowIndex)) {
pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
memcpy(pLinearInfo->end.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
} else {
- pLinearInfo->start.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
- pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex + 1);
-
- char* val;
- val = colDataGetData(pColInfoData, rowIndex);
- memcpy(pLinearInfo->start.val, val, pLinearInfo->bytes);
- val = colDataGetData(pColInfoData, rowIndex + 1);
- memcpy(pLinearInfo->end.val, val, pLinearInfo->bytes);
+ pLinearInfo->end.key = INT64_MIN;
}
-
- pLinearInfo->hasNull = false;
- } else {
- pLinearInfo->hasNull = true;
}
}
- pSliceInfo->fillLastPoint = isLastRow;
}
-static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock) {
+static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock, bool beforeTs) {
int32_t rows = pResBlock->info.rows;
blockDataEnsureCapacity(pResBlock, rows + 1);
// todo set the correct primary timestamp column
@@ -2040,7 +2036,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
- // SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
switch (pSliceInfo->fillType) {
case TSDB_FILL_NULL: {
colDataAppendNULL(pDst, rows);
@@ -2072,21 +2067,26 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
SPoint start = pLinearInfo->start;
SPoint end = pLinearInfo->end;
SPoint current = {.key = pSliceInfo->current};
- current.val = taosMemoryCalloc(pLinearInfo->bytes, 1);
- // before interp range, do not fill
- if (start.key == INT64_MIN || end.key == INT64_MAX) {
+ // do not interpolate before ts range, only increate pSliceInfo->current
+ if (beforeTs && !pLinearInfo->isEndSet) {
+ return true;
+ }
+
+ if (!pLinearInfo->isStartSet || !pLinearInfo->isEndSet) {
hasInterp = false;
break;
}
- if (pLinearInfo->hasNull) {
+ if (start.key == INT64_MIN || end.key == INT64_MIN) {
colDataAppendNULL(pDst, rows);
- } else {
- taosGetLinearInterpolationVal(¤t, pLinearInfo->type, &start, &end, pLinearInfo->type);
- colDataAppend(pDst, rows, (char*)current.val, false);
+ break;
}
+ current.val = taosMemoryCalloc(pLinearInfo->bytes, 1);
+ taosGetLinearInterpolationVal(¤t, pLinearInfo->type, &start, &end, pLinearInfo->type);
+ colDataAppend(pDst, rows, (char*)current.val, false);
+
taosMemoryFree(current.val);
break;
}
@@ -2097,7 +2097,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, srcSlot);
- colDataAppend(pDst, rows, pkey->pData, false);
+ if (pkey->isNull == false) {
+ colDataAppend(pDst, rows, pkey->pData, false);
+ } else {
+ colDataAppendNULL(pDst, rows);
+ }
break;
}
@@ -2108,7 +2112,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot);
- colDataAppend(pDst, rows, pkey->pData, false);
+ if (pkey->isNull == false) {
+ colDataAppend(pDst, rows, pkey->pData, false);
+ } else {
+ colDataAppendNULL(pDst, rows);
+ }
break;
}
@@ -2121,8 +2129,40 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
if (hasInterp) {
pResBlock->info.rows += 1;
}
+
+ return hasInterp;
}
+static void addCurrentRowToResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock,
+ SSDataBlock* pSrcBlock, int32_t index) {
+ blockDataEnsureCapacity(pResBlock, pResBlock->info.rows + 1);
+ for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
+ SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
+
+ int32_t dstSlot = pExprInfo->base.resSchema.slotId;
+ SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
+
+ if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
+ colDataAppend(pDst, pResBlock->info.rows, (char*)&pSliceInfo->current, false);
+ } else {
+ int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
+ SColumnInfoData* pSrc = taosArrayGet(pSrcBlock->pDataBlock, srcSlot);
+
+ if (colDataIsNull_s(pSrc, index)) {
+ colDataAppendNULL(pDst, pResBlock->info.rows);
+ continue;
+ }
+
+ char* v = colDataGetData(pSrc, index);
+ colDataAppend(pDst, pResBlock->info.rows, v, false);
+ }
+ }
+
+ pResBlock->info.rows += 1;
+ return;
+}
+
+
static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
if (pInfo->pPrevRow != NULL) {
return TSDB_CODE_SUCCESS;
@@ -2193,24 +2233,19 @@ static int32_t initFillLinearInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pB
SFillLinearInfo linearInfo = {0};
linearInfo.start.key = INT64_MIN;
- linearInfo.end.key = INT64_MAX;
+ linearInfo.end.key = INT64_MIN;
linearInfo.start.val = taosMemoryCalloc(1, pColInfo->info.bytes);
linearInfo.end.val = taosMemoryCalloc(1, pColInfo->info.bytes);
- linearInfo.hasNull = false;
+ linearInfo.isStartSet = false;
+ linearInfo.isEndSet = false;
linearInfo.type = pColInfo->info.type;
linearInfo.bytes = pColInfo->info.bytes;
taosArrayPush(pInfo->pLinearInfo, &linearInfo);
}
- pInfo->fillLastPoint = false;
-
return TSDB_CODE_SUCCESS;
}
-static bool needToFillLastPoint(STimeSliceOperatorInfo* pSliceInfo) {
- return (pSliceInfo->fillLastPoint == true && pSliceInfo->fillType == TSDB_FILL_LINEAR);
-}
-
static int32_t initKeeperInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
int32_t code;
code = initPrevRowsKeeper(pInfo, pBlock);
@@ -2266,195 +2301,73 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
int64_t ts = *(int64_t*)colDataGetData(pTsCol, i);
- if (i == 0 && needToFillLastPoint(pSliceInfo)) { // first row in current block
- doKeepLinearInfo(pSliceInfo, pBlock, i, false);
- while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
- genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
- pSliceInfo->current =
- taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
- }
- }
-
if (pSliceInfo->current > pSliceInfo->win.ekey) {
setOperatorCompleted(pOperator);
break;
}
if (ts == pSliceInfo->current) {
- blockDataEnsureCapacity(pResBlock, pResBlock->info.rows + 1);
- for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
- SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
+ addCurrentRowToResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i);
- int32_t dstSlot = pExprInfo->base.resSchema.slotId;
- SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
-
- if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
- colDataAppend(pDst, pResBlock->info.rows, (char*)&pSliceInfo->current, false);
- } else {
- int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
- SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
-
- if (colDataIsNull_s(pSrc, i)) {
- colDataAppendNULL(pDst, pResBlock->info.rows);
- continue;
- }
-
- char* v = colDataGetData(pSrc, i);
- colDataAppend(pDst, pResBlock->info.rows, v, false);
- }
- }
-
- pResBlock->info.rows += 1;
doKeepPrevRows(pSliceInfo, pBlock, i);
+ doKeepLinearInfo(pSliceInfo, pBlock, i);
- // for linear interpolation, always fill value between this and next points;
- // if its the first point in data block, also fill values between previous(if there's any) and this point;
- // if its the last point in data block, no need to fill, but reserve this point as the start value and do
- // the interpolation when processing next data block.
- if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
- pSliceInfo->current =
- taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
- if (i < pBlock->info.rows - 1) {
- doKeepLinearInfo(pSliceInfo, pBlock, i, false);
- int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
- if (nextTs > pSliceInfo->current) {
- while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
- genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
- pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit,
- pInterval->precision);
- }
-
- if (pSliceInfo->current > pSliceInfo->win.ekey) {
- setOperatorCompleted(pOperator);
- break;
- }
- }
- } else { // it is the last row of current block
- // store ts value as start, and calculate interp value when processing next block
- doKeepLinearInfo(pSliceInfo, pBlock, i, true);
- }
- } else { // non-linear interpolation
- pSliceInfo->current =
- taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
- if (pSliceInfo->current > pSliceInfo->win.ekey) {
- setOperatorCompleted(pOperator);
- break;
- }
+ pSliceInfo->current =
+ taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
+ if (pSliceInfo->current > pSliceInfo->win.ekey) {
+ setOperatorCompleted(pOperator);
+ break;
}
} else if (ts < pSliceInfo->current) {
// in case of interpolation window starts and ends between two datapoints, fill(prev) need to interpolate
doKeepPrevRows(pSliceInfo, pBlock, i);
+ doKeepLinearInfo(pSliceInfo, pBlock, i);
- if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
- // no need to increate pSliceInfo->current here
- // pSliceInfo->current =
- // taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
- if (i < pBlock->info.rows - 1) {
- doKeepLinearInfo(pSliceInfo, pBlock, i, false);
- int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
- if (nextTs > pSliceInfo->current) {
- while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
- genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
- pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit,
- pInterval->precision);
- }
-
- if (pSliceInfo->current > pSliceInfo->win.ekey) {
- setOperatorCompleted(pOperator);
+ if (i < pBlock->info.rows - 1) {
+ // in case of interpolation window starts and ends between two datapoints, fill(next) need to interpolate
+ doKeepNextRows(pSliceInfo, pBlock, i + 1);
+ int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
+ if (nextTs > pSliceInfo->current) {
+ while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
+ if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, false) && pSliceInfo->fillType == TSDB_FILL_LINEAR) {
break;
+ } else {
+ pSliceInfo->current =
+ taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
}
}
+
+ if (pSliceInfo->current > pSliceInfo->win.ekey) {
+ setOperatorCompleted(pOperator);
+ break;
+ }
} else {
- // store ts value as start, and calculate interp value when processing next block
- doKeepLinearInfo(pSliceInfo, pBlock, i, true);
- }
- } else { // non-linear interpolation
- if (i < pBlock->info.rows - 1) {
- // in case of interpolation window starts and ends between two datapoints, fill(next) need to interpolate
- doKeepNextRows(pSliceInfo, pBlock, i + 1);
- int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
- if (nextTs > pSliceInfo->current) {
- while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
- genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
- pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit,
- pInterval->precision);
- }
-
- if (pSliceInfo->current > pSliceInfo->win.ekey) {
- setOperatorCompleted(pOperator);
- break;
- }
- } else {
- // ignore current row, and do nothing
- }
- } else { // it is the last row of current block
- doKeepPrevRows(pSliceInfo, pBlock, i);
+ // ignore current row, and do nothing
}
+ } else { // it is the last row of current block
+ doKeepPrevRows(pSliceInfo, pBlock, i);
}
} else { // ts > pSliceInfo->current
// in case of interpolation window starts and ends between two datapoints, fill(next) need to interpolate
doKeepNextRows(pSliceInfo, pBlock, i);
+ doKeepLinearInfo(pSliceInfo, pBlock, i);
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
- genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
- pSliceInfo->current =
- taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
+ if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, true) && pSliceInfo->fillType == TSDB_FILL_LINEAR) {
+ break;
+ } else {
+ pSliceInfo->current =
+ taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
+ }
}
// add current row if timestamp match
if (ts == pSliceInfo->current && pSliceInfo->current <= pSliceInfo->win.ekey) {
- blockDataEnsureCapacity(pResBlock, pResBlock->info.rows + 1);
- for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
- SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
-
- int32_t dstSlot = pExprInfo->base.resSchema.slotId;
- SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
-
- if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
- colDataAppend(pDst, pResBlock->info.rows, (char*)&pSliceInfo->current, false);
- } else {
- int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
- SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
-
- if (colDataIsNull_s(pSrc, i)) {
- colDataAppendNULL(pDst, pResBlock->info.rows);
- continue;
- }
-
- char* v = colDataGetData(pSrc, i);
- colDataAppend(pDst, pResBlock->info.rows, v, false);
- }
- }
-
- pResBlock->info.rows += 1;
+ addCurrentRowToResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i);
doKeepPrevRows(pSliceInfo, pBlock, i);
- if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
- pSliceInfo->current =
- taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
- if (i < pBlock->info.rows - 1) {
- doKeepLinearInfo(pSliceInfo, pBlock, i, false);
- int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
- if (nextTs > pSliceInfo->current) {
- while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
- genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
- pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit,
- pInterval->precision);
- }
-
- if (pSliceInfo->current > pSliceInfo->win.ekey) {
- setOperatorCompleted(pOperator);
- break;
- }
- }
- } else { // it is the last row of current block
- // store ts value as start, and calculate interp value when processing next block
- doKeepLinearInfo(pSliceInfo, pBlock, i, true);
- }
- } else { // non-linear interpolation
- pSliceInfo->current =
- taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
- }
+ pSliceInfo->current =
+ taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
}
if (pSliceInfo->current > pSliceInfo->win.ekey) {
@@ -2469,7 +2382,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
// except for fill(next), fill(linear)
while (pSliceInfo->current <= pSliceInfo->win.ekey && pSliceInfo->fillType != TSDB_FILL_NEXT &&
pSliceInfo->fillType != TSDB_FILL_LINEAR) {
- genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
+ genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, false);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
}
@@ -2548,9 +2461,11 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
pInfo->interval.interval = pInterpPhyNode->interval;
pInfo->current = pInfo->win.skey;
- STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info;
- pScanInfo->cond.twindows = pInfo->win;
- pScanInfo->cond.type = TIMEWINDOW_RANGE_EXTERNAL;
+ if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
+ STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info;
+ pScanInfo->base.cond.twindows = pInfo->win;
+ pScanInfo->base.cond.type = TIMEWINDOW_RANGE_EXTERNAL;
+ }
setOperatorInfo(pOperator, "TimeSliceOperator", QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC, false, OP_NOT_OPENED, pInfo,
pTaskInfo);
@@ -3156,11 +3071,6 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval final recv" : "interval semi recv");
- if (pBlock->info.parTbName[0]) {
- taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), &pBlock->info.parTbName,
- TSDB_TABLE_NAME_LEN);
- }
-
ASSERT(pBlock->info.type != STREAM_INVERT);
if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA) {
pInfo->binfo.pRes->info.type = pBlock->info.type;
@@ -3375,9 +3285,6 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->delKey.ts = INT64_MAX;
pInfo->delKey.groupId = 0;
- pInfo->pGroupIdTbNameMap =
- taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
-
pOperator->operatorType = pPhyNode->type;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
@@ -3418,18 +3325,16 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
if (pInfo->pChildren != NULL) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
- SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
- SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
- destroyStreamSessionAggOperatorInfo(pChInfo);
- taosMemoryFreeClear(pChild);
+ SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
+ destroyOperatorInfo(pChild);
}
+ taosArrayDestroy(pInfo->pChildren);
}
colDataDestroy(&pInfo->twAggSup.timeWindowData);
blockDataDestroy(pInfo->pDelRes);
blockDataDestroy(pInfo->pWinBlock);
blockDataDestroy(pInfo->pUpdateRes);
tSimpleHashCleanup(pInfo->pStDeleted);
- taosHashCleanup(pInfo->pGroupIdTbNameMap);
taosMemoryFreeClear(param);
}
@@ -3470,7 +3375,9 @@ void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int
}
SStreamScanInfo* pScanInfo = downstream->info;
pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = pAggSup->gap, .parentType = type};
- pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, waterMark);
+ if (!pScanInfo->pUpdateInfo) {
+ pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, waterMark);
+ }
}
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, int64_t gap,
@@ -3859,30 +3766,18 @@ void doBuildDeleteDataBlock(SOperatorInfo* pOp, SSHashObj* pStDeleted, SSDataBlo
SColumnInfoData* pCalEdCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
colDataAppendNULL(pCalEdCol, pBlock->info.rows);
- SHashObj* pGroupIdTbNameMap = NULL;
- if (pOp->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
- pOp->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION ||
- pOp->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
- SStreamSessionAggOperatorInfo* pInfo = pOp->info;
- pGroupIdTbNameMap = pInfo->pGroupIdTbNameMap;
- } else if (pOp->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) {
- SStreamStateAggOperatorInfo* pInfo = pOp->info;
- pGroupIdTbNameMap = pInfo->pGroupIdTbNameMap;
- } else {
- ASSERT(0);
- }
-
- char* tbname = taosHashGet(pGroupIdTbNameMap, &res->groupId, sizeof(int64_t));
SColumnInfoData* pTableCol = taosArrayGet(pBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
+
+ void* tbname = NULL;
+ streamStateGetParName(pOp->pTaskInfo->streamInfo.pState, res->groupId, &tbname);
if (tbname == NULL) {
- /*printf("\n\n no tbname for group id %" PRId64 "%p %p\n\n", res->groupId, pOp->info, pGroupIdTbNameMap);*/
colDataAppendNULL(pTableCol, pBlock->info.rows);
} else {
char parTbName[VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN];
STR_WITH_MAXSIZE_TO_VARSTR(parTbName, tbname, sizeof(parTbName));
colDataAppend(pTableCol, pBlock->info.rows, (const char*)parTbName, false);
- /*printf("\n\n get tbname %s group id %" PRId64 "\n\n", tbname, res->groupId);*/
}
+ tdbFree(tbname);
pBlock->info.rows += 1;
}
if ((*Ite) == NULL) {
@@ -4055,19 +3950,6 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
}
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "final session recv" : "single session recv");
- if (pBlock->info.parTbName[0]) {
- taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), &pBlock->info.parTbName,
- TSDB_TABLE_NAME_LEN);
- /*printf("\n\n put tbname %s group id %" PRId64 "\n\n into %p %p", pBlock->info.parTbName, pBlock->info.groupId,*/
- /*pInfo, pInfo->pGroupIdTbNameMap);*/
- }
-
- if (pBlock->info.parTbName[0]) {
- taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), &pBlock->info.parTbName,
- TSDB_TABLE_NAME_LEN);
- /*printf("\n\n put tbname %s\n\n", pBlock->info.parTbName);*/
- }
-
if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT ||
pBlock->info.type == STREAM_CLEAR) {
SArray* pWins = taosArrayInit(16, sizeof(SSessionKey));
@@ -4211,8 +4093,6 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
pInfo->isFinal = false;
pInfo->pPhyNode = pPhyNode;
pInfo->ignoreExpiredData = pSessionNode->window.igExpired;
- pInfo->pGroupIdTbNameMap =
- taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
setOperatorInfo(pOperator, "StreamSessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION, true,
OP_NOT_OPENED, pInfo, pTaskInfo);
@@ -4287,12 +4167,6 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
}
printDataBlock(pBlock, "semi session recv");
- if (pBlock->info.parTbName[0]) {
- taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), &pBlock->info.parTbName,
- TSDB_TABLE_NAME_LEN);
- /*printf("\n\n put tbname %s\n\n", pBlock->info.parTbName);*/
- }
-
if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT ||
pBlock->info.type == STREAM_CLEAR) {
// gap must be 0
@@ -4374,9 +4248,6 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
setOperatorInfo(pOperator, name, pPhyNode->type, false, OP_NOT_OPENED, pInfo, pTaskInfo);
- pInfo->pGroupIdTbNameMap =
- taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
-
pOperator->operatorType = pPhyNode->type;
if (numOfChild > 0) {
pInfo->pChildren = taosArrayInit(numOfChild, sizeof(void*));
@@ -4415,16 +4286,14 @@ void destroyStreamStateOperatorInfo(void* param) {
if (pInfo->pChildren != NULL) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
- SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
- SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
- destroyStreamSessionAggOperatorInfo(pChInfo);
- taosMemoryFreeClear(pChild);
+ SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
+ destroyOperatorInfo(pChild);
}
+ taosArrayDestroy(pInfo->pChildren);
}
colDataDestroy(&pInfo->twAggSup.timeWindowData);
blockDataDestroy(pInfo->pDelRes);
tSimpleHashCleanup(pInfo->pSeDeleted);
- taosHashCleanup(pInfo->pGroupIdTbNameMap);
taosMemoryFreeClear(param);
}
@@ -4618,12 +4487,6 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
}
printDataBlock(pBlock, "single state recv");
- if (pBlock->info.parTbName[0]) {
- taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), &pBlock->info.parTbName,
- TSDB_TABLE_NAME_LEN);
- /*printf("\n\n put tbname %s\n\n", pBlock->info.parTbName);*/
- }
-
if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT ||
pBlock->info.type == STREAM_CLEAR) {
SArray* pWins = taosArrayInit(16, sizeof(SSessionKey));
@@ -4737,9 +4600,6 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->pChildren = NULL;
pInfo->ignoreExpiredData = pStateNode->window.igExpired;
- pInfo->pGroupIdTbNameMap =
- taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
-
setOperatorInfo(pOperator, "StreamStateAggOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE, true, OP_NOT_OPENED,
pInfo, pTaskInfo);
pOperator->fpSet =
@@ -4898,6 +4758,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
if (pMiaInfo->groupId == 0) {
if (pMiaInfo->groupId != pBlock->info.groupId) {
pMiaInfo->groupId = pBlock->info.groupId;
+ pRes->info.groupId = pMiaInfo->groupId;
}
} else {
if (pMiaInfo->groupId != pBlock->info.groupId) {
@@ -4911,6 +4772,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
break;
} else {
// continue
+ pRes->info.groupId = pMiaInfo->groupId;
}
}
@@ -5388,12 +5250,6 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
}
printDataBlock(pBlock, "single interval recv");
- if (pBlock->info.parTbName[0]) {
- taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), &pBlock->info.parTbName,
- TSDB_TABLE_NAME_LEN);
- /*printf("\n\n put tbname %s\n\n", pBlock->info.parTbName);*/
- }
-
if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT ||
pBlock->info.type == STREAM_CLEAR) {
doDeleteWindows(pOperator, &pInfo->interval, pBlock, pInfo->pDelWins, pUpdatedMap);
@@ -5551,9 +5407,6 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->delKey.ts = INT64_MAX;
pInfo->delKey.groupId = 0;
- pInfo->pGroupIdTbNameMap =
- taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
-
setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED,
pInfo, pTaskInfo);
pOperator->fpSet =
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index 71fad2e27c..9c10b51b1f 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -584,15 +584,11 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
return 0;
}
-// TODO consider the page meta size
-int32_t getProperSortPageSize(size_t rowSize) {
- uint32_t defaultPageSize = 4096;
-
- uint32_t pgSize = 0;
- if (rowSize * 4 > defaultPageSize) {
- pgSize = rowSize * 4;
- } else {
- pgSize = defaultPageSize;
+// get sort page size
+int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols) {
+ uint32_t pgSize = rowSize * 4 + blockDataGetSerialMetaSize(numOfCols);
+ if (pgSize < DEFAULT_PAGESIZE) {
+ return DEFAULT_PAGESIZE;
}
return pgSize;
@@ -612,7 +608,8 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
}
if (pHandle->pDataBlock == NULL) {
- pHandle->pageSize = getProperSortPageSize(blockDataGetRowSize(pBlock));
+ uint32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
+ pHandle->pageSize = getProperSortPageSize(blockDataGetRowSize(pBlock), numOfCols);
// todo, number of pages are set according to the total available sort buffer
pHandle->numOfPages = 1024;
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index f877bef44d..fe010786eb 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -917,6 +917,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
int32_t startIndex;
if (numOfParams != 4) {
snprintf(errMsg, msgLen, "%s", msg1);
+ cJSON_Delete(binDesc);
return false;
}
@@ -928,17 +929,20 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) {
snprintf(errMsg, msgLen, "%s", msg3);
+ cJSON_Delete(binDesc);
return false;
}
if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000
snprintf(errMsg, msgLen, "%s", msg4);
+ cJSON_Delete(binDesc);
return false;
}
if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) ||
(factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) {
snprintf(errMsg, msgLen, "%s", msg5);
+ cJSON_Delete(binDesc);
return false;
}
@@ -957,6 +961,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (width->valuedouble == 0) {
snprintf(errMsg, msgLen, "%s", msg6);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
for (int i = 0; i < counter + 1; ++i) {
@@ -964,6 +969,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (isinf(intervals[startIndex])) {
snprintf(errMsg, msgLen, "%s", msg5);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
startIndex++;
@@ -973,11 +979,13 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (start->valuedouble == 0) {
snprintf(errMsg, msgLen, "%s", msg7);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
if (factor->valuedouble < 0 || factor->valuedouble == 0 || factor->valuedouble == 1) {
snprintf(errMsg, msgLen, "%s", msg8);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
for (int i = 0; i < counter + 1; ++i) {
@@ -985,6 +993,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (isinf(intervals[startIndex])) {
snprintf(errMsg, msgLen, "%s", msg5);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
startIndex++;
@@ -992,6 +1001,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
} else {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
@@ -1007,6 +1017,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
} else if (cJSON_IsArray(binDesc)) { /* user input bins */
if (binType != USER_INPUT_BIN) {
snprintf(errMsg, msgLen, "%s", msg3);
+ cJSON_Delete(binDesc);
return false;
}
numOfBins = cJSON_GetArraySize(binDesc);
@@ -1015,6 +1026,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (bin == NULL) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
int i = 0;
@@ -1023,11 +1035,13 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (!cJSON_IsNumber(bin)) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
if (i != 0 && intervals[i] <= intervals[i - 1]) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
+ cJSON_Delete(binDesc);
return false;
}
bin = bin->next;
@@ -1035,6 +1049,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
}
} else {
snprintf(errMsg, msgLen, "%s", msg3);
+ cJSON_Delete(binDesc);
return false;
}
@@ -1464,11 +1479,16 @@ static int32_t translateDerivative(SFunctionNode* pFunc, char* pErrBuf, int32_t
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
// param1
- SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
+ SValueNode* pValue1 = (SValueNode*)pParamNode1;
if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
+ if (pValue1->datum.i <= 0) {
+ return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
SValueNode* pValue = (SValueNode*)pParamNode1;
pValue->notReserved = true;
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index e57b9f9a34..87e15370e4 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -48,8 +48,8 @@ typedef struct SSumRes {
double dsum;
};
int16_t type;
- int64_t prevTs; // used for csum only
- bool isPrevTsSet; //used for csum only
+ int64_t prevTs; // used for csum only
+ bool isPrevTsSet; // used for csum only
} SSumRes;
@@ -1083,7 +1083,10 @@ int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
int32_t type = pAvgRes->type;
- if (IS_SIGNED_NUMERIC_TYPE(type)) {
+ if (pAvgRes->count == 0) {
+ // [ASAN] runtime error: division by zero
+ GET_RES_INFO(pCtx)->numOfRes = 0;
+ } else if (IS_SIGNED_NUMERIC_TYPE(type)) {
pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count);
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
pAvgRes->result = pAvgRes->sum.usum / ((double)pAvgRes->count);
@@ -2217,8 +2220,8 @@ bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInf
SLeastSQRInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
- pInfo->startVal = IS_FLOAT_TYPE(pCtx->param[1].param.nType) ? pCtx->param[1].param.d : (double)pCtx->param[1].param.i;
- pInfo->stepVal = IS_FLOAT_TYPE(pCtx->param[2].param.nType) ? pCtx->param[2].param.d : (double)pCtx->param[2].param.i;
+ GET_TYPED_DATA(pInfo->startVal, double, pCtx->param[1].param.nType, &pCtx->param[1].param.i);
+ GET_TYPED_DATA(pInfo->stepVal, double, pCtx->param[2].param.nType, &pCtx->param[2].param.i);
return true;
}
@@ -2562,8 +2565,8 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SVariant* pVal = &pCtx->param[1].param;
- double v =
- (IS_SIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->i : (IS_UNSIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->u : pVal->d));
+ double v = 0;
+ GET_TYPED_DATA(v, double, pVal->nType, &pVal->i);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo* ppInfo = (SPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo);
@@ -2622,8 +2625,8 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult
SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
SVariant* pVal = &pCtx->param[1].param;
- pInfo->percent =
- (IS_SIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->i : (IS_UNSIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->u : pVal->d));
+ pInfo->percent = 0;
+ GET_TYPED_DATA(pInfo->percent, double, pVal->nType, &pVal->i);
if (pCtx->numOfParams == 2) {
pInfo->algo = APERCT_ALGO_DEFAULT;
@@ -3719,6 +3722,12 @@ static int32_t topBotResComparFn(const void* p1, const void* p2, const void* par
}
return (val1->v.u > val2->v.u) ? 1 : -1;
+ } else if (TSDB_DATA_TYPE_FLOAT == type) {
+ if (val1->v.f == val2->v.f) {
+ return 0;
+ }
+
+ return (val1->v.f > val2->v.f) ? 1 : -1;
}
if (val1->v.d == val2->v.d) {
@@ -3759,10 +3768,12 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
} else { // replace the minimum value in the result
if ((isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && val.i > pItems[0].v.i) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && val.u > pItems[0].v.u) ||
- (IS_FLOAT_TYPE(type) && val.d > pItems[0].v.d))) ||
+ (TSDB_DATA_TYPE_FLOAT == type && val.f > pItems[0].v.f) ||
+ (TSDB_DATA_TYPE_DOUBLE == type && val.d > pItems[0].v.d))) ||
(!isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && val.i < pItems[0].v.i) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && val.u < pItems[0].v.u) ||
- (IS_FLOAT_TYPE(type) && val.d < pItems[0].v.d)))) {
+ (TSDB_DATA_TYPE_FLOAT == type && val.f < pItems[0].v.f) ||
+ (TSDB_DATA_TYPE_DOUBLE == type && val.d < pItems[0].v.d)))) {
// replace the old data and the coresponding tuple data
STopBotResItem* pItem = &pItems[0];
pItem->v = val;
@@ -3925,12 +3936,7 @@ int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
}
for (int32_t i = 0; i < pEntryInfo->numOfRes; ++i) {
STopBotResItem* pItem = &pRes->pItems[i];
- if (type == TSDB_DATA_TYPE_FLOAT) {
- float v = pItem->v.d;
- colDataAppend(pCol, currentRow, (const char*)&v, false);
- } else {
- colDataAppend(pCol, currentRow, (const char*)&pItem->v.i, false);
- }
+ colDataAppend(pCol, currentRow, (const char*)&pItem->v.i, false);
#ifdef BUF_PAGE_DEBUG
qDebug("page_finalize i:%d,item:%p,pageId:%d, offset:%d\n", i, pItem, pItem->tuplePos.pageId,
pItem->tuplePos.offset);
@@ -3961,10 +3967,12 @@ void addResult(SqlFunctionCtx* pCtx, STopBotResItem* pSourceItem, int16_t type,
} else { // replace the minimum value in the result
if ((isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && pSourceItem->v.i > pItems[0].v.i) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && pSourceItem->v.u > pItems[0].v.u) ||
- (IS_FLOAT_TYPE(type) && pSourceItem->v.d > pItems[0].v.d))) ||
+ (TSDB_DATA_TYPE_FLOAT == type && pSourceItem->v.f > pItems[0].v.f) ||
+ (TSDB_DATA_TYPE_DOUBLE == type && pSourceItem->v.d > pItems[0].v.d))) ||
(!isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && pSourceItem->v.i < pItems[0].v.i) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && pSourceItem->v.u < pItems[0].v.u) ||
- (IS_FLOAT_TYPE(type) && pSourceItem->v.d < pItems[0].v.d)))) {
+ (TSDB_DATA_TYPE_FLOAT == type && pSourceItem->v.f < pItems[0].v.f) ||
+ (TSDB_DATA_TYPE_DOUBLE == type && pSourceItem->v.d < pItems[0].v.d)))) {
// replace the old data and the coresponding tuple data
STopBotResItem* pItem = &pItems[0];
pItem->v = pSourceItem->v;
@@ -6035,7 +6043,7 @@ int32_t twaFinalize(struct SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
} else {
if (pInfo->win.ekey == pInfo->win.skey) {
pInfo->dOutput = pInfo->p.val;
- } else if (pInfo->win.ekey == INT64_MAX || pInfo->win.skey == INT64_MIN) { //no data in timewindow
+ } else if (pInfo->win.ekey == INT64_MAX || pInfo->win.skey == INT64_MIN) { // no data in timewindow
pInfo->dOutput = 0;
} else {
pInfo->dOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey);
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index cc1bae6a3c..8c1a85b101 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -2036,6 +2036,8 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
pVal->u = pNode->datum.u;
break;
case TSDB_DATA_TYPE_FLOAT:
+ pVal->f = pNode->datum.d;
+ break;
case TSDB_DATA_TYPE_DOUBLE:
pVal->d = pNode->datum.d;
break;
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 9883c81819..54f450e971 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -2762,17 +2762,17 @@ static bool needFill(SNode* pNode) {
return hasFillFunc;
}
-static bool mismatchFillDataType(SDataType origDt, SDataType fillDt) {
- if (TSDB_DATA_TYPE_NULL == fillDt.type) {
- return false;
+static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList* pValues, int32_t index) {
+ SListCell* pCell = nodesListGetCell(pValues, index);
+ if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType)) {
+ return TSDB_CODE_SUCCESS;
}
- if (IS_NUMERIC_TYPE(origDt.type) && !IS_NUMERIC_TYPE(fillDt.type)) {
- return true;
+ SNode* pCaseFunc = NULL;
+ int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCaseFunc);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
}
- if (IS_VAR_DATA_TYPE(origDt.type) && !IS_VAR_DATA_TYPE(fillDt.type)) {
- return true;
- }
- return false;
+ return code;
}
static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {
@@ -2788,8 +2788,8 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
}
- if (mismatchFillDataType(((SExprNode*)pProject)->resType,
- ((SExprNode*)nodesListGetNode(pFillValues->pNodeList, fillNo))->resType)) {
+ if (TSDB_CODE_SUCCESS !=
+ convertFillValue(pCxt, ((SExprNode*)pProject)->resType, pFillValues->pNodeList, fillNo)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
}
++fillNo;
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 40b70d7d3e..45fa67faef 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -348,7 +348,9 @@ static int32_t scanPathOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub
int32_t code = scanPathOptMatch(pCxt, pLogicSubplan->pNode, &info);
if (TSDB_CODE_SUCCESS == code && info.pScan) {
scanPathOptSetScanWin(info.pScan);
- scanPathOptSetScanOrder(info.scanOrder, info.pScan);
+ if (!pCxt->pPlanCxt->streamQuery) {
+ scanPathOptSetScanOrder(info.scanOrder, info.pScan);
+ }
}
if (TSDB_CODE_SUCCESS == code && (NULL != info.pDsoFuncs || NULL != info.pSdrFuncs)) {
info.pScan->dataRequired = scanPathOptGetDataRequired(info.pSdrFuncs);
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index e45beb7e13..4caa266d8a 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -1191,6 +1191,8 @@ void qWorkerStopAllTasks(void *qWorkerMgmt) {
if (QW_QUERY_RUNNING(ctx)) {
qwKillTaskHandle(ctx);
+ } else if (!QW_EVENT_PROCESSED(ctx, QW_EVENT_DROP)) {
+ QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_DROP);
}
QW_UNLOCK(QW_WRITE, &ctx->lock);
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 4befe29e1d..45931c209c 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -1704,7 +1704,8 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
}
qDebug("GROUP Num:%u", info->groupNum);
- for (uint32_t i = 0; i < info->groupNum; ++i) {
+ uint32_t maxDbgGrpNum = TMIN(info->groupNum, 1000);
+ for (uint32_t i = 0; i < maxDbgGrpNum; ++i) {
SFilterGroup *group = &info->groups[i];
qDebug("Group%d : unit num[%u]", i, group->unitNum);
@@ -3248,14 +3249,18 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, SColumnInfoData *pRes, SC
for (uint32_t u = 0; u < group->unitNum; ++u) {
uint32_t uidx = group->unitIdxs[u];
SFilterComUnit *cunit = &info->cunits[uidx];
- void *colData = colDataGetData((SColumnInfoData *)(cunit->colData), i);
-
+ void *colData = NULL;
+ bool isNull = colDataIsNull((SColumnInfoData *)(cunit->colData), 0, i, NULL);
// if (FILTER_UNIT_GET_F(info, uidx)) {
// p[i] = FILTER_UNIT_GET_R(info, uidx);
// } else {
uint8_t optr = cunit->optr;
- if (colData == NULL || colDataIsNull((SColumnInfoData *)(cunit->colData), 0, i, NULL)) {
+ if (!isNull) {
+ colData = colDataGetData((SColumnInfoData *)(cunit->colData), i);
+ }
+
+ if (colData == NULL || isNull) {
p[i] = optr == OP_TYPE_IS_NULL ? true : false;
} else {
if (optr == OP_TYPE_IS_NOT_NULL) {
@@ -3916,6 +3921,10 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) {
} else {
SColumnNode *refNode = (SColumnNode *)node->pLeft;
SNodeListNode *listNode = (SNodeListNode *)node->pRight;
+ if (LIST_LENGTH(listNode->pNodeList) > 10) {
+ stat->scalarMode = true;
+ return DEAL_RES_CONTINUE;
+ }
int32_t type = vectorGetConvertType(refNode->node.resType.type, listNode->dataType.type);
if (0 != type && type != refNode->node.resType.type) {
stat->scalarMode = true;
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index e7f2b60704..009f7eec9a 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -91,6 +91,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
void* exec = pTask->exec.executor;
qSetStreamOpOpen(exec);
+ bool finished = false;
while (1) {
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
@@ -106,7 +107,10 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
if (qExecTask(exec, &output, &ts) < 0) {
ASSERT(0);
}
- if (output == NULL) break;
+ if (output == NULL) {
+ finished = true;
+ break;
+ }
SSDataBlock block = {0};
assignOneDataBlock(&block, output);
@@ -133,6 +137,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
streamDispatch(pTask);
}
+ if (finished) break;
}
return 0;
}
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
index aefe30116b..0374e22a4a 100644
--- a/source/libs/stream/src/streamState.c
+++ b/source/libs/stream/src/streamState.c
@@ -16,6 +16,7 @@
#include "executor.h"
#include "streamInc.h"
#include "tcommon.h"
+#include "tcompare.h"
#include "ttimer.h"
// todo refactor
@@ -113,6 +114,12 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
+ pState->pTdbState = taosMemoryCalloc(1, sizeof(STdbState));
+ if (pState->pTdbState == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ streamStateDestroy(pState);
+ return NULL;
+ }
char statePath[1024];
if (!specPath) {
@@ -121,26 +128,34 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
memset(statePath, 0, 1024);
tstrncpy(statePath, path, 1024);
}
- if (tdbOpen(statePath, szPage, pages, &pState->db, 0) < 0) {
+ if (tdbOpen(statePath, szPage, pages, &pState->pTdbState->db, 0) < 0) {
goto _err;
}
// open state storage backend
- if (tdbTbOpen("state.db", sizeof(SStateKey), -1, stateKeyCmpr, pState->db, &pState->pStateDb, 0) < 0) {
+ if (tdbTbOpen("state.db", sizeof(SStateKey), -1, stateKeyCmpr, pState->pTdbState->db, &pState->pTdbState->pStateDb,
+ 0) < 0) {
goto _err;
}
// todo refactor
- if (tdbTbOpen("fill.state.db", sizeof(SWinKey), -1, winKeyCmpr, pState->db, &pState->pFillStateDb, 0) < 0) {
+ if (tdbTbOpen("fill.state.db", sizeof(SWinKey), -1, winKeyCmpr, pState->pTdbState->db,
+ &pState->pTdbState->pFillStateDb, 0) < 0) {
goto _err;
}
- if (tdbTbOpen("session.state.db", sizeof(SStateSessionKey), -1, stateSessionKeyCmpr, pState->db,
- &pState->pSessionStateDb, 0) < 0) {
+ if (tdbTbOpen("session.state.db", sizeof(SStateSessionKey), -1, stateSessionKeyCmpr, pState->pTdbState->db,
+ &pState->pTdbState->pSessionStateDb, 0) < 0) {
goto _err;
}
- if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->db, &pState->pFuncStateDb, 0) < 0) {
+ if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->pTdbState->db,
+ &pState->pTdbState->pFuncStateDb, 0) < 0) {
+ goto _err;
+ }
+
+ if (tdbTbOpen("parname.state.db", sizeof(int64_t), TSDB_TABLE_NAME_LEN, NULL, pState->pTdbState->db,
+ &pState->pTdbState->pParNameDb, 0) < 0) {
goto _err;
}
@@ -148,115 +163,117 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
goto _err;
}
- pState->pOwner = pTask;
+ pState->pTdbState->pOwner = pTask;
return pState;
_err:
- tdbTbClose(pState->pStateDb);
- tdbTbClose(pState->pFuncStateDb);
- tdbTbClose(pState->pFillStateDb);
- tdbTbClose(pState->pSessionStateDb);
- tdbClose(pState->db);
- taosMemoryFree(pState);
+ tdbTbClose(pState->pTdbState->pStateDb);
+ tdbTbClose(pState->pTdbState->pFuncStateDb);
+ tdbTbClose(pState->pTdbState->pFillStateDb);
+ tdbTbClose(pState->pTdbState->pSessionStateDb);
+ tdbTbClose(pState->pTdbState->pParNameDb);
+ tdbClose(pState->pTdbState->db);
+ streamStateDestroy(pState);
return NULL;
}
void streamStateClose(SStreamState* pState) {
- tdbCommit(pState->db, &pState->txn);
- tdbPostCommit(pState->db, &pState->txn);
- tdbTbClose(pState->pStateDb);
- tdbTbClose(pState->pFuncStateDb);
- tdbTbClose(pState->pFillStateDb);
- tdbTbClose(pState->pSessionStateDb);
- tdbClose(pState->db);
+ tdbCommit(pState->pTdbState->db, &pState->pTdbState->txn);
+ tdbPostCommit(pState->pTdbState->db, &pState->pTdbState->txn);
+ tdbTbClose(pState->pTdbState->pStateDb);
+ tdbTbClose(pState->pTdbState->pFuncStateDb);
+ tdbTbClose(pState->pTdbState->pFillStateDb);
+ tdbTbClose(pState->pTdbState->pSessionStateDb);
+ tdbTbClose(pState->pTdbState->pParNameDb);
+ tdbClose(pState->pTdbState->db);
- taosMemoryFree(pState);
+ streamStateDestroy(pState);
}
int32_t streamStateBegin(SStreamState* pState) {
- if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
- 0) {
+ if (tdbTxnOpen(&pState->pTdbState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
+ TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}
- if (tdbBegin(pState->db, &pState->txn) < 0) {
- tdbTxnClose(&pState->txn);
+ if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
+ tdbTxnClose(&pState->pTdbState->txn);
return -1;
}
return 0;
}
int32_t streamStateCommit(SStreamState* pState) {
- if (tdbCommit(pState->db, &pState->txn) < 0) {
+ if (tdbCommit(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
- if (tdbPostCommit(pState->db, &pState->txn) < 0) {
+ if (tdbPostCommit(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
- memset(&pState->txn, 0, sizeof(TXN));
- if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
- 0) {
+ memset(&pState->pTdbState->txn, 0, sizeof(TXN));
+ if (tdbTxnOpen(&pState->pTdbState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
+ TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}
- if (tdbBegin(pState->db, &pState->txn) < 0) {
+ if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
return 0;
}
int32_t streamStateAbort(SStreamState* pState) {
- if (tdbAbort(pState->db, &pState->txn) < 0) {
+ if (tdbAbort(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
- memset(&pState->txn, 0, sizeof(TXN));
- if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
- 0) {
+ memset(&pState->pTdbState->txn, 0, sizeof(TXN));
+ if (tdbTxnOpen(&pState->pTdbState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
+ TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}
- if (tdbBegin(pState->db, &pState->txn) < 0) {
+ if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
return 0;
}
int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) {
- return tdbTbUpsert(pState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, &pState->txn);
+ return tdbTbUpsert(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, &pState->pTdbState->txn);
}
int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) {
- return tdbTbGet(pState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen);
+ return tdbTbGet(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen);
}
int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key) {
- return tdbTbDelete(pState->pFuncStateDb, key, sizeof(STupleKey), &pState->txn);
+ return tdbTbDelete(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), &pState->pTdbState->txn);
}
// todo refactor
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
SStateKey sKey = {.key = *key, .opNum = pState->number};
- return tdbTbUpsert(pState->pStateDb, &sKey, sizeof(SStateKey), value, vLen, &pState->txn);
+ return tdbTbUpsert(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), value, vLen, &pState->pTdbState->txn);
}
// todo refactor
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
- return tdbTbUpsert(pState->pFillStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
+ return tdbTbUpsert(pState->pTdbState->pFillStateDb, key, sizeof(SWinKey), value, vLen, &pState->pTdbState->txn);
}
// todo refactor
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
SStateKey sKey = {.key = *key, .opNum = pState->number};
- return tdbTbGet(pState->pStateDb, &sKey, sizeof(SStateKey), pVal, pVLen);
+ return tdbTbGet(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), pVal, pVLen);
}
// todo refactor
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
- return tdbTbGet(pState->pFillStateDb, key, sizeof(SWinKey), pVal, pVLen);
+ return tdbTbGet(pState->pTdbState->pFillStateDb, key, sizeof(SWinKey), pVal, pVLen);
}
// todo refactor
int32_t streamStateDel(SStreamState* pState, const SWinKey* key) {
SStateKey sKey = {.key = *key, .opNum = pState->number};
- return tdbTbDelete(pState->pStateDb, &sKey, sizeof(SStateKey), &pState->txn);
+ return tdbTbDelete(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), &pState->pTdbState->txn);
}
int32_t streamStateClear(SStreamState* pState) {
@@ -280,7 +297,7 @@ void streamStateSetNumber(SStreamState* pState, int32_t number) { pState->number
// todo refactor
int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key) {
- return tdbTbDelete(pState->pFillStateDb, key, sizeof(SWinKey), &pState->txn);
+ return tdbTbDelete(pState->pTdbState->pFillStateDb, key, sizeof(SWinKey), &pState->pTdbState->txn);
}
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
@@ -306,7 +323,7 @@ int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pV
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
if (pCur == NULL) return NULL;
- tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL);
+ tdbTbcOpen(pState->pTdbState->pStateDb, &pCur->pCur, NULL);
int32_t c = 0;
SStateKey sKey = {.key = *key, .opNum = pState->number};
@@ -322,7 +339,7 @@ SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* streamStateFillGetCur(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
if (pCur == NULL) return NULL;
- tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL);
+ tdbTbcOpen(pState->pTdbState->pFillStateDb, &pCur->pCur, NULL);
int32_t c = 0;
tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c);
@@ -414,7 +431,7 @@ SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key
return NULL;
}
pCur->number = pState->number;
- if (tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL) < 0) {
+ if (tdbTbcOpen(pState->pTdbState->pStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}
@@ -440,7 +457,7 @@ SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey*
if (!pCur) {
return NULL;
}
- if (tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL) < 0) {
+ if (tdbTbcOpen(pState->pTdbState->pFillStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}
@@ -465,7 +482,7 @@ SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey*
if (pCur == NULL) {
return NULL;
}
- if (tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL) < 0) {
+ if (tdbTbcOpen(pState->pTdbState->pFillStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}
@@ -512,7 +529,8 @@ void streamFreeVal(void* val) { tdbFree(val); }
int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen) {
SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
- return tdbTbUpsert(pState->pSessionStateDb, &sKey, sizeof(SStateSessionKey), value, vLen, &pState->txn);
+ return tdbTbUpsert(pState->pTdbState->pSessionStateDb, &sKey, sizeof(SStateSessionKey), value, vLen,
+ &pState->pTdbState->txn);
}
int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen) {
@@ -535,7 +553,7 @@ int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVa
int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key) {
SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
- return tdbTbDelete(pState->pSessionStateDb, &sKey, sizeof(SStateSessionKey), &pState->txn);
+ return tdbTbDelete(pState->pTdbState->pSessionStateDb, &sKey, sizeof(SStateSessionKey), &pState->pTdbState->txn);
}
SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key) {
@@ -544,7 +562,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, cons
return NULL;
}
pCur->number = pState->number;
- if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
+ if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}
@@ -571,7 +589,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, cons
return NULL;
}
pCur->number = pState->number;
- if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
+ if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}
@@ -599,7 +617,7 @@ SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSess
return NULL;
}
pCur->number = pState->number;
- if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
+ if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}
@@ -666,7 +684,7 @@ int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey*
return -1;
}
pCur->number = pState->number;
- if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
+ if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return -1;
}
@@ -812,6 +830,22 @@ _end:
return res;
}
+int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) {
+ tdbTbUpsert(pState->pTdbState->pParNameDb, &groupId, sizeof(int64_t), tbname, TSDB_TABLE_NAME_LEN,
+ &pState->pTdbState->txn);
+ return 0;
+}
+
+int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal) {
+ int32_t len;
+ return tdbTbGet(pState->pTdbState->pParNameDb, &groupId, sizeof(int64_t), pVal, &len);
+}
+
+void streamStateDestroy(SStreamState* pState) {
+ taosMemoryFreeClear(pState->pTdbState);
+ taosMemoryFreeClear(pState);
+}
+
#if 0
char* streamStateSessionDump(SStreamState* pState) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
@@ -819,7 +853,7 @@ char* streamStateSessionDump(SStreamState* pState) {
return NULL;
}
pCur->number = pState->number;
- if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
+ if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index 199892c241..15526cd8bb 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -163,9 +163,9 @@ bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid) {
return false;
}
-void updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol) {
- if (pBlock == NULL || pBlock->info.rows == 0) return;
- TSKEY maxTs = -1;
+TSKEY updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol) {
+ if (pBlock == NULL || pBlock->info.rows == 0) return INT64_MIN;
+ TSKEY maxTs = INT64_MIN;
int64_t tbUid = pBlock->info.uid;
SColumnInfoData *pColDataInfo = taosArrayGet(pBlock->pDataBlock, primaryTsCol);
@@ -186,6 +186,7 @@ void updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t pr
if (pMaxTs == NULL || *pMaxTs > maxTs) {
taosHashPut(pInfo->pMap, &tbUid, sizeof(int64_t), &maxTs, sizeof(TSKEY));
}
+ return maxTs;
}
bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
diff --git a/source/libs/sync/inc/syncEnv.h b/source/libs/sync/inc/syncEnv.h
index 628e8874b4..04e8e5edd4 100644
--- a/source/libs/sync/inc/syncEnv.h
+++ b/source/libs/sync/inc/syncEnv.h
@@ -57,6 +57,11 @@ void syncNodeRemove(int64_t rid);
SSyncNode* syncNodeAcquire(int64_t rid);
void syncNodeRelease(SSyncNode* pNode);
+int64_t syncHbTimerDataAdd(SSyncHbTimerData* pData);
+void syncHbTimerDataRemove(int64_t rid);
+SSyncHbTimerData* syncHbTimerDataAcquire(int64_t rid);
+void syncHbTimerDataRelease(SSyncHbTimerData* pData);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h
index 362618fece..57f52c7d88 100644
--- a/source/libs/sync/inc/syncInt.h
+++ b/source/libs/sync/inc/syncInt.h
@@ -57,10 +57,11 @@ typedef struct SRaftId {
} SRaftId;
typedef struct SSyncHbTimerData {
- SSyncNode* pSyncNode;
+ int64_t syncNodeRid;
SSyncTimer* pTimer;
SRaftId destId;
uint64_t logicClock;
+ int64_t rid;
} SSyncHbTimerData;
typedef struct SSyncTimer {
@@ -70,7 +71,7 @@ typedef struct SSyncTimer {
uint64_t counter;
int32_t timerMS;
SRaftId destId;
- SSyncHbTimerData hbData;
+ int64_t hbDataRid;
} SSyncTimer;
typedef struct SElectTimerParam {
@@ -189,6 +190,8 @@ typedef struct SSyncNode {
int64_t leaderTime;
int64_t lastReplicateTime;
+ bool isStart;
+
} SSyncNode;
// open/close --------------
@@ -198,6 +201,7 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode);
void syncNodeClose(SSyncNode* pSyncNode);
void syncNodePreClose(SSyncNode* pSyncNode);
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak);
+void syncHbTimerDataFree(SSyncHbTimerData* pData);
// on message ---------------------
int32_t syncNodeOnTimeout(SSyncNode* ths, const SRpcMsg* pMsg);
diff --git a/source/libs/sync/src/syncEnv.c b/source/libs/sync/src/syncEnv.c
index 2abc8c0b7f..43685f6496 100644
--- a/source/libs/sync/src/syncEnv.c
+++ b/source/libs/sync/src/syncEnv.c
@@ -20,6 +20,7 @@
static SSyncEnv gSyncEnv = {0};
static int32_t gNodeRefId = -1;
+static int32_t gHbDataRefId = -1;
static void syncEnvTick(void *param, void *tmrId);
SSyncEnv *syncEnv() { return &gSyncEnv; }
@@ -50,6 +51,13 @@ int32_t syncInit() {
return -1;
}
+ gHbDataRefId = taosOpenRef(200, (RefFp)syncHbTimerDataFree);
+ if (gHbDataRefId < 0) {
+ sError("failed to init hb-data ref");
+ syncCleanUp();
+ return -1;
+ }
+
sDebug("sync rsetId:%d is open", gNodeRefId);
return 0;
}
@@ -64,6 +72,12 @@ void syncCleanUp() {
taosCloseRef(gNodeRefId);
gNodeRefId = -1;
}
+
+ if (gHbDataRefId != -1) {
+ sDebug("sync rsetId:%d is closed", gHbDataRefId);
+ taosCloseRef(gHbDataRefId);
+ gHbDataRefId = -1;
+ }
}
int64_t syncNodeAdd(SSyncNode *pNode) {
@@ -88,6 +102,26 @@ SSyncNode *syncNodeAcquire(int64_t rid) {
void syncNodeRelease(SSyncNode *pNode) { taosReleaseRef(gNodeRefId, pNode->rid); }
+int64_t syncHbTimerDataAdd(SSyncHbTimerData *pData) {
+ pData->rid = taosAddRef(gHbDataRefId, pData);
+ if (pData->rid < 0) return -1;
+ return pData->rid;
+}
+
+void syncHbTimerDataRemove(int64_t rid) { taosRemoveRef(gHbDataRefId, rid); }
+
+SSyncHbTimerData *syncHbTimerDataAcquire(int64_t rid) {
+ SSyncHbTimerData *pData = taosAcquireRef(gHbDataRefId, rid);
+ if (pData == NULL) {
+ sError("failed to acquire hb-timer-data from refId:%" PRId64, rid);
+ terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
+ }
+
+ return pData;
+}
+
+void syncHbTimerDataRelease(SSyncHbTimerData *pData) { taosReleaseRef(gHbDataRefId, pData->rid); }
+
#if 0
void syncEnvStartTimer() {
taosTmrReset(gSyncEnv.FpEnvTickTimer, gSyncEnv.envTickTimerMS, &gSyncEnv, gSyncEnv.pTimerManager,
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 0370b3c5b1..a427d7aa0c 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -91,6 +91,7 @@ void syncStart(int64_t rid) {
void syncStop(int64_t rid) {
SSyncNode* pSyncNode = syncNodeAcquire(rid);
if (pSyncNode != NULL) {
+ pSyncNode->isStart = false;
syncNodeRelease(pSyncNode);
syncNodeRemove(rid);
}
@@ -435,8 +436,15 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) {
}
int32_t ret = 0;
- if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
+ if (pSyncNode->state == TAOS_SYNC_STATE_LEADER && pSyncNode->replicaNum > 1) {
SNodeInfo newLeader = (pSyncNode->peersNodeInfo)[0];
+ if (pSyncNode->peersNum == 2) {
+ SyncIndex matchIndex0 = syncIndexMgrGetIndex(pSyncNode->pMatchIndex, &(pSyncNode->peersId[0]));
+ SyncIndex matchIndex1 = syncIndexMgrGetIndex(pSyncNode->pMatchIndex, &(pSyncNode->peersId[1]));
+ if (matchIndex1 > matchIndex0) {
+ newLeader = (pSyncNode->peersNodeInfo)[1];
+ }
+ }
ret = syncNodeLeaderTransferTo(pSyncNode, newLeader);
}
@@ -665,13 +673,20 @@ static int32_t syncHbTimerInit(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer, SRa
static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
int32_t ret = 0;
if (syncIsInit()) {
- SSyncHbTimerData* pData = &pSyncTimer->hbData;
- pData->pSyncNode = pSyncNode;
+ SSyncHbTimerData* pData = syncHbTimerDataAcquire(pSyncTimer->hbDataRid);
+ if (pData == NULL) {
+ pData = taosMemoryMalloc(sizeof(SSyncHbTimerData));
+ pData->rid = syncHbTimerDataAdd(pData);
+ }
+ pSyncTimer->hbDataRid = pData->rid;
+
+ pData->syncNodeRid = pSyncNode->rid;
pData->pTimer = pSyncTimer;
pData->destId = pSyncTimer->destId;
pData->logicClock = pSyncTimer->logicClock;
- taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, pData, syncEnv()->pTimerManager, &pSyncTimer->pTimer);
+ taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid), syncEnv()->pTimerManager,
+ &pSyncTimer->pTimer);
} else {
sError("vgId:%d, start ctrl hb timer error, sync env is stop", pSyncNode->vgId);
}
@@ -683,6 +698,8 @@ static int32_t syncHbTimerStop(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
atomic_add_fetch_64(&pSyncTimer->logicClock, 1);
taosTmrStop(pSyncTimer->pTimer);
pSyncTimer->pTimer = NULL;
+ syncHbTimerDataRemove(pSyncTimer->hbDataRid);
+ pSyncTimer->hbDataRid = -1;
return ret;
}
@@ -934,6 +951,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
SSyncSnapshotSender* pSender = snapshotSenderCreate(pSyncNode, i);
// ASSERT(pSender != NULL);
(pSyncNode->senders)[i] = pSender;
+ sSTrace(pSender, "snapshot sender create new while open, data:%p", pSender);
}
// snapshot receivers
@@ -960,7 +978,8 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
// snapshotting
atomic_store_64(&pSyncNode->snapshottingIndex, SYNC_INDEX_INVALID);
- sNTrace(pSyncNode, "sync open");
+ pSyncNode->isStart = true;
+ sNTrace(pSyncNode, "sync open, node:%p", pSyncNode);
return pSyncNode;
@@ -1027,15 +1046,13 @@ void syncNodePreClose(SSyncNode* pSyncNode) {
syncNodeStopHeartbeatTimer(pSyncNode);
}
+void syncHbTimerDataFree(SSyncHbTimerData* pData) { taosMemoryFree(pData); }
+
void syncNodeClose(SSyncNode* pSyncNode) {
- if (pSyncNode == NULL) {
- return;
- }
- int32_t ret;
+ if (pSyncNode == NULL) return;
+ sNTrace(pSyncNode, "sync close, data:%p", pSyncNode);
- sNTrace(pSyncNode, "sync close");
-
- ret = raftStoreClose(pSyncNode->pRaftStore);
+ int32_t ret = raftStoreClose(pSyncNode->pRaftStore);
ASSERT(ret == 0);
pSyncNode->pRaftStore = NULL;
@@ -1064,6 +1081,7 @@ void syncNodeClose(SSyncNode* pSyncNode) {
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if ((pSyncNode->senders)[i] != NULL) {
+ sSTrace((pSyncNode->senders)[i], "snapshot sender destroy while close, data:%p", (pSyncNode->senders)[i]);
snapshotSenderDestroy((pSyncNode->senders)[i]);
(pSyncNode->senders)[i] = NULL;
}
@@ -1385,7 +1403,7 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
// reset sender
bool reset = false;
for (int32_t j = 0; j < TSDB_MAX_REPLICA; ++j) {
- if (syncUtilSameId(&(pSyncNode->replicasId)[i], &oldReplicasId[j])) {
+ if (syncUtilSameId(&(pSyncNode->replicasId)[i], &oldReplicasId[j]) && oldSenders[j] != NULL) {
char host[128];
uint16_t port;
syncUtilU642Addr((pSyncNode->replicasId)[i].addr, host, sizeof(host), &port);
@@ -1402,6 +1420,8 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
sNTrace(pSyncNode, "snapshot sender udpate replicaIndex from %d to %d, %s:%d, %p, reset:%d", oldreplicaIndex,
i, host, port, (pSyncNode->senders)[i], reset);
+
+ break;
}
}
}
@@ -1410,15 +1430,17 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if ((pSyncNode->senders)[i] == NULL) {
(pSyncNode->senders)[i] = snapshotSenderCreate(pSyncNode, i);
- sSTrace((pSyncNode->senders)[i], "snapshot sender create new");
+ sSTrace((pSyncNode->senders)[i], "snapshot sender create new while reconfig, data:%p", (pSyncNode->senders)[i]);
+ } else {
+ sSTrace((pSyncNode->senders)[i], "snapshot sender already exist, data:%p", (pSyncNode->senders)[i]);
}
}
// free old
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if (oldSenders[i] != NULL) {
+ sNTrace(pSyncNode, "snapshot sender destroy old, data:%p replica-index:%d", oldSenders[i], i);
snapshotSenderDestroy(oldSenders[i]);
- sNTrace(pSyncNode, "snapshot sender delete old %p replica-index:%d", oldSenders[i], i);
oldSenders[i] = NULL;
}
}
@@ -1929,29 +1951,59 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
}
static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
- SSyncHbTimerData* pData = (SSyncHbTimerData*)param;
- SSyncNode* pSyncNode = pData->pSyncNode;
- SSyncTimer* pSyncTimer = pData->pTimer;
+ int64_t hbDataRid = (int64_t)param;
+ SSyncHbTimerData* pData = syncHbTimerDataAcquire(hbDataRid);
+ if (pData == NULL) {
+ sError("hb timer get pData NULL, %" PRId64, hbDataRid);
+ return;
+ }
+
+ SSyncNode* pSyncNode = syncNodeAcquire(pData->syncNodeRid);
if (pSyncNode == NULL) {
+ syncHbTimerDataRelease(pData);
+ sError("hb timer get pSyncNode NULL");
+ return;
+ }
+
+ SSyncTimer* pSyncTimer = pData->pTimer;
+
+ if (!pSyncNode->isStart) {
+ syncNodeRelease(pSyncNode);
+ syncHbTimerDataRelease(pData);
+ sError("vgId:%d, hb timer sync node already stop", pSyncNode->vgId);
return;
}
if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
+ syncNodeRelease(pSyncNode);
+ syncHbTimerDataRelease(pData);
+ sError("vgId:%d, hb timer sync node not leader", pSyncNode->vgId);
return;
}
if (pSyncNode->pRaftStore == NULL) {
+ syncNodeRelease(pSyncNode);
+ syncHbTimerDataRelease(pData);
+ sError("vgId:%d, hb timer raft store already stop", pSyncNode->vgId);
return;
}
- // sNTrace(pSyncNode, "eq peer hb timer");
-
- int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock);
- int64_t msgLogicClock = atomic_load_64(&pData->logicClock);
+ // sTrace("vgId:%d, eq peer hb timer", pSyncNode->vgId);
if (pSyncNode->replicaNum > 1) {
+ int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock);
+ int64_t msgLogicClock = atomic_load_64(&pData->logicClock);
+
if (timerLogicClock == msgLogicClock) {
+ if (syncIsInit()) {
+ // sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId);
+ taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid, syncEnv()->pTimerManager,
+ &pSyncTimer->pTimer);
+ } else {
+ sError("sync env is stop, reset peer hb timer error");
+ }
+
SRpcMsg rpcMsg = {0};
(void)syncBuildHeartbeat(&rpcMsg, pSyncNode->vgId);
@@ -1966,18 +2018,14 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
// send msg
syncNodeSendHeartbeat(pSyncNode, &pSyncMsg->destId, &rpcMsg);
- if (syncIsInit()) {
- taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, pData, syncEnv()->pTimerManager,
- &pSyncTimer->pTimer);
- } else {
- sError("sync env is stop, syncNodeEqHeartbeatTimer");
- }
-
} else {
- sTrace("==syncNodeEqPeerHeartbeatTimer== timerLogicClock:%" PRId64 ", msgLogicClock:%" PRId64 "", timerLogicClock,
- msgLogicClock);
+ sTrace("vgId:%d, do not send hb, timerLogicClock:%" PRId64 ", msgLogicClock:%" PRId64 "", pSyncNode->vgId,
+ timerLogicClock, msgLogicClock);
}
}
+
+ syncHbTimerDataRelease(pData);
+ syncNodeRelease(pSyncNode);
}
static int32_t syncNodeEqNoop(SSyncNode* pNode) {
diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c
index 2b1cee51d7..db0b6d1d02 100644
--- a/source/libs/sync/src/syncRaftLog.c
+++ b/source/libs/sync/src/syncRaftLog.c
@@ -38,7 +38,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
}
// pLogStore->pCache = taosLRUCacheInit(10 * 1024 * 1024, 1, .5);
- pLogStore->pCache = taosLRUCacheInit(100 * 1024 * 1024, 1, .5);
+ pLogStore->pCache = taosLRUCacheInit(30 * 1024 * 1024, 1, .5);
if (pLogStore->pCache == NULL) {
taosMemoryFree(pLogStore);
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
diff --git a/source/libs/transport/src/tmsgcb.c b/source/libs/transport/src/tmsgcb.c
index 1cd1903851..0d1644be2d 100644
--- a/source/libs/transport/src/tmsgcb.c
+++ b/source/libs/transport/src/tmsgcb.c
@@ -44,7 +44,13 @@ int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg) {
return code;
}
-void tmsgSendRsp(SRpcMsg* pMsg) { return (*defaultMsgCb.sendRspFp)(pMsg); }
+void tmsgSendRsp(SRpcMsg* pMsg) {
+#if 1
+ rpcSendResponse(pMsg);
+#else
+ return (*defaultMsgCb.sendRspFp)(pMsg);
+#endif
+}
void tmsgSendRedirectRsp(SRpcMsg* pMsg, const SEpSet* pNewEpSet) { (*defaultMsgCb.sendRedirectRspFp)(pMsg, pNewEpSet); }
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 5f36d91023..8dd3628c5f 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -462,8 +462,6 @@ static void uvStartSendResp(SSvrMsg* smsg) {
if (pConn->broken == true) {
// persist by
destroySmsg(smsg);
- // transFreeMsg(smsg->msg.pCont);
- // taosMemoryFree(smsg);
transUnrefSrvHandle(pConn);
return;
}
@@ -1234,7 +1232,9 @@ int transReleaseSrvHandle(void* handle) {
m->type = Release;
tTrace("%s conn %p start to release", transLabel(pThrd->pTransInst), exh->handle);
- transAsyncSend(pThrd->asyncPool, &m->q);
+ if (0 != transAsyncSend(pThrd->asyncPool, &m->q)) {
+ destroySmsg(m);
+ }
transReleaseExHandle(transGetRefMgt(), refId);
return 0;
@@ -1269,7 +1269,9 @@ int transSendResponse(const STransMsg* msg) {
STraceId* trace = (STraceId*)&msg->info.traceId;
tGTrace("conn %p start to send resp (1/2)", exh->handle);
- transAsyncSend(pThrd->asyncPool, &m->q);
+ if (0 != transAsyncSend(pThrd->asyncPool, &m->q)) {
+ destroySmsg(m);
+ }
transReleaseExHandle(transGetRefMgt(), refId);
return 0;
@@ -1303,7 +1305,9 @@ int transRegisterMsg(const STransMsg* msg) {
STrans* pTransInst = pThrd->pTransInst;
tTrace("%s conn %p start to register brokenlink callback", transLabel(pTransInst), exh->handle);
- transAsyncSend(pThrd->asyncPool, &m->q);
+ if (0 != transAsyncSend(pThrd->asyncPool, &m->q)) {
+ destroySmsg(m);
+ }
transReleaseExHandle(transGetRefMgt(), refId);
return 0;
diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c
index b78d5ca6c1..5d0f020b02 100644
--- a/source/libs/wal/src/walMgmt.c
+++ b/source/libs/wal/src/walMgmt.c
@@ -251,7 +251,7 @@ static void walFsyncAll() {
int32_t code = taosFsyncFile(pWal->pLogFile);
if (code != 0) {
wError("vgId:%d, file:%" PRId64 ".log, failed to fsync since %s", pWal->cfg.vgId, walGetLastFileFirstVer(pWal),
- strerror(code));
+ strerror(errno));
}
}
pWal = taosIterateRef(tsWal.refSetId, pWal->refId);
diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c
index 5e85e40594..421901184b 100644
--- a/source/os/src/osDir.c
+++ b/source/os/src/osDir.c
@@ -336,12 +336,14 @@ int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen) {
#else
if (realpath(dirname, tmp) != NULL) {
#endif
- if (realPath == NULL) {
- strncpy(dirname, tmp, maxlen);
- } else {
- strncpy(realPath, tmp, maxlen);
+ if (strlen(tmp) < maxlen) {
+ if (realPath == NULL) {
+ strncpy(dirname, tmp, maxlen);
+ } else {
+ strncpy(realPath, tmp, maxlen);
+ }
+ return 0;
}
- return 0;
}
return -1;
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index b406432616..4d889843e8 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -344,6 +344,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER, "Table schema is old")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR, "TDB env open error")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_IN_OTHER_STABLE, "Table already exists in other stables")
// query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle")
@@ -646,7 +647,12 @@ const char* tstrerror(int32_t err) {
// this is a system errno
if ((err & 0x00ff0000) == 0x00ff0000) {
- return strerror(err & 0x0000ffff);
+ int32_t code = err & 0x0000ffff;
+ if (code >= 0 && code < 36) {
+ return strerror(code);
+ } else {
+ return "unknown err";
+ }
}
int32_t s = 0;
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index 4c9715270d..f9e8cd6271 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -11,7 +11,7 @@
,,y,script,./test.sh -f tsim/user/privilege_db.sim
,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim
,,y,script,./test.sh -f tsim/db/alter_option.sim
-,,,script,./test.sh -f tsim/db/alter_replica_13.sim
+,,y,script,./test.sh -f tsim/db/alter_replica_13.sim
,,y,script,./test.sh -f tsim/db/alter_replica_31.sim
,,y,script,./test.sh -f tsim/db/basic1.sim
,,y,script,./test.sh -f tsim/db/basic2.sim
@@ -35,30 +35,30 @@
,,y,script,./test.sh -f tsim/db/show_create_table.sim
,,y,script,./test.sh -f tsim/db/tables.sim
,,y,script,./test.sh -f tsim/db/taosdlog.sim
-,,,script,./test.sh -f tsim/dnode/balance_replica1.sim
-,,,script,./test.sh -f tsim/dnode/balance_replica3.sim
-,,,script,./test.sh -f tsim/dnode/balance1.sim
-,,,script,./test.sh -f tsim/dnode/balance2.sim
-,,,script,./test.sh -f tsim/dnode/balance3.sim
-,,,script,./test.sh -f tsim/dnode/balancex.sim
+,,y,script,./test.sh -f tsim/dnode/balance_replica1.sim
+,,y,script,./test.sh -f tsim/dnode/balance_replica3.sim
+,,y,script,./test.sh -f tsim/dnode/balance1.sim
+,,y,script,./test.sh -f tsim/dnode/balance2.sim
+,,y,script,./test.sh -f tsim/dnode/balance3.sim
+,,y,script,./test.sh -f tsim/dnode/balancex.sim
,,y,script,./test.sh -f tsim/dnode/create_dnode.sim
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim
-,,,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim
-,,,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
-,,,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
-,,,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
-,,,script,./test.sh -f tsim/dnode/drop_dnode_force.sim
+,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim
+,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
+,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
+,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
+,,y,script,./test.sh -f tsim/dnode/drop_dnode_force.sim
,,y,script,./test.sh -f tsim/dnode/offline_reason.sim
-,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
-,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
-,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
-,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
-,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
-,,,script,./test.sh -f tsim/dnode/vnode_clean.sim
-,,,script,./test.sh -f tsim/dnode/use_dropped_dnode.sim
-,,,script,./test.sh -f tsim/dnode/split_vgroup_replica1.sim
-,,,script,./test.sh -f tsim/dnode/split_vgroup_replica3.sim
+,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
+,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
+,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
+,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
+,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
+,,y,script,./test.sh -f tsim/dnode/vnode_clean.sim
+,,y,script,./test.sh -f tsim/dnode/use_dropped_dnode.sim
+,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica1.sim
+,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica3.sim
,,y,script,./test.sh -f tsim/import/basic.sim
,,y,script,./test.sh -f tsim/import/commit.sim
,,y,script,./test.sh -f tsim/import/large.sim
@@ -113,7 +113,7 @@
,,y,script,./test.sh -f tsim/parser/first_last.sim
,,y,script,./test.sh -f tsim/parser/fill_stb.sim
,,y,script,./test.sh -f tsim/parser/interp.sim
-#,,y,script,./test.sh -f tsim/parser/limit2.sim
+,,y,script,./test.sh -f tsim/parser/limit2.sim
,,y,script,./test.sh -f tsim/parser/fourArithmetic-basic.sim
,,y,script,./test.sh -f tsim/parser/function.sim
,,y,script,./test.sh -f tsim/parser/groupby-basic.sim
@@ -136,8 +136,8 @@
,,y,script,./test.sh -f tsim/parser/lastrow.sim
,,y,script,./test.sh -f tsim/parser/lastrow2.sim
,,y,script,./test.sh -f tsim/parser/like.sim
-,,,script,./test.sh -f tsim/parser/limit.sim
-,,,script,./test.sh -f tsim/parser/limit1.sim
+,,y,script,./test.sh -f tsim/parser/limit.sim
+,,y,script,./test.sh -f tsim/parser/limit1.sim
,,y,script,./test.sh -f tsim/parser/mixed_blocks.sim
,,y,script,./test.sh -f tsim/parser/nchar.sim
,,y,script,./test.sh -f tsim/parser/nestquery.sim
@@ -163,7 +163,7 @@
,,y,script,./test.sh -f tsim/parser/timestamp.sim
,,y,script,./test.sh -f tsim/parser/top_groupby.sim
,,y,script,./test.sh -f tsim/parser/topbot.sim
-,,,script,./test.sh -f tsim/parser/union.sim
+,,y,script,./test.sh -f tsim/parser/union.sim
,,y,script,./test.sh -f tsim/parser/union_sysinfo.sim
,,y,script,./test.sh -f tsim/parser/where.sim
,,y,script,./test.sh -f tsim/query/charScalarFunction.sim
@@ -176,11 +176,11 @@
,,y,script,./test.sh -f tsim/query/udf.sim
,,y,script,./test.sh -f tsim/qnode/basic1.sim
,,y,script,./test.sh -f tsim/snode/basic1.sim
-,,,script,./test.sh -f tsim/mnode/basic1.sim
-,,,script,./test.sh -f tsim/mnode/basic2.sim
-,,,script,./test.sh -f tsim/mnode/basic3.sim
-,,,script,./test.sh -f tsim/mnode/basic4.sim
-,,,script,./test.sh -f tsim/mnode/basic5.sim
+,,y,script,./test.sh -f tsim/mnode/basic1.sim
+,,y,script,./test.sh -f tsim/mnode/basic2.sim
+,,y,script,./test.sh -f tsim/mnode/basic3.sim
+,,y,script,./test.sh -f tsim/mnode/basic4.sim
+,,y,script,./test.sh -f tsim/mnode/basic5.sim
,,y,script,./test.sh -f tsim/show/basic.sim
,,y,script,./test.sh -f tsim/table/autocreate.sim
,,y,script,./test.sh -f tsim/table/basic1.sim
@@ -213,10 +213,10 @@
,,n,script,./test.sh -f tsim/stream/basic0.sim -g
,,y,script,./test.sh -f tsim/stream/basic1.sim
,,y,script,./test.sh -f tsim/stream/basic2.sim
-,,,script,./test.sh -f tsim/stream/drop_stream.sim
+,,y,script,./test.sh -f tsim/stream/drop_stream.sim
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic1.sim
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic2.sim
-,,y,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
+,,n,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim
,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
,,y,script,./test.sh -f tsim/stream/distributeSession0.sim
@@ -227,11 +227,11 @@
,,y,script,./test.sh -f tsim/stream/triggerSession0.sim
,,y,script,./test.sh -f tsim/stream/partitionby.sim
,,y,script,./test.sh -f tsim/stream/partitionby1.sim
-,,y,script,./test.sh -f tsim/stream/schedSnode.sim
-,,,script,./test.sh -f tsim/stream/windowClose.sim
+,,n,script,./test.sh -f tsim/stream/schedSnode.sim
+,,y,script,./test.sh -f tsim/stream/windowClose.sim
,,y,script,./test.sh -f tsim/stream/ignoreExpiredData.sim
,,y,script,./test.sh -f tsim/stream/sliding.sim
-,,y,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
+,,n,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
,,y,script,./test.sh -f tsim/stream/partitionbyColumnSession.sim
,,y,script,./test.sh -f tsim/stream/partitionbyColumnState.sim
,,y,script,./test.sh -f tsim/stream/deleteInterval.sim
@@ -278,8 +278,9 @@
,,y,script,./test.sh -f tsim/stable/values.sim
,,y,script,./test.sh -f tsim/stable/vnode3.sim
,,y,script,./test.sh -f tsim/stable/metrics_idx.sim
-,,,script,./test.sh -f tsim/sma/drop_sma.sim
-,,,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
+,,n,script,./test.sh -f tsim/sma/drop_sma.sim
+,,n,script,./test.sh -f tsim/sma/sma_leak.sim
+,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
,,n,script,./test.sh -f tsim/valgrind/checkError1.sim
@@ -291,12 +292,12 @@
,,n,script,./test.sh -f tsim/valgrind/checkError7.sim
,,n,script,./test.sh -f tsim/valgrind/checkError8.sim
,,n,script,./test.sh -f tsim/valgrind/checkUdf.sim
-,,,script,./test.sh -f tsim/vnode/replica3_basic.sim
+,,y,script,./test.sh -f tsim/vnode/replica3_basic.sim
,,y,script,./test.sh -f tsim/vnode/replica3_repeat.sim
,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim
,,y,script,./test.sh -f tsim/vnode/replica3_many.sim
,,y,script,./test.sh -f tsim/vnode/replica3_import.sim
-,,,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim
+,,y,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim
,,y,script,./test.sh -f tsim/vnode/stable_dnode2_stop.sim
,,y,script,./test.sh -f tsim/vnode/stable_dnode2.sim
,,y,script,./test.sh -f tsim/vnode/stable_dnode3.sim
@@ -424,6 +425,7 @@
,,,system-test,python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py
,,,system-test,python3 ./test.py -f 1-insert/alter_stable.py
,,,system-test,python3 ./test.py -f 1-insert/alter_table.py
+,,,system-test,python3 ./test.py -f 1-insert/boundary.py
,,,system-test,python3 ./test.py -f 1-insert/insertWithMoreVgroup.py
,,,system-test,python3 ./test.py -f 1-insert/table_comment.py
,,,system-test,python3 ./test.py -f 1-insert/time_range_wise.py
@@ -437,11 +439,11 @@
,,,system-test,python3 ./test.py -f 1-insert/database_pre_suf.py
,,,system-test,python3 ./test.py -f 1-insert/InsertFuturets.py
,,,system-test,python3 ./test.py -f 0-others/show.py
-,,,system-test,python3 ./test.py -f 2-query/abs.py
+,,,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py
,,,system-test,python3 ./test.py -f 2-query/abs.py -R
,,,system-test,python3 ./test.py -f 2-query/and_or_for_byte.py
,,,system-test,python3 ./test.py -f 2-query/and_or_for_byte.py -R
-,,,system-test,python3 ./test.py -f 2-query/apercentile.py
+,,,system-test,./pytest.sh python3 ./test.py -f 2-query/apercentile.py
,,,system-test,python3 ./test.py -f 2-query/apercentile.py -R
,,,system-test,python3 ./test.py -f 2-query/arccos.py
,,,system-test,python3 ./test.py -f 2-query/arccos.py -R
@@ -607,6 +609,8 @@
,,,system-test,python3 ./test.py -f 2-query/upper.py -R
,,,system-test,python3 ./test.py -f 2-query/varchar.py
,,,system-test,python3 ./test.py -f 2-query/varchar.py -R
+,,,system-test,python3 ./test.py -f 2-query/case_when.py
+,,,system-test,python3 ./test.py -f 2-query/case_when.py -R
,,,system-test,python3 ./test.py -f 1-insert/update_data.py
,,,system-test,python3 ./test.py -f 1-insert/tb_100w_data_order.py
,,,system-test,python3 ./test.py -f 1-insert/delete_stable.py
@@ -813,6 +817,7 @@
,,,system-test,python3 ./test.py -f 2-query/last_row.py -Q 2
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 2
,,,system-test,python3 ./test.py -f 2-query/sml.py -Q 2
+,,,system-test,python3 ./test.py -f 2-query/case_when.py -Q 2
,,,system-test,python3 ./test.py -f 2-query/between.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/distinct.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/varchar.py -Q 3
@@ -906,6 +911,7 @@
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/sml.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/interp.py -Q 3
+,,,system-test,python3 ./test.py -f 2-query/case_when.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/between.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/distinct.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/varchar.py -Q 4
@@ -998,6 +1004,7 @@
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/sml.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/interp.py -Q 4
+,,,system-test,python3 ./test.py -f 2-query/case_when.py -Q 4
#develop test
,,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py
diff --git a/tests/pytest/crash_gen.sh b/tests/pytest/crash_gen.sh
index 539314dea4..cc2941a52a 100755
--- a/tests/pytest/crash_gen.sh
+++ b/tests/pytest/crash_gen.sh
@@ -45,7 +45,7 @@ fi
# Now getting ready to execute Python
# The following is the default of our standard dev env (Ubuntu 20.04), modify/adjust at your own risk
-PYTHON_EXEC=python3.8
+PYTHON_EXEC=python3
# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
# export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd)
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index 600c64b8e6..f8c5f970c5 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -37,6 +37,7 @@ import requests
# from guppy import hpy
import gc
import taos
+from taos.tmq import *
from .shared.types import TdColumns, TdTags
@@ -419,10 +420,12 @@ class ThreadCoordinator:
except threading.BrokenBarrierError as err:
self._execStats.registerFailure("Aborted due to worker thread timeout")
Logging.error("\n")
+
Logging.error("Main loop aborted, caused by worker thread(s) time-out of {} seconds".format(
ThreadCoordinator.WORKER_THREAD_TIMEOUT))
Logging.error("TAOS related threads blocked at (stack frames top-to-bottom):")
ts = ThreadStacks()
+ ts.record_current_time(time.time()) # record thread exit time at current moment
ts.print(filterInternal=True)
workerTimeout = True
@@ -546,7 +549,12 @@ class ThreadCoordinator:
# pick a task type for current state
db = self.pickDatabase()
- taskType = db.getStateMachine().pickTaskType() # dynamic name of class
+ if Dice.throw(2)==1:
+ taskType = db.getStateMachine().pickTaskType() # dynamic name of class
+ else:
+                taskType = db.getStateMachine().balance_pickTaskType() # a method that can pick task types with balanced frequency
+ pass
+
return taskType(self._execStats, db) # create a task from it
def resetExecutedTasks(self):
@@ -674,9 +682,15 @@ class AnyState:
# only "under normal circumstances", as we may override it with the -b option
CAN_DROP_DB = 2
CAN_CREATE_FIXED_SUPER_TABLE = 3
+ CAN_CREATE_STREAM = 3 # super table must exists
+ CAN_CREATE_TOPIC = 3 # super table must exists
+ CAN_CREATE_CONSUMERS = 3
CAN_DROP_FIXED_SUPER_TABLE = 4
+ CAN_DROP_TOPIC = 4
+ CAN_DROP_STREAM = 4
CAN_ADD_DATA = 5
CAN_READ_DATA = 6
+ CAN_DELETE_DATA = 6
def __init__(self):
self._info = self.getInfo()
@@ -727,12 +741,30 @@ class AnyState:
return False
return self._info[self.CAN_DROP_FIXED_SUPER_TABLE]
+ def canCreateTopic(self):
+ return self._info[self.CAN_CREATE_TOPIC]
+
+ def canDropTopic(self):
+ return self._info[self.CAN_DROP_TOPIC]
+
+ def canCreateConsumers(self):
+ return self._info[self.CAN_CREATE_CONSUMERS]
+
+ def canCreateStreams(self):
+ return self._info[self.CAN_CREATE_STREAM]
+
+ def canDropStream(self):
+ return self._info[self.CAN_DROP_STREAM]
+
def canAddData(self):
return self._info[self.CAN_ADD_DATA]
def canReadData(self):
return self._info[self.CAN_READ_DATA]
+ def canDeleteData(self):
+ return self._info[self.CAN_DELETE_DATA]
+
def assertAtMostOneSuccess(self, tasks, cls):
sCnt = 0
for task in tasks:
@@ -902,7 +934,7 @@ class StateHasData(AnyState):
): # only if we didn't create one
# we shouldn't have dropped it
self.assertNoTask(tasks, TaskDropDb)
- if (not self.hasTask(tasks, TaskCreateSuperTable)
+ if not( self.hasTask(tasks, TaskCreateSuperTable)
): # if we didn't create the table
# we should not have a task that drops it
self.assertNoTask(tasks, TaskDropSuperTable)
@@ -974,14 +1006,21 @@ class StateMechine:
# did not do this when openning connection, and this is NOT the worker
# thread, which does this on their own
dbc.use(dbName)
+
if not dbc.hasTables(): # no tables
+
Logging.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time()))
return StateDbOnly()
# For sure we have tables, which means we must have the super table. # TODO: are we sure?
+
sTable = self._db.getFixedSuperTable()
- if sTable.hasRegTables(dbc): # no regular tables
+
+
+ if sTable.hasRegTables(dbc): # no regular tables
+ # print("debug=====*\n"*100)
Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time()))
+
return StateSuperTableOnly()
else: # has actual tables
Logging.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time()))
@@ -1051,6 +1090,28 @@ class StateMechine:
# Logging.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes)))
return taskTypes[i]
+ def balance_pickTaskType(self):
+        # all the task types we can choose from at current state
+ BasicTypes = self.getTaskTypes()
+ weightsTypes = BasicTypes.copy()
+
+        # this matrix can balance the frequency of TaskTypes
+ balance_TaskType_matrixs = {'TaskDropDb': 5 , 'TaskDropTopics': 20 , 'TaskDropStreams':10 , 'TaskDropStreamTables':10 ,
+ 'TaskReadData':50 , 'TaskDropSuperTable':5 , 'TaskAlterTags':3 , 'TaskAddData':10,
+ 'TaskDeleteData':10 , 'TaskCreateDb':10 , 'TaskCreateStream': 3, 'TaskCreateTopic' :3,
+ 'TaskCreateConsumers':10, 'TaskCreateSuperTable': 10 } # TaskType : balance_matrixs of task
+
+ for task , weights in balance_TaskType_matrixs.items():
+
+ for basicType in BasicTypes:
+ if basicType.__name__ == task:
+ for _ in range(weights):
+ weightsTypes.append(basicType)
+
+ task = random.sample(weightsTypes,1)
+ return task[0]
+
+
# ref:
# https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/
def _weighted_choice_sub(self, weights) -> int:
@@ -1109,6 +1170,7 @@ class Database:
return "fs_table"
def getFixedSuperTable(self) -> TdSuperTable:
+
return TdSuperTable(self.getFixedSuperTableName(), self.getName())
# We aim to create a starting time tick, such that, whenever we run our test here once
@@ -1342,6 +1404,19 @@ class Task():
0x2603, # Table does not exist, replaced by 2662 below
0x260d, # Tags number not matched
0x2662, # Table does not exist #TODO: what about 2603 above?
+                0x2600, # database not specified, SQL: show stables , database dropped , and show tables
+ 0x032C, # Object is creating
+ 0x032D, # Object is dropping
+ 0x03D3, # Conflict transaction not completed
+                0x0707, # Query not ready , it always occurs at replica 3
+ 0x707, # Query not ready
+ 0x396, # Database in creating status
+                0x386,  # Database in dropping status
+ 0x03E1, # failed on tmq_subscribe ,topic not exist
+ 0x03ed , # Topic must be dropped first, SQL: drop database db_0
+ 0x0203 , # Invalid value
+ 0x03f0 , # Stream already exist , topic already exists
+
@@ -1638,9 +1713,12 @@ class TaskCreateDb(StateTransitionTask):
# numReplica = Dice.throw(Settings.getConfig().max_replicas) + 1 # 1,2 ... N
numReplica = Config.getConfig().num_replicas # fixed, always
repStr = "replica {}".format(numReplica)
- updatePostfix = "update 1" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active
+ updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active , 3.0 version default is update 1
+ vg_nums = random.randint(1,8)
+ cache_model = Dice.choice(['none' , 'last_row' , 'last_value' , 'both'])
+ buffer = random.randint(3,128)
dbName = self._db.getName()
- self.execWtSql(wt, "create database {} {} {} ".format(dbName, repStr, updatePostfix ) )
+ self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr, updatePostfix, vg_nums, cache_model,buffer ) )
if dbName == "db_0" and Config.getConfig().use_shadow_db:
self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix ) )
@@ -1654,9 +1732,211 @@ class TaskDropDb(StateTransitionTask):
return state.canDropDb()
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
- self.execWtSql(wt, "drop database {}".format(self._db.getName()))
+
+ try:
+ self.queryWtSql(wt, "drop database {}".format(self._db.getName())) # drop database maybe failed ,because topic exists
+ except taos.error.ProgrammingError as err:
+ errno = Helper.convertErrno(err.errno)
+ if errno in [0x0203]: # drop maybe failed
+ pass
+
Logging.debug("[OPS] database dropped at {}".format(time.time()))
+
+class TaskCreateStream(StateTransitionTask):
+
+ @classmethod
+ def getEndState(cls):
+ return StateHasData()
+
+ @classmethod
+ def canBeginFrom(cls, state: AnyState):
+ return state.canCreateStreams()
+
+ def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+ dbname = self._db.getName()
+
+ sub_stream_name = dbname+ '_sub_stream'
+ sub_stream_tb_name = 'stream_tb_sub'
+ super_stream_name = dbname+ '_super_stream'
+ super_stream_tb_name = 'stream_tb_super'
+ if not self._db.exists(wt.getDbConn()):
+ Logging.debug("Skipping task, no DB yet")
+ return
+
+ sTable = self._db.getFixedSuperTable() # type: TdSuperTable
+ # wt.execSql("use db") # should always be in place
+ stbname =sTable.getName()
+ sub_tables = sTable.getRegTables(wt.getDbConn())
+ aggExpr = Dice.choice([
+ 'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)','min(speed)', 'max(speed)', 'first(speed)', 'last(speed)',
+ 'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)'])
+
+ stream_sql = '' # set default value
+
+ if sub_tables:
+ sub_tbname = sub_tables[0]
+ # create stream with query above sub_table
+ stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\
+ format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname)
+ else:
+ stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\
+ format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname)
+ self.execWtSql(wt, stream_sql)
+ Logging.debug("[OPS] stream is creating at {}".format(time.time()))
+
+
+class TaskCreateTopic(StateTransitionTask):
+
+ @classmethod
+ def getEndState(cls):
+ return StateHasData()
+
+ @classmethod
+ def canBeginFrom(cls, state: AnyState):
+ return state.canCreateTopic()
+
+ def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+ dbname = self._db.getName()
+
+ sub_topic_name = dbname+ '_sub_topic'
+ super_topic_name = dbname+ '_super_topic'
+ stable_topic = dbname+ '_stable_topic'
+ db_topic = 'database_' + dbname+ '_topics'
+ if not self._db.exists(wt.getDbConn()):
+ Logging.debug("Skipping task, no DB yet")
+ return
+
+ sTable = self._db.getFixedSuperTable() # type: TdSuperTable
+ # wt.execSql("use db") # should always be in place
+ # create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1;
+
+ stbname =sTable.getName()
+ sub_tables = sTable.getRegTables(wt.getDbConn())
+
+ scalarExpr = Dice.choice([ '*','speed','color','abs(speed)','acos(speed)','asin(speed)','atan(speed)','ceil(speed)','cos(speed)','cos(speed)',
+ 'floor(speed)','log(speed,2)','pow(speed,2)','round(speed)','sin(speed)','sqrt(speed)','char_length(color)','concat(color,color)',
+ 'concat_ws(" ", color,color," ")','length(color)', 'lower(color)', 'ltrim(color)','substr(color , 2)','upper(color)','cast(speed as double)',
+ 'cast(ts as bigint)'])
+ topic_sql = '' # set default value
+ if Dice.throw(3)==0: # create topic : source data from sub query
+ if sub_tables: # if not empty
+ sub_tbname = sub_tables[0]
+ # create topic : source data from sub query of sub stable
+ topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name,scalarExpr,dbname,sub_tbname)
+
+ else: # create topic : source data from sub query of stable
+ topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name,scalarExpr, dbname,stbname)
+ elif Dice.throw(3)==1: # create topic : source data from super table
+ topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic,dbname,stbname)
+
+ elif Dice.throw(3)==2: # create topic : source data from whole database
+ topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic,dbname)
+ else:
+ pass
+
+ # exec create topics
+ self.execWtSql(wt, "use {}".format(dbname))
+ self.execWtSql(wt, topic_sql)
+ Logging.debug("[OPS] db topic is creating at {}".format(time.time()))
+
+class TaskDropTopics(StateTransitionTask):
+
+ @classmethod
+ def getEndState(cls):
+ return StateHasData()
+
+ @classmethod
+ def canBeginFrom(cls, state: AnyState):
+ return state.canDropTopic()
+
+ def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+ dbname = self._db.getName()
+
+
+ if not self._db.exists(wt.getDbConn()):
+ Logging.debug("Skipping task, no DB yet")
+ return
+
+ sTable = self._db.getFixedSuperTable() # type: TdSuperTable
+ # wt.execSql("use db") # should always be in place
+ tblName = sTable.getName()
+ if sTable.hasTopics(wt.getDbConn()):
+ sTable.dropTopics(wt.getDbConn(),dbname,None) # drop topics of database
+ sTable.dropTopics(wt.getDbConn(),dbname,tblName) # drop topics of stable
+
+class TaskDropStreams(StateTransitionTask):
+
+ @classmethod
+ def getEndState(cls):
+ return StateHasData()
+
+ @classmethod
+ def canBeginFrom(cls, state: AnyState):
+ return state.canDropStream()
+
+ def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+ # dbname = self._db.getName()
+
+
+ if not self._db.exists(wt.getDbConn()):
+ Logging.debug("Skipping task, no DB yet")
+ return
+
+ sTable = self._db.getFixedSuperTable() # type: TdSuperTable
+ # wt.execSql("use db") # should always be in place
+ # tblName = sTable.getName()
+ if sTable.hasStreams(wt.getDbConn()):
+ sTable.dropStreams(wt.getDbConn()) # drop stream of database
+
+class TaskDropStreamTables(StateTransitionTask):
+
+ @classmethod
+ def getEndState(cls):
+ return StateHasData()
+
+ @classmethod
+ def canBeginFrom(cls, state: AnyState):
+ return state.canDropStream()
+
+ def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+ # dbname = self._db.getName()
+
+
+ if not self._db.exists(wt.getDbConn()):
+ Logging.debug("Skipping task, no DB yet")
+ return
+
+ sTable = self._db.getFixedSuperTable() # type: TdSuperTable
+ wt.execSql("use db") # should always be in place
+ # tblName = sTable.getName()
+ if sTable.hasStreamTables(wt.getDbConn()):
+ sTable.dropStreamTables(wt.getDbConn()) # drop stream tables
+
+class TaskCreateConsumers(StateTransitionTask):
+
+ @classmethod
+ def getEndState(cls):
+ return StateHasData()
+
+ @classmethod
+ def canBeginFrom(cls, state: AnyState):
+ return state.canCreateConsumers()
+
+ def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+
+ if Config.getConfig().connector_type == 'native':
+
+ sTable = self._db.getFixedSuperTable() # type: TdSuperTable
+ # wt.execSql("use db") # should always be in place
+ if sTable.hasTopics(wt.getDbConn()):
+ sTable.createConsumer(wt.getDbConn(),random.randint(1,10))
+ pass
+ else:
+ print(" restful not support tmq consumers")
+ return
+
+
class TaskCreateSuperTable(StateTransitionTask):
@classmethod
def getEndState(cls):
@@ -1673,7 +1953,7 @@ class TaskCreateSuperTable(StateTransitionTask):
sTable = self._db.getFixedSuperTable() # type: TdSuperTable
# wt.execSql("use db") # should always be in place
-
+
sTable.create(wt.getDbConn(),
{'ts': TdDataType.TIMESTAMP, 'speed': TdDataType.INT, 'color': TdDataType.BINARY16}, {
'b': TdDataType.BINARY200, 'f': TdDataType.FLOAT},
@@ -1688,14 +1968,17 @@ class TdSuperTable:
def __init__(self, stName, dbName):
self._stName = stName
self._dbName = dbName
+ self._consumerLists = {}
+ self._ConsumerInsts = []
def getName(self):
return self._stName
+
def drop(self, dbc, skipCheck = False):
dbName = self._dbName
if self.exists(dbc) : # if myself exists
- fullTableName = dbName + '.' + self._stName
+ fullTableName = dbName + '.' + self._stName
dbc.execute("DROP TABLE {}".format(fullTableName))
else:
if not skipCheck:
@@ -1711,10 +1994,12 @@ class TdSuperTable:
dbName = self._dbName
dbc.execute("USE " + dbName)
- fullTableName = dbName + '.' + self._stName
+ fullTableName = dbName + '.' + self._stName
+
if dbc.existsSuperTable(self._stName):
- if dropIfExists:
- dbc.execute("DROP TABLE {}".format(fullTableName))
+ if dropIfExists:
+ dbc.execute("DROP TABLE {}".format(fullTableName))
+
else: # error
raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName))
@@ -1728,12 +2013,61 @@ class TdSuperTable:
)
else:
sql += " TAGS (dummy int) "
- dbc.execute(sql)
+ dbc.execute(sql)
+
+ def createConsumer(self, dbc,Consumer_nums):
+
+ def generateConsumer(current_topic_list):
+ conf = TaosTmqConf()
+ conf.set("group.id", "tg2")
+ conf.set("td.connect.user", "root")
+ conf.set("td.connect.pass", "taosdata")
+ conf.set("enable.auto.commit", "true")
+ def tmq_commit_cb_print(tmq, resp, offset, param=None):
+ print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}")
+ conf.set_auto_commit_cb(tmq_commit_cb_print, None)
+ consumer = conf.new_consumer()
+ topic_list = TaosTmqList()
+ for topic in current_topic_list:
+ topic_list.append(topic)
+ try:
+ consumer.subscribe(topic_list)
+ except TmqError as e :
+ pass
+
+ # consumer with random work life
+ time_start = time.time()
+ while 1:
+ res = consumer.poll(1000)
+ if time.time() - time_start >random.randint(5,50) :
+ break
+ try:
+ consumer.unsubscribe()
+ except TmqError as e :
+ pass
+ return
+
+        # multi Consumer
+ current_topic_list = self.getTopicLists(dbc)
+ for i in range(Consumer_nums):
+ consumer_inst = threading.Thread(target=generateConsumer, args=(current_topic_list,))
+ self._ConsumerInsts.append(consumer_inst)
+
+ for ConsumerInst in self._ConsumerInsts:
+ ConsumerInst.start()
+ for ConsumerInst in self._ConsumerInsts:
+ ConsumerInst.join()
+
+ def getTopicLists(self, dbc: DbConn):
+ dbc.query("show topics ")
+ topics = dbc.getQueryResult()
+ topicLists = [v[0] for v in topics]
+ return topicLists
def getRegTables(self, dbc: DbConn):
dbName = self._dbName
try:
- dbc.query("select TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later
+ dbc.query("select distinct TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later
except taos.error.ProgrammingError as err:
errno2 = Helper.convertErrno(err.errno)
Logging.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err))
@@ -1743,7 +2077,75 @@ class TdSuperTable:
return [v[0] for v in qr] # list transformation, ref: https://stackoverflow.com/questions/643823/python-list-transformation
def hasRegTables(self, dbc: DbConn):
- return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0
+
+ if dbc.existsSuperTable(self._stName):
+
+ return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0
+ else:
+ return False
+
+ def hasStreamTables(self,dbc: DbConn):
+
+ return dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) > 0
+
+ def hasStreams(self,dbc: DbConn):
+ return dbc.query("show streams") > 0
+
+ def hasTopics(self,dbc: DbConn):
+
+ return dbc.query("show topics") > 0
+
+ def dropTopics(self,dbc: DbConn , dbname=None,stb_name=None):
+ dbc.query("show topics ")
+ topics = dbc.getQueryResult()
+
+ if dbname !=None and stb_name == None :
+
+ for topic in topics:
+ if dbname in topic[0] and topic[0].startswith("database"):
+ try:
+ dbc.execute('drop topic {}'.format(topic[0]))
+ Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time()))
+ except taos.error.ProgrammingError as err:
+ errno = Helper.convertErrno(err.errno)
+ if errno in [0x03EB]: # Topic subscribed cannot be dropped
+ pass
+ # for subsript in subscriptions:
+
+ else:
+ pass
+
+ pass
+ return True
+ elif dbname !=None and stb_name!= None:
+ for topic in topics:
+ if topic[0].startswith(self._dbName) and topic[0].endswith('topic'):
+ dbc.execute('drop topic {}'.format(topic[0]))
+ Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time()))
+ return True
+ else:
+ return True
+ pass
+
+ def dropStreams(self,dbc:DbConn):
+ dbc.query("show streams ")
+ Streams = dbc.getQueryResult()
+ for Stream in Streams:
+ if Stream[0].startswith(self._dbName):
+ dbc.execute('drop stream {}'.format(Stream[0]))
+
+ return not dbc.query("show streams ") > 0
+
+ def dropStreamTables(self, dbc: DbConn):
+ dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName))
+
+ StreamTables = dbc.getQueryResult()
+
+ for StreamTable in StreamTables:
+ if self.dropStreams(dbc):
+ dbc.execute('drop table {}.{}'.format(self._dbName,StreamTable[0]))
+
+ return not dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName))
def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str):
'''
@@ -1838,10 +2240,46 @@ class TdSuperTable:
# Run the query against the regular table first
doAggr = (Dice.throw(2) == 0) # 1 in 2 chance
if not doAggr: # don't do aggregate query, just simple one
+ commonExpr = Dice.choice([
+ '*',
+ 'abs(speed)',
+ 'acos(speed)',
+ 'asin(speed)',
+ 'atan(speed)',
+ 'ceil(speed)',
+ 'cos(speed)',
+ 'cos(speed)',
+ 'floor(speed)',
+ 'log(speed,2)',
+ 'pow(speed,2)',
+ 'round(speed)',
+ 'sin(speed)',
+ 'sqrt(speed)',
+ 'char_length(color)',
+ 'concat(color,color)',
+ 'concat_ws(" ", color,color," ")',
+ 'length(color)',
+ 'lower(color)',
+ 'ltrim(color)',
+ 'substr(color , 2)',
+ 'upper(color)',
+ 'cast(speed as double)',
+ 'cast(ts as bigint)',
+ # 'TO_ISO8601(color)',
+ # 'TO_UNIXTIMESTAMP(ts)',
+ 'now()',
+ 'timediff(ts,now)',
+ 'timezone()',
+ 'TIMETRUNCATE(ts,1s)',
+ 'TIMEZONE()',
+ 'TODAY()',
+ 'distinct(color)'
+ ]
+ )
ret.append(SqlQuery( # reg table
- "select {} from {}.{}".format('*', self._dbName, rTbName)))
+ "select {} from {}.{}".format(commonExpr, self._dbName, rTbName)))
ret.append(SqlQuery( # super table
- "select {} from {}.{}".format('*', self._dbName, self.getName())))
+ "select {} from {}.{}".format(commonExpr, self._dbName, self.getName())))
else: # Aggregate query
aggExpr = Dice.choice([
'count(*)',
@@ -1857,17 +2295,34 @@ class TdSuperTable:
'top(speed, 50)', # TODO: not supported?
'bottom(speed, 50)', # TODO: not supported?
'apercentile(speed, 10)', # TODO: TD-1316
- # 'last_row(speed)', # TODO: commented out per TD-3231, we should re-create
+ 'last_row(*)', # TODO: commented out per TD-3231, we should re-create
# Transformation Functions
# 'diff(speed)', # TODO: no supported?!
- 'spread(speed)'
+ 'spread(speed)',
+ 'elapsed(ts)',
+ 'mode(speed)',
+ 'bottom(speed,1)',
+ 'top(speed,1)',
+ 'tail(speed,1)',
+ 'unique(color)',
+ 'csum(speed)',
+ 'DERIVATIVE(speed,1s,1)',
+ 'diff(speed,1)',
+ 'irate(speed)',
+ 'mavg(speed,3)',
+ 'sample(speed,5)',
+ 'STATECOUNT(speed,"LT",1)',
+ 'STATEDURATION(speed,"LT",1)',
+ 'twa(speed)'
+
]) # TODO: add more from 'top'
# if aggExpr not in ['stddev(speed)']: # STDDEV not valid for super tables?! (Done in TD-1049)
sql = "select {} from {}.{}".format(aggExpr, self._dbName, self.getName())
if Dice.throw(3) == 0: # 1 in X chance
- sql = sql + ' GROUP BY color'
+ partion_expr = Dice.choice(['color','tbname'])
+ sql = sql + ' partition BY ' + partion_expr + ' order by ' + partion_expr
Progress.emit(Progress.QUERY_GROUP_BY)
# Logging.info("Executing GROUP-BY query: " + sql)
ret.append(SqlQuery(sql))
@@ -1974,6 +2429,7 @@ class TaskDropSuperTable(StateTransitionTask):
isSuccess = False
Logging.debug("[DB] Acceptable error when dropping a table")
continue # try to delete next regular table
+
if (not tickOutput):
tickOutput = True # Print only one time
@@ -1985,6 +2441,8 @@ class TaskDropSuperTable(StateTransitionTask):
# Drop the super table itself
tblName = self._db.getFixedSuperTableName()
self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName))
+
+
class TaskAlterTags(StateTransitionTask):
@@ -2234,6 +2692,220 @@ class TaskAddData(StateTransitionTask):
self.activeTable.discard(i) # not raising an error, unlike remove
+class TaskDeleteData(StateTransitionTask):
+ # Track which table is being actively worked on
+ activeTable: Set[int] = set()
+
+ # We use these two files to record operations to DB, useful for power-off tests
+ fAddLogReady = None # type: Optional[io.TextIOWrapper]
+ fAddLogDone = None # type: Optional[io.TextIOWrapper]
+
+ @classmethod
+ def prepToRecordOps(cls):
+ if Config.getConfig().record_ops:
+ if (cls.fAddLogReady is None):
+ Logging.info(
+ "Recording in a file operations to be performed...")
+ cls.fAddLogReady = open("add_log_ready.txt", "w")
+ if (cls.fAddLogDone is None):
+ Logging.info("Recording in a file operations completed...")
+ cls.fAddLogDone = open("add_log_done.txt", "w")
+
+ @classmethod
+ def getEndState(cls):
+ return StateHasData()
+
+ @classmethod
+ def canBeginFrom(cls, state: AnyState):
+ return state.canDeleteData()
+
+ def _lockTableIfNeeded(self, fullTableName, extraMsg = ''):
+ if Config.getConfig().verify_data:
+ # Logging.info("Locking table: {}".format(fullTableName))
+ self.lockTable(fullTableName)
+ # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName))
+ # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+ else:
+ # Logging.info("Skipping locking table")
+ pass
+
+ def _unlockTableIfNeeded(self, fullTableName):
+ if Config.getConfig().verify_data:
+ # Logging.info("Unlocking table: {}".format(fullTableName))
+ self.unlockTable(fullTableName)
+ # Logging.info("Table unlocked: {}".format(fullTableName))
+ else:
+ pass
+ # Logging.info("Skipping unlocking table")
+
+ def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
+ numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
+ del_Records = int(numRecords/5)
+ if Dice.throw(2) == 0:
+ for j in range(del_Records): # number of records per table
+ intToWrite = db.getNextInt()
+ nextTick = db.getNextTick()
+ # nextColor = db.getNextColor()
+ if Config.getConfig().record_ops:
+ self.prepToRecordOps()
+ if self.fAddLogReady is None:
+ raise CrashGenError("Unexpected empty fAddLogReady")
+ self.fAddLogReady.write("Ready to delete {} to {}\n".format(intToWrite, regTableName))
+ self.fAddLogReady.flush()
+ os.fsync(self.fAddLogReady.fileno())
+
+ # TODO: too ugly trying to lock the table reliably, refactor...
+ fullTableName = db.getName() + '.' + regTableName
+ self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock
+
+ try:
+ sql = "delete from {} where ts = '{}' ;".format( # removed: tags ('{}', {})
+ fullTableName,
+ # ds.getFixedSuperTableName(),
+ # ds.getNextBinary(), ds.getNextFloat(),
+ nextTick)
+
+ # print(sql)
+ # Logging.info("Adding data: {}".format(sql))
+ dbc.execute(sql)
+ # Logging.info("Data added: {}".format(sql))
+ intWrote = intToWrite
+
+ # Quick hack, attach an update statement here. TODO: create an "update" task
+ if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shadow DB
+ intToUpdate = db.getNextInt() # Updated, but should not succeed
+ # nextColor = db.getNextColor()
+ sql = "delete from {} where ts = '{}' ;".format( # repeat the delete on the same ts; expected to be a no-op
+ fullTableName,
+ nextTick)
+ # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format(
+ # fullTableName, db.getNextInt(), db.getNextColor(), nextTick)
+ dbc.execute(sql)
+ intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this.
+
+ except: # Any exception at all
+ self._unlockTableIfNeeded(fullTableName)
+ raise
+
+ # Now read it back and verify, we might encounter an error if table is dropped
+ if Config.getConfig().verify_data: # only if command line asks for it
+ try:
+ dbc.query("SELECT * from {}.{} WHERE ts='{}'".
+ format(db.getName(), regTableName, nextTick))
+ result = dbc.getQueryResult()
+ if len(result)==0:
+ # means data has been delete
+ print("D1",end="") # D1 means the row was deleted successfully
+ else:
+ print("DF",end="") # DF means delete failed
+ except taos.error.ProgrammingError as err:
+ errno = Helper.convertErrno(err.errno)
+ # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
+ # print("D1",end="") # D1 means delete data success and only 1 record
+
+ if errno in [0x218, 0x362,0x2662]: # table doesn't exist
+ # do nothing
+ pass
+ else:
+ # Re-throw otherwise
+ raise
+ finally:
+ self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock
+ # Done with read-back verification, unlock the table now
+ # Successfully wrote the data into the DB, let's record it somehow
+ te.recordDataMark(intWrote)
+ else:
+
+ # delete all rows and verify; the table is expected to be empty afterwards
+ if Config.getConfig().record_ops:
+ self.prepToRecordOps()
+ if self.fAddLogReady is None:
+ raise CrashGenError("Unexpected empty fAddLogReady")
+ self.fAddLogReady.write("Ready to delete all data from {}\n".format(regTableName))
+ self.fAddLogReady.flush()
+ os.fsync(self.fAddLogReady.fileno())
+
+ # TODO: too ugly trying to lock the table reliably, refactor...
+ fullTableName = db.getName() + '.' + regTableName
+ self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock
+
+ try:
+ sql = "delete from {} ;".format( # removed: tags ('{}', {})
+ fullTableName)
+ # Logging.info("Adding data: {}".format(sql))
+ dbc.execute(sql)
+ # Logging.info("Data added: {}".format(sql))
+
+ # Quick hack, attach an update statement here. TODO: create an "update" task
+ if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shadow DB
+ sql = "delete from {} ;".format( # repeat the full-table delete; expected to be a no-op
+ fullTableName)
+ dbc.execute(sql)
+
+ except: # Any exception at all
+ self._unlockTableIfNeeded(fullTableName)
+ raise
+
+ # Now read it back and verify, we might encounter an error if table is dropped
+ if Config.getConfig().verify_data: # only if command line asks for it
+ try:
+ dbc.query("SELECT * from {}.{}".
+ format(db.getName(), regTableName))
+ result = dbc.getQueryResult()
+ if len(result)==0:
+ # means data has been delete
+ print("DA",end="")
+ else:
+ print("DF",end="") # DF means delete failed
+ except taos.error.ProgrammingError as err:
+ errno = Helper.convertErrno(err.errno)
+ # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
+ # print("Da",end="") # Da means delete data success and for all datas
+
+ if errno in [0x218, 0x362,0x2662]: # table doesn't exist
+ # do nothing
+ pass
+ else:
+ # Re-throw otherwise
+ raise
+ finally:
+ self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock
+ # Done with read-back verification, unlock the table now
+
+ if Config.getConfig().record_ops:
+ if self.fAddLogDone is None:
+ raise CrashGenError("Unexpected empty fAddLogDone")
+ self.fAddLogDone.write("Deleted all data from {}\n".format(regTableName))
+ self.fAddLogDone.flush()
+ os.fsync(self.fAddLogDone.fileno())
+
+ def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
+ # ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access
+ db = self._db
+ dbc = wt.getDbConn()
+ numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES
+ numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
+ tblSeq = list(range(numTables ))
+ random.shuffle(tblSeq) # now we have random sequence
+ for i in tblSeq:
+ if (i in self.activeTable): # wow already active
+ # print("x", end="", flush=True) # concurrent insertion
+ Progress.emit(Progress.CONCURRENT_INSERTION)
+ else:
+ self.activeTable.add(i) # marking it active
+
+ dbName = db.getName()
+ sTable = db.getFixedSuperTable()
+ regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i)
+ fullTableName = dbName + '.' + regTableName
+ # self._lockTable(fullTableName) # "create table" below. Stop it if the table is "locked"
+ sTable.ensureRegTable(self, wt.getDbConn(), regTableName) # Ensure the table exists
+ # self._unlockTable(fullTableName)
+
+ self._deleteData(db, dbc, regTableName, te)
+
+ self.activeTable.discard(i) # not raising an error, unlike remove
+
class ThreadStacks: # stack info for all threads
def __init__(self):
@@ -2244,6 +2916,9 @@ class ThreadStacks: # stack info for all threads
shortTid = th.native_id % 10000 #type: ignore
self._allStacks[shortTid] = stack # Was using th.native_id
+ def record_current_time(self,current_time):
+ self.current_time = current_time
+
def print(self, filteredEndName = None, filterInternal = False):
for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
lastFrame = stack[-1]
@@ -2258,8 +2933,11 @@ class ThreadStacks: # stack info for all threads
continue # ignore
# Now print
print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(shortTid))
+
lastSqlForThread = DbConn.fetchSqlForThread(shortTid)
- print("Last SQL statement attempted from thread {} is: {}".format(shortTid, lastSqlForThread))
+ last_sql_commit_time = DbConn.get_save_sql_time(shortTid)
+ # time_cost = DbConn.get_time_cost()
+ print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, self.current_time-last_sql_commit_time ,lastSqlForThread))
stackFrame = 0
for frame in stack: # was using: reversed(stack)
# print(frame)
@@ -2268,6 +2946,8 @@ class ThreadStacks: # stack info for all threads
print(" {}".format(frame.line))
stackFrame += 1
print("-----> End of Thread Info ----->\n")
+ if self.current_time-last_sql_commit_time >100: # dead lock occured
+ print("maybe dead locked of thread {} ".format(shortTid))
class ClientManager:
def __init__(self):
@@ -2631,4 +3311,3 @@ class Container():
return
self._verifyValidProperty(name)
self._cargo[name] = value
-
diff --git a/tests/pytest/crash_gen/shared/db.py b/tests/pytest/crash_gen/shared/db.py
index 60c830f4f7..05711efbc6 100644
--- a/tests/pytest/crash_gen/shared/db.py
+++ b/tests/pytest/crash_gen/shared/db.py
@@ -26,10 +26,13 @@ class DbConn:
TYPE_NATIVE = "native-c"
TYPE_REST = "rest-api"
TYPE_INVALID = "invalid"
+
+
# class variables
lastSqlFromThreads : dict[int, str] = {} # stored by thread id, obtained from threading.current_thread().ident%10000
-
+ spendThreads : dict[int, float] = {} # stored by thread id, obtained from threading.current_thread().ident%10000
+ current_time : dict[int, float] = {} # save current time
@classmethod
def saveSqlForCurrentThread(cls, sql: str):
'''
@@ -37,15 +40,56 @@ class DbConn:
run into a dead-lock situation, we can pick out the deadlocked thread, and use
that information to find what what SQL statement is stuck.
'''
+
th = threading.current_thread()
shortTid = th.native_id % 10000 #type: ignore
cls.lastSqlFromThreads[shortTid] = sql # Save this for later
+ cls.record_save_sql_time()
@classmethod
- def fetchSqlForThread(cls, shortTid : int) -> str :
+ def fetchSqlForThread(cls, shortTid : int) -> str :
+
+ print("=======================")
if shortTid not in cls.lastSqlFromThreads:
raise CrashGenError("No last-attempted-SQL found for thread id: {}".format(shortTid))
- return cls.lastSqlFromThreads[shortTid]
+ return cls.lastSqlFromThreads[shortTid]
+
+ @classmethod
+ def get_save_sql_time(cls, shortTid : int):
+ '''
+ Return the wall-clock time at which the given thread last saved
+ an SQL statement (as recorded by record_save_sql_time), so a
+ watchdog can tell how long that thread has been stuck.
+ '''
+ return cls.current_time[shortTid]
+
+ @classmethod
+ def record_save_sql_time(cls):
+ '''
+ Record the current wall-clock time for the calling thread. Called
+ whenever a thread saves an SQL statement, so we can later measure
+ how long the thread has been stuck on that statement.
+ '''
+ th = threading.current_thread()
+ shortTid = th.native_id % 10000 #type: ignore
+ cls.current_time[shortTid] = float(time.time()) # Save this for later
+
+ @classmethod
+ def sql_exec_spend(cls, cost: float):
+ '''
+ Record, for the calling thread, how long its last SQL statement
+ took to execute. A negative value (-2) indicates the statement
+ raised an exception before completing.
+ '''
+ th = threading.current_thread()
+ shortTid = th.native_id % 10000 #type: ignore
+ cls.spendThreads[shortTid] = cost # Save this for later
+
+ @classmethod
+ def get_time_cost(cls) ->float:
+ th = threading.current_thread()
+ shortTid = th.native_id % 10000 #type: ignore
+ return cls.spendThreads.get(shortTid)
@classmethod
def create(cls, connType, dbTarget):
@@ -61,6 +105,7 @@ class DbConn:
def createNative(cls, dbTarget) -> DbConn:
return cls.create(cls.TYPE_NATIVE, dbTarget)
+
@classmethod
def createRest(cls, dbTarget) -> DbConn:
return cls.create(cls.TYPE_REST, dbTarget)
@@ -75,6 +120,7 @@ class DbConn:
return "[DbConn: type={}, target={}]".format(self._type, self._dbTarget)
def getLastSql(self):
+
return self._lastSql
def open(self):
@@ -184,13 +230,19 @@ class DbConnRest(DbConn):
def _doSql(self, sql):
self._lastSql = sql # remember this, last SQL attempted
self.saveSqlForCurrentThread(sql) # Save in global structure too. #TODO: combine with above
- try:
+ time_cost = -1
+ time_start = time.time()
+ try:
r = requests.post(self._url,
data = sql,
- auth = HTTPBasicAuth('root', 'taosdata'))
+ auth = HTTPBasicAuth('root', 'taosdata'))
except:
print("REST API Failure (TODO: more info here)")
+ self.sql_exec_spend(-2)
raise
+ finally:
+ time_cost = time.time()- time_start
+ self.sql_exec_spend(time_cost)
rj = r.json()
# Sanity check for the "Json Result"
if ('status' not in rj):
@@ -223,6 +275,8 @@ class DbConnRest(DbConn):
"[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql))
return nRows
+
+
def query(self, sql): # return rows affected
return self.execute(sql)
@@ -336,6 +390,7 @@ class MyTDSql:
raise
return self.affectedRows
+
class DbTarget:
def __init__(self, cfgPath, hostAddr, port):
self.cfgPath = cfgPath
@@ -355,6 +410,7 @@ class DbConnNative(DbConn):
# _connInfoDisplayed = False # TODO: find another way to display this
totalConnections = 0 # Not private
totalRequests = 0
+ time_cost = -1
def __init__(self, dbTarget):
super().__init__(dbTarget)
@@ -413,8 +469,18 @@ class DbConnNative(DbConn):
"Cannot exec SQL unless db connection is open", CrashGenError.DB_CONNECTION_NOT_OPEN)
Logging.debug("[SQL] Executing SQL: {}".format(sql))
self._lastSql = sql
+ time_cost = -1
+ nRows = 0
+ time_start = time.time()
self.saveSqlForCurrentThread(sql) # Save in global structure too. #TODO: combine with above
- nRows = self._tdSql.execute(sql)
+ try:
+ nRows= self._tdSql.execute(sql)
+ except Exception as e:
+ self.sql_exec_spend(-2)
+ finally:
+ time_cost = time.time() - time_start
+ self.sql_exec_spend(time_cost)
+
cls = self.__class__
cls.totalRequests += 1
Logging.debug(
@@ -494,4 +560,3 @@ class DbManager():
self._dbConn.close()
self._dbConn = None
Logging.debug("DbManager closed DB connection...")
-
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index b762f8c77f..22e6127973 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -116,6 +116,7 @@ class TDDnode:
self.deployed = 0
self.testCluster = False
self.valgrind = 0
+ self.asan = False
self.remoteIP = ""
self.cfgDict = {
"monitor": "0",
@@ -158,6 +159,15 @@ class TDDnode:
def setValgrind(self, value):
self.valgrind = value
+ def setAsan(self, value):
+ self.asan = value
+ if value:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ self.execPath = os.path.abspath(self.path + "/community/tests/script/sh/exec.sh")
+ else:
+ self.execPath = os.path.abspath(self.path + "/tests/script/sh/exec.sh")
+
def getDataSize(self):
totalSize = 0
@@ -383,8 +393,14 @@ class TDDnode:
cmd = "mintty -h never %s -c %s" % (
binPath, self.cfgDir)
else:
- cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
- binPath, self.cfgDir)
+ if self.asan:
+ asanDir = "%s/sim/asan/dnode%d.asan" % (
+ self.path, self.index)
+ cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
+ binPath, self.cfgDir, asanDir)
+ else:
+ cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
+ binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
@@ -444,8 +460,14 @@ class TDDnode:
tdLog.exit("dnode:%d is not deployed" % (self.index))
if self.valgrind == 0:
- cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
- binPath, self.cfgDir)
+ if self.asan:
+ asanDir = "%s/sim/asan/dnode%d.asan" % (
+ self.path, self.index)
+ cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
+ binPath, self.cfgDir, asanDir)
+ else:
+ cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
+ binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --log-file=\"%s/../log/valgrind.log\" --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"%self.cfgDir
@@ -464,6 +486,12 @@ class TDDnode:
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
def stop(self):
+ if self.asan:
+ stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
+ tdLog.info("execute script: " + stopCmd)
+ os.system(stopCmd)
+ return
+
if (not self.remoteIP == ""):
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
tdLog.info("stop dnode%d"%self.index)
@@ -501,6 +529,12 @@ class TDDnode:
def stoptaosd(self):
+ if self.asan:
+ stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
+ tdLog.info("execute script: " + stopCmd)
+ os.system(stopCmd)
+ return
+
if (not self.remoteIP == ""):
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
tdLog.info("stop dnode%d"%self.index)
@@ -534,6 +568,13 @@ class TDDnode:
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
def forcestop(self):
+ if self.asan:
+ stopCmd = "%s -s stop -n dnode%d -x SIGKILL" % \
+ (self.execPath, self.index)
+ tdLog.info("execute script: " + stopCmd)
+ os.system(stopCmd)
+ return
+
if (not self.remoteIP == ""):
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].forcestop()"%(self.index-1,self.index-1))
return
@@ -606,6 +647,7 @@ class TDDnodes:
self.simDeployed = False
self.testCluster = False
self.valgrind = 0
+ self.asan = False
self.killValgrind = 1
def init(self, path, remoteIP = ""):
@@ -629,6 +671,18 @@ class TDDnodes:
def setValgrind(self, value):
self.valgrind = value
+ def setAsan(self, value):
+ self.asan = value
+ if value:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ self.stopDnodesPath = os.path.abspath(self.path + "/community/tests/script/sh/stop_dnodes.sh")
+ self.stopDnodesSigintPath = os.path.abspath(self.path + "/community/tests/script/sh/sigint_stop_dnodes.sh")
+ else:
+ self.stopDnodesPath = os.path.abspath(self.path + "/tests/script/sh/stop_dnodes.sh")
+ self.stopDnodesSigintPath = os.path.abspath(self.path + "/tests/script/sh/sigint_stop_dnodes.sh")
+ tdLog.info("run in address sanitizer mode")
+
def setKillValgrind(self, value):
self.killValgrind = value
@@ -642,6 +696,7 @@ class TDDnodes:
self.check(index)
self.dnodes[index - 1].setTestCluster(self.testCluster)
self.dnodes[index - 1].setValgrind(self.valgrind)
+ self.dnodes[index - 1].setAsan(self.asan)
self.dnodes[index - 1].deploy(updatecfgDict)
def cfg(self, index, option, value):
@@ -692,8 +747,22 @@ class TDDnodes:
if index < 1 or index > 10:
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
+ def StopAllSigint(self):
+ tdLog.info("stop all dnodes sigint")
+ if self.asan:
+ tdLog.info("execute script: %s" % self.stopDnodesSigintPath)
+ os.system(self.stopDnodesSigintPath)
+ tdLog.info("execute finished")
+ return
+
def stopAll(self):
tdLog.info("stop all dnodes")
+ if self.asan:
+ tdLog.info("execute script: %s" % self.stopDnodesPath)
+ os.system(self.stopDnodesPath)
+ tdLog.info("execute finished")
+ return
+
if (not self.dnodes[0].remoteIP == ""):
self.dnodes[0].remoteExec(self.dnodes[0].cfgDict, "for i in range(len(tdDnodes.dnodes)):\n tdDnodes.dnodes[i].running=1\ntdDnodes.stopAll()")
return
diff --git a/tests/pytest/util/log.py b/tests/pytest/util/log.py
index 55cd42a6eb..000c907ea4 100644
--- a/tests/pytest/util/log.py
+++ b/tests/pytest/util/log.py
@@ -36,10 +36,10 @@ class TDLog:
printf("\033[1;32m%s %s\033[0m" % (datetime.datetime.now(), info))
def notice(self, err):
- printf("\033[1;33m%s %s\033[0m" % (datetime.datetime.now(), err))
+ print("\033[1;33m%s %s\033[0m" % (datetime.datetime.now(), err))
def exit(self, err):
- printf("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
+ print("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
sys.exit(1)
def printNoPrefix(self, info):
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index b320cf5995..9cfd1d368e 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -73,8 +73,15 @@ class TDSql:
expectErrNotOccured = True
try:
self.cursor.execute(sql)
- except BaseException:
+ except BaseException as e:
expectErrNotOccured = False
+ caller = inspect.getframeinfo(inspect.stack()[1][0])
+ self.error_info = repr(e)
+ # print(error_info)
+ # self.error_info = error_info[error_info.index('(')+1:-1].split(",")[0].replace("'","")
+ # self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","")
+ # print("!!!!!!!!!!!!!!",self.error_info)
+
if expectErrNotOccured:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql))
@@ -83,6 +90,8 @@ class TDSql:
self.queryCols = 0
self.queryResult = None
tdLog.info("sql:%s, expect error occured" % (sql))
+ return self.error_info
+
def query(self, sql, row_tag=None,queryTimes=10):
self.sql = sql
diff --git a/tests/script/coverage_test.sh b/tests/script/coverage_test.sh
index 457c9eae20..3983f533da 100755
--- a/tests/script/coverage_test.sh
+++ b/tests/script/coverage_test.sh
@@ -10,7 +10,8 @@ else
fi
today=`date +"%Y%m%d"`
-TDENGINE_DIR=/root/pxiao/TDengine
+TDENGINE_DIR=/root/TDengine
+JDBC_DIR=/root/taos-connector-jdbc
TDENGINE_COVERAGE_REPORT=$TDENGINE_DIR/tests/coverage-report-$today.log
# Color setting
@@ -20,7 +21,7 @@ GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
-function buildTDengine {
+function buildTDengine() {
echo "check if TDengine need build"
cd $TDENGINE_DIR
git remote prune origin > /dev/null
@@ -33,159 +34,145 @@ function buildTDengine {
# reset counter
lcov -d . --zerocounters
-
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
echo "repo up-to-date"
else
echo "repo need to pull"
fi
- git reset --hard
- git checkout -- .
- git checkout $branch
+ git reset --hard
+ git checkout -- .
+ git checkout $branch
+ git checkout -- .
git clean -dfx
- git pull
- git submodule update --init --recursive -f
+ git pull
[ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug
cd $TDENGINE_DIR/debug
- echo "rebuild.."
+ echo "rebuild.."
LOCAL_COMMIT=`git rev-parse --short @`
rm -rf *
if [ "$branch" == "3.0" ]; then
echo "3.0 ============="
- cmake -DCOVER=true -DBUILD_TEST=true ..
+ cmake -DCOVER=true -DBUILD_TEST=true -DBUILD_HTTP=false -DBUILD_TOOLS=true ..
else
cmake -DCOVER=true -DBUILD_TOOLS=true -DBUILD_HTTP=false .. > /dev/null
fi
- make -j4
+ make -j
make install
}
-function runGeneralCaseOneByOne {
+function runCasesOneByOne () {
while read -r line; do
- if [[ $line =~ ^./test.sh* ]]; then
- case=`echo $line | grep sim$ | awk '{print $NF}'`
-
- if [ -n "$case" ]; then
- date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && ./test.sh -f $case > /dev/null 2>&1 && \
- echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT \
- || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
- fi
+ cmd=`echo $line | cut -d',' -f 5`
+ if [[ "$2" == "sim" ]] && [[ $cmd == *"test.sh"* ]]; then
+ case=`echo $cmd | cut -d' ' -f 3`
+ start_time=`date +%s`
+ date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
+ echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT \
+ || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
+ end_time=`date +%s`
+ echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
+ elif [[ "$2" == "system-test" ]] && [[ $line == *"system-test"* ]]; then
+ case=`echo $cmd | cut -d' ' -f 4`
+ start_time=`date +%s`
+ date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
+ echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
+ echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
+ end_time=`date +%s`
+ echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
+ elif [[ "$2" == "develop-test" ]] && [[ $line == *"develop-test"* ]]; then
+ case=`echo $cmd | cut -d' ' -f 4`
+ start_time=`date +%s`
+ date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
+ echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
+ echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
+ end_time=`date +%s`
+ echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
fi
done < $1
}
-function runTestNGCaseOneByOne {
- while read -r line; do
- if [[ $line =~ ^taostest* ]]; then
- case=`echo $line | cut -d' ' -f 3 | cut -d'=' -f 2`
- yaml=`echo $line | cut -d' ' -f 2`
-
- if [ -n "$case" ]; then
- date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && taostest $yaml --case=$case --keep --disable_collection > /dev/null 2>&1 && \
- echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT \
- || echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
- fi
- fi
- done < $1
+function runUnitTest() {
+ echo "=== Run unit test case ==="
+ echo " $TDENGINE_DIR/debug"
+ cd $TDENGINE_DIR/debug
+ ctest -j12
+ echo "3.0 unit test done"
}
-function runTest {
- echo "run Test"
-
- if [ "$branch" == "3.0" ]; then
- echo "start run unit test case ................"
- echo " $TDENGINE_DIR/debug "
- cd $TDENGINE_DIR/debug
- ctest -j12
- echo "3.0 unit test done"
- fi
-
+function runSimCases() {
+ echo "=== Run sim cases ==="
+
cd $TDENGINE_DIR/tests/script
-
- [ -d ../../sim ] && rm -rf ../../sim
- [ -f $TDENGINE_COVERAGE_REPORT ] && rm $TDENGINE_COVERAGE_REPORT
-
- runGeneralCaseOneByOne jenkins/basic.txt
-
- sed -i "1i\SIM cases test result" $TDENGINE_COVERAGE_REPORT
-
- totalSuccess=`grep 'success' $TDENGINE_COVERAGE_REPORT | wc -l`
+ runCasesOneByOne ../parallel_test/cases.task sim
+
+ totalSuccess=`grep 'sim success' $TDENGINE_COVERAGE_REPORT | wc -l`
if [ "$totalSuccess" -gt "0" ]; then
- sed -i -e "2i\ ### Total $totalSuccess SIM test case(s) succeed! ###" $TDENGINE_COVERAGE_REPORT
+ echo "### Total $totalSuccess SIM test case(s) succeed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
fi
- totalFailed=`grep 'failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
+ totalFailed=`grep 'sim failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
if [ "$totalFailed" -ne "0" ]; then
- sed -i "3i\### Total $totalFailed SIM test case(s) failed! ###" $TDENGINE_COVERAGE_REPORT
+ echo "### Total $totalFailed SIM test case(s) failed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
fi
- sed "3G" $TDENGINE_COVERAGE_REPORT
+}
- stopTaosd
- echo "run TestNG cases"
- rm -rf /var/lib/taos/*
- rm -rf /var/log/taos/*
- nohup $TDENGINE_DIR/debug/build/bin/taosd -c /etc/taos > /dev/null 2>&1 &
- sleep 10
- cd $TDENGINE_DIR/../TestNG/cases
- runTestNGCaseOneByOne ../scripts/cases.txt
- echo "TestNG cases done"
+function runPythonCases() {
+ echo "=== Run python cases ==="
+
+ cd $TDENGINE_DIR/tests/system-test
+ runCasesOneByOne ../parallel_test/cases.task system-test
- cd $TDENGINE_DIR/tests
- rm -rf ../sim
- /root/pxiao/test-all-coverage.sh full python $branch | tee -a $TDENGINE_COVERAGE_REPORT
-
-
- sed -i "4i\Python cases test result" $TDENGINE_COVERAGE_REPORT
- totalPySuccess=`grep 'python case(s) succeed!' $TDENGINE_COVERAGE_REPORT | awk '{print $4}'`
- if [ "$totalPySuccess" -gt "0" ]; then
- sed -i -e "5i\ ### Total $totalPySuccess Python test case(s) succeed! ###" $TDENGINE_COVERAGE_REPORT
- fi
+ cd $TDENGINE_DIR/tests/develop-test
+ runCasesOneByOne ../parallel_test/cases.task develop-test
- totalPyFailed=`grep 'python case(s) failed!' $TDENGINE_COVERAGE_REPORT | awk '{print $4}'`
- if [ -z $totalPyFailed ]; then
- sed -i "6i\\n" $TDENGINE_COVERAGE_REPORT
- else
- sed -i "6i\### Total $totalPyFailed Python test case(s) failed! ###" $TDENGINE_COVERAGE_REPORT
- fi
-
- echo "### run JDBC test cases ###" | tee -a $TDENGINE_COVERAGE_REPORT
- # Test Connector
- stopTaosd
- nohup $TDENGINE_DIR/debug/build/bin/taosd -c /etc/taos > /dev/null 2>&1 &
- sleep 10
-
- cd $TDENGINE_DIR/src/connector/jdbc
- mvn clean package > /dev/null 2>&1
- mvn test > jdbc-out.log 2>&1
- tail -n 20 jdbc-out.log 2>&1 | tee -a $TDENGINE_COVERAGE_REPORT
-
- # Test C Demo
- stopTaosd
- $TDENGINE_DIR/debug/build/bin/taosd -c $TDENGINE_DIR/debug/test/cfg > /dev/null &
- sleep 10
- yes | $TDENGINE_DIR/debug/build/bin/demo 127.0.0.1 > /dev/null 2>&1 | tee -a $TDENGINE_COVERAGE_REPORT
-
- # Test waltest
- dataDir=`grep dataDir $TDENGINE_DIR/debug/test/cfg/taos.cfg|awk '{print $2}'`
- walDir=`find $dataDir -name "wal"|head -n1`
- echo "dataDir: $dataDir" | tee -a $TDENGINE_COVERAGE_REPORT
- echo "walDir: $walDir" | tee -a $TDENGINE_COVERAGE_REPORT
- if [ -n "$walDir" ]; then
- yes | $TDENGINE_DIR/debug/build/bin/waltest -p $walDir > /dev/null 2>&1 | tee -a $TDENGINE_COVERAGE_REPORT
+ totalSuccess=`grep 'py success' $TDENGINE_COVERAGE_REPORT | wc -l`
+ if [ "$totalSuccess" -gt "0" ]; then
+ echo "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
fi
- # run Unit Test
- echo "Run Unit Test: utilTest, queryTest and cliTest"
- #$TDENGINE_DIR/debug/build/bin/utilTest > /dev/null 2>&1 && echo "utilTest pass!" || echo "utilTest failed!"
- #$TDENGINE_DIR/debug/build/bin/queryTest > /dev/null 2>&1 && echo "queryTest pass!" || echo "queryTest failed!"
- #$TDENGINE_DIR/debug/build/bin/cliTest > /dev/null 2>&1 && echo "cliTest pass!" || echo "cliTest failed!"
+ totalFailed=`grep 'py failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
+ if [ "$totalFailed" -ne "0" ]; then
+ echo "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
+ fi
+}
+
+function runJDBCCases() {
+ echo "=== Run JDBC cases ==="
+
+ cd $JDBC_DIR
+ git checkout -- .
+ git reset --hard HEAD
+ git checkout main
+ git pull
stopTaosd
+ stopTaosadapter
+ taosd -c /etc/taos >> /dev/null 2>&1 &
+ taosadapter >> /dev/null 2>&1 &
+
+ mvn clean test > result.txt 2>&1
+ summary=`grep "Tests run:" result.txt | tail -n 1`
+ echo -e "### JDBC test result: $summary ###" | tee -a $TDENGINE_COVERAGE_REPORT
+}
+
+function runTest() {
+ echo "run Test"
+
+ cd $TDENGINE_DIR
+ [ -d sim ] && rm -rf sim
+ [ -f $TDENGINE_COVERAGE_REPORT ] && rm $TDENGINE_COVERAGE_REPORT
+
+ runUnitTest
+ runSimCases
+ runPythonCases
+ runJDBCCases
+
+ stopTaosd
cd $TDENGINE_DIR/tests/script
find . -name '*.sql' | xargs rm -f
@@ -203,15 +190,18 @@ function lcovFunc {
# remove exclude paths
if [ "$branch" == "3.0" ]; then
lcov --remove coverage.info \
- '*/contrib/*' '*/tests/*' '*/test/*'\
- '*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c'\
+ '*/contrib/*' '*/tests/*' '*/test/*' '*/tools/*' '*/libs/sync/*'\
+ '*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c' '*/tsdbDiskData.c'\
'*/texpr.c' '*/runUdf.c' '*/schDbg.c' '*/syncIO.c' '*/tdbOs.c' '*/pushServer.c' '*/osLz4.c'\
- '*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/tidpool.c' '*/tmempool.c'\
- '*/tthread.c' '*/tversion.c'\
+ '*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/tidpool.c' '*/tmempool.c'\
+ '*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.c' '*/shellAuto.c' '*/shellTire.c'\
+ '*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' '*/catalog.c'\
+      '*/tqSnapshot.c' '*/tsdbSnapshot.c' '*/metaSnapshot.c' '*/smaSnapshot.c' '*/tqOffsetSnapshot.c'\
+ '*/vnodeSnapshot.c' '*/metaSnapshot.c' '*/tsdbSnapshot.c' '*/mndGrant.c' '*/mndSnode.c' '*/streamRecover.c'\
--rc lcov_branch_coverage=1 -o coverage.info
else
lcov --remove coverage.info \
- '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' '*/ttype.h' '*/tarithoperator.c' '*/TSDBJNIConnector.c' '*/taosdemo.c'\
+ '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' '*/ttype.h' '*/tarithoperator.c' '*/TSDBJNIConnector.c' '*/taosdemo.c' '*/clientJniConnector.c'\
--rc lcov_branch_coverage=1 -o coverage.info
fi
@@ -257,35 +247,33 @@ function stopTaosd {
echo "Stop tasod end"
}
-function runTestRandomFail {
- exec_random_fail_sh=$1
- default_exec_sh=$TDENGINE_DIR/tests/script/sh/exec.sh
- [ -f $exec_random_fail_sh ] && cp $exec_random_fail_sh $default_exec_sh || exit 1
+function stopTaosadapter {
+ echo "Stop taosadapter"
+ systemctl stop taosadapter.service
+ PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'`
+ while [ -n "$PID" ]
+ do
+ pkill -TERM -x taosadapter
+ sleep 1
+        PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'`
+ done
+    echo "Stop taosadapter end"
- dnodes_random_fail_py=$TDENGINE_DIR/tests/pytest/util/dnodes-no-random-fail.py
- default_dnodes_py=$TDENGINE_DIR/tests/pytest/util/dnodes.py
- [ -f $dnodes_random_fail_py ] && cp $dnodes_random_fail_py $default_dnodes_py || exit 1
-
- runTest NoRandomFail
}
-WORK_DIR=/root/pxiao
+WORK_DIR=/root/
date >> $WORK_DIR/cron.log
echo "Run Coverage Test" | tee -a $WORK_DIR/cron.log
stopTaosd
+
buildTDengine
-
-#runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-random-fail.sh
-#runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-default.sh
-#runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-no-random-fail.sh
-
runTest
-
lcovFunc
-#sendReport
+
+sendReport
stopTaosd
date >> $WORK_DIR/cron.log
-echo "End of Coverage Test" | tee -a $WORK_DIR/cron.log
+echo "End of Coverage Test" | tee -a $WORK_DIR/cron.log
\ No newline at end of file
diff --git a/tests/script/sh/checkAsan.sh b/tests/script/sh/checkAsan.sh
index 184dc9a88f..8759db8722 100755
--- a/tests/script/sh/checkAsan.sh
+++ b/tests/script/sh/checkAsan.sh
@@ -3,6 +3,7 @@
set +e
#set -x
+unset LD_PRELOAD
SCRIPT_DIR=`dirname $0`
cd $SCRIPT_DIR/../
SCRIPT_DIR=`pwd`
@@ -15,25 +16,30 @@ else
fi
TAOS_DIR=`pwd`
-LOG_DIR=$TAOS_DIR/sim/tsim/asan
+LOG_DIR=$TAOS_DIR/sim/asan
error_num=`cat ${LOG_DIR}/*.asan | grep "ERROR" | wc -l`
memory_leak=`cat ${LOG_DIR}/*.asan | grep "Direct leak" | wc -l`
indirect_leak=`cat ${LOG_DIR}/*.asan | grep "Indirect leak" | wc -l`
runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | wc -l`
+python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l`
echo -e "\033[44;32;1m"asan error_num: $error_num"\033[0m"
echo -e "\033[44;32;1m"asan memory_leak: $memory_leak"\033[0m"
echo -e "\033[44;32;1m"asan indirect_leak: $indirect_leak"\033[0m"
echo -e "\033[44;32;1m"asan runtime error: $runtime_error"\033[0m"
+echo -e "\033[44;32;1m"asan python error: $python_error"\033[0m"
-let "errors=$error_num+$memory_leak+$indirect_leak+$runtime_error"
+let "errors=$error_num+$memory_leak+$indirect_leak+$runtime_error+$python_error"
if [ $errors -eq 0 ]; then
echo -e "\033[44;32;1m"no asan errors"\033[0m"
exit 0
else
echo -e "\033[44;31;1m"asan total errors: $errors"\033[0m"
+ if [ $python_error -ne 0 ]; then
+ cat ${LOG_DIR}/*.info
+ fi
cat ${LOG_DIR}/*.asan
exit 1
fi
\ No newline at end of file
diff --git a/tests/script/sh/exec.sh b/tests/script/sh/exec.sh
index 3f2c5d268c..c8cb121b8a 100755
--- a/tests/script/sh/exec.sh
+++ b/tests/script/sh/exec.sh
@@ -11,6 +11,7 @@
set +e
#set -x
+unset LD_PRELOAD
UNAME_BIN=`which uname`
OS_TYPE=`$UNAME_BIN`
@@ -80,7 +81,7 @@ LOG_DIR=$NODE_DIR/log
DATA_DIR=$NODE_DIR/data
MGMT_DIR=$NODE_DIR/data/mgmt
TSDB_DIR=$NODE_DIR/data/tsdb
-ASAN_DIR=$SIM_DIR/tsim/asan
+ASAN_DIR=$SIM_DIR/asan
TAOS_CFG=$NODE_DIR/cfg/taos.cfg
echo ------------ $EXEC_OPTON $NODE_NAME
diff --git a/tests/script/sh/sigint_stop_dnodes.sh b/tests/script/sh/sigint_stop_dnodes.sh
new file mode 100755
index 0000000000..83a4f1c1d5
--- /dev/null
+++ b/tests/script/sh/sigint_stop_dnodes.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+set +e
+#set -x
+
+unset LD_PRELOAD
+UNAME_BIN=`which uname`
+OS_TYPE=`$UNAME_BIN`
+
+PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
+echo "Killing taosd processes " $PID
+while [ -n "$PID" ]; do
+ #echo "Killing taosd processes " $PID
+ kill $PID
+ PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
+done
diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh
index d30c75022a..ce2d7144f9 100755
--- a/tests/script/sh/stop_dnodes.sh
+++ b/tests/script/sh/stop_dnodes.sh
@@ -1,5 +1,9 @@
#!/bin/sh
+set +e
+#set -x
+
+unset LD_PRELOAD
UNAME_BIN=`which uname`
OS_TYPE=`$UNAME_BIN`
@@ -22,16 +26,3 @@ while [ -n "$PID" ]; do
fi
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
-
-PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'`
-while [ -n "$PID" ]; do
- echo kill -9 $PID
- pkill -9 tarbitrator
- if [ "$OS_TYPE" != "Darwin" ]; then
- fuser -k -n tcp 6040
- else
- lsof -nti:6040 | xargs kill -9
- fi
- PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'`
-done
-
diff --git a/tests/script/test.sh b/tests/script/test.sh
index e1db1c14de..a7a5d34fbe 100755
--- a/tests/script/test.sh
+++ b/tests/script/test.sh
@@ -66,16 +66,13 @@ else
fi
declare -x BUILD_DIR=$TOP_DIR/$BIN_DIR
-
declare -x SIM_DIR=$TOP_DIR/sim
-
PROGRAM=$BUILD_DIR/build/bin/tsim
-
PRG_DIR=$SIM_DIR/tsim
CFG_DIR=$PRG_DIR/cfg
LOG_DIR=$PRG_DIR/log
DATA_DIR=$PRG_DIR/data
-ASAN_DIR=$PRG_DIR/asan
+ASAN_DIR=$SIM_DIR/asan
chmod -R 777 $PRG_DIR
echo "------------------------------------------------------------------------"
@@ -141,11 +138,15 @@ if [ -n "$FILE_NAME" ]; then
echo "AsanDir:" $ASAN_DIR/tsim.asan
eval $PROGRAM -c $CFG_DIR -f $FILE_NAME 2> $ASAN_DIR/tsim.asan
result=$?
- echo "Execute result: " $result
+ echo "Execute result:" $result
if [ $result -eq 0 ]; then
+ $CODE_DIR/sh/sigint_stop_dnodes.sh
$CODE_DIR/sh/checkAsan.sh
else
+ echo "TSIM has asan errors"
+ sleep 1
+ $CODE_DIR/sh/checkAsan.sh
exit 1
fi
fi
diff --git a/tests/script/tsim/db/alter_replica_13.sim b/tests/script/tsim/db/alter_replica_13.sim
index 007bb00f07..b3231cc24b 100644
--- a/tests/script/tsim/db/alter_replica_13.sim
+++ b/tests/script/tsim/db/alter_replica_13.sim
@@ -138,10 +138,10 @@ while $i < 10
if $data[0][4] != leader then
return -1
endi
- if $data[0][6] != follower then
+ if $data[0][6] == leader then
return -1
endi
- if $data[0][8] != follower then
+ if $data[0][8] == leader then
return -1
endi
endw
diff --git a/tests/script/tsim/dnode/drop_dnode_force.sim b/tests/script/tsim/dnode/drop_dnode_force.sim
index 10edacf3aa..26e48933be 100644
--- a/tests/script/tsim/dnode/drop_dnode_force.sim
+++ b/tests/script/tsim/dnode/drop_dnode_force.sim
@@ -192,7 +192,7 @@ if $data(5)[4] != ready then
goto step5
endi
-print =============== step5: drop dnode 2
+print =============== step5a: drop dnode 2
sql_error drop dnode 2
sql drop dnode 2 force
@@ -204,15 +204,23 @@ if $rows != 4 then
return -1
endi
+$x = 0
+step5a:
+  $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ print ====> dnode not online!
+ return -1
+ endi
print select * from information_schema.ins_mnodes;
sql select * from information_schema.ins_mnodes
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
if $rows != 2 then
- return -1
+ goto step5a
endi
if $data(1)[2] != leader then
- return -1
+ goto step5a
endi
sql select * from information_schema.ins_qnodes
diff --git a/tests/script/tsim/mnode/basic5.sim b/tests/script/tsim/mnode/basic5.sim
index e96b193b83..f0f0f18576 100644
--- a/tests/script/tsim/mnode/basic5.sim
+++ b/tests/script/tsim/mnode/basic5.sim
@@ -96,7 +96,7 @@ sql_error drop mnode on dnode 4
sql_error drop mnode on dnode 5
sql_error drop mnode on dnode 6
-system sh/exec.sh -n dnode2 -s stop -x SIGKILL
+system sh/exec.sh -n dnode2 -s stop
$x = 0
step5:
$x = $x + 1
@@ -151,7 +151,7 @@ if $data(4)[4] != ready then
endi
print =============== step6: stop mnode1
-system sh/exec.sh -n dnode1 -s stop -x SIGKILL
+system sh/exec.sh -n dnode1 -s stop
# sql_error drop mnode on dnode 1
$x = 0
@@ -205,8 +205,7 @@ if $data(4)[4] != ready then
endi
print =============== step8: stop mnode1 and drop it
-system sh/exec.sh -n dnode1 -s stop -x SIGKILL
-sql_error drop mnode on dnode 1
+system sh/exec.sh -n dnode1 -s stop
$x = 0
step81:
@@ -234,42 +233,15 @@ if $leaderNum != 1 then
endi
print =============== step9: start mnode1 and wait it dropped
-system sh/exec.sh -n dnode1 -s start
-sql drop mnode on dnode 1 -x step90
-step90:
-
+print check mnode has leader step9a
$x = 0
-step91:
+step9a:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
-sql select * from information_schema.ins_dnodes
-print ===> $data00 $data01 $data02 $data03 $data04 $data05
-print ===> $data10 $data11 $data12 $data13 $data14 $data15
-print ===> $data20 $data21 $data22 $data23 $data24 $data25
-print ===> $data30 $data31 $data32 $data33 $data34 $data35
-if $data(1)[4] != ready then
- goto step91
-endi
-if $data(2)[4] != ready then
- goto step91
-endi
-if $data(3)[4] != ready then
- goto step91
-endi
-if $data(4)[4] != ready then
- goto step91
-endi
-
-$x = 0
-step92:
- $x = $x + 1
- sleep 1000
- if $x == 20 then
- return -1
- endi
+print check mnode leader
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
@@ -285,10 +257,95 @@ if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
- goto step92
+ goto step9a
+endi
+
+print start dnode1 step9b
+system sh/exec.sh -n dnode1 -s start
+$x = 0
+step9b:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+print check dnode1 ready
+sql select * from information_schema.ins_dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+print ===> $data30 $data31 $data32 $data33 $data34 $data35
+if $data(1)[4] != ready then
+ goto step9b
+endi
+if $data(2)[4] != ready then
+ goto step9b
+endi
+if $data(3)[4] != ready then
+ goto step9b
+endi
+if $data(4)[4] != ready then
+ goto step9b
+endi
+
+sleep 4000
+print check mnode has leader step9c
+$x = 0
+step9c:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ return -1
+ endi
+print check mnode leader
+sql select * from information_schema.ins_mnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+$leaderNum = 0
+if $data(1)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum != 1 then
+ goto step9c
+endi
+
+print drop mnode step9d
+sql drop mnode on dnode 1
+
+$x = 0
+step9d:
+ $x = $x + 1
+ sleep 1000
+ if $x == 20 then
+ return -1
+ endi
+print check mnode leader
+sql select * from information_schema.ins_mnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+print ===> $data20 $data21 $data22 $data23 $data24 $data25
+$leaderNum = 0
+if $data(1)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(2)[2] == leader then
+ $leaderNum = 1
+endi
+if $data(3)[2] == leader then
+ $leaderNum = 1
+endi
+if $leaderNum != 1 then
+ goto step9d
endi
if $rows != 2 then
- goto step92
+ goto step9d
endi
print =============== stepa: create mnode1 again
diff --git a/tests/script/tsim/parser/fill.sim b/tests/script/tsim/parser/fill.sim
index ea0311ebde..b0841d6712 100644
--- a/tests/script/tsim/parser/fill.sim
+++ b/tests/script/tsim/parser/fill.sim
@@ -330,7 +330,7 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -355,25 +355,25 @@ endi
# fill_into_nonarithmetic_fieds
print select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
+sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
print select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
@@ -383,7 +383,7 @@ if $data01 != 1 then
return -1
endi
-sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
## linear fill
# feature currently switched off 2018/09/29
@@ -1049,4 +1049,103 @@ print =============== clear
# return -1
#endi
+
+print ============== fill
+
+sql drop database if exists test;
+sql create database test vgroups 4;
+sql use test;
+sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql insert into t1 values(1648712211000,1,2,3);
+sql insert into t1 values(1648712225000,2,2,3);
+sql insert into t2 values(1648712212000,1,2,3);
+sql insert into t2 values(1648712226000,2,2,3);
+
+$loop_count = 0
+
+loop0:
+sleep 200
+
+sql select count(*) from(select count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(value, -1));
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data00 != 20026 then
+ print =====data00=$data00
+ goto loop0
+endi
+
+if $data10 != 20026 then
+ print =====data10=$data10
+ goto loop0
+endi
+
+sql select _wstart, count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(prev);
+
+if $rows != 40052 then
+ print =====rows=$rows
+ return -1
+endi
+
+sql select _wstart, count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(next);
+
+if $rows != 40052 then
+ print =====rows=$rows
+ return -1
+endi
+
+sql select _wstart, count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(linear);
+
+if $rows != 40052 then
+ print =====rows=$rows
+ return -1
+endi
+
+sql select _wstart, count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(NULL);
+
+if $rows != 40052 then
+ print =====rows=$rows
+ return -1
+endi
+
+sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(value, -1);
+
+if $rows != 20026 then
+ print =====rows=$rows
+ return -1
+endi
+
+sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(NULL);
+
+if $rows != 20026 then
+ print =====rows=$rows
+ return -1
+endi
+
+sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(prev);
+
+if $rows != 20026 then
+ print =====rows=$rows
+ return -1
+endi
+
+sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(next);
+
+if $rows != 20026 then
+ print =====rows=$rows
+ return -1
+endi
+
+sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(linear);
+
+if $rows != 20026 then
+ print =====rows=$rows
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/fill_stb.sim b/tests/script/tsim/parser/fill_stb.sim
index 66787b3606..e6a1d53ec7 100644
--- a/tests/script/tsim/parser/fill_stb.sim
+++ b/tests/script/tsim/parser/fill_stb.sim
@@ -170,7 +170,7 @@ endi
sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
# fill_char_values_to_arithmetic_fields
-sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -240,10 +240,10 @@ sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <=
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
-sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'abc');
-sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'abc');
+sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
-sql_error select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
+sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20);
if $rows != $val then
@@ -354,7 +354,7 @@ endi
## NULL fill
print fill(NULL)
-print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5
+print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) limit 5
sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(NULL) limit 5
if $rows != 25 then
return -1
diff --git a/tests/script/tsim/parser/fill_us.sim b/tests/script/tsim/parser/fill_us.sim
index f760ba3577..d7b4941c27 100644
--- a/tests/script/tsim/parser/fill_us.sim
+++ b/tests/script/tsim/parser/fill_us.sim
@@ -332,7 +332,7 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select _wstart, sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -358,24 +358,24 @@ endi
# fill_into_nonarithmetic_fieds
-sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
+sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to null automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
-sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
+sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
-sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
@@ -385,7 +385,7 @@ if $data01 != 1 then
return -1
endi
-sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
+sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
## linear fill
# feature currently switched off 2018/09/29
diff --git a/tests/script/tsim/parser/function.sim b/tests/script/tsim/parser/function.sim
index 704b6eafb0..7f69aa2d02 100644
--- a/tests/script/tsim/parser/function.sim
+++ b/tests/script/tsim/parser/function.sim
@@ -821,10 +821,10 @@ sql insert into tm0 values('2015-08-18T00:18:00Z', 2.126) ('2015-08-18T00:24:00Z
sql_error select derivative(ts) from tm0;
sql_error select derivative(k) from tm0;
-sql select derivative(k, 0, 0) from tm0;
+sql_error select derivative(k, 0, 0) from tm0;
sql_error select derivative(k, 1, 911) from tm0;
sql_error select derivative(kx, 1s, 1) from tm0;
-sql select derivative(k, -20s, 1) from tm0;
+sql_error select derivative(k, -20s, 1) from tm0;
sql select derivative(k, 20a, 0) from tm0;
sql select derivative(k, 200a, 0) from tm0;
sql select derivative(k, 999a, 0) from tm0;
@@ -932,10 +932,10 @@ sql insert into t0 values('2020-1-1 1:4:10', 10);
sql insert into t1 values('2020-1-1 1:1:2', 2);
print ===========================>td-4739
-#sql select diff(val) from (select derivative(k, 1s, 0) val from t1);
-#if $rows != 0 then
-# return -1
-#endi
+sql select diff(val) from (select ts, derivative(k, 1s, 0) val from t1);
+if $rows != 0 then
+ return -1
+endi
sql insert into t1 values('2020-1-1 1:1:4', 20);
sql insert into t1 values('2020-1-1 1:1:6', 200);
@@ -1077,4 +1077,4 @@ endi
if $data11 != NULL then
print ======data11=$data11
return -1
-endi
\ No newline at end of file
+endi
diff --git a/tests/script/tsim/parser/limit2_query.sim b/tests/script/tsim/parser/limit2_query.sim
index 3c5002d591..64eb26429c 100644
--- a/tests/script/tsim/parser/limit2_query.sim
+++ b/tests/script/tsim/parser/limit2_query.sim
@@ -321,55 +321,41 @@ endi
### [TBASE-350]
## stb + interval + fill + group by + limit offset
-sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') limit 2 offset 10
+sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') limit 2 offset 10
if $rows != 2 then
return -1
endi
-#add one more test case
-sql select max(c1), last(c8) from lm2_db0.lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(linear) limit 10 offset 4089;"
-
$limit = 5
$offset = $rowNum * 2
$offset = $offset - 2
-sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') order by t1 limit $limit offset $offset
-if $rows != $tbNum then
+sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') order by t1 limit $limit offset $offset
+if $rows != 1 then
return -1
endi
-if $data00 != @18-11-25 19:30:00.000@ then
+if $data00 != 9 then
return -1
endi
if $data01 != 9 then
return -1
endi
-if $data12 != 9 then
+if $data02 != 9.000000000 then
return -1
endi
-if $data23 != 9.000000000 then
+if $data03 != 9.000000000 then
return -1
endi
-if $data34 != 9.000000000 then
+if $data04 != 1 then
return -1
endi
-if $data45 != 1 then
+if $data05 != binary9 then
return -1
endi
-if $data56 != binary9 then
- return -1
-endi
-if $data68 != 6 then
- return -1
-endi
-if $data72 != -2 then
- return -1
-endi
-if $data84 != -2.000000000 then
- return -1
-endi
-if $data98 != 9 then
+if $data06 != nchar9 then
return -1
endi
+
#add one more test case
sql select max(c1), last(c8) from lm2_db0.lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(linear) limit 10 offset 4089;"
diff --git a/tests/script/tsim/sma/sma_leak.sim b/tests/script/tsim/sma/sma_leak.sim
new file mode 100644
index 0000000000..4f2d1ebeb0
--- /dev/null
+++ b/tests/script/tsim/sma/sma_leak.sim
@@ -0,0 +1,154 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+system sh/cfg.sh -n dnode2 -c supportVnodes -v 4
+system sh/cfg.sh -n dnode3 -c supportVnodes -v 4
+
+print ========== step1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========== step2
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+
+$x = 0
+step2:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql select * from information_schema.ins_dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+if $rows != 3 then
+ return -1
+endi
+if $data(1)[4] != ready then
+ goto step2
+endi
+if $data(2)[4] != ready then
+ goto step2
+endi
+if $data(3)[4] != ready then
+ goto step2
+endi
+
+print ========== step3
+sql create database d1 vgroups 1
+sql use d1;
+
+print --> create stb
+sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);
+
+print --> create sma
+sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);
+
+return
+
+print --> show sma
+sql show indexes from stb from d1;
+if $rows != 1 then
+ return -1
+endi
+if $data[0][0] != sma_index_name1 then
+ return -1
+endi
+if $data[0][1] != d1 then
+ return -1
+endi
+if $data[0][2] != stb then
+ return -1
+endi
+
+print --> drop stb
+sql drop table stb;
+
+print ========== step4 repeat
+
+print --> create stb
+sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);
+
+print --> create sma
+sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);
+
+print --> show sma
+sql show indexes from stb from d1;
+if $rows != 1 then
+ return -1
+endi
+if $data[0][0] != sma_index_name1 then
+ return -1
+endi
+if $data[0][1] != d1 then
+ return -1
+endi
+if $data[0][2] != stb then
+ return -1
+endi
+
+print --> drop stb
+sql drop table stb;
+
+print ========== step5
+sql drop database if exists db;
+sql create database db duration 300;
+sql use db;
+sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint, c_float float, c_double double, c_bool bool, c_binary binary(16), c_nchar nchar(32), c_ts timestamp, c_tint_un tinyint unsigned, c_sint_un smallint unsigned, c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
+sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
+
+print ========== step6 repeat
+sql drop database if exists db;
+sql create database db duration 300;
+sql use db;
+sql create table stb1(ts timestamp, c_int int, c_bint bigint ) tags (t_int int);
+sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
+
+print ========== step7
+sql drop database if exists db;
+sql create database db duration 300;
+sql use db;
+sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
+
+sql create table ct1 using stb1 tags ( 1 );
+sql create table ct2 using stb1 tags ( 2 );
+sql create table ct3 using stb1 tags ( 3 );
+sql create table ct4 using stb1 tags ( 4 );
+
+sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
+sql CREATE SMA INDEX sma_index_2 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) max_delay 6m;
+sql CREATE SMA INDEX sma_index_3 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) watermark 5s max_delay 6m;
+
+sql DROP INDEX sma_index_1 ;
+sql DROP INDEX sma_index_2 ;
+sql DROP INDEX sma_index_3 ;
+
+print ========== step8
+sql drop database if exists db;
+sql create database db duration 300;
+sql use db;
+sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
+
+sql create table ct1 using stb1 tags ( 1 );
+sql create table ct2 using stb1 tags ( 2 );
+sql create table ct3 using stb1 tags ( 3 );
+sql create table ct4 using stb1 tags ( 4 );
+
+sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
+sql CREATE SMA INDEX sma_index_2 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) max_delay 6m;
+sql CREATE SMA INDEX sma_index_3 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) watermark 5s max_delay 6m;
+
+sql DROP INDEX sma_index_1 ;
+sql DROP INDEX sma_index_2 ;
+sql DROP INDEX sma_index_3 ;
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+system sh/exec.sh -n dnode3 -s stop -x SIGINT
+#system sh/exec.sh -n dnode4 -s stop -x SIGINT # dnode4 is never deployed/started in this test
diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim
index bc076a194b..bb2ea42383 100644
--- a/tests/script/tsim/stream/basic1.sim
+++ b/tests/script/tsim/stream/basic1.sim
@@ -710,6 +710,11 @@ sleep 200
sql select * from streamt4;
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
# row 0
if $rows != 0 then
print =====rows=$rows
diff --git a/tests/script/tsim/stream/basic3.sim b/tests/script/tsim/stream/basic3.sim
new file mode 100644
index 0000000000..48fb860a72
--- /dev/null
+++ b/tests/script/tsim/stream/basic3.sim
@@ -0,0 +1,55 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c debugflag -v 131
+system sh/exec.sh -n dnode1 -s start -v
+
+sleep 5000
+
+sql connect
+
+print ========== interval\session\state window
+
+sql CREATE DATABASE test1 BUFFER 96 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 STRICT 'off' WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0;
+sql use test1;
+sql CREATE STABLE st (time TIMESTAMP, ca DOUBLE, cb DOUBLE, cc int) TAGS (ta VARCHAR(10) );
+
+print ========== create table before stream
+
+sql CREATE TABLE t1 using st TAGS ('aaa');
+sql CREATE TABLE t2 using st TAGS ('bbb');
+sql CREATE TABLE t3 using st TAGS ('ccc');
+sql CREATE TABLE t4 using st TAGS ('ddd');
+
+sql create stream streamd1 into streamt1 as select ca, _wstart,_wend, count(*) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by ca interval(60m) fill(linear);
+sql create stream streamd2 into streamt2 as select tbname, _wstart,_wend, count(*) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by tbname interval(60m) fill(linear);
+
+sql create stream streamd3 into streamt3 as select ca, _wstart,_wend, count(*), max(ca), min(cb), APERCENTILE(cc, 20) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by ca session(time, 60m);
+sql create stream streamd4 into streamt4 as select tbname, _wstart,_wend, count(*), max(ca), min(cb), APERCENTILE(cc, 20) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by tbname session(time, 60m);
+
+sql create stream streamd5 into streamt5 as select tbname, _wstart,_wend, count(*), max(ca), min(cb) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by tbname state_window(cc);
+sql create stream streamd6 into streamt6 as select ca, _wstart,_wend, count(*), max(ca), min(cb) from t1 where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by ca state_window(cc);
+
+sleep 3000
+
+sql drop stream if exists streamd1;
+sql drop stream if exists streamd2;
+sql drop stream if exists streamd3;
+sql drop stream if exists streamd4;
+sql drop stream if exists streamd5;
+sql drop stream if exists streamd6;
+
+
+_OVER:
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+print =============== check
+$null=
+
+system_content sh/checkValgrind.sh -n dnode1
+print cmd return result ----> [ $system_content ]
+if $system_content > 0 then
+ return -1
+endi
+
+if $system_content == $null then
+ return -1
+endi
diff --git a/tests/script/tsim/stream/fillIntervalValue.sim b/tests/script/tsim/stream/fillIntervalValue.sim
index 49e68ae9f2..89590d1be0 100644
--- a/tests/script/tsim/stream/fillIntervalValue.sim
+++ b/tests/script/tsim/stream/fillIntervalValue.sim
@@ -4,7 +4,7 @@ looptest:
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
-#==system sh/exec.sh -n dnode1 -s start -v
+
sleep 200
sql connect
diff --git a/tests/script/tsim/vnode/replica3_many.sim b/tests/script/tsim/vnode/replica3_many.sim
index fbce960b09..e3c73b2018 100644
--- a/tests/script/tsim/vnode/replica3_many.sim
+++ b/tests/script/tsim/vnode/replica3_many.sim
@@ -201,31 +201,39 @@ system sh/exec.sh -n dnode2 -s start
sleep 3000
print ======== step6
-sql select count(*) from db1.tb1
+$y = 0
+step6:
+ $y = $y + 1
+ sleep 1000
+ if $y == 50 then
+ return -1
+ endi
+
+sql select count(*) from db1.tb1 -x step6
print select count(*) from db1.tb1 ==> $data00 $lastRows1
if $data00 <= $lastRows1 then
- return -1
+ goto step6
endi
$lastRows1 = $data00
-sql select count(*) from db2.tb2
+sql select count(*) from db2.tb2 -x step6
print select count(*) from db2.tb2 ==> $data00 $lastRows2
if $data00 <= $lastRows2 then
- return -1
+ goto step6
endi
$lastRows2 = $data00
-sql select count(*) from db3.tb3
+sql select count(*) from db3.tb3 -x step6
print select count(*) from db3.tb3 ==> $data00 $lastRows3
if $data00 <= $lastRows3 then
- return -1
+ goto step6
endi
$lastRows3 = $data00
-sql select count(*) from db4.tb4
+sql select count(*) from db4.tb4 -x step6
print select count(*) from db4.tb4 ==> $data00 $lastRows4
if $data00 <= $lastRows4 then
- return -1
+ goto step6
endi
$lastRows4 = $data00
diff --git a/tests/system-test/1-insert/boundary.py b/tests/system-test/1-insert/boundary.py
new file mode 100644
index 0000000000..d3742ef5f9
--- /dev/null
+++ b/tests/system-test/1-insert/boundary.py
@@ -0,0 +1,183 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+import math
+from random import randint
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+from util.boundary import *
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ self.boundary = DataBoundary()
+ self.dbname_length_boundary = self.boundary.DBNAME_MAX_LENGTH
+ self.tbname_length_boundary = self.boundary.TBNAME_MAX_LENGTH
+ self.stbname_length_boundary = self.boundary.STBNAME_MAX_LENGTH
+ self.colname_length_boundary = self.boundary.COL_KEY_MAX_LENGTH
+ self.tagname_length_boundary = self.boundary.TAG_KEY_MAX_LENGTH
+ self.username_length_boundary = 23
+ self.password_length_boundary = 128
+ def dbname_length_check(self):
+ dbname_length = randint(1,self.dbname_length_boundary-1)
+ for dbname in [tdCom.get_long_name(self.dbname_length_boundary),tdCom.get_long_name(dbname_length)]:
+ tdSql.execute(f'create database if not exists {dbname}')
+ tdSql.query(f'select name from information_schema.ins_databases where name = "{dbname}"')
+ tdSql.checkEqual(tdSql.queryResult[0][0],dbname)
+ tdSql.execute(f'drop database if exists {dbname}')
+ dbname = tdCom.get_long_name(self.dbname_length_boundary+1)
+ tdSql.error(f'create database if not exists {dbname}')
+ if "Invalid identifier name" in tdSql.error_info:
+ tdLog.info("error info is true!")
+ else:
+ tdLog.exit("error info is not true")
+
+ def tbname_length_check(self):
+ tdSql.prepare()
+ tdSql.execute('use db')
+ tbname_length = randint(1,self.tbname_length_boundary-1)
+ tdSql.execute(f'create table stb (ts timestamp,c0 int) tags(t0 int)')
+ for tbname in [tdCom.get_long_name(self.tbname_length_boundary),tdCom.get_long_name(tbname_length)]:
+ tdSql.execute(f'create table {tbname} using stb tags(1)')
+ tdSql.query(f'select table_name from information_schema.ins_tables where table_name = "{tbname}"')
+ tdSql.checkEqual(tdSql.queryResult[0][0],tbname)
+ tdSql.execute(f'drop table {tbname}')
+ tbname = tdCom.get_long_name(self.tbname_length_boundary+1)
+ tdSql.error(f'create table {tbname} using stb tags(1)')
+ if "Invalid identifier name" in tdSql.error_info:
+ tdLog.info("error info is true!")
+ else:
+ tdLog.exit("error info is not true")
+ stbname_length = randint(1,self.stbname_length_boundary-1)
+ for stbname in [tdCom.get_long_name(self.stbname_length_boundary),tdCom.get_long_name(stbname_length)]:
+ tdSql.execute(f'create table {stbname} (ts timestamp,c0 int) tags(t0 int)')
+ tdSql.query(f'select stable_name from information_schema.ins_stables where stable_name = "{stbname}"')
+ tdSql.checkEqual(tdSql.queryResult[0][0],stbname)
+ tdSql.execute(f'drop table {stbname}')
+ stbname = tdCom.get_long_name(self.stbname_length_boundary+1)
+ tdSql.error(f'create table {stbname} (ts timestamp,c0 int) tags(t0 int)')
+ print(tdSql.error_info)
+ if "Invalid identifier name" in tdSql.error_info:
+ tdLog.info("error info is true!")
+ else:
+ tdLog.exit("error info is not true")
+ tdSql.execute('drop database db')
+
+ def colname_length_check(self):
+ tdSql.prepare()
+ tdSql.execute('use db')
+ column_name_length = randint(1,self.colname_length_boundary-1)
+ for colname in [tdCom.get_long_name(column_name_length),tdCom.get_long_name(self.colname_length_boundary)]:
+ stbname = tdCom.get_long_name(3)
+ ntbname = tdCom.get_long_name(4)
+ tdSql.execute(f'create table {stbname} (ts timestamp,{colname} int) tags(t0 int)')
+ tdSql.query(f'describe {stbname}')
+ tdSql.checkEqual(tdSql.queryResult[1][0],colname)
+ tdSql.execute(f'create table {ntbname} (ts timestamp,{colname} int)')
+ tdSql.query(f'describe {ntbname}')
+ tdSql.checkEqual(tdSql.queryResult[1][0],colname)
+ colname = tdCom.get_long_name(self.colname_length_boundary+1)
+ tdSql.error(f'create table stb (ts timestamp,{colname} int) tags(t0 int)')
+ if "Invalid identifier name" in tdSql.error_info:
+ tdLog.info("error info is true!")
+ else:
+ tdLog.exit("error info is not true")
+ tdSql.execute('drop database db')
+
+ def tagname_length_check(self):
+ tdSql.prepare()
+ tdSql.execute('use db')
+ tag_name_length = randint(1,self.tagname_length_boundary-1)
+ for tagname in (tdCom.get_long_name(tag_name_length),tdCom.get_long_name(self.tagname_length_boundary)):
+ stbname = tdCom.get_long_name(3)
+ tdSql.execute(f'create table {stbname} (ts timestamp,c0 int) tags({tagname} int)')
+ tdSql.query(f'describe {stbname}')
+ tdSql.checkEqual(tdSql.queryResult[-1][0],tagname)
+ tagname = tdCom.get_long_name(self.tagname_length_boundary+1)
+ tdSql.error(f'create table {stbname} (ts timestamp,c0 int) tags({tagname} int)')
+ if "Invalid identifier name" in tdSql.error_info:
+ tdLog.info("error info is true!")
+ else:
+ tdLog.exit("error info is not true")
+ tdSql.execute('drop database db')
+
+ def username_length_check(self):
+ username_length = randint(1,self.username_length_boundary-1)
+ for username in [tdCom.get_long_name(username_length),tdCom.get_long_name(self.username_length_boundary)]:
+ tdSql.execute(f'create user {username} pass "123"')
+ tdSql.query('show users')
+ for user in tdSql.queryResult:
+ if user[0].lower() != 'root':
+ tdSql.checkEqual(user[0],username)
+ tdSql.execute(f'drop user {username}')
+ username = tdCom.get_long_name(self.username_length_boundary+1)
+ tdSql.error(f'create user {username} pass "123"')
+ if "Name or password too long" in tdSql.error_info:
+ tdLog.info("error info is true!")
+ else:
+ tdLog.exit("error info is not true")
+
+ def password_length_check(self):
+ password_length = randint(1,self.password_length_boundary-1)
+ for password in [tdCom.get_long_name(password_length),tdCom.get_long_name(self.password_length_boundary)]:
+ username = tdCom.get_long_name(3)
+ tdSql.execute(f'create user {username} pass "{password}"')
+ password = tdCom.get_long_name(self.password_length_boundary+1)
+ tdSql.error(f'create user {username} pass "{password}"')
+ if "Name or password too long" in tdSql.error_info:
+ tdLog.info("error info is true!")
+ else:
+ tdLog.exit("error info is not true")
+ def sql_length_check(self):
+ insert_rows = 1021
+ tdSql.prepare()
+ tdSql.execute('use db')
+ tdSql.execute('create table ntb (ts timestamp,c0 binary(1013))')
+ values_sql = ''
+ value = tdCom.get_long_name(1013)
+ for num in range(insert_rows):
+ values_sql += f' (now+{num}s,"{value}")'
+ value = tdCom.get_long_name(65)
+ values_sql += f"(now-1s,'{value}')"
+ tdSql.execute(f'insert into ntb values{values_sql}')
+ tdSql.query('select * from ntb')
+ tdSql.checkRows(insert_rows+1)
+ tdSql.execute('create table ntb1 (ts timestamp,c0 binary(1013))')
+ tdSql.error(f'insert into ntb1 values{values_sql};')
+ print(tdSql.error_info)
+ if "SQL statement too long" in tdSql.error_info:
+ tdLog.info("error info is true!")
+ else:
+ tdLog.exit("error info is not true")
+ tdSql.execute('drop database db')
+ def run(self):
+ self.dbname_length_check()
+ self.tbname_length_check()
+ self.colname_length_check()
+ self.tagname_length_check()
+ self.username_length_check()
+ self.password_length_check()
+ self.sql_length_check()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py
index 6fcd987ea4..6a4d532ac6 100644
--- a/tests/system-test/1-insert/update_data.py
+++ b/tests/system-test/1-insert/update_data.py
@@ -23,7 +23,7 @@ class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(),logSql)
+ tdSql.init(conn.cursor())
self.setsql = TDSetSql()
self.dbname = 'db_test'
self.ntbname = 'ntb'
diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py
index d7478a55a8..d64d550bc4 100644
--- a/tests/system-test/2-query/abs.py
+++ b/tests/system-test/2-query/abs.py
@@ -204,18 +204,12 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+
+ tdSql.query(abs_query)
for row_index, row in enumerate(abs_result):
for col_index, elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice(
- "abs function value has not as expected , sql is \"%s\" " % abs_query)
- sys.exit(1)
- else:
- tdLog.info(
- "abs value check pass , it work as expected ,sql is \"%s\" " % abs_query)
+ tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
+
def test_errors(self):
dbname = "testdb"
@@ -466,19 +460,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto(f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ",
f"select abs(c1), abs(c2) ,abs(c3), abs(c4), abs(c5) ,abs(c6) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/and_or_for_byte.py b/tests/system-test/2-query/and_or_for_byte.py
index 479918f2f9..15e9110b3b 100644
--- a/tests/system-test/2-query/and_or_for_byte.py
+++ b/tests/system-test/2-query/and_or_for_byte.py
@@ -426,19 +426,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_function("&", False , f"{dbname}.sub1_bound" ,"c1","c2","c3","c4","c5","c6" )
self.check_function("&", False , f"{dbname}.sub1_bound","abs(c1)","abs(c2)","abs(c3)","abs(c4)","abs(c5)","abs(c6)" )
diff --git a/tests/system-test/2-query/arccos.py b/tests/system-test/2-query/arccos.py
index ed717741c5..f22d393ecd 100644
--- a/tests/system-test/2-query/arccos.py
+++ b/tests/system-test/2-query/arccos.py
@@ -86,21 +86,12 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("acos function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("acos value check pass , it work as expected ,sql is \"%s\" "%pow_query )
+
+ tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
+
def test_errors(self, dbname="db"):
error_sql_lists = [
@@ -414,19 +405,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_acos( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select acos(abs(c1)), acos(abs(c2)) ,acos(abs(c3)), acos(abs(c4)), acos(abs(c5)) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/arcsin.py b/tests/system-test/2-query/arcsin.py
index 71de088979..1872518c5d 100644
--- a/tests/system-test/2-query/arcsin.py
+++ b/tests/system-test/2-query/arcsin.py
@@ -86,21 +86,13 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
-
+
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("asin function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("asin value check pass , it work as expected ,sql is \"%s\" "%pow_query )
+ tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
+
+
def test_errors(self, dbname="db"):
error_sql_lists = [
@@ -414,19 +406,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_asin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select asin(abs(c1)), asin(abs(c2)) ,asin(abs(c3)), asin(abs(c4)), asin(abs(c5)) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/arctan.py b/tests/system-test/2-query/arctan.py
index 9780f9855b..9561637b76 100644
--- a/tests/system-test/2-query/arctan.py
+++ b/tests/system-test/2-query/arctan.py
@@ -84,22 +84,12 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and elem:
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("atan function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("atan value check pass , it work as expected ,sql is \"%s\" "%pow_query )
+ tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
+
def test_errors(self, dbname="db"):
error_sql_lists = [
f"select atan from {dbname}.t1",
@@ -412,19 +402,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_atan( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select atan(abs(c1)), atan(abs(c2)) ,atan(abs(c3)), atan(abs(c4)), atan(abs(c5)) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py
index ec7ec34ed3..910dd524cb 100644
--- a/tests/system-test/2-query/avg.py
+++ b/tests/system-test/2-query/avg.py
@@ -114,16 +114,10 @@ class TDTestCase:
avg_result = tdSql.getResult(origin_query)
origin_result = tdSql.getResult(check_query)
- check_status = True
+ tdSql.query(origin_query)
for row_index , row in enumerate(avg_result):
for col_index , elem in enumerate(row):
- if avg_result[row_index][col_index] != origin_result[row_index][col_index]:
- check_status = False
- if not check_status:
- tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query )
- sys.exit(1)
- else:
- tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
+ tdSql.checkData(row_index,col_index,origin_result[row_index][col_index])
def test_errors(self, dbname="db"):
error_sql_lists = [
@@ -378,33 +372,33 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+15s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+20s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
#self.check_avg(f"select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from {dbname}.sub1_bound " , f" select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from {dbname}.sub1_bound ")
diff --git a/tests/system-test/2-query/case_when.py b/tests/system-test/2-query/case_when.py
new file mode 100755
index 0000000000..cfe0399553
--- /dev/null
+++ b/tests/system-test/2-query/case_when.py
@@ -0,0 +1,338 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import os
+import time
+import taos
+import subprocess
+from faker import Faker
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+from util.dnodes import *
+
+class TDTestCase:
+ updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"querySmaOptimize":1}
+
+ def init(self, conn, logSql, replicaVar):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ self.testcasePath = os.path.split(__file__)[0]
+ self.testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.db = "case_when"
+
+ def dropandcreateDB_random(self,database,n):
+ ts = 1630000000000
+ num_random = 10
+ fake = Faker('zh_CN')
+ tdSql.execute('''drop database if exists %s ;''' %database)
+ tdSql.execute('''create database %s keep 36500 ;'''%(database))
+ tdSql.execute('''use %s;'''%database)
+
+ tdSql.execute('''create stable %s.stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
+ tdSql.execute('''create stable %s.stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
+ q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
+ tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
+
+ for i in range(num_random):
+ tdSql.execute('''create table %s.table_%d \
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%(database,i))
+ tdSql.execute('''create table %s.stable_1_%d using %s.stable_1 tags('stable_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+ tdSql.execute('''create table %s.stable_%d_a using %s.stable_2 tags('stable_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+ tdSql.execute('''create table %s.stable_%d_b using %s.stable_2 tags('stable_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
+ %(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
+
+ # insert data
+ for i in range(num_random):
+ for j in range(n):
+ tdSql.execute('''insert into %s.stable_1_%d (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts)\
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d) ;'''
+ % (database,i,ts + i*1000 + j, fake.random_int(min=-2147483647, max=2147483647, step=1),
+ fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i))
+
+ tdSql.execute('''insert into %s.table_%d (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts) \
+ values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d) ;'''
+ % (database,i,ts + i*1000 + j, fake.random_int(min=-2147483647, max=2147483647, step=1),
+ fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
+ fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i ))
+
+ tdSql.execute('''insert into %s.stable_%d_a (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts)\
+ values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d) ;'''
+ % (database,i,ts + i*1000 + j, fake.random_int(min=0, max=2147483647, step=1),
+ fake.random_int(min=0, max=9223372036854775807, step=1),
+ fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
+ fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i))
+
+ tdSql.query("select count(*) from %s.stable_1;" %database)
+ tdSql.checkData(0,0,num_random*n)
+ tdSql.query("select count(*) from %s.table_0;"%database)
+ tdSql.checkData(0,0,n)
+
+
+ def users_bug(self,database):
+ sql1 = "select (case when `q_smallint` >0 then 'many--' when `q_smallint`<0 then 'little' end),q_int,loc from %s.stable_1 where tbname = 'stable_1_1' limit 100;" %database
+ sql2 = "select (case when `q_smallint` >0 then 'many--' when `q_smallint`<0 then 'little' end),q_int,loc from %s.stable_1_1 limit 100;" %database
+ self.constant_check(database,sql1,sql2,0)
+
+ sql1 = "select (case when `q_smallint` >0 then 'many![;;][][]]' when `q_smallint`<0 then 'little' end),q_int,loc from %s.stable_1 where tbname = 'stable_1_1' limit 100;" %database
+ sql2 = "select (case when `q_smallint` >0 then 'many![;;][][]]' when `q_smallint`<0 then 'little' end),q_int,loc from %s.stable_1_1 limit 100;" %database
+ self.constant_check(database,sql1,sql2,0)
+
+ sql1 = "select (case when sum(q_smallint)=0 then null else sum(q_smallint) end) from %s.stable_1 where tbname = 'stable_1_1' limit 100;" %database
+ sql2 = "select (case when sum(q_smallint)=0 then null else sum(q_smallint) end) from %s.stable_1_1 limit 100;" %database
+ self.constant_check(database,sql1,sql2,0)
+
+ sql1 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1 where tbname = 'stable_1_1' and ts < now state_window(case when q_smallint <0 then 1 else 0 end);" %database
+ sql2 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1_1 where ts < now state_window(case when q_smallint <0 then 1 else 0 end);" %database
+ self.constant_check(database,sql1,sql2,0)
+ self.constant_check(database,sql1,sql2,1)
+ self.constant_check(database,sql1,sql2,2)
+
+ def casewhen_list(self):
+ a1,a2,a3 = random.randint(-2147483647,2147483647),random.randint(-2147483647,2147483647),random.randint(-2147483647,2147483647)
+ casewhen_lists = ['first case when %d then %d end last' %(a1,a2) , #'first case when 3 then 4 end last' ,
+ 'first case when 0 then %d end last' %(a1), #'first case when 0 then 4 end last' ,
+ 'first case when null then %d end last' %(a1) , #'first case when null then 4 end last' ,
+ 'first case when 1 then %d+(%d) end last' %(a1,a2) , #'first case when 1 then 4+1 end last' ,
+ 'first case when %d-(%d) then 0 end last' %(a1,a1) , #'first case when 1-1 then 0 end last' ,
+ 'first case when %d+(%d) then 0 end last' %(a1,a1), #'first case when 1+1 then 0 end last' ,
+ 'first case when 1 then %d-(%d)+(%d) end last' %(a1,a1,a2), #'first case when 1 then 1-1+2 end last' ,
+ 'first case when %d > 0 then %d < %d end last' %(a1,a1,a2), #'first case when 1 > 0 then 1 < 2 end last' ,
+ 'first case when %d > %d then %d < %d end last' %(a1,a2,a1,a2), #'first case when 1 > 2 then 1 < 2 end last' ,
+ 'first case when abs(%d) then abs(-(%d)) end last' %(a1,a2) ,#'first case when abs(3) then abs(-1) end last' ,
+ 'first case when abs(%d+(%d)) then abs(-(%d))+abs(%d) end last' %(a1,a2,a1,a2) , #'first case when abs(1+1) then abs(-1)+abs(3) end last' ,
+ 'first case when 0 then %d else %d end last' %(a1,a2), #'first case when 0 then 1 else 3 end last' ,
+ 'first case when 0 then %d when 1 then %d else %d end last' %(a1,a1,a3), #'first case when 0 then 1 when 1 then 0 else 3 end last' ,
+ 'first case when 0 then %d when 1 then %d when 2 then %d end last' %(a1,a1,a3), #'first case when 0 then 1 when 1 then 0 when 2 then 3 end last' ,
+ 'first case when \'a\' then \'b\' when null then 0 end last' , #'first case when \'a\' then \'b\' when null then 0 end last' ,
+ 'first case when \'%d\' then \'b\' when null then %d end last' %(a1,a2), #'first case when \'2\' then \'b\' when null then 0 end last' ,
+ 'first case when \'%d\' then \'b\' else null end last' %(a1), #'first case when \'0\' then \'b\' else null end last',
+ 'first case when \'%d\' then \'b\' else %d end last' %(a1,a2), #'first case when \'0\' then \'b\' else 2 end last',
+ 'first case when sum(%d) then sum(%d)-sum(%d) end last' %(a1,a1,a3), #'first case when sum(2) then sum(2)-sum(1) end last' ,
+ 'first case when sum(%d) then abs(-(%d)) end last' %(a1,a2), #'first case when sum(2) then abs(-2) end last' ,
+ 'first case when q_int then ts end last' ,
+ 'first case when q_int then q_int when q_int + (%d) then q_int + (%d) else q_int is null end last' %(a1,a2) , #'first case when q_int then q_int when q_int + 1 then q_int + 1 else q_int is null end last' ,
+ 'first case when q_int then %d when ts then ts end last' %(a1), #'first case when q_int then 3 when ts then ts end last' ,
+ 'first case when %d then q_int end last' %(a1), #'first case when 3 then q_int end last' ,
+ 'first case when q_int then %d when %d then %d end last' %(a1,a1,a3), #'first case when q_int then 3 when 1 then 2 end last' ,
+ 'first case when sum(q_int) then sum(q_int)-abs(-(%d)) end last' %(a1), #'first case when sum(q_int) then sum(q_int)-abs(-1) end last' ,
+ 'first case when q_int < %d then %d when q_int >= %d then %d else %d end last' %(a1,a2,a1,a2,a3), #'first case when q_int < 3 then 1 when q_int >= 3 then 2 else 3 end last' ,
+ 'first cast(case q_int when q_int then q_int + (%d) else q_int is null end as double) last' %(a1), #'first cast(case q_int when q_int then q_int + 1 else q_int is null end as double) last' ,
+ 'first sum(case q_int when q_int then q_int + (%d) else q_int is null end + (%d)) last' %(a1,a2), #'first sum(case q_int when q_int then q_int + 1 else q_int is null end + 1) last' ,
+ 'first case when q_int is not null then case when q_int <= %d then q_int else q_int * (%d) end else -(%d) end last' %(a1,a1,a3), #'first case when q_int is not null then case when q_int <= 0 then q_int else q_int * 10 end else -1 end last' ,
+ 'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 3 then 4 end last' ,
+ 'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 1 then 4 end last' ,
+ 'first case %d when %d then %d else %d end last' %(a1,a1,a2,a3), # 'first case 3 when 1 then 4 else 2 end last' ,
+ 'first case %d when null then %d when \'%d\' then %d end last' %(a1,a1,a2,a3) , # 'first case 3 when null then 4 when \'3\' then 1 end last' ,
+ 'first case \'%d\' when null then %d when %d then %d end last' %(a1,a1,a2,a3), # 'first case \'3\' when null then 4 when 3 then 1 end last' ,
+ 'first case null when null then %d when %d then %d end last' %(a1,a2,a3), # 'first case null when null then 4 when 3 then 1 end last' ,
+ 'first case %d.0 when null then %d when \'%d\' then %d end last' %(a1,a1,a2,a3) , # 'first case 3.0 when null then 4 when \'3\' then 1 end last' ,
+ 'first case q_double when \'a\' then %d when \'%d\' then %d end last' %(a1,a2,a3) , # 'first case q_double when \'a\' then 4 when \'0\' then 1 end last' ,
+ 'first case q_double when q_int then q_int when q_int - (%d) then q_int else %d end last' %(a1,a2), # 'first case q_double when q_int then q_int when q_int - 1 then q_int else 99 end last' ,
+ 'first case cast(q_double as int) when %d then q_double when q_int then %d else ts end last' %(a1,a2), #'first case cast(q_double as int) when 0 then q_double when q_int then 11 else ts end last' ,
+ 'first case q_int + (%d) when %d then %d when %d then %d else %d end last' %(a1,a2,a3,a1,a2,a3), #'first case q_int + 1 when 1 then 1 when 2 then 2 else 3 end last' ,
+ 'first case when \'a\' then \'b\' when null then %d end last' %(a1), # 'first case when \'a\' then \'b\' when null then 0 end last' ,
+ 'first case when \'%d\' then \'b\' when null then %d end last' %(a1,a2), # 'first case when \'2\' then \'b\' when null then 0 end last' ,
+ 'first case when %d then \'b\' else null end last' %(a1), # 'first case when 0 then \'b\' else null end last' ,
+ 'first case when %d then \'b\' else %d+abs(%d) end last' %(a1,a2,a3), # 'first case when 0 then \'b\' else 2+abs(-2) end last' ,
+ 'first case when %d then %d end last' %(a1,a2), # 'first case when 3 then 4 end last' ,
+ 'first case when %d then %d end last' %(a1,a2), # 'first case when 0 then 4 end last' ,
+ 'first case when null then %d end last' %(a1), # 'first case when null then 4 end last' ,
+ 'first case when %d then %d+(%d) end last' %(a1,a2,a3), # 'first case when 1 then 4+1 end last' ,
+ 'first case when %d-(%d) then %d end last' %(a1,a2,a3), # 'first case when 1-1 then 0 end last' ,
+ 'first case when %d+(%d) then %d end last' %(a1,a2,a3), # 'first case when 1+1 then 0 end last' ,
+ 'first case when abs(%d) then abs(%d) end last' %(a1,a2), # 'first case when abs(3) then abs(-1) end last' ,
+ 'first case when abs(%d+(%d)) then abs(%d)+abs(%d) end last' %(a1,a2,a3,a1), # 'first case when abs(1+1) then abs(-1)+abs(3) end last' ,
+ 'first case when %d then %d else %d end last' %(a1,a2,a3), # 'first case when 0 then 1 else 3 end last' ,
+ 'first case when %d then %d when %d then %d else %d end last' %(a1,a2,a3,a1,a2), # 'first case when 0 then 1 when 1 then 0 else 3 end last' ,
+ 'first case when %d then %d when %d then %d when %d then %d end last' %(a1,a2,a3,a1,a2,a3), # 'first case when 0 then 1 when 1 then 0 when 2 then 3 end last' ,
+ 'first case %d when %d then %d end last' %(a1,a1,a3), # 'first case 3 when 3 then 4 end last' ,
+ 'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 1 then 4 end last' ,
+ 'first case %d when %d then %d else %d end last' %(a1,a2,a3,a1), # 'first case 3 when 1 then 4 else 2 end last' ,
+ 'first case %d when null then %d when \'%d\' then %d end last' %(a1,a2,a1,a3), # 'first case 3 when null then 4 when \'3\' then 1 end last' ,
+ 'first case null when null then %d when %d then %d end last' %(a1,a2,a3), # 'first case null when null then 4 when 3 then 1 end last' ,
+ 'first case %d.0 when null then %d when \'%d\' then %d end last' %(a1,a2,a1,a3), # 'first case 3.0 when null then 4 when \'3\' then 1 end last' ,
+ 'first q_double,case q_double when \'a\' then %d when \'%d\' then %d end last' %(a1,a2,a3), #'first q_double,case q_double when \'a\' then 4 when \'0\' then 1 end last' ,
+ 'first case null when null then %d when %d then %d end last' %(a1,a2,a3), #'first case null when null then 4 when 3 then 1 end last' ,
+ 'first q_double,q_int,case q_double when q_int then q_int when q_int - (%d ) then q_int else %d end last' %(a1,a2), # 'first q_double,q_int,case q_double when q_int then q_int when q_int - 1 then q_int else 99 end last' ,
+ 'first case cast(q_double as int) when %d then q_double when q_int then %d else ts end last' %(a1,a2), # 'first case cast(q_double as int) when 0 then q_double when q_int then 11 else ts end last' ,
+ 'first q_int, case q_int + (%d) when %d then %d when %d then %d else %d end last' %(a1,a1,a1,a2,a2,a3), #'first q_int, case q_int + 1 when 1 then 1 when 2 then 2 else 3 end last' ,
+ 'first distinct loc, case t_int when t_bigint then t_ts else t_smallint + (%d) end last' %(a1), #'first distinct loc, case t_int when t_bigint then t_ts else t_smallint + 100 end last' ,
+ ]
+ #num = len(casewhen_lists)
+
+ casewhen_list = str(random.sample(casewhen_lists,50)).replace("[","").replace("]","").replace("'first","").replace("last'","").replace("\"first","").replace("last\"","")
+
+ return casewhen_list
+
+ def base_case(self,database):
+
+ for i in range(30):
+ cs = self.casewhen_list().split(',')[i]
+ sql1 = "select %s from %s.stable_1 where tbname = 'stable_1_1';" % (cs ,database)
+ sql2 = "select %s from %s.stable_1_1 ;" % (cs ,database)
+ self.constant_check(database,sql1,sql2,0)
+
+
+ def state_window_list(self):
+ a1,a2,a3 = random.randint(-2147483647,2147483647),random.randint(-2147483647,2147483647),random.randint(-2147483647,2147483647)
+ state_window_lists = ['first case when %d then %d end last' %(a1,a2) , #'first case when 3 then 4 end last' ,
+ 'first case when 0 then %d end last' %(a1), #'first case when 0 then 4 end last' ,
+ 'first case when null then %d end last' %(a1) , #'first case when null then 4 end last' ,
+ 'first case when %d-(%d) then 0 end last' %(a1,a1) , #'first case when 1-1 then 0 end last' ,
+ 'first case when %d+(%d) then 0 end last' %(a1,a1), #'first case when 1+1 then 0 end last' ,
+ 'first case when %d > 0 then %d < %d end last' %(a1,a1,a2), #'first case when 1 > 0 then 1 < 2 end last' ,
+ 'first case when %d > %d then %d < %d end last' %(a1,a2,a1,a2), #'first case when 1 > 2 then 1 < 2 end last' ,
+ 'first case when abs(%d) then abs(-(%d)) end last' %(a1,a2) ,#'first case when abs(3) then abs(-1) end last' ,
+ 'first case when 0 then %d else %d end last' %(a1,a2), #'first case when 0 then 1 else 3 end last' ,
+ 'first case when 0 then %d when 1 then %d else %d end last' %(a1,a1,a3), #'first case when 0 then 1 when 1 then 0 else 3 end last' ,
+ 'first case when 0 then %d when 1 then %d when 2 then %d end last' %(a1,a1,a3), #'first case when 0 then 1 when 1 then 0 when 2 then 3 end last' ,
+ 'first case when \'a\' then \'b\' when null then 0 end last' , #'first case when \'a\' then \'b\' when null then 0 end last' ,
+ 'first case when \'%d\' then \'b\' when null then %d end last' %(a1,a2) , #'first case when \'2\' then \'b\' when null then 0 end last' ,
+ 'first case when \'%d\' then \'b\' else null end last' %(a1), #'first case when \'0\' then \'b\' else null end last',
+ 'first case when \'%d\' then \'b\' else %d end last' %(a1,a2), #'first case when \'0\' then \'b\' else 2 end last',
+ 'first case when q_int then q_int when q_int + (%d) then q_int + (%d) else q_int is null end last' %(a1,a2) , #'first case when q_int then q_int when q_int + 1 then q_int + 1 else q_int is null end last' ,
+ 'first case when q_int then %d when ts then ts end last' %(a1), #'first case when q_int then 3 when ts then ts end last' ,
+ 'first case when %d then q_int end last' %(a1), #'first case when 3 then q_int end last' ,
+ 'first case when q_int then %d when %d then %d end last' %(a1,a1,a3), #'first case when q_int then 3 when 1 then 2 end last' ,
+ 'first case when q_int < %d then %d when q_int >= %d then %d else %d end last' %(a1,a2,a1,a2,a3), #'first case when q_int < 3 then 1 when q_int >= 3 then 2 else 3 end last' ,
+ 'first case when q_int is not null then case when q_int <= %d then q_int else q_int * (%d) end else -(%d) end last' %(a1,a1,a3), #'first case when q_int is not null then case when q_int <= 0 then q_int else q_int * 10 end else -1 end last' ,
+ 'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 3 then 4 end last' ,
+ 'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 1 then 4 end last' ,
+ 'first case %d when %d then %d else %d end last' %(a1,a1,a2,a3), # 'first case 3 when 1 then 4 else 2 end last' ,
+ 'first case %d when null then %d when \'%d\' then %d end last' %(a1,a1,a2,a3) , # 'first case 3 when null then 4 when \'3\' then 1 end last' ,
+ 'first case \'%d\' when null then %d when %d then %d end last' %(a1,a1,a2,a3), # 'first case \'3\' when null then 4 when 3 then 1 end last' ,
+ 'first case null when null then %d when %d then %d end last' %(a1,a2,a3), # 'first case null when null then 4 when 3 then 1 end last' ,
+ 'first case %d.0 when null then %d when \'%d\' then %d end last' %(a1,a1,a2,a3) , # 'first case 3.0 when null then 4 when \'3\' then 1 end last' ,
+ 'first case q_double when \'a\' then %d when \'%d\' then %d end last' %(a1,a2,a3) , # 'first case q_double when \'a\' then 4 when \'0\' then 1 end last' ,
+ 'first case q_double when q_int then q_int when q_int - (%d) then q_int else %d end last' %(a1,a2), # 'first case q_double when q_int then q_int when q_int - 1 then q_int else 99 end last' ,
+ 'first case q_int + (%d) when %d then %d when %d then %d else %d end last' %(a1,a2,a3,a1,a2,a3), #'first case q_int + 1 when 1 then 1 when 2 then 2 else 3 end last' ,
+ 'first case when \'a\' then \'b\' when null then %d end last' %(a1), # 'first case when \'a\' then \'b\' when null then 0 end last' ,
+ 'first case when \'%d\' then \'b\' when null then %d end last' %(a1,a2), # 'first case when \'2\' then \'b\' when null then 0 end last' ,
+ 'first case when %d then \'b\' else null end last' %(a1), # 'first case when 0 then \'b\' else null end last' ,
+ 'first case when %d then \'b\' else %d+abs(%d) end last' %(a1,a2,a3), # 'first case when 0 then \'b\' else 2+abs(-2) end last' ,
+ 'first case when %d then %d end last' %(a1,a2), # 'first case when 3 then 4 end last' ,
+ 'first case when %d then %d end last' %(a1,a2), # 'first case when 0 then 4 end last' ,
+ 'first case when null then %d end last' %(a1), # 'first case when null then 4 end last' ,
+ #'first case when %d then %d+(%d) end last' %(a1,a2,a3), # 'first case when 1 then 4+1 end last' ,
+ 'first case when %d-(%d) then %d end last' %(a1,a2,a3), # 'first case when 1-1 then 0 end last' ,
+ 'first case when %d+(%d) then %d end last' %(a1,a2,a3), # 'first case when 1+1 then 0 end last' ,
+ 'first case when abs(%d) then abs(%d) end last' %(a1,a2), # 'first case when abs(3) then abs(-1) end last' ,
+ #'first case when abs(%d+(%d)) then abs(%d)+abs(%d) end last' %(a1,a2,a3,a1), # 'first case when abs(1+1) then abs(-1)+abs(3) end last' ,
+ 'first case when %d then %d else %d end last' %(a1,a2,a3), # 'first case when 0 then 1 else 3 end last' ,
+ 'first case when %d then %d when %d then %d else %d end last' %(a1,a2,a3,a1,a2), # 'first case when 0 then 1 when 1 then 0 else 3 end last' ,
+ 'first case when %d then %d when %d then %d when %d then %d end last' %(a1,a2,a3,a1,a2,a3), # 'first case when 0 then 1 when 1 then 0 when 2 then 3 end last' ,
+ 'first case %d when %d then %d end last' %(a1,a1,a3), # 'first case 3 when 3 then 4 end last' ,
+ 'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 1 then 4 end last' ,
+ 'first case %d when %d then %d else %d end last' %(a1,a2,a3,a1), # 'first case 3 when 1 then 4 else 2 end last' ,
+ 'first case %d when null then %d when \'%d\' then %d end last' %(a1,a2,a1,a3), # 'first case 3 when null then 4 when \'3\' then 1 end last' ,
+ 'first case null when null then %d when %d then %d end last' %(a1,a2,a3), # 'first case null when null then 4 when 3 then 1 end last' ,
+ 'first case %d.0 when null then %d when \'%d\' then %d end last' %(a1,a2,a1,a3), # 'first case 3.0 when null then 4 when \'3\' then 1 end last' ,
+ 'first case null when null then %d when %d then %d end last' %(a1,a2,a3), #'first case null when null then 4 when 3 then 1 end last' ,
+ 'first q_int, case q_int + (%d) when %d then %d when %d then %d else %d end last' %(a1,a1,a1,a2,a2,a3), #'first q_int, case q_int + 1 when 1 then 1 when 2 then 2 else 3 end last' ,
+ ]
+
+ state_window_list = str(random.sample(state_window_lists,50)).replace("[","").replace("]","").replace("'first","").replace("last'","").replace("\"first","").replace("last\"","")
+
+ return state_window_list
+
+ def state_window_case(self,database):
+
+ for i in range(30):
+ cs = self.state_window_list().split(',')[i]
+ sql1 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1 where tbname = 'stable_1_1' state_window(%s);" % (database,cs)
+ sql2 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1_1 state_window(%s) ;" % (database,cs)
+ self.constant_check(database,sql1,sql2,0)
+ self.constant_check(database,sql1,sql2,1)
+ self.constant_check(database,sql1,sql2,2)
+
+
+
+ def constant_check(self,database,sql1,sql2,column):
+ # column = 0 selects the first result column; column = n selects column n (0-based index)
+ tdLog.info("\n=============sql1:(%s)___sql2:(%s) ====================\n" %(sql1,sql2))
+ tdSql.query(sql1)
+ queryRows = len(tdSql.queryResult)
+
+ for i in range(queryRows):
+ tdSql.query(sql1)
+ sql1_value = tdSql.getData(i,column)
+ tdSql.execute(" flush database %s;" %database)
+ tdSql.query(sql2)
+ sql2_value = tdSql.getData(i,column)
+ self.value_check(sql1_value,sql2_value)
+
+ def value_check(self,base_value,check_value):
+ if base_value==check_value:
+ tdLog.info(f"checkEqual success, base_value={base_value},check_value={check_value}")
+ else :
+ tdLog.exit(f"checkEqual error, base_value=={base_value},check_value={check_value}")
+
+ def run(self):
+ fake = Faker('zh_CN')
+ fake_data = fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1)
+ fake_float = fake.pyfloat()
+ fake_str = fake.pystr()
+
+ startTime = time.time()
+
+ os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
+
+ self.dropandcreateDB_random("%s" %self.db, 10)
+
+ self.users_bug("%s" %self.db)
+
+ self.base_case("%s" %self.db)
+
+ self.state_window_case("%s" %self.db)
+
+
+
+ #taos -f sql
+ print("taos -f sql start!")
+ taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
+ _ = subprocess.check_output(taos_cmd1, shell=True)
+ print("taos -f sql over!")
+
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/ceil.py b/tests/system-test/2-query/ceil.py
index fffd484720..aabc716a74 100644
--- a/tests/system-test/2-query/ceil.py
+++ b/tests/system-test/2-query/ceil.py
@@ -85,16 +85,11 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(ceil_query)
for row_index , row in enumerate(ceil_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("ceil function value has not as expected , sql is \"%s\" "%ceil_query )
- sys.exit(1)
- else:
- tdLog.info("ceil value check pass , it work as expected ,sql is \"%s\" "%ceil_query )
+ tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
+
def test_errors(self, dbname="db"):
error_sql_lists = [
@@ -377,10 +372,10 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
@@ -388,15 +383,15 @@ class TDTestCase:
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select ceil(c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) ,ceil(c6) from {dbname}.sub1_bound")
self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select ceil(c1), ceil(c2) ,ceil(c3), ceil(c3), ceil(c2) ,ceil(c1) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/cos.py b/tests/system-test/2-query/cos.py
index d6bddc4e84..d2056805eb 100644
--- a/tests/system-test/2-query/cos.py
+++ b/tests/system-test/2-query/cos.py
@@ -84,26 +84,10 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
- print("========",pow_query, origin_query )
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and elem:
- check_status = False
- elif auto_result[row_index][col_index] != None and ((auto_result[row_index][col_index] != elem) and (str(auto_result[row_index][col_index])[:6] != str(elem)[:6] )):
- # elif auto_result[row_index][col_index] != None and (abs(auto_result[row_index][col_index] - elem) > 0.000001):
- print("=====")
- print(row_index, col_index)
- print(auto_result[row_index][col_index], elem, origin_result[row_index][col_index])
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("cos function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("cos value check pass , it work as expected ,sql is \"%s\" "%pow_query )
+ tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
def test_errors(self, dbname="db"):
error_sql_lists = [
@@ -413,16 +397,16 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
# self.check_result_auto_cos( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/floor.py b/tests/system-test/2-query/floor.py
index 6a75872bcf..bf78aa8bfa 100644
--- a/tests/system-test/2-query/floor.py
+++ b/tests/system-test/2-query/floor.py
@@ -85,16 +85,11 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(floor_query)
for row_index , row in enumerate(floor_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("floor function value has not as expected , sql is \"%s\" "%floor_query )
- sys.exit(1)
- else:
- tdLog.info("floor value check pass , it work as expected ,sql is \"%s\" "%floor_query )
+ tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
+
def test_errors(self, dbname=DBNAME):
error_sql_lists = [
@@ -388,10 +383,10 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
@@ -399,15 +394,15 @@ class TDTestCase:
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select floor(c1), floor(c2) ,floor(c3), floor(c4), floor(c5) ,floor(c6) from {dbname}.sub1_bound")
self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select floor(c1), floor(c2) ,floor(c3), floor(c3), floor(c2) ,floor(c1) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/function_stateduration.py b/tests/system-test/2-query/function_stateduration.py
index ad9b8b0d79..728df0b91f 100644
--- a/tests/system-test/2-query/function_stateduration.py
+++ b/tests/system-test/2-query/function_stateduration.py
@@ -364,10 +364,10 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
@@ -375,15 +375,15 @@ class TDTestCase:
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.query(f"select stateduration(c1,'GT',1,1s) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py
index ce6b85c1cf..ce57357abd 100644
--- a/tests/system-test/2-query/interp.py
+++ b/tests/system-test/2-query/interp.py
@@ -18,6 +18,7 @@ class TDTestCase:
def run(self):
dbname = "db"
tbname = "tb"
+ tbname1 = "tb1"
stbname = "stb"
ctbname1 = "ctb1"
ctbname2 = "ctb2"
@@ -38,8 +39,6 @@ class TDTestCase:
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:10', 10, 10, 10, 10, 10.0, 10.0, true, 'varchar', 'nchar')")
tdSql.execute(f"insert into {dbname}.{tbname} values ('2020-02-01 00:00:15', 15, 15, 15, 15, 15.0, 15.0, true, 'varchar', 'nchar')")
- tdSql.execute(f"insert into {dbname}.{tbname} (ts) values (now)")
-
tdLog.printNoPrefix("==========step3:fill null")
## {. . .}
@@ -247,7 +246,7 @@ class TDTestCase:
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
- tdSql.checkRows(13)
+ tdSql.checkRows(12)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 10)
@@ -297,21 +296,21 @@ class TDTestCase:
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)")
- tdSql.checkRows(5)
+ tdSql.checkRows(3)
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 15)
## ... {}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)")
- tdSql.checkRows(4)
+ tdSql.checkRows(0)
tdLog.printNoPrefix("==========step7:fill linear")
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
- tdSql.checkRows(12)
+ tdSql.checkRows(11)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 6)
tdSql.checkData(2, 0, 7)
@@ -354,7 +353,7 @@ class TDTestCase:
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(linear)")
- tdSql.checkRows(5)
+ tdSql.checkRows(3)
tdSql.checkData(0, 0, 13)
tdSql.checkData(1, 0, 14)
tdSql.checkData(2, 0, 15)
@@ -512,7 +511,7 @@ class TDTestCase:
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
- tdSql.checkRows(13)
+ tdSql.checkRows(12)
tdSql.checkCols(2)
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
@@ -555,7 +554,7 @@ class TDTestCase:
tdSql.checkData(8, 0, '2020-02-01 00:00:12.000')
tdSql.query(f"select _irowts,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
- tdSql.checkRows(12)
+ tdSql.checkRows(11)
tdSql.checkCols(2)
tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
@@ -583,7 +582,7 @@ class TDTestCase:
# multiple _irowts
tdSql.query(f"select interp(c0),_irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
- tdSql.checkRows(12)
+ tdSql.checkRows(11)
tdSql.checkCols(2)
tdSql.checkData(0, 1, '2020-02-01 00:00:05.000')
@@ -599,7 +598,7 @@ class TDTestCase:
tdSql.checkData(10, 1, '2020-02-01 00:00:15.000')
tdSql.query(f"select _irowts, interp(c0), interp(c0), _irowts from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(linear)")
- tdSql.checkRows(12)
+ tdSql.checkRows(11)
tdSql.checkCols(4)
cols = (0, 3)
@@ -837,7 +836,944 @@ class TDTestCase:
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
- tdLog.printNoPrefix("==========step10:test multi-interp cases")
+ # test fill linear
+
+ ## | {. | | .} |
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-11 00:00:05') every(1d) fill(linear)")
+ tdSql.checkRows(11)
+ tdSql.checkData(0, 0, 5)
+ tdSql.checkData(1, 0, 6)
+ tdSql.checkData(2, 0, 7)
+ tdSql.checkData(3, 0, 8)
+ tdSql.checkData(4, 0, 9)
+ tdSql.checkData(5, 0, 10)
+ tdSql.checkData(6, 0, 11)
+ tdSql.checkData(7, 0, 12)
+ tdSql.checkData(8, 0, 13)
+ tdSql.checkData(9, 0, 14)
+ tdSql.checkData(10, 0, 15)
+
+ ## | . | {} | . |
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-03 00:00:05', '2020-02-07 00:00:05') every(1d) fill(linear)")
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, 7)
+ tdSql.checkData(1, 0, 8)
+ tdSql.checkData(2, 0, 9)
+ tdSql.checkData(3, 0, 10)
+ tdSql.checkData(4, 0, 11)
+
+ ## | {. | } | . |
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-01-31 00:00:05', '2020-02-05 00:00:05') every(1d) fill(linear)")
+ tdSql.checkRows(5)
+ tdSql.checkData(0, 0, 5)
+ tdSql.checkData(1, 0, 6)
+ tdSql.checkData(2, 0, 7)
+ tdSql.checkData(3, 0, 8)
+ tdSql.checkData(4, 0, 9)
+
+ ## | . | { | .} |
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(linear)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, 14)
+ tdSql.checkData(1, 0, 15)
+
+
+ tdLog.printNoPrefix("==========step10:test interp with null data")
+ tdSql.execute(
+ f'''create table if not exists {dbname}.{tbname1}
+ (ts timestamp, c0 int, c1 int)
+ '''
+ )
+
+
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:00', 0, NULL)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:05', NULL, NULL)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:10', 10, 10)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:15', NULL, NULL)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:20', 20, NULL)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:25', NULL, NULL)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:30', 30, 30)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:35', 35, NULL)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:40', 40, 40)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:45', NULL, 45)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:50', 50, NULL)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:00:55', NULL, NULL)")
+ tdSql.execute(f"insert into {dbname}.{tbname1} values ('2020-02-02 00:01:00', 55, 60)")
+
+ # test fill linear
+
+ # check c0
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-01 23:59:59', '2020-02-02 00:00:00') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, 0)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-01 23:59:59', '2020-02-02 00:00:03') every(1s) fill(linear)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-01 23:59:59', '2020-02-02 00:00:05') every(1s) fill(linear)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, 0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-01 23:59:59', '2020-02-02 00:00:08') every(1s) fill(linear)")
+ tdSql.checkRows(9)
+ tdSql.checkData(0, 0, 0)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:01', '2020-02-02 00:00:03') every(1s) fill(linear)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:03', '2020-02-02 00:00:08') every(1s) fill(linear)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:05', '2020-02-02 00:00:10') every(1s) fill(linear)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, 10)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:05', '2020-02-02 00:00:15') every(1s) fill(linear)")
+ tdSql.checkRows(11)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, 10)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:05', '2020-02-02 00:00:18') every(1s) fill(linear)")
+ tdSql.checkRows(14)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, 10)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, None)
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(12, 0, None)
+ tdSql.checkData(13, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:05', '2020-02-02 00:00:20') every(1s) fill(linear)")
+ tdSql.checkRows(16)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, 10)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, None)
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(12, 0, None)
+ tdSql.checkData(13, 0, None)
+ tdSql.checkData(14, 0, None)
+ tdSql.checkData(15, 0, 20)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:09', '2020-02-02 00:00:11') every(1s) fill(linear)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, 10)
+ tdSql.checkData(2, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:10', '2020-02-02 00:00:15') every(1s) fill(linear)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, 10)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:12', '2020-02-02 00:00:13') every(1s) fill(linear)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:12', '2020-02-02 00:00:15') every(1s) fill(linear)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:12', '2020-02-02 00:00:18') every(1s) fill(linear)")
+ tdSql.checkRows(7)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:30', '2020-02-02 00:00:40') every(1s) fill(linear)")
+ tdSql.checkRows(11)
+ tdSql.checkData(0, 0, 30)
+ tdSql.checkData(1, 0, 31)
+ tdSql.checkData(2, 0, 32)
+ tdSql.checkData(3, 0, 33)
+ tdSql.checkData(4, 0, 34)
+ tdSql.checkData(5, 0, 35)
+ tdSql.checkData(6, 0, 36)
+ tdSql.checkData(7, 0, 37)
+ tdSql.checkData(8, 0, 38)
+ tdSql.checkData(9, 0, 39)
+ tdSql.checkData(10, 0, 40)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:25', '2020-02-02 00:00:45') every(1s) fill(linear)")
+ tdSql.checkRows(21)
+ tdSql.checkData(5, 0, 30)
+ tdSql.checkData(6, 0, 31)
+ tdSql.checkData(7, 0, 32)
+ tdSql.checkData(8, 0, 33)
+ tdSql.checkData(9, 0, 34)
+ tdSql.checkData(10, 0, 35)
+ tdSql.checkData(11, 0, 36)
+ tdSql.checkData(12, 0, 37)
+ tdSql.checkData(13, 0, 38)
+ tdSql.checkData(14, 0, 39)
+ tdSql.checkData(15, 0, 40)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:20', '2020-02-02 00:00:40') every(1s) fill(linear)")
+ tdSql.checkRows(21)
+ tdSql.checkData(0, 0, 20)
+ tdSql.checkData(10, 0, 30)
+ tdSql.checkData(11, 0, 31)
+ tdSql.checkData(12, 0, 32)
+ tdSql.checkData(13, 0, 33)
+ tdSql.checkData(14, 0, 34)
+ tdSql.checkData(15, 0, 35)
+ tdSql.checkData(16, 0, 36)
+ tdSql.checkData(17, 0, 37)
+ tdSql.checkData(18, 0, 38)
+ tdSql.checkData(19, 0, 39)
+ tdSql.checkData(20, 0, 40)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:30', '2020-02-02 00:00:50') every(1s) fill(linear)")
+ tdSql.checkRows(21)
+ tdSql.checkData(0, 0, 30)
+ tdSql.checkData(1, 0, 31)
+ tdSql.checkData(2, 0, 32)
+ tdSql.checkData(3, 0, 33)
+ tdSql.checkData(4, 0, 34)
+ tdSql.checkData(5, 0, 35)
+ tdSql.checkData(6, 0, 36)
+ tdSql.checkData(7, 0, 37)
+ tdSql.checkData(8, 0, 38)
+ tdSql.checkData(9, 0, 39)
+ tdSql.checkData(10, 0, 40)
+ tdSql.checkData(20, 0, 50)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname1} range('2020-02-02 00:00:20', '2020-02-02 00:00:50') every(1s) fill(linear)")
+ tdSql.checkRows(31)
+ tdSql.checkData(0, 0, 20)
+ tdSql.checkData(10, 0, 30)
+ tdSql.checkData(11, 0, 31)
+ tdSql.checkData(12, 0, 32)
+ tdSql.checkData(13, 0, 33)
+ tdSql.checkData(14, 0, 34)
+ tdSql.checkData(15, 0, 35)
+ tdSql.checkData(16, 0, 36)
+ tdSql.checkData(17, 0, 37)
+ tdSql.checkData(18, 0, 38)
+ tdSql.checkData(19, 0, 39)
+ tdSql.checkData(20, 0, 40)
+ tdSql.checkData(30, 0, 50)
+
+ # check c1
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-01 23:59:59', '2020-02-02 00:00:05') every(1s) fill(linear)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:05') every(1s) fill(linear)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:08') every(1s) fill(linear)")
+ tdSql.checkRows(9)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:10') every(1s) fill(linear)")
+ tdSql.checkRows(11)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, 10)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:15') every(1s) fill(linear)")
+ tdSql.checkRows(16)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(12, 0, None)
+ tdSql.checkData(13, 0, None)
+ tdSql.checkData(14, 0, None)
+ tdSql.checkData(15, 0, None)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:20') every(1s) fill(linear)")
+ tdSql.checkRows(21)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(12, 0, None)
+ tdSql.checkData(13, 0, None)
+ tdSql.checkData(14, 0, None)
+ tdSql.checkData(15, 0, None)
+ tdSql.checkData(16, 0, None)
+ tdSql.checkData(17, 0, None)
+ tdSql.checkData(18, 0, None)
+ tdSql.checkData(19, 0, None)
+ tdSql.checkData(20, 0, None)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:25') every(1s) fill(linear)")
+ tdSql.checkRows(26)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(12, 0, None)
+ tdSql.checkData(13, 0, None)
+ tdSql.checkData(14, 0, None)
+ tdSql.checkData(15, 0, None)
+ tdSql.checkData(16, 0, None)
+ tdSql.checkData(17, 0, None)
+ tdSql.checkData(18, 0, None)
+ tdSql.checkData(19, 0, None)
+ tdSql.checkData(20, 0, None)
+ tdSql.checkData(21, 0, None)
+ tdSql.checkData(22, 0, None)
+ tdSql.checkData(23, 0, None)
+ tdSql.checkData(24, 0, None)
+ tdSql.checkData(25, 0, None)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:30') every(1s) fill(linear)")
+ tdSql.checkRows(31)
+ tdSql.checkData(0, 0, None)
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(2, 0, None)
+ tdSql.checkData(3, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None)
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(7, 0, None)
+ tdSql.checkData(8, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(12, 0, None)
+ tdSql.checkData(13, 0, None)
+ tdSql.checkData(14, 0, None)
+ tdSql.checkData(15, 0, None)
+ tdSql.checkData(16, 0, None)
+ tdSql.checkData(17, 0, None)
+ tdSql.checkData(18, 0, None)
+ tdSql.checkData(19, 0, None)
+ tdSql.checkData(20, 0, None)
+ tdSql.checkData(21, 0, None)
+ tdSql.checkData(22, 0, None)
+ tdSql.checkData(23, 0, None)
+ tdSql.checkData(24, 0, None)
+ tdSql.checkData(25, 0, None)
+ tdSql.checkData(26, 0, None)
+ tdSql.checkData(27, 0, None)
+ tdSql.checkData(28, 0, None)
+ tdSql.checkData(29, 0, None)
+ tdSql.checkData(30, 0, 30)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:35') every(1s) fill(linear)")
+ tdSql.checkRows(36)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(30, 0, 30)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:40') every(1s) fill(linear)")
+ tdSql.checkRows(41)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(30, 0, 30)
+ tdSql.checkData(40, 0, 40)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:45') every(1s) fill(linear)")
+ tdSql.checkRows(46)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(30, 0, 30)
+ tdSql.checkData(40, 0, 40)
+ tdSql.checkData(41, 0, 41)
+ tdSql.checkData(42, 0, 42)
+ tdSql.checkData(43, 0, 43)
+ tdSql.checkData(44, 0, 44)
+ tdSql.checkData(45, 0, 45)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:50') every(1s) fill(linear)")
+ tdSql.checkRows(51)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(30, 0, 30)
+ tdSql.checkData(40, 0, 40)
+ tdSql.checkData(41, 0, 41)
+ tdSql.checkData(42, 0, 42)
+ tdSql.checkData(43, 0, 43)
+ tdSql.checkData(44, 0, 44)
+ tdSql.checkData(45, 0, 45)
+
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:00:55') every(1s) fill(linear)")
+ tdSql.checkRows(56)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(30, 0, 30)
+ tdSql.checkData(40, 0, 40)
+ tdSql.checkData(41, 0, 41)
+ tdSql.checkData(42, 0, 42)
+ tdSql.checkData(43, 0, 43)
+ tdSql.checkData(44, 0, 44)
+ tdSql.checkData(45, 0, 45)
+
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(linear)")
+ tdSql.checkRows(61)
+ tdSql.checkData(10, 0, 10)
+ tdSql.checkData(30, 0, 30)
+ tdSql.checkData(40, 0, 40)
+ tdSql.checkData(41, 0, 41)
+ tdSql.checkData(42, 0, 42)
+ tdSql.checkData(43, 0, 43)
+ tdSql.checkData(44, 0, 44)
+ tdSql.checkData(45, 0, 45)
+ tdSql.checkData(60, 0, 60)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:40', '2020-02-02 00:00:45') every(1s) fill(linear)")
+ tdSql.checkRows(6)
+ tdSql.checkData(0, 0, 40)
+ tdSql.checkData(1, 0, 41)
+ tdSql.checkData(2, 0, 42)
+ tdSql.checkData(3, 0, 43)
+ tdSql.checkData(4, 0, 44)
+ tdSql.checkData(5, 0, 45)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:35', '2020-02-02 00:00:50') every(1s) fill(linear)")
+ tdSql.checkRows(16)
+ tdSql.checkData(5, 0, 40)
+ tdSql.checkData(6, 0, 41)
+ tdSql.checkData(7, 0, 42)
+ tdSql.checkData(8, 0, 43)
+ tdSql.checkData(9, 0, 44)
+ tdSql.checkData(10, 0, 45)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:35', '2020-02-02 00:00:55') every(1s) fill(linear)")
+ tdSql.checkRows(21)
+ tdSql.checkData(5, 0, 40)
+ tdSql.checkData(6, 0, 41)
+ tdSql.checkData(7, 0, 42)
+ tdSql.checkData(8, 0, 43)
+ tdSql.checkData(9, 0, 44)
+ tdSql.checkData(10, 0, 45)
+
+ tdSql.query(f"select interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:30', '2020-02-02 00:01:00') every(1s) fill(linear)")
+ tdSql.checkRows(31)
+ tdSql.checkData(0, 0, 30)
+ tdSql.checkData(10, 0, 40)
+ tdSql.checkData(11, 0, 41)
+ tdSql.checkData(12, 0, 42)
+ tdSql.checkData(13, 0, 43)
+ tdSql.checkData(14, 0, 44)
+ tdSql.checkData(15, 0, 45)
+ tdSql.checkData(30, 0, 60)
+
+ # two interps
+ tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(linear)")
+ tdSql.checkRows(61)
+ tdSql.checkCols(2)
+ tdSql.checkData(0, 0, 0) #
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None) #
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, 10) #
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(14, 0, None)
+ tdSql.checkData(15, 0, None) #
+ tdSql.checkData(16, 0, None)
+ tdSql.checkData(19, 0, None)
+ tdSql.checkData(20, 0, 20) #
+ tdSql.checkData(21, 0, None)
+ tdSql.checkData(24, 0, None)
+ tdSql.checkData(25, 0, None) #
+ tdSql.checkData(26, 0, None)
+ tdSql.checkData(29, 0, None)
+ tdSql.checkData(30, 0, 30) #
+ tdSql.checkData(31, 0, 31)
+ tdSql.checkData(32, 0, 32)
+ tdSql.checkData(33, 0, 33)
+ tdSql.checkData(34, 0, 34)
+ tdSql.checkData(35, 0, 35) #
+ tdSql.checkData(36, 0, 36)
+ tdSql.checkData(37, 0, 37)
+ tdSql.checkData(38, 0, 38)
+ tdSql.checkData(39, 0, 39)
+ tdSql.checkData(40, 0, 40) #
+ tdSql.checkData(41, 0, None)
+ tdSql.checkData(44, 0, None)
+ tdSql.checkData(45, 0, None) #
+ tdSql.checkData(46, 0, None)
+ tdSql.checkData(49, 0, None)
+ tdSql.checkData(50, 0, 50) #
+ tdSql.checkData(51, 0, None)
+ tdSql.checkData(54, 0, None)
+ tdSql.checkData(55, 0, None) #
+ tdSql.checkData(56, 0, None)
+ tdSql.checkData(59, 0, None)
+ tdSql.checkData(60, 0, 55) #
+
+ tdSql.checkData(0, 1, None) #
+ tdSql.checkData(1, 1, None)
+ tdSql.checkData(4, 1, None)
+ tdSql.checkData(5, 1, None) #
+ tdSql.checkData(6, 1, None)
+ tdSql.checkData(9, 1, None)
+ tdSql.checkData(10, 1, 10) #
+ tdSql.checkData(11, 1, None)
+ tdSql.checkData(14, 1, None)
+ tdSql.checkData(15, 1, None) #
+ tdSql.checkData(16, 1, None)
+ tdSql.checkData(19, 1, None)
+ tdSql.checkData(20, 1, None) #
+ tdSql.checkData(21, 1, None)
+ tdSql.checkData(24, 1, None)
+ tdSql.checkData(25, 1, None) #
+ tdSql.checkData(26, 1, None)
+ tdSql.checkData(29, 1, None)
+ tdSql.checkData(30, 1, 30) #
+ tdSql.checkData(31, 1, None)
+ tdSql.checkData(34, 1, None)
+ tdSql.checkData(35, 1, None) #
+ tdSql.checkData(36, 1, None)
+ tdSql.checkData(39, 1, None)
+ tdSql.checkData(40, 1, 40) #
+ tdSql.checkData(41, 1, 41)
+ tdSql.checkData(42, 1, 42)
+ tdSql.checkData(43, 1, 43)
+ tdSql.checkData(44, 1, 44)
+ tdSql.checkData(45, 1, 45) #
+ tdSql.checkData(46, 1, None)
+ tdSql.checkData(49, 1, None)
+ tdSql.checkData(50, 1, None) #
+ tdSql.checkData(51, 1, None)
+ tdSql.checkData(54, 1, None)
+ tdSql.checkData(55, 1, None) #
+ tdSql.checkData(56, 1, None)
+ tdSql.checkData(59, 1, None)
+ tdSql.checkData(60, 1, 60) #
+
+ # test fill null
+ tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(null)")
+ tdSql.checkRows(61)
+ tdSql.checkCols(2)
+ tdSql.checkData(0, 0, 0) #
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None) #
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, 10) #
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(14, 0, None)
+ tdSql.checkData(15, 0, None) #
+ tdSql.checkData(16, 0, None)
+ tdSql.checkData(19, 0, None)
+ tdSql.checkData(20, 0, 20) #
+ tdSql.checkData(21, 0, None)
+ tdSql.checkData(24, 0, None)
+ tdSql.checkData(25, 0, None) #
+ tdSql.checkData(26, 0, None)
+ tdSql.checkData(29, 0, None)
+ tdSql.checkData(30, 0, 30) #
+ tdSql.checkData(31, 0, None)
+ tdSql.checkData(34, 0, None)
+ tdSql.checkData(35, 0, 35) #
+ tdSql.checkData(36, 0, None)
+ tdSql.checkData(39, 0, None)
+ tdSql.checkData(40, 0, 40) #
+ tdSql.checkData(41, 0, None)
+ tdSql.checkData(44, 0, None)
+ tdSql.checkData(45, 0, None) #
+ tdSql.checkData(46, 0, None)
+ tdSql.checkData(49, 0, None)
+ tdSql.checkData(50, 0, 50) #
+ tdSql.checkData(51, 0, None)
+ tdSql.checkData(54, 0, None)
+ tdSql.checkData(55, 0, None) #
+ tdSql.checkData(56, 0, None)
+ tdSql.checkData(59, 0, None)
+ tdSql.checkData(60, 0, 55) #
+
+ tdSql.checkData(0, 1, None) #
+ tdSql.checkData(1, 1, None)
+ tdSql.checkData(4, 1, None)
+ tdSql.checkData(5, 1, None) #
+ tdSql.checkData(6, 1, None)
+ tdSql.checkData(9, 1, None)
+ tdSql.checkData(10, 1, 10) #
+ tdSql.checkData(11, 1, None)
+ tdSql.checkData(14, 1, None)
+ tdSql.checkData(15, 1, None) #
+ tdSql.checkData(16, 1, None)
+ tdSql.checkData(19, 1, None)
+ tdSql.checkData(20, 1, None) #
+ tdSql.checkData(21, 1, None)
+ tdSql.checkData(24, 1, None)
+ tdSql.checkData(25, 1, None) #
+ tdSql.checkData(26, 1, None)
+ tdSql.checkData(29, 1, None)
+ tdSql.checkData(30, 1, 30) #
+ tdSql.checkData(31, 1, None)
+ tdSql.checkData(34, 1, None)
+ tdSql.checkData(35, 1, None) #
+ tdSql.checkData(36, 1, None)
+ tdSql.checkData(39, 1, None)
+ tdSql.checkData(40, 1, 40) #
+ tdSql.checkData(41, 1, None)
+ tdSql.checkData(44, 1, None)
+ tdSql.checkData(45, 1, 45) #
+ tdSql.checkData(46, 1, None)
+ tdSql.checkData(49, 1, None)
+ tdSql.checkData(50, 1, None) #
+ tdSql.checkData(51, 1, None)
+ tdSql.checkData(54, 1, None)
+ tdSql.checkData(55, 1, None) #
+ tdSql.checkData(56, 1, None)
+ tdSql.checkData(59, 1, None)
+ tdSql.checkData(60, 1, 60) #
+
+ # test fill value
+ tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123)")
+ tdSql.checkRows(61)
+ tdSql.checkCols(2)
+ tdSql.checkData(0, 0, 0) #
+ tdSql.checkData(1, 0, 123)
+ tdSql.checkData(4, 0, 123)
+ tdSql.checkData(5, 0, None) #
+ tdSql.checkData(6, 0, 123)
+ tdSql.checkData(9, 0, 123)
+ tdSql.checkData(10, 0, 10) #
+ tdSql.checkData(11, 0, 123)
+ tdSql.checkData(14, 0, 123)
+ tdSql.checkData(15, 0, None) #
+ tdSql.checkData(16, 0, 123)
+ tdSql.checkData(19, 0, 123)
+ tdSql.checkData(20, 0, 20) #
+ tdSql.checkData(21, 0, 123)
+ tdSql.checkData(24, 0, 123)
+ tdSql.checkData(25, 0, None) #
+ tdSql.checkData(26, 0, 123)
+ tdSql.checkData(29, 0, 123)
+ tdSql.checkData(30, 0, 30) #
+ tdSql.checkData(31, 0, 123)
+ tdSql.checkData(34, 0, 123)
+ tdSql.checkData(35, 0, 35) #
+ tdSql.checkData(36, 0, 123)
+ tdSql.checkData(39, 0, 123)
+ tdSql.checkData(40, 0, 40) #
+ tdSql.checkData(41, 0, 123)
+ tdSql.checkData(44, 0, 123)
+ tdSql.checkData(45, 0, None) #
+ tdSql.checkData(46, 0, 123)
+ tdSql.checkData(49, 0, 123)
+ tdSql.checkData(50, 0, 50) #
+ tdSql.checkData(51, 0, 123)
+ tdSql.checkData(54, 0, 123)
+ tdSql.checkData(55, 0, None) #
+ tdSql.checkData(59, 0, 123)
+ tdSql.checkData(60, 0, 55) #
+
+ tdSql.checkData(0, 1, None) #
+ tdSql.checkData(1, 1, 123)
+ tdSql.checkData(4, 1, 123)
+ tdSql.checkData(5, 1, None) #
+ tdSql.checkData(6, 1, 123)
+ tdSql.checkData(9, 1, 123)
+ tdSql.checkData(10, 1, 10) #
+ tdSql.checkData(11, 1, 123)
+ tdSql.checkData(14, 1, 123)
+ tdSql.checkData(15, 1, None) #
+ tdSql.checkData(16, 1, 123)
+ tdSql.checkData(19, 1, 123)
+ tdSql.checkData(20, 1, None) #
+ tdSql.checkData(21, 1, 123)
+ tdSql.checkData(24, 1, 123)
+ tdSql.checkData(25, 1, None) #
+ tdSql.checkData(26, 1, 123)
+ tdSql.checkData(29, 1, 123)
+ tdSql.checkData(30, 1, 30) #
+ tdSql.checkData(31, 1, 123)
+ tdSql.checkData(34, 1, 123)
+ tdSql.checkData(35, 1, None) #
+ tdSql.checkData(36, 1, 123)
+ tdSql.checkData(39, 1, 123)
+ tdSql.checkData(40, 1, 40) #
+ tdSql.checkData(41, 1, 123)
+ tdSql.checkData(44, 1, 123)
+ tdSql.checkData(45, 1, 45) #
+ tdSql.checkData(46, 1, 123)
+ tdSql.checkData(49, 1, 123)
+ tdSql.checkData(50, 1, None) #
+ tdSql.checkData(51, 1, 123)
+ tdSql.checkData(54, 1, 123)
+ tdSql.checkData(55, 1, None) #
+ tdSql.checkData(56, 1, 123)
+ tdSql.checkData(59, 1, 123)
+ tdSql.checkData(60, 1, 60) #
+
+ # test fill prev
+ tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(prev)")
+ tdSql.checkRows(61)
+ tdSql.checkCols(2)
+ tdSql.checkData(0, 0, 0) #
+ tdSql.checkData(1, 0, 0)
+ tdSql.checkData(4, 0, 0)
+ tdSql.checkData(5, 0, None) #
+ tdSql.checkData(6, 0, None)
+ tdSql.checkData(9, 0, None)
+ tdSql.checkData(10, 0, 10) #
+ tdSql.checkData(11, 0, 10)
+ tdSql.checkData(14, 0, 10)
+ tdSql.checkData(15, 0, None) #
+ tdSql.checkData(16, 0, None)
+ tdSql.checkData(19, 0, None)
+ tdSql.checkData(20, 0, 20) #
+ tdSql.checkData(21, 0, 20)
+ tdSql.checkData(24, 0, 20)
+ tdSql.checkData(25, 0, None) #
+ tdSql.checkData(26, 0, None)
+ tdSql.checkData(29, 0, None)
+ tdSql.checkData(30, 0, 30) #
+ tdSql.checkData(31, 0, 30)
+ tdSql.checkData(34, 0, 30)
+ tdSql.checkData(35, 0, 35) #
+ tdSql.checkData(36, 0, 35)
+ tdSql.checkData(39, 0, 35)
+ tdSql.checkData(40, 0, 40) #
+ tdSql.checkData(41, 0, 40)
+ tdSql.checkData(44, 0, 40)
+ tdSql.checkData(45, 0, None) #
+ tdSql.checkData(46, 0, None)
+ tdSql.checkData(49, 0, None)
+ tdSql.checkData(50, 0, 50) #
+ tdSql.checkData(51, 0, 50)
+ tdSql.checkData(54, 0, 50)
+ tdSql.checkData(55, 0, None) #
+ tdSql.checkData(56, 0, None)
+ tdSql.checkData(59, 0, None)
+ tdSql.checkData(60, 0, 55) #
+
+ tdSql.checkData(0, 1, None) #
+ tdSql.checkData(1, 1, None)
+ tdSql.checkData(4, 1, None)
+ tdSql.checkData(5, 1, None) #
+ tdSql.checkData(6, 1, None)
+ tdSql.checkData(9, 1, None)
+ tdSql.checkData(10, 1, 10) #
+ tdSql.checkData(11, 1, 10)
+ tdSql.checkData(14, 1, 10)
+ tdSql.checkData(15, 1, None) #
+ tdSql.checkData(16, 1, None)
+ tdSql.checkData(19, 1, None)
+ tdSql.checkData(20, 1, None) #
+ tdSql.checkData(21, 1, None)
+ tdSql.checkData(24, 1, None)
+ tdSql.checkData(25, 1, None) #
+ tdSql.checkData(26, 1, None)
+ tdSql.checkData(29, 1, None)
+ tdSql.checkData(30, 1, 30) #
+ tdSql.checkData(31, 1, 30)
+ tdSql.checkData(34, 1, 30)
+ tdSql.checkData(35, 1, None) #
+ tdSql.checkData(36, 1, None)
+ tdSql.checkData(39, 1, None)
+ tdSql.checkData(40, 1, 40) #
+ tdSql.checkData(41, 1, 40)
+ tdSql.checkData(44, 1, 40)
+ tdSql.checkData(45, 1, 45) #
+ tdSql.checkData(46, 1, 45)
+ tdSql.checkData(49, 1, 45)
+ tdSql.checkData(50, 1, None) #
+ tdSql.checkData(51, 1, None)
+ tdSql.checkData(54, 1, None)
+ tdSql.checkData(55, 1, None) #
+ tdSql.checkData(56, 1, None)
+ tdSql.checkData(59, 1, None)
+ tdSql.checkData(60, 1, 60) #
+
+ # test fill next
+ tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(next)")
+ tdSql.checkRows(61)
+ tdSql.checkCols(2)
+ tdSql.checkData(0, 0, 0) #
+ tdSql.checkData(1, 0, None)
+ tdSql.checkData(4, 0, None)
+ tdSql.checkData(5, 0, None) #
+ tdSql.checkData(6, 0, 10)
+ tdSql.checkData(9, 0, 10)
+ tdSql.checkData(10, 0, 10) #
+ tdSql.checkData(11, 0, None)
+ tdSql.checkData(14, 0, None)
+ tdSql.checkData(15, 0, None) #
+ tdSql.checkData(16, 0, 20)
+ tdSql.checkData(19, 0, 20)
+ tdSql.checkData(20, 0, 20) #
+ tdSql.checkData(21, 0, None)
+ tdSql.checkData(24, 0, None)
+ tdSql.checkData(25, 0, None) #
+ tdSql.checkData(26, 0, 30)
+ tdSql.checkData(29, 0, 30)
+ tdSql.checkData(30, 0, 30) #
+ tdSql.checkData(31, 0, 35)
+ tdSql.checkData(34, 0, 35)
+ tdSql.checkData(35, 0, 35) #
+ tdSql.checkData(36, 0, 40)
+ tdSql.checkData(39, 0, 40)
+ tdSql.checkData(40, 0, 40) #
+ tdSql.checkData(41, 0, None)
+ tdSql.checkData(44, 0, None)
+ tdSql.checkData(45, 0, None) #
+ tdSql.checkData(46, 0, 50)
+ tdSql.checkData(49, 0, 50)
+ tdSql.checkData(50, 0, 50) #
+ tdSql.checkData(51, 0, None)
+ tdSql.checkData(54, 0, None)
+ tdSql.checkData(55, 0, None) #
+ tdSql.checkData(56, 0, 55)
+ tdSql.checkData(59, 0, 55)
+ tdSql.checkData(60, 0, 55) #
+
+ tdSql.checkData(0, 1, None) #
+ tdSql.checkData(1, 1, None)
+ tdSql.checkData(4, 1, None)
+ tdSql.checkData(5, 1, None) #
+ tdSql.checkData(6, 1, 10)
+ tdSql.checkData(9, 1, 10)
+ tdSql.checkData(10, 1, 10) #
+ tdSql.checkData(11, 1, None)
+ tdSql.checkData(14, 1, None)
+ tdSql.checkData(15, 1, None) #
+ tdSql.checkData(16, 1, None)
+ tdSql.checkData(19, 1, None)
+ tdSql.checkData(20, 1, None) #
+ tdSql.checkData(21, 1, None)
+ tdSql.checkData(24, 1, None)
+ tdSql.checkData(25, 1, None) #
+ tdSql.checkData(26, 1, 30)
+ tdSql.checkData(29, 1, 30)
+ tdSql.checkData(30, 1, 30) #
+ tdSql.checkData(31, 1, None)
+ tdSql.checkData(34, 1, None)
+ tdSql.checkData(35, 1, None) #
+ tdSql.checkData(36, 1, 40)
+ tdSql.checkData(39, 1, 40)
+ tdSql.checkData(40, 1, 40) #
+ tdSql.checkData(41, 1, 45)
+ tdSql.checkData(44, 1, 45)
+ tdSql.checkData(45, 1, 45) #
+ tdSql.checkData(46, 1, None)
+ tdSql.checkData(49, 1, None)
+ tdSql.checkData(50, 1, None) #
+ tdSql.checkData(51, 1, None)
+ tdSql.checkData(54, 1, None)
+ tdSql.checkData(55, 1, None) #
+ tdSql.checkData(56, 1, 60)
+ tdSql.checkData(59, 1, 60)
+ tdSql.checkData(60, 1, 60) #
+
+ tdLog.printNoPrefix("==========step11:test multi-interp cases")
tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(null)")
tdSql.checkRows(5)
tdSql.checkCols(4)
@@ -891,7 +1827,7 @@ class TDTestCase:
for i in range (tdSql.queryCols):
tdSql.checkData(0, i, 13)
- tdLog.printNoPrefix("==========step11:test error cases")
+ tdLog.printNoPrefix("==========step12:test error cases")
tdSql.error(f"select interp(c0) from {dbname}.{tbname}")
tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05')")
@@ -914,7 +1850,7 @@ class TDTestCase:
tdSql.error(f"select interp('abcd') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp('中文字符') from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
- tdLog.printNoPrefix("==========step12:stable cases")
+ tdLog.printNoPrefix("==========step13:stable cases")
#tdSql.query(f"select interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(null)")
#tdSql.checkRows(13)
diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py
index f8d6ce4c6c..01da658989 100644
--- a/tests/system-test/2-query/last_row.py
+++ b/tests/system-test/2-query/last_row.py
@@ -572,19 +572,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
# check basic elem for table per row
diff --git a/tests/system-test/2-query/log.py b/tests/system-test/2-query/log.py
index 7305a44f56..a05de53c42 100644
--- a/tests/system-test/2-query/log.py
+++ b/tests/system-test/2-query/log.py
@@ -91,7 +91,6 @@ class TDTestCase:
elem = math.log(elem , base)
elif elem <=0:
elem = None
-
row_check.append(elem)
auto_result.append(row_check)
@@ -519,19 +518,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_log(None , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/pow.py b/tests/system-test/2-query/pow.py
index a067d66547..5647d81e27 100644
--- a/tests/system-test/2-query/pow.py
+++ b/tests/system-test/2-query/pow.py
@@ -507,19 +507,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_pow(2, f"select c1, c3 , c4, c5 from {dbname}.sub1_bound ", f"select pow(c1,2), pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.sub1_bound")
self.check_result_auto_pow(3, f"select c1, c3 , c4, c5 from {dbname}.sub1_bound ", f"select pow(c1,3), pow(c3,3), pow(c4,3), pow(c5,3) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/round.py b/tests/system-test/2-query/round.py
index e3d98d6986..d647f516ae 100644
--- a/tests/system-test/2-query/round.py
+++ b/tests/system-test/2-query/round.py
@@ -81,16 +81,12 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(round_query)
for row_index , row in enumerate(round_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("round function value has not as expected , sql is \"%s\" "%round_query )
- sys.exit(1)
- else:
- tdLog.info("round value check pass , it work as expected ,sql is \"%s\" "%round_query )
+ tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index])
+
+
def test_errors(self, dbname="db"):
error_sql_lists = [
@@ -388,10 +384,10 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
@@ -399,15 +395,15 @@ class TDTestCase:
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from {dbname}.sub1_bound")
self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/sin.py b/tests/system-test/2-query/sin.py
index 4fdec8fd73..c65e58852d 100644
--- a/tests/system-test/2-query/sin.py
+++ b/tests/system-test/2-query/sin.py
@@ -394,19 +394,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/sqrt.py b/tests/system-test/2-query/sqrt.py
index 9229444f74..4db166808e 100644
--- a/tests/system-test/2-query/sqrt.py
+++ b/tests/system-test/2-query/sqrt.py
@@ -443,19 +443,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py
index 2aa9194d37..f76e153014 100644
--- a/tests/system-test/2-query/statecount.py
+++ b/tests/system-test/2-query/statecount.py
@@ -451,10 +451,10 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
@@ -462,15 +462,15 @@ class TDTestCase:
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py
index f925380c09..43321810d0 100644
--- a/tests/system-test/2-query/tail.py
+++ b/tests/system-test/2-query/tail.py
@@ -421,10 +421,10 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
@@ -432,20 +432,20 @@ class TDTestCase:
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- tdSql.query(f"select tail(c2,2) from {dbname}.sub1_bound order by 1 desc")
- tdSql.checkRows(2)
- tdSql.checkData(0,0,9223372036854775803)
+ tdSql.query(f"select tail(c2,1) from {dbname}.sub1_bound order by 1 desc")
+ tdSql.checkRows(1)
+ tdSql.checkData(0,0,-9223372036854775803)
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
diff --git a/tests/system-test/2-query/tan.py b/tests/system-test/2-query/tan.py
index 27e6efb475..e689eaba20 100644
--- a/tests/system-test/2-query/tan.py
+++ b/tests/system-test/2-query/tan.py
@@ -391,19 +391,19 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.sub1_bound")
diff --git a/tests/system-test/2-query/txt.txt b/tests/system-test/2-query/txt.txt
new file mode 100644
index 0000000000..fd3b0b5e36
--- /dev/null
+++ b/tests/system-test/2-query/txt.txt
@@ -0,0 +1,40 @@
+abs.py: def check_boundary_values(self):
+abs.py: self.check_boundary_values()
+and_or_for_byte.py: def check_boundary_values(self, dbname="bound_test"):
+and_or_for_byte.py: self.check_boundary_values()
+arccos.py: def check_boundary_values(self, dbname="bound_test"):
+arccos.py: self.check_boundary_values()
+arcsin.py: def check_boundary_values(self, dbname="bound_test"):
+arcsin.py: self.check_boundary_values()
+arctan.py: def check_boundary_values(self, dbname="bound_test"):
+arctan.py: self.check_boundary_values()
+avg.py: def check_boundary_values(self, dbname="bound_test"):
+avg.py: self.check_boundary_values()
+ceil.py: def check_boundary_values(self, dbname="bound_test"):
+ceil.py: self.check_boundary_values()
+cos.py: def check_boundary_values(self, dbname="bound_test"):
+cos.py: self.check_boundary_values()
+floor.py: def check_boundary_values(self, dbname="bound_test"):
+floor.py: self.check_boundary_values()
+function_stateduration.py: def check_boundary_values(self, dbname="bound_test"):
+function_stateduration.py: self.check_boundary_values()
+last_row.py: def check_boundary_values(self, dbname="bound_test"):
+last_row.py: self.check_boundary_values()
+log.py: def check_boundary_values(self, dbname="bound_test"):
+log.py: self.check_boundary_values()
+pow.py: def check_boundary_values(self, dbname="bound_test"):
+pow.py: self.check_boundary_values()
+round.py: def check_boundary_values(self, dbname="bound_test"):
+round.py: self.check_boundary_values()
+sin.py: def check_boundary_values(self, dbname="testdb"):
+sin.py: self.check_boundary_values()
+sqrt.py: def check_boundary_values(self, dbname="bound_test"):
+sqrt.py: self.check_boundary_values()
+statecount.py: def check_boundary_values(self, dbname="bound_test"):
+statecount.py: self.check_boundary_values()
+tail.py: def check_boundary_values(self, dbname="bound_test"):
+tail.py: self.check_boundary_values()
+tan.py: def check_boundary_values(self, dbname="bound_test"):
+tan.py: self.check_boundary_values()
+unique.py: def check_boundary_values(self, dbname="bound_test"):
+unique.py: self.check_boundary_values()
diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py
index 6c51854b43..2b0336d2d7 100644
--- a/tests/system-test/2-query/unique.py
+++ b/tests/system-test/2-query/unique.py
@@ -451,26 +451,26 @@ class TDTestCase:
)
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into {dbname}.sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+20s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+30s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.query(f"select unique(c2) from {dbname}.sub1_bound order by 1 desc")
diff --git a/tests/system-test/7-tmq/tmqShow.py b/tests/system-test/7-tmq/tmqShow.py
index 5e79f17459..8ec5e62ad9 100644
--- a/tests/system-test/7-tmq/tmqShow.py
+++ b/tests/system-test/7-tmq/tmqShow.py
@@ -135,12 +135,21 @@ class TDTestCase:
if rows != len(topicNameList):
tdLog.exit("show consumers rows error")
- tdLog.info("check show subscriptions")
- tdSql.query("show subscriptions")
- tdLog.debug(tdSql.queryResult)
- rows = tdSql.getRows()
- expectSubscriptions = paraDict['vgroups'] * len(topicNameList)
- tdLog.info("show subscriptions rows: %d, expect Subscriptions: %d"%(rows,expectSubscriptions))
+ for i in range(0, 10, 1):
+ tdLog.info("check show subscriptions")
+ tdSql.query("show subscriptions")
+ tdLog.debug(tdSql.queryResult)
+ rows = tdSql.getRows()
+ expectSubscriptions = paraDict['vgroups'] * len(topicNameList)
+ tdLog.info("show subscriptions rows: %d, expect Subscriptions: %d"%(rows,expectSubscriptions))
+ if rows != expectSubscriptions:
+ # tdLog.exit("show subscriptions rows error")
+ tdLog.info("continue retry[%d] to show subscriptions"%(i))
+ time.sleep(1)
+ continue
+ else:
+ break
+
if rows != expectSubscriptions:
tdLog.exit("show subscriptions rows error")
diff --git a/tests/system-test/pytest.sh b/tests/system-test/pytest.sh
new file mode 100755
index 0000000000..3f03058342
--- /dev/null
+++ b/tests/system-test/pytest.sh
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+##################################################
+#
+# Do simulation test
+#
+##################################################
+
+set +e
+#set -x
+
+UNAME_BIN=`which uname`
+OS_TYPE=`$UNAME_BIN`
+
+cd .
+
+# Get responsible directories
+CODE_DIR=`dirname $0`
+CODE_DIR=`pwd`
+
+IN_TDINTERNAL="community"
+if [[ "$CODE_DIR" == *"$IN_TDINTERNAL"* ]]; then
+  cd ../../..
+else
+  cd ../../
+fi
+
+TOP_DIR=`pwd`
+TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`
+
+if [[ "$OS_TYPE" != "Darwin" ]]; then
+  cut_opt="--field="
+else
+  cut_opt="-f "
+fi
+
+if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
+  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3`
+else
+  BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2`
+fi
+
+declare -x BUILD_DIR=$TOP_DIR/$BIN_DIR
+declare -x SIM_DIR=$TOP_DIR/sim
+PROGRAM=$BUILD_DIR/build/bin/tsim
+PRG_DIR=$SIM_DIR/tsim
+ASAN_DIR=$SIM_DIR/asan
+
+echo "------------------------------------------------------------------------"
+echo "Start TDengine Testing Case ..."
+echo "BUILD_DIR: $BUILD_DIR"
+echo "SIM_DIR : $SIM_DIR"
+echo "CODE_DIR : $CODE_DIR"
+echo "ASAN_DIR : $ASAN_DIR"
+
+rm -rf $SIM_DIR/*
+
+mkdir -p $PRG_DIR
+mkdir -p $ASAN_DIR
+chmod -R 777 $PRG_DIR  # chmod after mkdir: the dir may not exist before this point
+
+cd $CODE_DIR
+ulimit -n 600000
+ulimit -c unlimited
+
+#sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e
+
+echo "ExecuteCmd:" $*
+AsanFile=$ASAN_DIR/psim.info
+echo "AsanFile:" $AsanFile
+
+unset LD_PRELOAD
+#export LD_PRELOAD=libasan.so.5
+export LD_PRELOAD=`gcc -print-file-name=libasan.so`
+echo "Preload AsanSo:" $?
+
+$* -a 2> $AsanFile
+
+unset LD_PRELOAD
+AsanFileLen=`cat $AsanFile | wc -l`
+while [ $AsanFileLen -lt 10 ]
+do
+  sleep 1
+  AsanFileLen=`cat $AsanFile | wc -l`  # re-count so the loop can actually terminate
+done
+echo "AsanFileLen:" $AsanFileLen
+
+AsanFileSuccessLen=`grep -w successfully $AsanFile | wc -l`
+echo "AsanFileSuccessLen:" $AsanFileSuccessLen
+
+if [ $AsanFileSuccessLen -gt 0 ]; then
+  echo "Execute script successfully and check asan"
+  $CODE_DIR/../script/sh/checkAsan.sh
+else
+  echo "Execute script failure"
+  exit 1
+fi
+
diff --git a/tests/system-test/test.py b/tests/system-test/test.py
index d942147c03..cf9aba123c 100644
--- a/tests/system-test/test.py
+++ b/tests/system-test/test.py
@@ -73,8 +73,9 @@ if __name__ == "__main__":
createDnodeNums = 1
restful = False
replicaVar = 1
+ asan = False
independentMnode = True
- opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:i:', [
+ opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:i:a', [
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','adaptercfgupdate','replicaVar','independentMnode'])
for key, value in opts:
if key in ['-h', '--help']:
@@ -99,6 +100,7 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-D taosadapter update cfg dict ')
tdLog.printNoPrefix('-n the number of replicas')
tdLog.printNoPrefix('-i independentMnode Mnode')
+ tdLog.printNoPrefix('-a address sanitizer mode')
sys.exit(0)
@@ -167,6 +169,9 @@ if __name__ == "__main__":
if key in ['-R', '--restful']:
restful = True
+ if key in ['-a', '--asan']:
+ asan = True
+
if key in ['-D', '--adaptercfgupdate']:
try:
adaptercfgupdate = eval(base64.b64decode(value.encode()).decode())
@@ -387,6 +392,7 @@ if __name__ == "__main__":
tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)
tdDnodes.setValgrind(valgrind)
+ tdDnodes.setAsan(asan)
tdDnodes.stopAll()
is_test_framework = 0
key_word = 'tdCases.addLinux'
@@ -547,4 +553,7 @@ if __name__ == "__main__":
if conn is not None:
conn.close()
+ if asan:
+ tdDnodes.StopAllSigint()
+ tdLog.info("Address sanitizer mode finished")
sys.exit(0)