Merge branch '3.0' into feature/stream

commit 9358429e78
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 23e2b73
GIT_TAG e00ebd9
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
GIT_TAG 9843872
GIT_TAG f406d51
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -205,13 +205,13 @@ Additional functions are defined in `taosudf.h` to make it easier to work with t

To use your user-defined function in TDengine, first compile it to a dynamically linked library (DLL).

For example, the sample UDF `add_one.c` can be compiled into a DLL as follows:
For example, the sample UDF `bit_and.c` can be compiled into a DLL as follows:

```bash
gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
```

The generated DLL file `add_one.so` can now be used to implement your function. Note: GCC 7.5 or later is required.
The generated DLL file `libbitand.so` can now be used to implement your function. Note: GCC 7.5 or later is required.

## Manage and Use User-Defined Functions

After compiling your function into a DLL, you add it to TDengine. For more information, see [User-Defined Functions](../12-taos-sql/26-udf.md).
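For context on the `bit_and.c` sample referenced above, a scalar UDF source file has roughly the following shape. This is a minimal sketch, not the shipped sample: the `SUdfDataBlock`/`SUdfColumn` structures come from `taosudf.h`, while the `DLL_EXPORT` macro and the helper names `udfColDataGetData`/`udfColDataSet` are assumptions about that header rather than verified API.

```c
// Minimal sketch of a scalar UDF in the spirit of bit_and.c (assumed API, not the shipped source).
#include "taosudf.h"

DLL_EXPORT int32_t bit_and_init() { return 0; }

DLL_EXPORT int32_t bit_and_destroy() { return 0; }

// For every row, AND together the INT columns passed to the function and emit one INT result.
// NULL handling is omitted here; a real UDF should consult the column null bitmaps.
DLL_EXPORT int32_t bit_and(SUdfDataBlock* block, SUdfColumn* resultCol) {
  for (int32_t r = 0; r < block->numOfRows; ++r) {
    int32_t result = -1;  // all bits set
    for (int32_t c = 0; c < block->numOfCols; ++c) {
      // udfColDataGetData/udfColDataSet are assumed helpers from taosudf.h
      result &= *(int32_t*)udfColDataGetData(block->udfCols[c], r);
    }
    udfColDataSet(resultCol, r, (char*)&result, false);
  }
  return 0;
}
```

Compiled with the `gcc` line shown in the hunk above, the resulting `libbitand.so` is what the CREATE FUNCTION step in the linked UDF documentation consumes.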
@@ -62,7 +62,7 @@ SHOW FUNCTIONS;

The function name specified when creating a UDF can be used directly in SQL statements, just like built-in functions. For example:
```sql
SELECT X(c1,c2) FROM table/stable;
SELECT bit_and(c1,c2) FROM table;
```

The above SQL statement invokes the function X on columns c1 and c2. You can use query keywords like WHERE with user-defined functions.
The above SQL statement invokes the function bit_and on columns c1 and c2 of the table. You can use query keywords like WHERE with user-defined functions.
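Beyond the SQL shell, a registered UDF can be invoked from the C client like any built-in function. The snippet below is an illustrative sketch: the host, credentials, and the queried table and columns are hypothetical placeholders; only standard client calls (`taos_connect`, `taos_query`, `taos_fetch_row`) are used.

```c
// Illustrative only: calls a registered UDF through the TDengine C client.
// Host, credentials, and the queried table/columns are hypothetical placeholders.
#include <stdio.h>
#include <stdint.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "power", 6030);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }

  // The UDF name is used exactly like a built-in function.
  TAOS_RES *res = taos_query(conn, "SELECT bit_and(c1, c2) FROM t1;");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      printf("bit_and = %d\n", *(int32_t *)row[0]);
    }
  }

  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```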
@@ -10,4 +10,4 @@ chrono = "0.4"
serde = { version = "1", features = ["derive"] }
tokio = { version = "1", features = ["rt", "macros", "rt-multi-thread"] }

taos = { version = "0.*" }
taos = { version = "0.4.8" }
@@ -12,7 +12,10 @@ async fn main() -> anyhow::Result<()> {
// bind table name and tags
stmt.set_tbname_tags(
"d1001",
&[Value::VarChar("California.SanFransico".into()), Value::Int(2)],
&[
Value::VarChar("California.SanFransico".into()),
Value::Int(2),
],
)?;
// bind values.
let values = vec![
@@ -50,7 +50,7 @@ async fn main() -> anyhow::Result<()> {
// create super table
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"),
// create topic for subscription
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
format!("CREATE TOPIC tmq_meters AS SELECT * FROM `meters`")
])
.await?;

@@ -64,13 +64,9 @@ async fn main() -> anyhow::Result<()> {
let mut consumer = tmq.build()?;
consumer.subscribe(["tmq_meters"]).await?;

{
let mut stream = consumer.stream();

while let Some((offset, message)) = stream.try_next().await? {
// get information from offset

// the topic
consumer
.stream()
.try_for_each(|(offset, message)| async {
let topic = offset.topic();
// the vgroup id, like partition id in kafka.
let vgroup_id = offset.vgroup_id();

@@ -78,20 +74,14 @@ async fn main() -> anyhow::Result<()> {

if let Some(data) = message.into_data() {
while let Some(block) = data.fetch_raw_block().await? {
// one block for one table, get table name if needed
let name = block.table_name();
let records: Vec<Record> = block.deserialize().try_collect()?;
println!(
"** table: {}, got {} records: {:#?}\n",
name.unwrap(),
records.len(),
records
);
println!("** read {} records: {:#?}\n", records.len(), records);
}
}
consumer.commit(offset).await?;
}
}
Ok(())
})
.await?;

consumer.unsubscribe().await;
@@ -5,7 +5,6 @@ async fn main() -> anyhow::Result<()> {
let dsn = "ws://";
let taos = TaosBuilder::from_dsn(dsn)?.build()?;


taos.exec_many([
"DROP DATABASE IF EXISTS power",
"CREATE DATABASE power",
@@ -18,7 +18,18 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
<DocCardList items={useCurrentSidebarCategory().items}/>
```

### Join the official TDengine community
## Study the TDengine knowledge map

The TDengine knowledge map covers the various knowledge points of TDengine and reveals the call relationships and data flows among the concepts involved. Studying the knowledge map helps you quickly grasp the TDengine knowledge system.

<figure>
<center>
<a href="pathname:///img/tdengine-map.svg" target="_blank"><img src="/img/tdengine-map.svg" width="80%" /></a>
<figcaption>Figure 1. TDengine knowledge map</figcaption>
</center>
</figure>

## Join the official TDengine community

Scan the QR code below on WeChat to learn about the latest TDengine technology and to discuss IoT big-data applications, TDengine usage questions, and tips with the community.
@@ -205,13 +205,13 @@ typedef struct SUdfInterBuf {

The C source code of a user-defined function cannot be used by TDengine directly; it must first be compiled into a dynamically linked library before it can be loaded into the TDengine system.

For example, once the UDF source file add_one.c has been prepared following the rules in the previous section, it can be compiled on Linux into a dynamically linked library with the following command:
For example, once the UDF source file bit_and.c has been prepared following the rules in the previous section, it can be compiled on Linux into a dynamically linked library with the following command:

```bash
gcc -g -O0 -fPIC -shared add_one.c -o add_one.so
gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
```

This produces the dynamically linked library add_one.so, which can be used when creating the UDF later. For reliable operation, GCC 7.5 or later is recommended.
This produces the dynamically linked library libbitand.so, which can be used when creating the UDF later. For reliable operation, GCC 7.5 or later is recommended.

## Managing and Using UDFs

A compiled UDF must still be added to the system before it can be called from SQL. For how to manage and use UDFs, see the [UDF usage guide](../12-taos-sql/26-udf.md).
@@ -63,7 +63,7 @@ SHOW FUNCTIONS;

In SQL statements, a user-defined function can be invoked directly by the name given when the UDF was created in the system. For example:
```sql
SELECT X(c1,c2) FROM table/stable;
SELECT bit_and(c1,c2) FROM table;
```

This invokes the user-defined function named X on the data columns c1 and c2. User-defined functions can be combined with query features such as WHERE.
This invokes the user-defined function named bit_and on the columns c1 and c2 of the table. User-defined functions can be combined with query features such as WHERE.
@@ -30,6 +30,7 @@ typedef struct SVariant {
int64_t i;
uint64_t u;
double d;
float f;
char *pz;
TdUcs4 *ucs4;
SArray *arr; // only for 'in' query to hold value list, not value for a field
@@ -27,8 +27,7 @@ typedef struct SStreamTask SStreamTask;

typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);

// incremental state storage
typedef struct {
typedef struct STdbState {
SStreamTask* pOwner;
TDB* db;
TTB* pStateDb;

@@ -37,6 +36,11 @@ typedef struct {
TTB* pSessionStateDb;
TTB* pParNameDb;
TXN txn;
} STdbState;

// incremental state storage
typedef struct {
STdbState* pTdbState;
int32_t number;
} SStreamState;

@@ -45,6 +49,7 @@ void streamStateClose(SStreamState* pState);
int32_t streamStateBegin(SStreamState* pState);
int32_t streamStateCommit(SStreamState* pState);
int32_t streamStateAbort(SStreamState* pState);
void streamStateDestroy(SStreamState* pState);

typedef struct {
TBC* pCur;
@@ -352,6 +352,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_STB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x061A)
#define TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER TAOS_DEF_ERROR_CODE(0, 0x061B)
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR TAOS_DEF_ERROR_CODE(0, 0x061C)
#define TSDB_CODE_TDB_TABLE_IN_OTHER_STABLE TAOS_DEF_ERROR_CODE(0, 0x061D)

// query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700)
@@ -489,6 +489,9 @@ enum {
#define MAX_META_MSG_IN_BATCH 1048576
#define MAX_META_BATCH_RSP_SIZE (1 * 1048576 * 1024)

// sort page size by default
#define DEFAULT_PAGESIZE 4096

#ifdef __cplusplus
}
#endif
@@ -73,10 +73,10 @@
# compressColData -1

# system time zone
# timezone Asia/Shanghai (CST, +0800)
# timezone UTC-8

# system time zone (for windows 10)
# timezone UTC-8
# timezone Asia/Shanghai (CST, +0800)

# system locale
# locale en_US.UTF-8
@@ -7,6 +7,9 @@ ARG dirName
ARG cpuType
RUN echo ${pkgFile} && echo ${dirName}

RUN apt update
RUN apt install -y curl

COPY ${pkgFile} /root/
ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${cpuType} /tini
@@ -1,16 +1,161 @@
#!/bin/bash

TAOS_RUN_TAOSBENCHMARK_TEST_ONCE=0
#ADMIN_URL=${ADMIN_URL:-http://172.26.10.84:10001}
TAOSD_STARTUP_TIMEOUT_SECOND=${TAOSD_STARTUP_TIMEOUT_SECOND:-160}
TAOS_TIMEOUT_SECOND=${TAOS_TIMEOUT_SECOND:-5}
BACKUP_CORE_FOLDER=/var/log/corefile
ALERT_URL=app/system/alert/add

echo "ADMIN_URL: ${ADMIN_URL}"
echo "TAOS_TIMEOUT_SECOND: ${TAOS_TIMEOUT_SECOND}"

function set_service_state() {
#echo "set service state: $1, $2"
service_state="$1"
service_msg="$2"
}
set_service_state "init" "ok"
app_name=`hostname |cut -d\- -f1`

function check_taosd() {
timeout $TAOS_TIMEOUT_SECOND taos -s "show databases;" >/dev/null
local ret=$?
if [ $ret -ne 0 ]; then
echo "`date` check taosd error $ret"
if [ "x$1" != "xignore" ]; then
set_service_state "error" "taos check failed $ret"
fi
else
set_service_state "ready" "ok"
fi
}
function post_error_msg() {
if [ ! -z "${ADMIN_URL}" ]; then
taos_version=`taos --version`
echo "app_name: ${app_name}"
echo "service_state: ${service_state}"
echo "`date` service_msg: ${service_msg}"
echo "${taos_version}"
curl --connect-timeout 10 --max-time 20 -X POST -H "Content-Type: application/json" \
-d"{\"appName\":\"${app_name}\",\
\"alertLevel\":\"${service_state}\",\
\"taosVersion\":\"${taos_version}\",\
\"alertMsg\":\"${service_msg}\"}" \
${ADMIN_URL}/${ALERT_URL}
fi
}
function check_taosd_exit_type() {
local core_pattern=`cat /proc/sys/kernel/core_pattern`
echo "$core_pattern" | grep -q "^/"
if [ $? -eq 0 ]; then
core_folder=`dirname $core_pattern`
core_prefix=`basename $core_pattern | sed "s/%.*//"`
else
core_folder=`pwd`
core_prefix="$core_pattern"
fi
local core_files=`ls $core_folder | grep "^${core_prefix}"`
if [ ! -z "$core_files" ]; then
# move core files to another folder
mkdir -p ${BACKUP_CORE_FOLDER}
cp ${core_folder}/${core_prefix}* ${BACKUP_CORE_FOLDER}/
rm -f ${core_folder}/${core_prefix}*
set_service_state "error" "taosd exit with core file"
else
set_service_state "error" "taosd exit without core file"
fi
}
disk_usage_level=(60 80 99)
current_disk_level=0
disk_state="ok"
disk_msg="ok"
get_usage_ok="yes"
function post_disk_error_msg() {
if [ ! -z "${ADMIN_URL}" ]; then
taos_version=`taos --version`
echo "app_name: ${app_name}"
echo "disk_state: ${disk_state}"
echo "`date` disk_msg: ${disk_msg}"
echo "${taos_version}"
curl --connect-timeout 10 --max-time 20 -X POST -H "Content-Type: application/json" \
-d"{\"appName\":\"${app_name}\",\
\"alertLevel\":\"${disk_state}\",\
\"taosVersion\":\"${taos_version}\",\
\"alertMsg\":\"${disk_msg}\"}" \
${ADMIN_URL}/${ALERT_URL}
fi
}
function check_disk() {
local folder=`cat /etc/taos/taos.cfg|grep -v "^#"|grep dataDir|awk '{print $NF}'`
if [ -z "$folder" ]; then
folder="/var/lib/taos"
fi
local mount_point="$folder"
local usage=""
while [ -z "$usage" ]; do
usage=`df -h|grep -w "${mount_point}"|awk '{print $5}'|grep -v Use|sed "s/%$//"`
if [ "x${mount_point}" = "x/" ]; then
break
fi
mount_point=`dirname ${mount_point}`
done
if [ -z "$usage" ]; then
disk_state="error"
disk_msg="cannot get disk usage"
if [ "$get_usage_ok" = "yes" ]; then
post_disk_error_msg
get_usage_ok="no"
fi
else
get_usage_ok="yes"
local current_level=0
for level in ${disk_usage_level[*]}; do
if [ ${usage} -ge ${level} ]; then
disk_state="error"
disk_msg="disk usage over ${level}%"
current_level=${level}
fi
done
if [ ${current_level} -gt ${current_disk_level} ]; then
post_disk_error_msg
elif [ ${current_level} -lt ${current_disk_level} ]; then
echo "disk usage reduced from ${current_disk_level} to ${current_level}"
fi
current_disk_level=${current_level}
fi
}
function run_taosd() {
taosd
set_service_state "error" "taosd exit"
# post error msg
# check crash or OOM
check_taosd_exit_type
post_error_msg
}
function print_service_state_change() {
if [ "x$1" != "x${service_state}" ]; then
echo "`date` service state: ${service_state}, ${service_msg}"
fi
}
taosd_start_time=`date +%s`
while ((1))
do
check_disk
# echo "outer loop: $a"
sleep 10
output=`taos -k`
output=`timeout $TAOS_TIMEOUT_SECOND taos -k`
if [ -z "${output}" ]; then
echo "`date` taos -k error"
status=""
else
status=${output:0:1}
fi
# echo $output
# echo $status
if [ "$status"x = "0"x ]
then
taosd &
# taosd_start_time=`date +%s`
run_taosd &
fi
# echo "$status"x "$TAOS_RUN_TAOSBENCHMARK_TEST"x "$TAOS_RUN_TAOSBENCHMARK_TEST_ONCE"x
if [ "$status"x = "2"x ] && [ "$TAOS_RUN_TAOSBENCHMARK_TEST"x = "1"x ] && [ "$TAOS_RUN_TAOSBENCHMARK_TEST_ONCE"x = "0"x ]
@@ -28,9 +173,33 @@ do
taos -s "GRANT ALL on test.* to admin_user;"
fi
fi
# check taosd status
if [ "$service_state" = "ready" ]; then
# check taosd status
check_taosd
print_service_state_change "ready"
if [ "$service_state" = "error" ]; then
post_error_msg
fi
elif [ "$service_state" = "init" ]; then
check_taosd "ignore"
# check timeout
current_time=`date +%s`
time_elapsed=$(( current_time - taosd_start_time ))
if [ ${time_elapsed} -gt ${TAOSD_STARTUP_TIMEOUT_SECOND} ]; then
set_service_state "error" "taosd startup timeout"
post_error_msg
fi
print_service_state_change "init"
elif [ "$service_state" = "error" ]; then
# check taosd status
check_taosd
print_service_state_change "error"
fi
# check taosadapter
nc -z localhost 6041
if [ $? -ne 0 ]; then
taosadapter &
fi
sleep 10
done
@@ -63,7 +63,7 @@ Source: {#MyAppSourceDir}{#MyAppTaosdemoExeName}; DestDir: "{app}"; Flags: igNor

[run]
Filename: {sys}\sc.exe; Parameters: "create taosd start= DEMAND binPath= ""C:\\TDengine\\taosd.exe --win_service""" ; Flags: runhidden
Filename: {sys}\sc.exe; Parameters: "create taosadapter start= DEMAND binPath= ""C:\\TDengine\\taosadapter.exe --win_service""" ; Flags: runhidden
Filename: {sys}\sc.exe; Parameters: "create taosadapter start= DEMAND binPath= ""C:\\TDengine\\taosadapter.exe""" ; Flags: runhidden

[UninstallRun]
RunOnceId: "stoptaosd"; Filename: {sys}\sc.exe; Parameters: "stop taosd" ; Flags: runhidden
@@ -1403,6 +1403,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
pBlock->info = pDataBlock->info;
pBlock->info.rows = 0;
pBlock->info.capacity = 0;
pBlock->info.rowSize = 0;

size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
@@ -109,7 +109,7 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
}
case TSDB_DATA_TYPE_FLOAT: {
pVar->nLen = tDataTypes[type].bytes;
pVar->d = GET_FLOAT_VAL(pz);
pVar->f = GET_FLOAT_VAL(pz);
break;
}
case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw binary bits length

@@ -223,12 +223,18 @@ int32_t taosVariantCompare(const SVariant *p1, const SVariant *p2) {
} else {
return p1->nLen > p2->nLen ? 1 : -1;
}
} else if (p1->nType == TSDB_DATA_TYPE_FLOAT || p1->nType == TSDB_DATA_TYPE_DOUBLE) {
} else if (p1->nType == TSDB_DATA_TYPE_DOUBLE) {
if (p1->d == p2->d) {
return 0;
} else {
return p1->d > p2->d ? 1 : -1;
}
} else if (p1->nType == TSDB_DATA_TYPE_FLOAT) {
if (p1->f == p2->f) {
return 0;
} else {
return p1->f > p2->f ? 1 : -1;
}
} else if (IS_UNSIGNED_NUMERIC_TYPE(p1->nType)) {
if (p1->u == p2->u) {
return 0;

@@ -259,8 +265,9 @@ char *taosVariantGet(SVariant *pVar, int32_t type) {
case TSDB_DATA_TYPE_UBIGINT:
return (char *)&pVar->u;
case TSDB_DATA_TYPE_DOUBLE:
case TSDB_DATA_TYPE_FLOAT:
return (char *)&pVar->d;
case TSDB_DATA_TYPE_FLOAT:
return (char *)&pVar->f;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_JSON:
return (char *)pVar->pz;
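As an aside on the SVariant change above: the old code stored FLOAT values in the double member `d` and returned `&pVar->d` for FLOAT, so any caller that reads that pointer as a 4-byte float reinterprets part of a double. The standalone snippet below (not TDengine code) shows why that goes wrong and why a dedicated `float f` member is the cleaner fix.

```c
// Standalone illustration of why a float value must not be stored in (and read back
// through) a double-sized slot. Not TDengine code.
#include <stdio.h>
#include <string.h>

int main(void) {
  double d = 1.5f;                         // FLOAT value widened into a double-sized slot
  float  viaBytes;
  memcpy(&viaBytes, &d, sizeof(float));    // reading the slot's first 4 bytes as a float
  printf("reinterpreted bytes: %f\n", viaBytes);  // not 1.5: float and double layouts differ
  printf("proper conversion:   %f\n", (float)d);  // prints 1.500000
  return 0;
}
```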
@@ -56,6 +56,9 @@ static void mmProcessRpcMsg(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (IsReq(pMsg) && pMsg->info.handle != NULL && code != TSDB_CODE_ACTION_IN_PROGRESS) {
if (code != 0 && terrno != 0) code = terrno;
mmSendRsp(pMsg, code);
} else {
rpcFreeCont(pMsg->info.rsp);
pMsg->info.rsp = NULL;
}

if (code == TSDB_CODE_RPC_REDIRECT) {
@@ -577,9 +577,9 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {

int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->info.rsp = rpcMallocCont(contLen);
pMsg->info.hasEpSet = 1;
if (pMsg->info.rsp != NULL) {
tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
pMsg->info.hasEpSet = 1;
pMsg->info.rspLen = contLen;
terrno = TSDB_CODE_RPC_REDIRECT;
} else {
@@ -403,6 +403,11 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMe
// validate req
metaReaderInit(&mr, pMeta, 0);
if (metaGetTableEntryByName(&mr, pReq->name) == 0) {
if (pReq->type == TSDB_CHILD_TABLE && pReq->ctb.suid != mr.me.ctbEntry.suid) {
terrno = TSDB_CODE_TDB_TABLE_IN_OTHER_STABLE;
metaReaderClear(&mr);
return -1;
}
pReq->uid = mr.me.uid;
if (pReq->type == TSDB_CHILD_TABLE) {
pReq->ctb.suid = mr.me.ctbEntry.suid;
@@ -671,7 +671,6 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;

if (tqTaosxScanLog(pTq, pHandle, pCont, &taosxRsp) < 0) {
/*ASSERT(0);*/
}
if (taosxRsp.blockNum > 0 /* threshold */) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
@ -228,7 +228,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp
|
|||
ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN);
|
||||
|
||||
SArray* pBlocks = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
SArray* pSchemas = taosArrayInit(0, sizeof(SSchemaWrapper));
|
||||
SArray* pSchemas = taosArrayInit(0, sizeof(void*));
|
||||
|
||||
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
|
||||
STqReader* pReader = pExec->pExecReader;
|
||||
|
@ -248,6 +248,9 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp
|
|||
int64_t uid = pExec->pExecReader->msgIter.uid;
|
||||
if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp, taosArrayGetSize(pBlocks)) < 0) {
|
||||
taosArrayDestroyEx(pBlocks, (FDelete)blockDataFreeRes);
|
||||
taosArrayDestroyP(pSchemas, (FDelete)tDeleteSSchemaWrapper);
|
||||
pBlocks = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
pSchemas = taosArrayInit(0, sizeof(void*));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
@ -271,7 +274,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp
|
|||
tqAddBlockDataToRsp(pBlock, (SMqDataRsp*)pRsp, taosArrayGetSize(pBlock->pDataBlock),
|
||||
pTq->pVnode->config.tsdbCfg.precision);
|
||||
blockDataFreeRes(pBlock);
|
||||
SSchemaWrapper* pSW = taosArrayGet(pSchemas, i);
|
||||
SSchemaWrapper* pSW = taosArrayGetP(pSchemas, i);
|
||||
taosArrayPush(pRsp->blockSchema, &pSW);
|
||||
pRsp->blockNum++;
|
||||
}
|
||||
|
@ -293,8 +296,10 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp
|
|||
int64_t uid = pExec->pExecReader->msgIter.uid;
|
||||
if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp, taosArrayGetSize(pBlocks)) < 0) {
|
||||
taosArrayDestroyEx(pBlocks, (FDelete)blockDataFreeRes);
|
||||
/*blockDataFreeRes(&block);*/
|
||||
continue;
|
||||
taosArrayDestroyP(pSchemas, (FDelete)tDeleteSSchemaWrapper);
|
||||
pBlocks = taosArrayInit(0, sizeof(SSDataBlock));
|
||||
pSchemas = taosArrayInit(0, sizeof(void*));
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if (pHandle->fetchMeta) {
|
||||
|
@ -322,7 +327,7 @@ int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp
|
|||
tqAddBlockDataToRsp(pBlock, (SMqDataRsp*)pRsp, taosArrayGetSize(pBlock->pDataBlock),
|
||||
pTq->pVnode->config.tsdbCfg.precision);
|
||||
blockDataFreeRes(pBlock);
|
||||
SSchemaWrapper* pSW = taosArrayGet(pSchemas, i);
|
||||
SSchemaWrapper* pSW = taosArrayGetP(pSchemas, i);
|
||||
taosArrayPush(pRsp->blockSchema, &pSW);
|
||||
pRsp->blockNum++;
|
||||
}
|
||||
|
|
|
@ -592,6 +592,7 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
|
|||
int32_t colAtMost = pSchemaWrapper->nCols;
|
||||
|
||||
int32_t curRow = 0;
|
||||
int32_t lastRow = 0;
|
||||
|
||||
char* assigned = taosMemoryCalloc(1, pSchemaWrapper->nCols);
|
||||
if (assigned == NULL) return -1;
|
||||
|
@ -605,11 +606,13 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
|
|||
bool buildNew = false;
|
||||
tdSTSRowIterReset(&iter, row);
|
||||
|
||||
tqDebug("vgId:%d, row of block %d", pReader->pWalReader->pWal->cfg.vgId, curRow);
|
||||
for (int32_t i = 0; i < colAtMost; i++) {
|
||||
SCellVal sVal = {0};
|
||||
if (!tdSTSRowIterFetch(&iter, pSchemaWrapper->pSchema[i].colId, pSchemaWrapper->pSchema[i].type, &sVal)) {
|
||||
break;
|
||||
}
|
||||
tqDebug("vgId:%d, %d col, type %d", pReader->pWalReader->pWal->cfg.vgId, i, sVal.valType);
|
||||
if (curRow == 0) {
|
||||
assigned[i] = sVal.valType != TD_VTYPE_NONE;
|
||||
buildNew = true;
|
||||
|
@ -623,27 +626,42 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
|
|||
}
|
||||
|
||||
if (buildNew) {
|
||||
SSDataBlock block;
|
||||
SSchemaWrapper sw;
|
||||
if (tqMaskBlock(&sw, &block, pSchemaWrapper, assigned) < 0) {
|
||||
if (taosArrayGetSize(blocks) > 0) {
|
||||
SSDataBlock* pLastBlock = taosArrayGetLast(blocks);
|
||||
pLastBlock->info.rows = curRow - lastRow;
|
||||
lastRow = curRow;
|
||||
}
|
||||
SSDataBlock* pBlock = createDataBlock();
|
||||
SSchemaWrapper* pSW = taosMemoryCalloc(1, sizeof(SSchemaWrapper));
|
||||
if (tqMaskBlock(pSW, pBlock, pSchemaWrapper, assigned) < 0) {
|
||||
blockDataDestroy(pBlock);
|
||||
goto FAIL;
|
||||
}
|
||||
SSDataBlock block = {0};
|
||||
assignOneDataBlock(&block, pBlock);
|
||||
blockDataDestroy(pBlock);
|
||||
|
||||
tqDebug("vgId:%d, build new block, col %d", pReader->pWalReader->pWal->cfg.vgId,
|
||||
(int32_t)taosArrayGetSize(block.pDataBlock));
|
||||
|
||||
taosArrayPush(blocks, &block);
|
||||
taosArrayPush(schemas, &sw);
|
||||
taosArrayPush(schemas, &pSW);
|
||||
}
|
||||
|
||||
SSDataBlock* pBlock = taosArrayGetLast(blocks);
|
||||
pBlock->info.uid = pReader->msgIter.uid;
|
||||
pBlock->info.rows = pReader->msgIter.numOfRows;
|
||||
pBlock->info.rows = 0;
|
||||
pBlock->info.version = pReader->pMsg->version;
|
||||
|
||||
tqDebug("vgId:%d, taosx scan, block num: %d", pReader->pWalReader->pWal->cfg.vgId,
|
||||
(int32_t)taosArrayGetSize(blocks));
|
||||
|
||||
if (blockDataEnsureCapacity(pBlock, pReader->msgIter.numOfRows - curRow) < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto FAIL;
|
||||
}
|
||||
|
||||
tdSTSRowIterInit(&iter, pTschema);
|
||||
tdSTSRowIterReset(&iter, row);
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) {
|
||||
SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
SCellVal sVal = {0};
|
||||
|
@ -654,12 +672,16 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
|
|||
|
||||
ASSERT(sVal.valType != TD_VTYPE_NONE);
|
||||
|
||||
if (colDataAppend(pColData, curRow, sVal.val, sVal.valType != TD_VTYPE_NORM) < 0) {
|
||||
if (colDataAppend(pColData, curRow, sVal.val, sVal.valType == TD_VTYPE_NULL) < 0) {
|
||||
goto FAIL;
|
||||
}
|
||||
tqDebug("vgId:%d, row %d col %d append %d", pReader->pWalReader->pWal->cfg.vgId, curRow, i,
|
||||
sVal.valType == TD_VTYPE_NULL);
|
||||
}
|
||||
curRow++;
|
||||
}
|
||||
SSDataBlock* pLastBlock = taosArrayGetLast(blocks);
|
||||
pLastBlock->info.rows = curRow - lastRow;
|
||||
|
||||
taosMemoryFree(assigned);
|
||||
return 0;
|
||||
|
|
|
@@ -49,7 +49,7 @@ int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKee

pTsdb->path = (char *)&pTsdb[1];
snprintf(pTsdb->path, TD_PATH_MAX, "%s%s%s", pVnode->path, TD_DIRSEP, dir);
taosRealPath(pTsdb->path, NULL, slen);
// taosRealPath(pTsdb->path, NULL, slen);
pTsdb->pVnode = pVnode;
taosThreadRwlockInit(&pTsdb->rwLock, NULL);
if (!pKeepCfg) {
@@ -1698,7 +1698,7 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
size += pBlockCol->szBitmap;

// offset
if (IS_VAR_DATA_TYPE(pColData->type)) {
if (IS_VAR_DATA_TYPE(pColData->type) && pColData->flag != (HAS_NULL | HAS_NONE)) {
code = tsdbCmprData((uint8_t *)pColData->aOffset, sizeof(int32_t) * pColData->nVal, TSDB_DATA_TYPE_INT, cmprAlg,
ppOut, nOut + size, &pBlockCol->szOffset, ppBuf);
if (code) goto _exit;
@ -163,7 +163,7 @@ typedef struct {
|
|||
SArray* pStopInfo;
|
||||
} STaskStopInfo;
|
||||
|
||||
typedef struct SExecTaskInfo {
|
||||
struct SExecTaskInfo {
|
||||
STaskIdInfo id;
|
||||
uint32_t status;
|
||||
STimeWindow window;
|
||||
|
@ -182,7 +182,7 @@ typedef struct SExecTaskInfo {
|
|||
struct SOperatorInfo* pRoot;
|
||||
SLocalFetch localFetch;
|
||||
STaskStopInfo stopInfo;
|
||||
} SExecTaskInfo;
|
||||
};
|
||||
|
||||
enum {
|
||||
OP_NOT_OPENED = 0x0,
|
||||
|
@ -315,37 +315,39 @@ typedef struct STableMetaCacheInfo {
|
|||
uint64_t cacheHit;
|
||||
} STableMetaCacheInfo;
|
||||
|
||||
typedef struct STableScanInfo {
|
||||
typedef struct STableScanBase {
|
||||
STsdbReader* dataReader;
|
||||
SReadHandle readHandle;
|
||||
SLimitInfo limitInfo;
|
||||
SFileBlockLoadRecorder readRecorder;
|
||||
SQueryTableDataCond cond;
|
||||
SAggOptrPushDownInfo pdInfo;
|
||||
SColMatchInfo matchInfo;
|
||||
SReadHandle readHandle;
|
||||
SExprSupp pseudoSup;
|
||||
STableMetaCacheInfo metaCache;
|
||||
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
|
||||
int32_t dataBlockLoadFlag;
|
||||
SLimitInfo limitInfo;
|
||||
} STableScanBase;
|
||||
|
||||
typedef struct STableScanInfo {
|
||||
STableScanBase base;
|
||||
SScanInfo scanInfo;
|
||||
int32_t scanTimes;
|
||||
SSDataBlock* pResBlock;
|
||||
SColMatchInfo matchInfo;
|
||||
SExprSupp pseudoSup;
|
||||
SQueryTableDataCond cond;
|
||||
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
|
||||
int32_t dataBlockLoadFlag;
|
||||
SSampleExecInfo sample; // sample execution info
|
||||
int32_t currentGroupId;
|
||||
int32_t currentTable;
|
||||
int8_t scanMode;
|
||||
SAggOptrPushDownInfo pdInfo;
|
||||
int8_t assignBlockUid;
|
||||
STableMetaCacheInfo metaCache;
|
||||
} STableScanInfo;
|
||||
|
||||
typedef struct STableMergeScanInfo {
|
||||
STableListInfo* tableListInfo;
|
||||
int32_t tableStartIndex;
|
||||
int32_t tableEndIndex;
|
||||
bool hasGroupId;
|
||||
uint64_t groupId;
|
||||
SArray* queryConds; // array of queryTableDataCond
|
||||
STsdbReader* pReader;
|
||||
SReadHandle readHandle;
|
||||
STableScanBase base;
|
||||
int32_t bufPageSize;
|
||||
uint32_t sortBufSize; // max buffer size for in-memory sort
|
||||
SArray* pSortInfo;
|
||||
|
@ -354,25 +356,10 @@ typedef struct STableMergeScanInfo {
|
|||
int64_t startTs; // sort start time
|
||||
SArray* sortSourceParams;
|
||||
SLimitInfo limitInfo;
|
||||
SFileBlockLoadRecorder readRecorder;
|
||||
int64_t numOfRows;
|
||||
SScanInfo scanInfo;
|
||||
int32_t scanTimes;
|
||||
SqlFunctionCtx* pCtx; // which belongs to the direct upstream operator operator query context
|
||||
SResultRowInfo* pResultRowInfo;
|
||||
int32_t* rowEntryInfoOffset;
|
||||
SExprInfo* pExpr;
|
||||
SSDataBlock* pResBlock;
|
||||
SColMatchInfo matchInfo;
|
||||
int32_t numOfOutput;
|
||||
SExprSupp pseudoSup;
|
||||
SQueryTableDataCond cond;
|
||||
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
|
||||
int32_t dataBlockLoadFlag;
|
||||
|
||||
// if the upstream is an interval operator, the interval info is also kept here to get the time
|
||||
// window to check if current data block needs to be loaded.
|
||||
SInterval interval;
|
||||
SSampleExecInfo sample; // sample execution info
|
||||
SSortExecInfo sortExecInfo;
|
||||
} STableMergeScanInfo;
|
||||
|
@ -414,13 +401,6 @@ enum {
|
|||
PROJECT_RETRIEVE_DONE = 0x2,
|
||||
};
|
||||
|
||||
typedef struct SCatchSupporter {
|
||||
SHashObj* pWindowHashTable; // quick locate the window object for each window
|
||||
SDiskbasedBuf* pDataBuf; // buffer based on blocked-wised disk file
|
||||
int32_t keySize;
|
||||
int64_t* pKeyBuf;
|
||||
} SCatchSupporter;
|
||||
|
||||
typedef struct SStreamAggSupporter {
|
||||
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
|
||||
SSDataBlock* pScanBlock;
|
||||
|
@ -796,7 +776,6 @@ typedef struct STimeSliceOperatorInfo {
|
|||
SArray* pPrevRow; // SArray<SGroupValue>
|
||||
SArray* pNextRow; // SArray<SGroupValue>
|
||||
SArray* pLinearInfo; // SArray<SFillLinearInfo>
|
||||
bool fillLastPoint;
|
||||
bool isPrevRowSet;
|
||||
bool isNextRowSet;
|
||||
int32_t fillType; // fill type
|
||||
|
@ -1042,8 +1021,8 @@ int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPos
|
|||
|
||||
SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode,
|
||||
SExecTaskInfo* pTaskInfo);
|
||||
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
|
||||
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
|
||||
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
|
||||
SExecTaskInfo* pTaskInfo);
|
||||
|
||||
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex);
|
||||
|
||||
|
|
|
@@ -36,7 +36,8 @@ typedef struct SFillColInfo {
typedef struct SFillLinearInfo {
SPoint start;
SPoint end;
bool hasNull;
bool isStartSet;
bool isEndSet;
int16_t type;
int32_t bytes;
} SFillLinearInfo;
@@ -163,9 +163,10 @@ SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle);
/**
 * get proper sort buffer pages according to the row size
 * @param rowSize
 * @param numOfCols columns count that be put into page
 * @return
 */
int32_t getProperSortPageSize(size_t rowSize);
int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);

#ifdef __cplusplus
}
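The signature change above means the sort page size now accounts for the number of columns as well as the row width. The implementation lives in tsort.c and is not part of this hunk; the sketch below is only a hypothetical illustration of the kind of calculation such a helper might perform (per-column overhead plus a minimum number of rows, rounded up to a power of two with a 4096-byte floor, matching the DEFAULT_PAGESIZE added earlier in this commit).

```c
// Hypothetical sketch only; not the code from tsort.c.
#include <stddef.h>
#include <stdint.h>

#define DEFAULT_PAGESIZE 4096
#define MIN_ROWS_PER_PAGE 32          // assumed minimum rows a sort page should hold
#define PER_COL_OVERHEAD_BYTES 16     // assumed per-column header/null-bitmap cost

static int32_t hypotheticalSortPageSize(size_t rowSize, uint32_t numOfCols) {
  size_t  need = rowSize * MIN_ROWS_PER_PAGE + (size_t)numOfCols * PER_COL_OVERHEAD_BYTES;
  int32_t pageSize = DEFAULT_PAGESIZE;
  while ((size_t)pageSize < need) {
    pageSize <<= 1;  // keep page sizes as powers of two
  }
  return pageSize;
}
```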
@ -1026,8 +1026,8 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
|
|||
SStreamScanInfo* pInfo = pOperator->info;
|
||||
if (pOffset->type == TMQ_OFFSET__LOG) {
|
||||
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
|
||||
tsdbReaderClose(pTSInfo->dataReader);
|
||||
pTSInfo->dataReader = NULL;
|
||||
tsdbReaderClose(pTSInfo->base.dataReader);
|
||||
pTSInfo->base.dataReader = NULL;
|
||||
#if 0
|
||||
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
|
||||
pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
|
||||
|
@ -1079,23 +1079,23 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
|
|||
// TODO after dropping table, table may not found
|
||||
ASSERT(found);
|
||||
|
||||
if (pTableScanInfo->dataReader == NULL) {
|
||||
if (pTableScanInfo->base.dataReader == NULL) {
|
||||
STableKeyInfo* pList = tableListGetInfo(pTaskInfo->pTableInfoList, 0);
|
||||
int32_t num = tableListGetSize(pTaskInfo->pTableInfoList);
|
||||
|
||||
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond, pList, num,
|
||||
&pTableScanInfo->dataReader, NULL) < 0 ||
|
||||
pTableScanInfo->dataReader == NULL) {
|
||||
if (tsdbReaderOpen(pTableScanInfo->base.readHandle.vnode, &pTableScanInfo->base.cond, pList, num,
|
||||
&pTableScanInfo->base.dataReader, NULL) < 0 ||
|
||||
pTableScanInfo->base.dataReader == NULL) {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
|
||||
STableKeyInfo tki = {.uid = uid};
|
||||
tsdbSetTableList(pTableScanInfo->dataReader, &tki, 1);
|
||||
int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
|
||||
pTableScanInfo->cond.twindows.skey = ts + 1;
|
||||
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
|
||||
pTableScanInfo->cond.twindows.skey = oldSkey;
|
||||
tsdbSetTableList(pTableScanInfo->base.dataReader, &tki, 1);
|
||||
int64_t oldSkey = pTableScanInfo->base.cond.twindows.skey;
|
||||
pTableScanInfo->base.cond.twindows.skey = ts + 1;
|
||||
tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
|
||||
pTableScanInfo->base.cond.twindows.skey = oldSkey;
|
||||
pTableScanInfo->scanTimes = 0;
|
||||
|
||||
qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
|
||||
|
|
|
@ -1000,12 +1000,6 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void updateTableQueryInfoForReverseScan(STableQueryInfo* pTableQueryInfo) {
|
||||
if (pTableQueryInfo == NULL) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
|
||||
if (status == TASK_NOT_COMPLETED) {
|
||||
pTaskInfo->status = status;
|
||||
|
@ -1652,55 +1646,6 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t
|
|||
static int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
|
||||
const char* pKey);
|
||||
|
||||
static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int32_t rowIndex) {
|
||||
size_t size = taosArrayGetSize(groupInfo);
|
||||
if (size == 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < size; ++i) {
|
||||
int32_t* index = taosArrayGet(groupInfo, i);
|
||||
|
||||
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, *index);
|
||||
bool isNull = colDataIsNull(pColInfo, rowIndex, pBlock->info.rows, NULL);
|
||||
|
||||
if ((isNull && buf[i] != NULL) || (!isNull && buf[i] == NULL)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
char* pCell = colDataGetData(pColInfo, rowIndex);
|
||||
if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
|
||||
if (varDataLen(pCell) != varDataLen(buf[i])) {
|
||||
return false;
|
||||
} else {
|
||||
if (memcmp(varDataVal(pCell), varDataVal(buf[i]), varDataLen(pCell)) != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (memcmp(pCell, buf[i], pColInfo->info.bytes) != 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool saveCurrentTuple(char** rowColData, SArray* pColumnList, SSDataBlock* pBlock, int32_t rowIndex) {
|
||||
int32_t size = (int32_t)taosArrayGetSize(pColumnList);
|
||||
|
||||
for (int32_t i = 0; i < size; ++i) {
|
||||
int32_t* index = taosArrayGet(pColumnList, i);
|
||||
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, *index);
|
||||
|
||||
char* data = colDataGetData(pColInfo, rowIndex);
|
||||
memcpy(rowColData[i], data, colDataGetLength(pColInfo, rowIndex));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) {
|
||||
// todo add more information about exchange operation
|
||||
int32_t type = pOperator->operatorType;
|
||||
|
@ -1712,13 +1657,13 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
|
|||
return TSDB_CODE_SUCCESS;
|
||||
} else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
|
||||
STableScanInfo* pTableScanInfo = pOperator->info;
|
||||
*order = pTableScanInfo->cond.order;
|
||||
*scanFlag = pTableScanInfo->scanFlag;
|
||||
*order = pTableScanInfo->base.cond.order;
|
||||
*scanFlag = pTableScanInfo->base.scanFlag;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (type == QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN) {
|
||||
STableMergeScanInfo* pTableScanInfo = pOperator->info;
|
||||
*order = pTableScanInfo->cond.order;
|
||||
*scanFlag = pTableScanInfo->scanFlag;
|
||||
*order = pTableScanInfo->base.cond.order;
|
||||
*scanFlag = pTableScanInfo->base.scanFlag;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
if (pOperator->pDownstream == NULL || pOperator->pDownstream[0] == NULL) {
|
||||
|
@ -2365,8 +2310,8 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
|
|||
|
||||
if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
|
||||
STableScanInfo* pTableScanInfo = downstream->info;
|
||||
pTableScanInfo->pdInfo.pExprSup = &pOperator->exprSupp;
|
||||
pTableScanInfo->pdInfo.pAggSup = &pInfo->aggSup;
|
||||
pTableScanInfo->base.pdInfo.pExprSup = &pOperator->exprSupp;
|
||||
pTableScanInfo->base.pdInfo.pAggSup = &pInfo->aggSup;
|
||||
}
|
||||
|
||||
code = appendDownstream(pOperator, &downstream, 1);
|
||||
|
@ -2731,7 +2676,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
|
|||
}
|
||||
|
||||
STableScanInfo* pScanInfo = pOperator->info;
|
||||
pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder;
|
||||
pTaskInfo->cost.pRecoder = &pScanInfo->base.readRecorder;
|
||||
} else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) {
|
||||
STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode;
|
||||
|
||||
|
@ -2749,14 +2694,14 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
|
|||
return NULL;
|
||||
}
|
||||
|
||||
pOperator = createTableMergeScanOperatorInfo(pTableScanNode, pTableListInfo, pHandle, pTaskInfo);
|
||||
pOperator = createTableMergeScanOperatorInfo(pTableScanNode, pHandle, pTaskInfo);
|
||||
if (NULL == pOperator) {
|
||||
pTaskInfo->code = terrno;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
STableScanInfo* pScanInfo = pOperator->info;
|
||||
pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder;
|
||||
pTaskInfo->cost.pRecoder = &pScanInfo->base.readRecorder;
|
||||
} else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) {
|
||||
pOperator = createExchangeOperatorInfo(pHandle ? pHandle->pMsgCb->clientRpc : NULL, (SExchangePhysiNode*)pPhyNode,
|
||||
pTaskInfo);
|
||||
|
|
|
@ -233,25 +233,25 @@ static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t gro
|
|||
|
||||
STableScanInfo* pTableScanInfo = pOperator->info;
|
||||
|
||||
SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
|
||||
SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pTableScanInfo->base.pdInfo.pAggSup->pResultRowHashTable, buf,
|
||||
GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
|
||||
|
||||
if (p1 == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
*pPage = getBufPage(pTableScanInfo->pdInfo.pAggSup->pResultBuf, p1->pageId);
|
||||
*pPage = getBufPage(pTableScanInfo->base.pdInfo.pAggSup->pResultBuf, p1->pageId);
|
||||
return (SResultRow*)((char*)(*pPage) + p1->offset);
|
||||
}
|
||||
|
||||
static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo* pBlockInfo, uint32_t* status) {
|
||||
STableScanInfo* pTableScanInfo = pOperator->info;
|
||||
|
||||
if (pTableScanInfo->pdInfo.pExprSup == NULL) {
|
||||
if (pTableScanInfo->base.pdInfo.pExprSup == NULL) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SExprSupp* pSup1 = pTableScanInfo->pdInfo.pExprSup;
|
||||
SExprSupp* pSup1 = pTableScanInfo->base.pdInfo.pExprSup;
|
||||
|
||||
SFilePage* pPage = NULL;
|
||||
SResultRow* pRow = getTableGroupOutputBuf(pOperator, pBlockInfo->groupId, &pPage);
|
||||
|
@ -264,7 +264,7 @@ static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo*
|
|||
for (int32_t i = 0; i < pSup1->numOfExprs; ++i) {
|
||||
int32_t functionId = pSup1->pCtx[i].functionId;
|
||||
|
||||
SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, pTableScanInfo->pdInfo.pExprSup->rowEntryInfoOffset);
|
||||
SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, pTableScanInfo->base.pdInfo.pExprSup->rowEntryInfoOffset);
|
||||
|
||||
int32_t reqStatus = fmFuncDynDataRequired(functionId, pEntry, &pBlockInfo->window);
|
||||
if (reqStatus != FUNC_DATA_REQUIRED_NOT_LOAD) {
|
||||
|
@ -274,7 +274,7 @@ static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo*
|
|||
}
|
||||
|
||||
// release buffer pages
|
||||
releaseBufPage(pTableScanInfo->pdInfo.pAggSup->pResultBuf, pPage);
|
||||
releaseBufPage(pTableScanInfo->base.pdInfo.pAggSup->pResultBuf, pPage);
|
||||
|
||||
if (notLoadBlock) {
|
||||
*status = FUNC_DATA_REQUIRED_NOT_LOAD;
|
||||
|
@ -293,7 +293,7 @@ static bool doFilterByBlockSMA(SFilterInfo* pFilterInfo, SColumnDataAgg** pColsA
|
|||
return keep;
|
||||
}
|
||||
|
||||
static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
|
||||
static bool doLoadBlockSMA(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
|
||||
bool allColumnsHaveAgg = true;
|
||||
SColumnDataAgg** pColAgg = NULL;
|
||||
|
||||
|
@ -330,7 +330,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
|
|||
return true;
|
||||
}
|
||||
|
||||
static void doSetTagColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo,
|
||||
static void doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo,
|
||||
int32_t rows) {
|
||||
if (pTableScanInfo->pseudoSup.numOfExprs > 0) {
|
||||
SExprSupp* pSup = &pTableScanInfo->pseudoSup;
|
||||
|
@ -374,19 +374,16 @@ void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
|
||||
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableScanInfo, SSDataBlock* pBlock,
|
||||
uint32_t* status) {
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
STableScanInfo* pInfo = pOperator->info;
|
||||
|
||||
SFileBlockLoadRecorder* pCost = &pTableScanInfo->readRecorder;
|
||||
|
||||
pCost->totalBlocks += 1;
|
||||
pCost->totalRows += pBlock->info.rows;
|
||||
|
||||
bool loadSMA = false;
|
||||
|
||||
*status = pInfo->dataBlockLoadFlag;
|
||||
*status = pTableScanInfo->dataBlockLoadFlag;
|
||||
if (pOperator->exprSupp.pFilterInfo != NULL ||
|
||||
overlapWithTimeWindow(&pTableScanInfo->pdInfo.interval, &pBlock->info, pTableScanInfo->cond.order)) {
|
||||
(*status) = FUNC_DATA_REQUIRED_DATA_LOAD;
|
||||
|
@ -485,14 +482,14 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
|
|||
}
|
||||
}
|
||||
|
||||
applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo, pOperator);
|
||||
applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo, pOperator);
|
||||
|
||||
pCost->totalRows += pBlock->info.rows;
|
||||
pInfo->limitInfo.numOfOutputRows = pCost->totalRows;
|
||||
pTableScanInfo->limitInfo.numOfOutputRows = pCost->totalRows;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunctionCtx* pCtx, int32_t numOfOutput) {
|
||||
static void prepareForDescendingScan(STableScanBase* pTableScanInfo, SqlFunctionCtx* pCtx, int32_t numOfOutput) {
|
||||
SET_REVERSE_SCAN_FLAG(pTableScanInfo);
|
||||
|
||||
switchCtxOrder(pCtx, numOfOutput);
|
||||
|
@ -700,7 +697,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
|
|||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
while (tsdbNextDataBlock(pTableScanInfo->dataReader)) {
|
||||
while (tsdbNextDataBlock(pTableScanInfo->base.dataReader)) {
|
||||
if (isTaskKilled(pTaskInfo)) {
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
|
||||
}
|
||||
|
@ -715,7 +712,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
|
|||
SDataBlockInfo* pBInfo = &pBlock->info;
|
||||
|
||||
int32_t rows = 0;
|
||||
tsdbRetrieveDataBlockInfo(pTableScanInfo->dataReader, &rows, &pBInfo->uid, &pBInfo->window);
|
||||
tsdbRetrieveDataBlockInfo(pTableScanInfo->base.dataReader, &rows, &pBInfo->uid, &pBInfo->window);
|
||||
|
||||
blockDataEnsureCapacity(pBlock, rows); // todo remove it latter
|
||||
pBInfo->rows = rows;
|
||||
|
@ -724,7 +721,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
|
|||
pBlock->info.groupId = getTableGroupId(pTaskInfo->pTableInfoList, pBlock->info.uid);
|
||||
|
||||
uint32_t status = 0;
|
||||
int32_t code = loadDataBlock(pOperator, pTableScanInfo, pBlock, &status);
|
||||
int32_t code = loadDataBlock(pOperator, &pTableScanInfo->base, pBlock, &status);
|
||||
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pOperator->pTaskInfo->env, code);
|
||||
|
@ -735,10 +732,10 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
|
|||
continue;
|
||||
}
|
||||
|
||||
pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
|
||||
pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
|
||||
pOperator->resultInfo.totalRows = pTableScanInfo->base.readRecorder.totalRows;
|
||||
pTableScanInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
|
||||
|
||||
pOperator->cost.totalCost = pTableScanInfo->readRecorder.elapsedTime;
|
||||
pOperator->cost.totalCost = pTableScanInfo->base.readRecorder.elapsedTime;
|
||||
|
||||
// todo refactor
|
||||
/*pTableScanInfo->lastStatus.uid = pBlock->info.uid;*/
|
||||
|
@ -758,7 +755,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
|
|||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
// The read handle is not initialized yet, since no qualified tables exists
|
||||
if (pTableScanInfo->dataReader == NULL || pOperator->status == OP_EXEC_DONE) {
|
||||
if (pTableScanInfo->base.dataReader == NULL || pOperator->status == OP_EXEC_DONE) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -773,19 +770,19 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
|
|||
|
||||
if (pTableScanInfo->scanTimes < pTableScanInfo->scanInfo.numOfAsc) {
|
||||
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
|
||||
pTableScanInfo->scanFlag = REPEAT_SCAN;
|
||||
pTableScanInfo->base.scanFlag = REPEAT_SCAN;
|
||||
qDebug("start to repeat ascending order scan data blocks due to query func required, %s", GET_TASKID(pTaskInfo));
|
||||
|
||||
// do prepare for the next round table scan operation
|
||||
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
|
||||
tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t total = pTableScanInfo->scanInfo.numOfAsc + pTableScanInfo->scanInfo.numOfDesc;
|
||||
if (pTableScanInfo->scanTimes < total) {
|
||||
if (pTableScanInfo->cond.order == TSDB_ORDER_ASC) {
|
||||
prepareForDescendingScan(pTableScanInfo, pOperator->exprSupp.pCtx, 0);
|
||||
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
|
||||
if (pTableScanInfo->base.cond.order == TSDB_ORDER_ASC) {
|
||||
prepareForDescendingScan(&pTableScanInfo->base, pOperator->exprSupp.pCtx, 0);
|
||||
tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
|
||||
qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
|
||||
}
|
||||
|
||||
|
@ -799,10 +796,10 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
|
|||
|
||||
if (pTableScanInfo->scanTimes < total) {
|
||||
setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
|
||||
pTableScanInfo->scanFlag = REPEAT_SCAN;
|
||||
pTableScanInfo->base.scanFlag = REPEAT_SCAN;
|
||||
|
||||
qDebug("%s start to repeat descending order scan data blocks", GET_TASKID(pTaskInfo));
|
||||
tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
|
||||
tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -831,11 +828,11 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
|||
}
|
||||
|
||||
STableKeyInfo* pTableInfo = tableListGetInfo(pTaskInfo->pTableInfoList, pInfo->currentTable);
|
||||
tsdbSetTableList(pInfo->dataReader, pTableInfo, 1);
|
||||
tsdbSetTableList(pInfo->base.dataReader, pTableInfo, 1);
|
||||
qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d %s", pTableInfo->uid, numOfTables,
|
||||
pInfo->currentTable, pTaskInfo->id.str);
|
||||
|
||||
tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
|
||||
tsdbReaderReset(pInfo->base.dataReader, &pInfo->base.cond);
|
||||
pInfo->scanTimes = 0;
|
||||
}
|
||||
} else { // scan table group by group sequentially
|
||||
|
@ -848,10 +845,10 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
|||
int32_t num = 0;
|
||||
STableKeyInfo* pList = NULL;
|
||||
tableListGetGroupList(pTaskInfo->pTableInfoList, pInfo->currentGroupId, &pList, &num);
|
||||
ASSERT(pInfo->dataReader == NULL);
|
||||
ASSERT(pInfo->base.dataReader == NULL);
|
||||
|
||||
int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, pList, num,
|
||||
(STsdbReader**)&pInfo->dataReader, GET_TASKID(pTaskInfo));
|
||||
int32_t code = tsdbReaderOpen(pInfo->base.readHandle.vnode, &pInfo->base.cond, pList, num,
|
||||
(STsdbReader**)&pInfo->base.dataReader, GET_TASKID(pTaskInfo));
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
|
@ -870,15 +867,15 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
|||
|
||||
// reset value for the next group data output
|
||||
pOperator->status = OP_OPENED;
|
||||
pInfo->limitInfo.numOfOutputRows = 0;
|
||||
pInfo->limitInfo.remainOffset = pInfo->limitInfo.limit.offset;
|
||||
pInfo->base.limitInfo.numOfOutputRows = 0;
|
||||
pInfo->base.limitInfo.remainOffset = pInfo->base.limitInfo.limit.offset;
|
||||
|
||||
int32_t num = 0;
|
||||
STableKeyInfo* pList = NULL;
|
||||
tableListGetGroupList(pTaskInfo->pTableInfoList, pInfo->currentGroupId, &pList, &num);
|
||||
|
||||
tsdbSetTableList(pInfo->dataReader, pList, num);
|
||||
tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
|
||||
tsdbSetTableList(pInfo->base.dataReader, pList, num);
|
||||
tsdbReaderReset(pInfo->base.dataReader, &pInfo->base.cond);
|
||||
pInfo->scanTimes = 0;
|
||||
|
||||
result = doGroupedTableScan(pOperator);
|
||||
|
@ -894,7 +891,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
|
|||
static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len) {
|
||||
SFileBlockLoadRecorder* pRecorder = taosMemoryCalloc(1, sizeof(SFileBlockLoadRecorder));
|
||||
STableScanInfo* pTableScanInfo = pOptr->info;
|
||||
*pRecorder = pTableScanInfo->readRecorder;
|
||||
*pRecorder = pTableScanInfo->base.readRecorder;
|
||||
*pOptrExplain = pRecorder;
|
||||
*len = sizeof(SFileBlockLoadRecorder);
|
||||
return 0;
|
||||
|
@ -903,17 +900,17 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr
|
|||
static void destroyTableScanOperatorInfo(void* param) {
|
||||
STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
|
||||
blockDataDestroy(pTableScanInfo->pResBlock);
|
||||
cleanupQueryTableDataCond(&pTableScanInfo->cond);
|
||||
cleanupQueryTableDataCond(&pTableScanInfo->base.cond);
|
||||
|
||||
tsdbReaderClose(pTableScanInfo->dataReader);
|
||||
pTableScanInfo->dataReader = NULL;
|
||||
tsdbReaderClose(pTableScanInfo->base.dataReader);
|
||||
pTableScanInfo->base.dataReader = NULL;
|
||||
|
||||
if (pTableScanInfo->matchInfo.pList != NULL) {
|
||||
taosArrayDestroy(pTableScanInfo->matchInfo.pList);
|
||||
if (pTableScanInfo->base.matchInfo.pList != NULL) {
|
||||
taosArrayDestroy(pTableScanInfo->base.matchInfo.pList);
|
||||
}
|
||||
|
||||
taosLRUCacheCleanup(pTableScanInfo->metaCache.pTableMetaEntryCache);
|
||||
cleanupExprSupp(&pTableScanInfo->pseudoSup);
|
||||
taosLRUCacheCleanup(pTableScanInfo->base.metaCache.pTableMetaEntryCache);
|
||||
cleanupExprSupp(&pTableScanInfo->base.pseudoSup);
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
|
@ -930,30 +927,32 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,

int32_t numOfCols = 0;
int32_t code =
extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->matchInfo);
extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->base.matchInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}

initLimitInfo(pScanNode->node.pLimit, pScanNode->node.pSlimit, &pInfo->limitInfo);
code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
initLimitInfo(pScanNode->node.pLimit, pScanNode->node.pSlimit, &pInfo->base.limitInfo);
code = initQueryTableDataCond(&pInfo->base.cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}

if (pScanNode->pScanPseudoCols != NULL) {
SExprSupp* pSup = &pInfo->pseudoSup;
SExprSupp* pSup = &pInfo->base.pseudoSup;
pSup->pExprInfo = createExprInfo(pScanNode->pScanPseudoCols, NULL, &pSup->numOfExprs);
pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset);
}

pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
pInfo->pdInfo.interval = extractIntervalInfo(pTableScanNode);
pInfo->readHandle = *readHandle;

pInfo->base.scanFlag = MAIN_SCAN;
pInfo->base.pdInfo.interval = extractIntervalInfo(pTableScanNode);
pInfo->base.readHandle = *readHandle;
pInfo->sample.sampleRatio = pTableScanNode->ratio;
pInfo->sample.seed = taosGetTimestampSec();

pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired;
pInfo->base.dataBlockLoadFlag = pTableScanNode->dataRequired;

initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pResBlock = createResDataBlock(pDescNode);
@ -964,7 +963,6 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
goto _error;
}

pInfo->scanFlag = MAIN_SCAN;
pInfo->currentGroupId = -1;
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;

@ -972,13 +970,13 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
pTaskInfo);
pOperator->exprSupp.numOfExprs = numOfCols;

pInfo->metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5);
if (pInfo->metaCache.pTableMetaEntryCache == NULL) {
pInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5);
if (pInfo->base.metaCache.pTableMetaEntryCache == NULL) {
code = terrno;
goto _error;
}

taosLRUCacheSetStrictCapacity(pInfo->metaCache.pTableMetaEntryCache, false);
taosLRUCacheSetStrictCapacity(pInfo->base.metaCache.pTableMetaEntryCache, false);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doTableScan, NULL, destroyTableScanOperatorInfo,
getTableScannerExecInfo);

@ -1000,7 +998,7 @@ SOperatorInfo* createTableSeqScanOperatorInfo(void* pReadHandle, SExecTaskInfo*
STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));

pInfo->dataReader = pReadHandle;
pInfo->base.dataReader = pReadHandle;
// pInfo->prevGroupId = -1;

setOperatorInfo(pOperator, "TableSeqScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN, false, OP_NOT_OPENED,
@ -1207,11 +1205,11 @@ static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t grou
}

void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin) {
pTableScanInfo->cond.twindows = *pWin;
pTableScanInfo->base.cond.twindows = *pWin;
pTableScanInfo->scanTimes = 0;
pTableScanInfo->currentGroupId = -1;
tsdbReaderClose(pTableScanInfo->dataReader);
pTableScanInfo->dataReader = NULL;
tsdbReaderClose(pTableScanInfo->base.dataReader);
pTableScanInfo->base.dataReader = NULL;
}

static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbUid, TSKEY startTs, TSKEY endTs,

@ -1219,7 +1217,7 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
STableKeyInfo tblInfo = {.uid = tbUid, .groupId = 0};

STableScanInfo* pTableScanInfo = pTableScanOp->info;
SQueryTableDataCond cond = pTableScanInfo->cond;
SQueryTableDataCond cond = pTableScanInfo->base.cond;

cond.startVersion = -1;
cond.endVersion = maxVersion;

@ -1231,7 +1229,7 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
blockDataCleanup(pBlock);

STsdbReader* pReader = NULL;
int32_t code = tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &cond, &tblInfo, 1, (STsdbReader**)&pReader,
int32_t code = tsdbReaderOpen(pTableScanInfo->base.readHandle.vnode, &cond, &tblInfo, 1, (STsdbReader**)&pReader,
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
terrno = code;

@ -1250,8 +1248,8 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
blockDataEnsureCapacity(pBlock, rows);
pBlock->info.rows = rows;

relocateColumnData(pBlock, pTableScanInfo->matchInfo.pList, pCols, true);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, rows);
relocateColumnData(pBlock, pTableScanInfo->base.matchInfo.pList, pCols, true);
doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, rows);

pBlock->info.groupId = getTableGroupId(pTaskInfo->pTableInfoList, pBInfo->uid);
}
@ -1378,8 +1376,8 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32
*pRowIndex = 0;
pInfo->updateWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTableScanInfo->dataReader);
pTableScanInfo->dataReader = NULL;
tsdbReaderClose(pTableScanInfo->base.dataReader);
pTableScanInfo->base.dataReader = NULL;
return NULL;
}

@ -1813,8 +1811,8 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
} else {
if (!pTaskInfo->streamInfo.returned) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->dataReader);
pTSInfo->dataReader = NULL;
tsdbReaderClose(pTSInfo->base.dataReader);
pTSInfo->base.dataReader = NULL;
tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer);
qDebug("queue scan tsdb over, switch to wal ver %" PRId64 "", pTaskInfo->streamInfo.snapshotVer + 1);
if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) {
@ -1980,22 +1978,22 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
|
|||
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1 ||
|
||||
pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE2) {
|
||||
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
|
||||
memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
|
||||
memcpy(&pTSInfo->base.cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
|
||||
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1) {
|
||||
pTSInfo->cond.startVersion = 0;
|
||||
pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1;
|
||||
qDebug("stream recover step 1, from %" PRId64 " to %" PRId64, pTSInfo->cond.startVersion,
|
||||
pTSInfo->cond.endVersion);
|
||||
pTSInfo->base.cond.startVersion = 0;
|
||||
pTSInfo->base.cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1;
|
||||
qDebug("stream recover step 1, from %" PRId64 " to %" PRId64, pTSInfo->base.cond.startVersion,
|
||||
pTSInfo->base.cond.endVersion);
|
||||
} else {
|
||||
pTSInfo->cond.startVersion = pTaskInfo->streamInfo.fillHistoryVer1 + 1;
|
||||
pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer2;
|
||||
qDebug("stream recover step 2, from %" PRId64 " to %" PRId64, pTSInfo->cond.startVersion,
|
||||
pTSInfo->cond.endVersion);
|
||||
pTSInfo->base.cond.startVersion = pTaskInfo->streamInfo.fillHistoryVer1 + 1;
|
||||
pTSInfo->base.cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer2;
|
||||
qDebug("stream recover step 2, from %" PRId64 " to %" PRId64, pTSInfo->base.cond.startVersion,
|
||||
pTSInfo->base.cond.endVersion);
|
||||
}
|
||||
|
||||
/*resetTableScanInfo(pTSInfo, pWin);*/
|
||||
tsdbReaderClose(pTSInfo->dataReader);
|
||||
pTSInfo->dataReader = NULL;
|
||||
tsdbReaderClose(pTSInfo->base.dataReader);
|
||||
pTSInfo->base.dataReader = NULL;
|
||||
|
||||
pTSInfo->scanTimes = 0;
|
||||
pTSInfo->currentGroupId = -1;
|
||||
|
@ -2016,11 +2014,11 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
|
|||
}
|
||||
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;
|
||||
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
|
||||
tsdbReaderClose(pTSInfo->dataReader);
|
||||
pTSInfo->dataReader = NULL;
|
||||
tsdbReaderClose(pTSInfo->base.dataReader);
|
||||
pTSInfo->base.dataReader = NULL;
|
||||
|
||||
pTSInfo->cond.startVersion = -1;
|
||||
pTSInfo->cond.endVersion = -1;
|
||||
pTSInfo->base.cond.startVersion = -1;
|
||||
pTSInfo->base.cond.endVersion = -1;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -2128,8 +2126,8 @@ FETCH_NEXT_BLOCK:
|
|||
SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
|
||||
if (pSDB) {
|
||||
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
|
||||
uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader);
|
||||
updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId, version);
|
||||
uint64_t version = getReaderMaxVersion(pTableScanInfo->base.dataReader);
|
||||
updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->base.cond.twindows, pInfo->groupId, version);
|
||||
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
|
||||
checkUpdateData(pInfo, true, pSDB, false);
|
||||
// printDataBlock(pSDB, "stream scan update");
|
||||
|
@ -2520,7 +2518,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
|
|||
SOperatorInfo* pTableScanOp = createTableScanOperatorInfo(pTableScanNode, pHandle, pTaskInfo);
|
||||
STableScanInfo* pTSInfo = (STableScanInfo*)pTableScanOp->info;
|
||||
if (pHandle->version > 0) {
|
||||
pTSInfo->cond.endVersion = pHandle->version;
|
||||
pTSInfo->base.cond.endVersion = pHandle->version;
|
||||
}
|
||||
|
||||
STableKeyInfo* pList = NULL;
|
||||
|
@ -2529,8 +2527,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
|
|||
|
||||
if (pHandle->initTableReader) {
|
||||
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
|
||||
pTSInfo->dataReader = NULL;
|
||||
code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, pList, num, &pTSInfo->dataReader, NULL);
|
||||
pTSInfo->base.dataReader = NULL;
|
||||
code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->base.cond, pList, num, &pTSInfo->base.dataReader, NULL);
|
||||
if (code != 0) {
|
||||
terrno = code;
|
||||
destroyTableScanOperatorInfo(pTableScanOp);
|
||||
|
@ -2566,7 +2564,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
|
|||
goto _error;
|
||||
}
|
||||
taosArrayDestroy(tableIdList);
|
||||
memcpy(&pTaskInfo->streamInfo.tableCond, &pTSInfo->cond, sizeof(SQueryTableDataCond));
|
||||
memcpy(&pTaskInfo->streamInfo.tableCond, &pTSInfo->base.cond, sizeof(SQueryTableDataCond));
|
||||
} else {
|
||||
taosArrayDestroy(pColIds);
|
||||
}
|
||||
|
@ -4381,123 +4379,6 @@ _error:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
// todo refactor
|
||||
static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo,
|
||||
SSDataBlock* pBlock, uint32_t* status) {
|
||||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
STableMergeScanInfo* pInfo = pOperator->info;
|
||||
|
||||
SFileBlockLoadRecorder* pCost = &pTableScanInfo->readRecorder;
|
||||
|
||||
pCost->totalBlocks += 1;
|
||||
pCost->totalRows += pBlock->info.rows;
|
||||
|
||||
*status = pInfo->dataBlockLoadFlag;
|
||||
if (pOperator->exprSupp.pFilterInfo != NULL ||
|
||||
overlapWithTimeWindow(&pTableScanInfo->interval, &pBlock->info, pTableScanInfo->cond.order)) {
|
||||
(*status) = FUNC_DATA_REQUIRED_DATA_LOAD;
|
||||
}
|
||||
|
||||
SDataBlockInfo* pBlockInfo = &pBlock->info;
|
||||
taosMemoryFreeClear(pBlock->pBlockAgg);
|
||||
|
||||
if (*status == FUNC_DATA_REQUIRED_FILTEROUT) {
|
||||
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
|
||||
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
pCost->filterOutBlocks += 1;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
|
||||
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
|
||||
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
pCost->skipBlocks += 1;
|
||||
|
||||
// clear all data in pBlock that are set when handing the previous block
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); ++i) {
|
||||
SColumnInfoData* pcol = taosArrayGet(pBlock->pDataBlock, i);
|
||||
pcol->pData = NULL;
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else if (*status == FUNC_DATA_REQUIRED_SMA_LOAD) {
|
||||
pCost->loadBlockStatis += 1;
|
||||
|
||||
bool allColumnsHaveAgg = true;
|
||||
SColumnDataAgg** pColAgg = NULL;
|
||||
|
||||
if (allColumnsHaveAgg == true) {
|
||||
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
|
||||
// todo create this buffer during creating operator
|
||||
if (pBlock->pBlockAgg == NULL) {
|
||||
pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColMatchItem* pColMatchInfo = taosArrayGet(pTableScanInfo->matchInfo.pList, i);
|
||||
if (!pColMatchInfo->needOutput) {
|
||||
continue;
|
||||
}
|
||||
pBlock->pBlockAgg[pColMatchInfo->dstSlotId] = pColAgg[i];
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else { // failed to load the block sma data, data block statistics does not exist, load data block instead
|
||||
*status = FUNC_DATA_REQUIRED_DATA_LOAD;
|
||||
}
|
||||
}
|
||||
|
||||
ASSERT(*status == FUNC_DATA_REQUIRED_DATA_LOAD);
|
||||
|
||||
// todo filter data block according to the block sma data firstly
|
||||
#if 0
|
||||
if (!doFilterByBlockSMA(pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) {
|
||||
pCost->filterOutBlocks += 1;
|
||||
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo), pBlockInfo->window.skey,
|
||||
pBlockInfo->window.ekey, pBlockInfo->rows);
|
||||
(*status) = FUNC_DATA_REQUIRED_FILTEROUT;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
pCost->totalCheckedRows += pBlock->info.rows;
|
||||
pCost->loadBlocks += 1;
|
||||
|
||||
STsdbReader* reader = pTableScanInfo->pReader;
|
||||
SArray* pCols = tsdbRetrieveDataBlock(reader, NULL);
|
||||
if (pCols == NULL) {
|
||||
return terrno;
|
||||
}
|
||||
|
||||
relocateColumnData(pBlock, pTableScanInfo->matchInfo.pList, pCols, true);
|
||||
|
||||
// currently only the tbname pseudo column
|
||||
SExprSupp* pSup = &pTableScanInfo->pseudoSup;
|
||||
|
||||
int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock,
|
||||
pBlock->info.rows, GET_TASKID(pTaskInfo), NULL);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
|
||||
if (pOperator->exprSupp.pFilterInfo != NULL) {
|
||||
int64_t st = taosGetTimestampMs();
|
||||
doFilter(pBlock, pOperator->exprSupp.pFilterInfo, &pTableScanInfo->matchInfo);
|
||||
|
||||
double el = (taosGetTimestampUs() - st) / 1000.0;
|
||||
pTableScanInfo->readRecorder.filterTime += el;
|
||||
|
||||
if (pBlock->info.rows == 0) {
|
||||
pCost->filterOutBlocks += 1;
|
||||
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d, elapsed time:%.2f ms",
|
||||
GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, el);
|
||||
} else {
|
||||
qDebug("%s data block filter applied, elapsed time:%.2f ms", GET_TASKID(pTaskInfo), el);
|
||||
}
|
||||
}
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static SSDataBlock* getTableDataBlockImpl(void* param) {
|
||||
STableMergeScanSortSourceParam* source = param;
|
||||
SOperatorInfo* pOperator = source->pOperator;
|
||||
|
@ -4512,15 +4393,15 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
|
|||
|
||||
int64_t st = taosGetTimestampUs();
|
||||
|
||||
void* p = tableListGetInfo(pInfo->tableListInfo, readIdx + pInfo->tableStartIndex);
|
||||
SReadHandle* pHandle = &pInfo->readHandle;
|
||||
void* p = tableListGetInfo(pTaskInfo->pTableInfoList, readIdx + pInfo->tableStartIndex);
|
||||
SReadHandle* pHandle = &pInfo->base.readHandle;
|
||||
|
||||
int32_t code = tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, &pInfo->pReader, GET_TASKID(pTaskInfo));
|
||||
int32_t code = tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, &pInfo->base.dataReader, GET_TASKID(pTaskInfo));
|
||||
if (code != 0) {
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
|
||||
STsdbReader* reader = pInfo->pReader;
|
||||
STsdbReader* reader = pInfo->base.dataReader;
|
||||
while (tsdbNextDataBlock(reader)) {
|
||||
if (isTaskKilled(pTaskInfo)) {
|
||||
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
|
||||
|
@ -4546,7 +4427,8 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
|
|||
}
|
||||
|
||||
uint32_t status = 0;
|
||||
code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, pBlock, &status);
|
||||
loadDataBlock(pOperator, &pTableScanInfo->base, pBlock, &status);
|
||||
// code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, pBlock, &status);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
|
@ -4559,15 +4441,15 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
|
|||
pBlock->info.groupId = getTableGroupId(pTaskInfo->pTableInfoList, pBlock->info.uid);
|
||||
|
||||
pOperator->resultInfo.totalRows += pBlock->info.rows;
|
||||
pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
|
||||
pTableScanInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
|
||||
|
||||
tsdbReaderClose(pInfo->pReader);
|
||||
pInfo->pReader = NULL;
|
||||
tsdbReaderClose(pInfo->base.dataReader);
|
||||
pInfo->base.dataReader = NULL;
|
||||
return pBlock;
|
||||
}
|
||||
|
||||
tsdbReaderClose(pInfo->pReader);
|
||||
pInfo->pReader = NULL;
|
||||
tsdbReaderClose(pInfo->base.dataReader);
|
||||
pInfo->base.dataReader = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -4605,10 +4487,10 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
|
|||
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
|
||||
|
||||
{
|
||||
size_t numOfTables = tableListGetSize(pInfo->tableListInfo);
|
||||
size_t numOfTables = tableListGetSize(pTaskInfo->pTableInfoList);
|
||||
int32_t i = pInfo->tableStartIndex + 1;
|
||||
for (; i < numOfTables; ++i) {
|
||||
STableKeyInfo* tableKeyInfo = tableListGetInfo(pInfo->tableListInfo, i);
|
||||
STableKeyInfo* tableKeyInfo = tableListGetInfo(pTaskInfo->pTableInfoList, i);
|
||||
if (tableKeyInfo->groupId != pInfo->groupId) {
|
||||
break;
|
||||
}
|
||||
|
@ -4619,7 +4501,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
|
|||
int32_t tableStartIdx = pInfo->tableStartIndex;
|
||||
int32_t tableEndIdx = pInfo->tableEndIndex;
|
||||
|
||||
pInfo->pReader = NULL;
|
||||
pInfo->base.dataReader = NULL;
|
||||
|
||||
// todo the total available buffer should be determined by total capacity of buffer of this task.
|
||||
// the additional one is reserved for merge result
|
||||
|
@ -4642,7 +4524,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
|
|||
taosArrayPush(pInfo->sortSourceParams, ¶m);
|
||||
|
||||
SQueryTableDataCond cond;
|
||||
dumpSQueryTableCond(&pInfo->cond, &cond);
|
||||
dumpSQueryTableCond(&pInfo->base.cond, &cond);
|
||||
taosArrayPush(pInfo->queryConds, &cond);
|
||||
}
|
||||
|
||||
|
@ -4682,6 +4564,7 @@ int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) {
|
|||
taosArrayClear(pInfo->sortSourceParams);
|
||||
|
||||
tsortDestroySortHandle(pInfo->pSortHandle);
|
||||
pInfo->pSortHandle = NULL;
|
||||
|
||||
for (int32_t i = 0; i < taosArrayGetSize(pInfo->queryConds); i++) {
|
||||
SQueryTableDataCond* cond = taosArrayGet(pInfo->queryConds, i);
|
||||
|
@ -4732,7 +4615,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
|
|||
T_LONG_JMP(pTaskInfo->env, code);
|
||||
}
|
||||
|
||||
size_t tableListSize = tableListGetSize(pInfo->tableListInfo);
|
||||
size_t tableListSize = tableListGetSize(pTaskInfo->pTableInfoList);
|
||||
if (!pInfo->hasGroupId) {
|
||||
pInfo->hasGroupId = true;
|
||||
|
||||
|
@ -4741,7 +4624,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
|
|||
return NULL;
|
||||
}
|
||||
pInfo->tableStartIndex = 0;
|
||||
pInfo->groupId = ((STableKeyInfo*)tableListGetInfo(pInfo->tableListInfo, pInfo->tableStartIndex))->groupId;
|
||||
pInfo->groupId = ((STableKeyInfo*)tableListGetInfo(pTaskInfo->pTableInfoList, pInfo->tableStartIndex))->groupId;
|
||||
startGroupTableMergeScan(pOperator);
|
||||
}
|
||||
|
||||
|
@ -4760,7 +4643,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
|
|||
break;
|
||||
}
|
||||
pInfo->tableStartIndex = pInfo->tableEndIndex + 1;
|
||||
pInfo->groupId = tableListGetInfo(pInfo->tableListInfo, pInfo->tableStartIndex)->groupId;
|
||||
pInfo->groupId = tableListGetInfo(pTaskInfo->pTableInfoList, pInfo->tableStartIndex)->groupId;
|
||||
startGroupTableMergeScan(pOperator);
|
||||
}
|
||||
}
|
||||
|
@ -4770,7 +4653,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
|
|||
|
||||
void destroyTableMergeScanOperatorInfo(void* param) {
|
||||
STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
|
||||
cleanupQueryTableDataCond(&pTableScanInfo->cond);
|
||||
cleanupQueryTableDataCond(&pTableScanInfo->base.cond);
|
||||
|
||||
int32_t numOfTable = taosArrayGetSize(pTableScanInfo->queryConds);
|
||||
|
||||
|
@ -4780,9 +4663,11 @@ void destroyTableMergeScanOperatorInfo(void* param) {
|
|||
}
|
||||
|
||||
taosArrayDestroy(pTableScanInfo->sortSourceParams);
|
||||
tsortDestroySortHandle(pTableScanInfo->pSortHandle);
|
||||
pTableScanInfo->pSortHandle = NULL;
|
||||
|
||||
tsdbReaderClose(pTableScanInfo->pReader);
|
||||
pTableScanInfo->pReader = NULL;
|
||||
tsdbReaderClose(pTableScanInfo->base.dataReader);
|
||||
pTableScanInfo->base.dataReader = NULL;
|
||||
|
||||
for (int i = 0; i < taosArrayGetSize(pTableScanInfo->queryConds); i++) {
|
||||
SQueryTableDataCond* pCond = taosArrayGet(pTableScanInfo->queryConds, i);
|
||||
|
@ -4790,17 +4675,20 @@ void destroyTableMergeScanOperatorInfo(void* param) {
|
|||
}
|
||||
taosArrayDestroy(pTableScanInfo->queryConds);
|
||||
|
||||
if (pTableScanInfo->matchInfo.pList != NULL) {
|
||||
taosArrayDestroy(pTableScanInfo->matchInfo.pList);
|
||||
if (pTableScanInfo->base.matchInfo.pList != NULL) {
|
||||
taosArrayDestroy(pTableScanInfo->base.matchInfo.pList);
|
||||
}
|
||||
|
||||
pTableScanInfo->pResBlock = blockDataDestroy(pTableScanInfo->pResBlock);
|
||||
pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock);
|
||||
|
||||
taosArrayDestroy(pTableScanInfo->pSortInfo);
|
||||
cleanupExprSupp(&pTableScanInfo->pseudoSup);
|
||||
cleanupExprSupp(&pTableScanInfo->base.pseudoSup);
|
||||
|
||||
tsdbReaderClose(pTableScanInfo->base.dataReader);
|
||||
pTableScanInfo->base.dataReader = NULL;
|
||||
taosLRUCacheCleanup(pTableScanInfo->base.metaCache.pTableMetaEntryCache);
|
||||
|
||||
taosMemoryFreeClear(pTableScanInfo->rowEntryInfoOffset);
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
|
@ -4809,7 +4697,7 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla
|
|||
// TODO: merge these two info into one struct
|
||||
STableMergeScanExecInfo* execInfo = taosMemoryCalloc(1, sizeof(STableMergeScanExecInfo));
|
||||
STableMergeScanInfo* pInfo = pOptr->info;
|
||||
execInfo->blockRecorder = pInfo->readRecorder;
|
||||
execInfo->blockRecorder = pInfo->base.readRecorder;
|
||||
execInfo->sortExecInfo = pInfo->sortExecInfo;
|
||||
|
||||
*pOptrExplain = execInfo;
|
||||
|
@ -4818,8 +4706,8 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
|
||||
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
|
||||
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
|
||||
SExecTaskInfo* pTaskInfo) {
|
||||
STableMergeScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableMergeScanInfo));
|
||||
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
|
||||
if (pInfo == NULL || pOperator == NULL) {
|
||||
|
@ -4830,38 +4718,46 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
|
|||
|
||||
int32_t numOfCols = 0;
|
||||
int32_t code = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID,
|
||||
&pInfo->matchInfo);
|
||||
&pInfo->base.matchInfo);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
||||
code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
|
||||
code = initQueryTableDataCond(&pInfo->base.cond, pTableScanNode);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
taosArrayDestroy(pInfo->matchInfo.pList);
|
||||
taosArrayDestroy(pInfo->base.matchInfo.pList);
|
||||
goto _error;
|
||||
}
|
||||
|
||||
if (pTableScanNode->scan.pScanPseudoCols != NULL) {
|
||||
SExprSupp* pSup = &pInfo->pseudoSup;
|
||||
SExprSupp* pSup = &pInfo->base.pseudoSup;
|
||||
pSup->pExprInfo = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pSup->numOfExprs);
|
||||
pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset);
|
||||
}
|
||||
|
||||
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
|
||||
|
||||
pInfo->readHandle = *readHandle;
|
||||
pInfo->interval = extractIntervalInfo(pTableScanNode);
|
||||
pInfo->base.metaCache.pTableMetaEntryCache = taosLRUCacheInit(1024 * 128, -1, .5);
|
||||
if (pInfo->base.metaCache.pTableMetaEntryCache == NULL) {
|
||||
code = terrno;
|
||||
goto _error;
|
||||
}
|
||||
|
||||
pInfo->base.dataBlockLoadFlag = FUNC_DATA_REQUIRED_DATA_LOAD;
|
||||
pInfo->base.scanFlag = MAIN_SCAN;
|
||||
pInfo->base.readHandle = *readHandle;
|
||||
|
||||
pInfo->base.limitInfo.limit.limit = -1;
|
||||
pInfo->base.limitInfo.slimit.limit = -1;
|
||||
|
||||
pInfo->sample.sampleRatio = pTableScanNode->ratio;
|
||||
pInfo->sample.seed = taosGetTimestampSec();
|
||||
pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired;
|
||||
|
||||
code = filterInitFromNode((SNode*)pTableScanNode->scan.node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
|
||||
pInfo->tableListInfo = pTableListInfo;
|
||||
pInfo->scanFlag = MAIN_SCAN;
|
||||
|
||||
initResultSizeInfo(&pOperator->resultInfo, 1024);
|
||||
pInfo->pResBlock = createResDataBlock(pDescNode);
|
||||
|
@ -4869,12 +4765,13 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
|
|||
|
||||
pInfo->sortSourceParams = taosArrayInit(64, sizeof(STableMergeScanSortSourceParam));
|
||||
|
||||
pInfo->pSortInfo = generateSortByTsInfo(pInfo->matchInfo.pList, pInfo->cond.order);
|
||||
pInfo->pSortInfo = generateSortByTsInfo(pInfo->base.matchInfo.pList, pInfo->base.cond.order);
|
||||
pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false);
|
||||
initLimitInfo(pTableScanNode->scan.node.pLimit, pTableScanNode->scan.node.pSlimit, &pInfo->limitInfo);
|
||||
|
||||
int32_t rowSize = pInfo->pResBlock->info.rowSize;
|
||||
pInfo->bufPageSize = getProperSortPageSize(rowSize);
|
||||
uint32_t nCols = taosArrayGetSize(pInfo->pResBlock->pDataBlock);
|
||||
pInfo->bufPageSize = getProperSortPageSize(rowSize, nCols);
|
||||
|
||||
setOperatorInfo(pOperator, "TableMergeScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, false, OP_NOT_OPENED,
|
||||
pInfo, pTaskInfo);
@ -760,7 +760,8 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size
pInfo->groupSort = pMergePhyNode->groupSort;
pInfo->pSortInfo = createSortInfo(pMergePhyNode->pMergeKeys);
pInfo->pInputBlock = pInputBlock;
pInfo->bufPageSize = getProperSortPageSize(rowSize);
size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock);
pInfo->bufPageSize = getProperSortPageSize(rowSize, numOfCols);
pInfo->sortBufSize = pInfo->bufPageSize * (numStreams + 1); // one additional is reserved for merged result.

setOperatorInfo(pOperator, "MultiwayMergeOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE, false, OP_NOT_OPENED, pInfo, pTaskInfo);
@ -1942,10 +1942,8 @@ static void doKeepPrevRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
|
|||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
|
||||
// null data should not be kept since it can not be used to perform interpolation
|
||||
if (!colDataIsNull_s(pColInfoData, i)) {
|
||||
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, i);
|
||||
|
||||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
pkey->isNull = false;
|
||||
char* val = colDataGetData(pColInfoData, rowIndex);
|
||||
if (!IS_VAR_DATA_TYPE(pkey->type)) {
|
||||
|
@ -1953,6 +1951,8 @@ static void doKeepPrevRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
|
|||
} else {
|
||||
memcpy(pkey->pData, val, varDataLen(val));
|
||||
}
|
||||
} else {
|
||||
pkey->isNull = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1964,10 +1964,8 @@ static void doKeepNextRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
|
|||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
|
||||
// null data should not be kept since it can not be used to perform interpolation
|
||||
if (!colDataIsNull_s(pColInfoData, i)) {
|
||||
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, i);
|
||||
|
||||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
pkey->isNull = false;
|
||||
char* val = colDataGetData(pColInfoData, rowIndex);
|
||||
if (!IS_VAR_DATA_TYPE(pkey->type)) {
|
||||
|
@ -1975,50 +1973,51 @@ static void doKeepNextRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
|
|||
} else {
|
||||
memcpy(pkey->pData, val, varDataLen(val));
|
||||
}
|
||||
} else {
|
||||
pkey->isNull = true;
|
||||
}
|
||||
}
|
||||
|
||||
pSliceInfo->isNextRowSet = true;
|
||||
}
|
||||
|
||||
static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock* pBlock, int32_t rowIndex,
|
||||
bool isLastRow) {
|
||||
static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock* pBlock, int32_t rowIndex) {
|
||||
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
bool fillLastPoint = pSliceInfo->fillLastPoint;
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId);
|
||||
SFillLinearInfo* pLinearInfo = taosArrayGet(pSliceInfo->pLinearInfo, i);
|
||||
|
||||
// null data should not be kept since it can not be used to perform interpolation
|
||||
if (!colDataIsNull_s(pColInfoData, i)) {
|
||||
if (isLastRow) {
|
||||
// null value is represented by using key = INT64_MIN for now.
|
||||
// TODO: optimize to ignore null values for linear interpolation.
|
||||
if (!pLinearInfo->isStartSet) {
|
||||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
pLinearInfo->start.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
memcpy(pLinearInfo->start.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
|
||||
} else if (fillLastPoint) {
|
||||
}
|
||||
pLinearInfo->isStartSet = true;
|
||||
} else if (!pLinearInfo->isEndSet) {
|
||||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
memcpy(pLinearInfo->end.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
|
||||
}
|
||||
pLinearInfo->isEndSet = true;
|
||||
} else {
|
||||
pLinearInfo->start.key = pLinearInfo->end.key;
|
||||
memcpy(pLinearInfo->start.val, pLinearInfo->end.val, pLinearInfo->bytes);
|
||||
|
||||
if (!colDataIsNull_s(pColInfoData, rowIndex)) {
|
||||
pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
memcpy(pLinearInfo->end.val, colDataGetData(pColInfoData, rowIndex), pLinearInfo->bytes);
|
||||
} else {
|
||||
pLinearInfo->start.key = *(int64_t*)colDataGetData(pTsCol, rowIndex);
|
||||
pLinearInfo->end.key = *(int64_t*)colDataGetData(pTsCol, rowIndex + 1);
|
||||
|
||||
char* val;
|
||||
val = colDataGetData(pColInfoData, rowIndex);
|
||||
memcpy(pLinearInfo->start.val, val, pLinearInfo->bytes);
|
||||
val = colDataGetData(pColInfoData, rowIndex + 1);
|
||||
memcpy(pLinearInfo->end.val, val, pLinearInfo->bytes);
|
||||
pLinearInfo->end.key = INT64_MIN;
|
||||
}
|
||||
|
||||
pLinearInfo->hasNull = false;
|
||||
} else {
|
||||
pLinearInfo->hasNull = true;
|
||||
}
|
||||
}
|
||||
|
||||
pSliceInfo->fillLastPoint = isLastRow;
|
||||
}
|
||||
|
||||
static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock) {
|
||||
static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock, bool beforeTs) {
|
||||
int32_t rows = pResBlock->info.rows;
|
||||
blockDataEnsureCapacity(pResBlock, rows + 1);
|
||||
// todo set the correct primary timestamp column
|
||||
|
@ -2037,7 +2036,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
}
|
||||
|
||||
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
|
||||
// SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
|
||||
switch (pSliceInfo->fillType) {
|
||||
case TSDB_FILL_NULL: {
|
||||
colDataAppendNULL(pDst, rows);
|
||||
|
@ -2069,20 +2067,25 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
SPoint start = pLinearInfo->start;
|
||||
SPoint end = pLinearInfo->end;
|
||||
SPoint current = {.key = pSliceInfo->current};
|
||||
current.val = taosMemoryCalloc(pLinearInfo->bytes, 1);
|
||||
|
||||
// before interp range, do not fill
|
||||
if (start.key == INT64_MIN || end.key == INT64_MAX) {
|
||||
// do not interpolate before ts range, only increate pSliceInfo->current
|
||||
if (beforeTs && !pLinearInfo->isEndSet) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!pLinearInfo->isStartSet || !pLinearInfo->isEndSet) {
|
||||
hasInterp = false;
|
||||
break;
|
||||
}
|
||||
|
||||
if (pLinearInfo->hasNull) {
|
||||
if (start.key == INT64_MIN || end.key == INT64_MIN) {
|
||||
colDataAppendNULL(pDst, rows);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
|
||||
current.val = taosMemoryCalloc(pLinearInfo->bytes, 1);
|
||||
taosGetLinearInterpolationVal(¤t, pLinearInfo->type, &start, &end, pLinearInfo->type);
|
||||
colDataAppend(pDst, rows, (char*)current.val, false);
|
||||
}
|
||||
|
||||
taosMemoryFree(current.val);
|
||||
break;
|
||||
|
@ -2094,7 +2097,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
}
|
||||
|
||||
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, srcSlot);
|
||||
if (pkey->isNull == false) {
|
||||
colDataAppend(pDst, rows, pkey->pData, false);
|
||||
} else {
|
||||
colDataAppendNULL(pDst, rows);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2105,7 +2112,11 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
}
|
||||
|
||||
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot);
|
||||
if (pkey->isNull == false) {
|
||||
colDataAppend(pDst, rows, pkey->pData, false);
|
||||
} else {
|
||||
colDataAppendNULL(pDst, rows);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2118,8 +2129,40 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
|
|||
if (hasInterp) {
|
||||
pResBlock->info.rows += 1;
|
||||
}
|
||||
|
||||
return hasInterp;
|
||||
}
|
||||
|
||||
static void addCurrentRowToResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock,
|
||||
SSDataBlock* pSrcBlock, int32_t index) {
|
||||
blockDataEnsureCapacity(pResBlock, pResBlock->info.rows + 1);
|
||||
for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
|
||||
SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
|
||||
|
||||
int32_t dstSlot = pExprInfo->base.resSchema.slotId;
|
||||
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
|
||||
|
||||
if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
|
||||
colDataAppend(pDst, pResBlock->info.rows, (char*)&pSliceInfo->current, false);
|
||||
} else {
|
||||
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
|
||||
SColumnInfoData* pSrc = taosArrayGet(pSrcBlock->pDataBlock, srcSlot);
|
||||
|
||||
if (colDataIsNull_s(pSrc, index)) {
|
||||
colDataAppendNULL(pDst, pResBlock->info.rows);
|
||||
continue;
|
||||
}
|
||||
|
||||
char* v = colDataGetData(pSrc, index);
|
||||
colDataAppend(pDst, pResBlock->info.rows, v, false);
|
||||
}
|
||||
}
|
||||
|
||||
pResBlock->info.rows += 1;
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
|
||||
if (pInfo->pPrevRow != NULL) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -2190,24 +2233,19 @@ static int32_t initFillLinearInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pB
|
|||
|
||||
SFillLinearInfo linearInfo = {0};
|
||||
linearInfo.start.key = INT64_MIN;
|
||||
linearInfo.end.key = INT64_MAX;
|
||||
linearInfo.end.key = INT64_MIN;
|
||||
linearInfo.start.val = taosMemoryCalloc(1, pColInfo->info.bytes);
|
||||
linearInfo.end.val = taosMemoryCalloc(1, pColInfo->info.bytes);
|
||||
linearInfo.hasNull = false;
|
||||
linearInfo.isStartSet = false;
|
||||
linearInfo.isEndSet = false;
|
||||
linearInfo.type = pColInfo->info.type;
|
||||
linearInfo.bytes = pColInfo->info.bytes;
|
||||
taosArrayPush(pInfo->pLinearInfo, &linearInfo);
|
||||
}
|
||||
|
||||
pInfo->fillLastPoint = false;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static bool needToFillLastPoint(STimeSliceOperatorInfo* pSliceInfo) {
|
||||
return (pSliceInfo->fillLastPoint == true && pSliceInfo->fillType == TSDB_FILL_LINEAR);
|
||||
}
|
||||
|
||||
static int32_t initKeeperInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
|
||||
int32_t code;
|
||||
code = initPrevRowsKeeper(pInfo, pBlock);
|
||||
|
@ -2263,118 +2301,40 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
|
||||
int64_t ts = *(int64_t*)colDataGetData(pTsCol, i);
|
||||
|
||||
if (i == 0 && needToFillLastPoint(pSliceInfo)) { // first row in current block
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, false);
|
||||
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
}
|
||||
}
|
||||
|
||||
if (pSliceInfo->current > pSliceInfo->win.ekey) {
|
||||
setOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
|
||||
if (ts == pSliceInfo->current) {
|
||||
blockDataEnsureCapacity(pResBlock, pResBlock->info.rows + 1);
|
||||
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
|
||||
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
|
||||
addCurrentRowToResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i);
|
||||
|
||||
int32_t dstSlot = pExprInfo->base.resSchema.slotId;
|
||||
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
|
||||
|
||||
if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
|
||||
colDataAppend(pDst, pResBlock->info.rows, (char*)&pSliceInfo->current, false);
|
||||
} else {
|
||||
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
|
||||
SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
|
||||
|
||||
if (colDataIsNull_s(pSrc, i)) {
|
||||
colDataAppendNULL(pDst, pResBlock->info.rows);
|
||||
continue;
|
||||
}
|
||||
|
||||
char* v = colDataGetData(pSrc, i);
|
||||
colDataAppend(pDst, pResBlock->info.rows, v, false);
|
||||
}
|
||||
}
|
||||
|
||||
pResBlock->info.rows += 1;
|
||||
doKeepPrevRows(pSliceInfo, pBlock, i);
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i);
|
||||
|
||||
// for linear interpolation, always fill value between this and next points;
|
||||
// if its the first point in data block, also fill values between previous(if there's any) and this point;
|
||||
// if its the last point in data block, no need to fill, but reserve this point as the start value and do
|
||||
// the interpolation when processing next data block.
|
||||
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (i < pBlock->info.rows - 1) {
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, false);
|
||||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit,
|
||||
pInterval->precision);
|
||||
}
|
||||
|
||||
if (pSliceInfo->current > pSliceInfo->win.ekey) {
|
||||
setOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else { // it is the last row of current block
|
||||
// store ts value as start, and calculate interp value when processing next block
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, true);
|
||||
}
|
||||
} else { // non-linear interpolation
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (pSliceInfo->current > pSliceInfo->win.ekey) {
|
||||
setOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else if (ts < pSliceInfo->current) {
|
||||
// in case of interpolation window starts and ends between two datapoints, fill(prev) need to interpolate
|
||||
doKeepPrevRows(pSliceInfo, pBlock, i);
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i);
|
||||
|
||||
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
// no need to increate pSliceInfo->current here
|
||||
// pSliceInfo->current =
|
||||
// taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (i < pBlock->info.rows - 1) {
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, false);
|
||||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit,
|
||||
pInterval->precision);
|
||||
}
|
||||
|
||||
if (pSliceInfo->current > pSliceInfo->win.ekey) {
|
||||
setOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// store ts value as start, and calculate interp value when processing next block
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, true);
|
||||
}
|
||||
} else { // non-linear interpolation
|
||||
if (i < pBlock->info.rows - 1) {
|
||||
// in case of interpolation window starts and ends between two datapoints, fill(next) need to interpolate
|
||||
doKeepNextRows(pSliceInfo, pBlock, i + 1);
|
||||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit,
|
||||
pInterval->precision);
|
||||
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, false) && pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
break;
|
||||
} else {
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
}
|
||||
}
|
||||
|
||||
if (pSliceInfo->current > pSliceInfo->win.ekey) {
|
||||
|
@ -2387,71 +2347,27 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
} else { // it is the last row of current block
|
||||
doKeepPrevRows(pSliceInfo, pBlock, i);
|
||||
}
|
||||
}
|
||||
} else { // ts > pSliceInfo->current
|
||||
// in case of interpolation window starts and ends between two datapoints, fill(next) need to interpolate
|
||||
doKeepNextRows(pSliceInfo, pBlock, i);
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i);
|
||||
|
||||
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, true) && pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
break;
|
||||
} else {
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
}
|
||||
}
|
||||
|
||||
// add current row if timestamp match
|
||||
if (ts == pSliceInfo->current && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
blockDataEnsureCapacity(pResBlock, pResBlock->info.rows + 1);
|
||||
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
|
||||
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
|
||||
|
||||
int32_t dstSlot = pExprInfo->base.resSchema.slotId;
|
||||
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
|
||||
|
||||
if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
|
||||
colDataAppend(pDst, pResBlock->info.rows, (char*)&pSliceInfo->current, false);
|
||||
} else {
|
||||
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
|
||||
SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
|
||||
|
||||
if (colDataIsNull_s(pSrc, i)) {
|
||||
colDataAppendNULL(pDst, pResBlock->info.rows);
|
||||
continue;
|
||||
}
|
||||
|
||||
char* v = colDataGetData(pSrc, i);
|
||||
colDataAppend(pDst, pResBlock->info.rows, v, false);
|
||||
}
|
||||
}
|
||||
|
||||
pResBlock->info.rows += 1;
|
||||
addCurrentRowToResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i);
|
||||
doKeepPrevRows(pSliceInfo, pBlock, i);
|
||||
|
||||
if (pSliceInfo->fillType == TSDB_FILL_LINEAR) {
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
if (i < pBlock->info.rows - 1) {
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, false);
|
||||
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
|
||||
if (nextTs > pSliceInfo->current) {
|
||||
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit,
|
||||
pInterval->precision);
|
||||
}
|
||||
|
||||
if (pSliceInfo->current > pSliceInfo->win.ekey) {
|
||||
setOperatorCompleted(pOperator);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else { // it is the last row of current block
|
||||
// store ts value as start, and calculate interp value when processing next block
|
||||
doKeepLinearInfo(pSliceInfo, pBlock, i, true);
|
||||
}
|
||||
} else { // non-linear interpolation
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
}
|
||||
}
|
||||
|
||||
if (pSliceInfo->current > pSliceInfo->win.ekey) {
|
||||
|
@ -2466,7 +2382,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
|
|||
// except for fill(next), fill(linear)
|
||||
while (pSliceInfo->current <= pSliceInfo->win.ekey && pSliceInfo->fillType != TSDB_FILL_NEXT &&
|
||||
pSliceInfo->fillType != TSDB_FILL_LINEAR) {
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock);
|
||||
genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, false);
|
||||
pSliceInfo->current =
|
||||
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
|
||||
}
|
||||
|
@ -2545,9 +2461,11 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
|
|||
pInfo->interval.interval = pInterpPhyNode->interval;
|
||||
pInfo->current = pInfo->win.skey;
|
||||
|
||||
if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
|
||||
STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info;
|
||||
pScanInfo->cond.twindows = pInfo->win;
|
||||
pScanInfo->cond.type = TIMEWINDOW_RANGE_EXTERNAL;
|
||||
pScanInfo->base.cond.twindows = pInfo->win;
|
||||
pScanInfo->base.cond.type = TIMEWINDOW_RANGE_EXTERNAL;
|
||||
}
|
||||
|
||||
setOperatorInfo(pOperator, "TimeSliceOperator", QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC, false, OP_NOT_OPENED, pInfo,
|
||||
pTaskInfo);
|
@ -584,15 +584,11 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
return 0;
}

// TODO consider the page meta size
int32_t getProperSortPageSize(size_t rowSize) {
uint32_t defaultPageSize = 4096;

uint32_t pgSize = 0;
if (rowSize * 4 > defaultPageSize) {
pgSize = rowSize * 4;
} else {
pgSize = defaultPageSize;
// get sort page size
int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols) {
uint32_t pgSize = rowSize * 4 + blockDataGetSerialMetaSize(numOfCols);
if (pgSize < DEFAULT_PAGESIZE) {
return DEFAULT_PAGESIZE;
}

return pgSize;

@ -612,7 +608,8 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
}

if (pHandle->pDataBlock == NULL) {
pHandle->pageSize = getProperSortPageSize(blockDataGetRowSize(pBlock));
uint32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
pHandle->pageSize = getProperSortPageSize(blockDataGetRowSize(pBlock), numOfCols);

// todo, number of pages are set according to the total available sort buffer
pHandle->numOfPages = 1024;
@ -49,7 +49,7 @@ typedef struct SSumRes {
};
int16_t type;
int64_t prevTs; // used for csum only
bool isPrevTsSet; //used for csum only
bool isPrevTsSet; // used for csum only

} SSumRes;

@ -2220,8 +2220,8 @@ bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInf

SLeastSQRInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);

pInfo->startVal = IS_FLOAT_TYPE(pCtx->param[1].param.nType) ? pCtx->param[1].param.d : (double)pCtx->param[1].param.i;
pInfo->stepVal = IS_FLOAT_TYPE(pCtx->param[2].param.nType) ? pCtx->param[2].param.d : (double)pCtx->param[2].param.i;
GET_TYPED_DATA(pInfo->startVal, double, pCtx->param[1].param.nType, &pCtx->param[1].param.i);
GET_TYPED_DATA(pInfo->stepVal, double, pCtx->param[2].param.nType, &pCtx->param[2].param.i);
return true;
}
@ -2565,8 +2565,8 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {

int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SVariant* pVal = &pCtx->param[1].param;
double v =
(IS_SIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->i : (IS_UNSIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->u : pVal->d));
double v = 0;
GET_TYPED_DATA(v, double, pVal->nType, &pVal->i);

SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo* ppInfo = (SPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo);

@ -2625,8 +2625,8 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult
SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);

SVariant* pVal = &pCtx->param[1].param;
pInfo->percent =
(IS_SIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->i : (IS_UNSIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->u : pVal->d));
pInfo->percent = 0;
GET_TYPED_DATA(pInfo->percent, double, pVal->nType, &pVal->i);

if (pCtx->numOfParams == 2) {
pInfo->algo = APERCT_ALGO_DEFAULT;
@ -3722,6 +3722,12 @@ static int32_t topBotResComparFn(const void* p1, const void* p2, const void* par
|
|||
}
|
||||
|
||||
return (val1->v.u > val2->v.u) ? 1 : -1;
|
||||
} else if (TSDB_DATA_TYPE_FLOAT == type) {
|
||||
if (val1->v.f == val2->v.f) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (val1->v.f > val2->v.f) ? 1 : -1;
|
||||
}
|
||||
|
||||
if (val1->v.d == val2->v.d) {
|
||||
|
@ -3762,10 +3768,12 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
|
|||
} else { // replace the minimum value in the result
|
||||
if ((isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && val.i > pItems[0].v.i) ||
|
||||
(IS_UNSIGNED_NUMERIC_TYPE(type) && val.u > pItems[0].v.u) ||
|
||||
(IS_FLOAT_TYPE(type) && val.d > pItems[0].v.d))) ||
|
||||
(TSDB_DATA_TYPE_FLOAT == type && val.f > pItems[0].v.f) ||
|
||||
(TSDB_DATA_TYPE_DOUBLE == type && val.d > pItems[0].v.d))) ||
|
||||
(!isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && val.i < pItems[0].v.i) ||
|
||||
(IS_UNSIGNED_NUMERIC_TYPE(type) && val.u < pItems[0].v.u) ||
|
||||
(IS_FLOAT_TYPE(type) && val.d < pItems[0].v.d)))) {
|
||||
(TSDB_DATA_TYPE_FLOAT == type && val.f < pItems[0].v.f) ||
|
||||
(TSDB_DATA_TYPE_DOUBLE == type && val.d < pItems[0].v.d)))) {
|
||||
// replace the old data and the coresponding tuple data
|
||||
STopBotResItem* pItem = &pItems[0];
|
||||
pItem->v = val;
|
||||
|
@ -3928,12 +3936,7 @@ int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
|
|||
}
|
||||
for (int32_t i = 0; i < pEntryInfo->numOfRes; ++i) {
|
||||
STopBotResItem* pItem = &pRes->pItems[i];
|
||||
if (type == TSDB_DATA_TYPE_FLOAT) {
|
||||
float v = pItem->v.d;
|
||||
colDataAppend(pCol, currentRow, (const char*)&v, false);
|
||||
} else {
|
||||
colDataAppend(pCol, currentRow, (const char*)&pItem->v.i, false);
|
||||
}
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_finalize i:%d,item:%p,pageId:%d, offset:%d\n", i, pItem, pItem->tuplePos.pageId,
|
||||
pItem->tuplePos.offset);
|
||||
|
@ -3964,10 +3967,12 @@ void addResult(SqlFunctionCtx* pCtx, STopBotResItem* pSourceItem, int16_t type,
|
|||
} else { // replace the minimum value in the result
|
||||
if ((isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && pSourceItem->v.i > pItems[0].v.i) ||
|
||||
(IS_UNSIGNED_NUMERIC_TYPE(type) && pSourceItem->v.u > pItems[0].v.u) ||
|
||||
(IS_FLOAT_TYPE(type) && pSourceItem->v.d > pItems[0].v.d))) ||
|
||||
(TSDB_DATA_TYPE_FLOAT == type && pSourceItem->v.f > pItems[0].v.f) ||
|
||||
(TSDB_DATA_TYPE_DOUBLE == type && pSourceItem->v.d > pItems[0].v.d))) ||
|
||||
(!isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && pSourceItem->v.i < pItems[0].v.i) ||
|
||||
(IS_UNSIGNED_NUMERIC_TYPE(type) && pSourceItem->v.u < pItems[0].v.u) ||
|
||||
(IS_FLOAT_TYPE(type) && pSourceItem->v.d < pItems[0].v.d)))) {
|
||||
(TSDB_DATA_TYPE_FLOAT == type && pSourceItem->v.f < pItems[0].v.f) ||
|
||||
(TSDB_DATA_TYPE_DOUBLE == type && pSourceItem->v.d < pItems[0].v.d)))) {
|
||||
// replace the old data and the coresponding tuple data
|
||||
STopBotResItem* pItem = &pItems[0];
|
||||
pItem->v = pSourceItem->v;
|
||||
|
@ -6038,7 +6043,7 @@ int32_t twaFinalize(struct SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
} else {
if (pInfo->win.ekey == pInfo->win.skey) {
pInfo->dOutput = pInfo->p.val;
} else if (pInfo->win.ekey == INT64_MAX || pInfo->win.skey == INT64_MIN) { //no data in timewindow
} else if (pInfo->win.ekey == INT64_MAX || pInfo->win.skey == INT64_MIN) { // no data in timewindow
pInfo->dOutput = 0;
} else {
pInfo->dOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey);
@ -2036,6 +2036,8 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
pVal->u = pNode->datum.u;
break;
case TSDB_DATA_TYPE_FLOAT:
pVal->f = pNode->datum.d;
break;
case TSDB_DATA_TYPE_DOUBLE:
pVal->d = pNode->datum.d;
break;
@@ -2762,17 +2762,17 @@ static bool needFill(SNode* pNode) {
return hasFillFunc;
}

static bool mismatchFillDataType(SDataType origDt, SDataType fillDt) {
if (TSDB_DATA_TYPE_NULL == fillDt.type) {
return false;
static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList* pValues, int32_t index) {
SListCell* pCell = nodesListGetCell(pValues, index);
if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType)) {
return TSDB_CODE_SUCCESS;
}
if (IS_NUMERIC_TYPE(origDt.type) && !IS_NUMERIC_TYPE(fillDt.type)) {
return true;
SNode* pCaseFunc = NULL;
int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCaseFunc);
if (TSDB_CODE_SUCCESS == code) {
code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
}
if (IS_VAR_DATA_TYPE(origDt.type) && !IS_VAR_DATA_TYPE(fillDt.type)) {
return true;
}
return false;
return code;
}

static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {

@@ -2788,8 +2788,8 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
}
if (mismatchFillDataType(((SExprNode*)pProject)->resType,
((SExprNode*)nodesListGetNode(pFillValues->pNodeList, fillNo))->resType)) {
if (TSDB_CODE_SUCCESS !=
convertFillValue(pCxt, ((SExprNode*)pProject)->resType, pFillValues->pNodeList, fillNo)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
}
++fillNo;

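The replacement of `mismatchFillDataType()` by `convertFillValue()` changes the strategy from rejecting a FILL literal whose type differs from the projection to coercing it up front. A self-contained sketch of that idea, with hypothetical simplified types (not the parser's real node structures):

```c
#include <stdio.h>

typedef enum { TY_INT, TY_DOUBLE } Ty;
typedef struct { Ty type; union { long long i; double d; } v; } Value;

/* Coerce the fill literal to the projection's type, folding the cast at
 * plan time; only genuinely incompatible combinations remain an error. */
static int coerceFillValue(Ty target, Value *val) {
  if (val->type == target) return 0;
  if (target == TY_DOUBLE && val->type == TY_INT) {
    val->v.d = (double)val->v.i;
    val->type = TY_DOUBLE;
    return 0;
  }
  if (target == TY_INT && val->type == TY_DOUBLE) {
    val->v.i = (long long)val->v.d;
    val->type = TY_INT;
    return 0;
  }
  return -1;
}

int main(void) {
  Value fill = {TY_INT, {.i = 7}};
  if (coerceFillValue(TY_DOUBLE, &fill) == 0) printf("fill value: %f\n", fill.v.d);
  return 0;
}
```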
@@ -348,8 +348,10 @@ static int32_t scanPathOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub
int32_t code = scanPathOptMatch(pCxt, pLogicSubplan->pNode, &info);
if (TSDB_CODE_SUCCESS == code && info.pScan) {
scanPathOptSetScanWin(info.pScan);
if (!pCxt->pPlanCxt->streamQuery) {
scanPathOptSetScanOrder(info.scanOrder, info.pScan);
}
}
if (TSDB_CODE_SUCCESS == code && (NULL != info.pDsoFuncs || NULL != info.pSdrFuncs)) {
info.pScan->dataRequired = scanPathOptGetDataRequired(info.pSdrFuncs);
info.pScan->pDynamicScanFuncs = info.pDsoFuncs;

@@ -1704,7 +1704,8 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
}

qDebug("GROUP Num:%u", info->groupNum);
for (uint32_t i = 0; i < info->groupNum; ++i) {
uint32_t maxDbgGrpNum = TMIN(info->groupNum, 1000);
for (uint32_t i = 0; i < maxDbgGrpNum; ++i) {
SFilterGroup *group = &info->groups[i];
qDebug("Group%d : unit num[%u]", i, group->unitNum);

@@ -3248,14 +3249,18 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, SColumnInfoData *pRes, SC
for (uint32_t u = 0; u < group->unitNum; ++u) {
uint32_t uidx = group->unitIdxs[u];
SFilterComUnit *cunit = &info->cunits[uidx];
void *colData = colDataGetData((SColumnInfoData *)(cunit->colData), i);

void *colData = NULL;
bool isNull = colDataIsNull((SColumnInfoData *)(cunit->colData), 0, i, NULL);
// if (FILTER_UNIT_GET_F(info, uidx)) {
// p[i] = FILTER_UNIT_GET_R(info, uidx);
// } else {
uint8_t optr = cunit->optr;

if (colData == NULL || colDataIsNull((SColumnInfoData *)(cunit->colData), 0, i, NULL)) {
if (!isNull) {
colData = colDataGetData((SColumnInfoData *)(cunit->colData), i);
}

if (colData == NULL || isNull) {
p[i] = optr == OP_TYPE_IS_NULL ? true : false;
} else {
if (optr == OP_TYPE_IS_NOT_NULL) {

@@ -3916,6 +3921,10 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) {
} else {
SColumnNode *refNode = (SColumnNode *)node->pLeft;
SNodeListNode *listNode = (SNodeListNode *)node->pRight;
if (LIST_LENGTH(listNode->pNodeList) > 10) {
stat->scalarMode = true;
return DEAL_RES_CONTINUE;
}
int32_t type = vectorGetConvertType(refNode->node.resType.type, listNode->dataType.type);
if (0 != type && type != refNode->node.resType.type) {
stat->scalarMode = true;

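The filterExecuteImpl hunk reorders the per-row loop so the null bitmap is consulted before the cell pointer is fetched. A compact sketch of that ordering, using hypothetical column accessors rather than the real `SColumnInfoData` API:

```c
#include <stdbool.h>
#include <stddef.h>

typedef struct { const bool *nullMap; const double *vals; } Col;

/* Check null first; only dereference the cell when the row is non-null,
 * so IS NULL / IS NOT NULL still evaluate and no data pointer is fetched
 * for a null row. */
static bool rowPasses(const Col *col, int row, bool isNullOp, bool isNotNullOp, double threshold) {
  bool isNull = col->nullMap[row];
  const double *cell = isNull ? NULL : &col->vals[row];

  if (cell == NULL || isNull) return isNullOp;
  if (isNotNullOp) return true;
  return *cell > threshold;
}
```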
@@ -114,6 +114,12 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pState->pTdbState = taosMemoryCalloc(1, sizeof(STdbState));
if (pState->pTdbState == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
streamStateDestroy(pState);
return NULL;
}

char statePath[1024];
if (!specPath) {

@@ -122,31 +128,34 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
memset(statePath, 0, 1024);
tstrncpy(statePath, path, 1024);
}
if (tdbOpen(statePath, szPage, pages, &pState->db, 0) < 0) {
if (tdbOpen(statePath, szPage, pages, &pState->pTdbState->db, 0) < 0) {
goto _err;
}

// open state storage backend
if (tdbTbOpen("state.db", sizeof(SStateKey), -1, stateKeyCmpr, pState->db, &pState->pStateDb, 0) < 0) {
if (tdbTbOpen("state.db", sizeof(SStateKey), -1, stateKeyCmpr, pState->pTdbState->db, &pState->pTdbState->pStateDb,
0) < 0) {
goto _err;
}

// todo refactor
if (tdbTbOpen("fill.state.db", sizeof(SWinKey), -1, winKeyCmpr, pState->db, &pState->pFillStateDb, 0) < 0) {
if (tdbTbOpen("fill.state.db", sizeof(SWinKey), -1, winKeyCmpr, pState->pTdbState->db,
&pState->pTdbState->pFillStateDb, 0) < 0) {
goto _err;
}

if (tdbTbOpen("session.state.db", sizeof(SStateSessionKey), -1, stateSessionKeyCmpr, pState->db,
&pState->pSessionStateDb, 0) < 0) {
if (tdbTbOpen("session.state.db", sizeof(SStateSessionKey), -1, stateSessionKeyCmpr, pState->pTdbState->db,
&pState->pTdbState->pSessionStateDb, 0) < 0) {
goto _err;
}

if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->db, &pState->pFuncStateDb, 0) < 0) {
if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->pTdbState->db,
&pState->pTdbState->pFuncStateDb, 0) < 0) {
goto _err;
}

if (tdbTbOpen("parname.state.db", sizeof(int64_t), TSDB_TABLE_NAME_LEN, NULL, pState->db, &pState->pParNameDb, 0) <
0) {
if (tdbTbOpen("parname.state.db", sizeof(int64_t), TSDB_TABLE_NAME_LEN, NULL, pState->pTdbState->db,
&pState->pTdbState->pParNameDb, 0) < 0) {
goto _err;
}

@@ -154,117 +163,117 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
goto _err;
}

pState->pOwner = pTask;
pState->pTdbState->pOwner = pTask;

return pState;

_err:
tdbTbClose(pState->pStateDb);
tdbTbClose(pState->pFuncStateDb);
tdbTbClose(pState->pFillStateDb);
tdbTbClose(pState->pSessionStateDb);
tdbTbClose(pState->pParNameDb);
tdbClose(pState->db);
taosMemoryFree(pState);
tdbTbClose(pState->pTdbState->pStateDb);
tdbTbClose(pState->pTdbState->pFuncStateDb);
tdbTbClose(pState->pTdbState->pFillStateDb);
tdbTbClose(pState->pTdbState->pSessionStateDb);
tdbTbClose(pState->pTdbState->pParNameDb);
tdbClose(pState->pTdbState->db);
streamStateDestroy(pState);
return NULL;
}

void streamStateClose(SStreamState* pState) {
tdbCommit(pState->db, &pState->txn);
tdbPostCommit(pState->db, &pState->txn);
tdbTbClose(pState->pStateDb);
tdbTbClose(pState->pFuncStateDb);
tdbTbClose(pState->pFillStateDb);
tdbTbClose(pState->pSessionStateDb);
tdbTbClose(pState->pParNameDb);
tdbClose(pState->db);
tdbCommit(pState->pTdbState->db, &pState->pTdbState->txn);
tdbPostCommit(pState->pTdbState->db, &pState->pTdbState->txn);
tdbTbClose(pState->pTdbState->pStateDb);
tdbTbClose(pState->pTdbState->pFuncStateDb);
tdbTbClose(pState->pTdbState->pFillStateDb);
tdbTbClose(pState->pTdbState->pSessionStateDb);
tdbTbClose(pState->pTdbState->pParNameDb);
tdbClose(pState->pTdbState->db);

taosMemoryFree(pState);
streamStateDestroy(pState);
}

int32_t streamStateBegin(SStreamState* pState) {
if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
0) {
if (tdbTxnOpen(&pState->pTdbState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}

if (tdbBegin(pState->db, &pState->txn) < 0) {
tdbTxnClose(&pState->txn);
if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
tdbTxnClose(&pState->pTdbState->txn);
return -1;
}
return 0;
}

int32_t streamStateCommit(SStreamState* pState) {
if (tdbCommit(pState->db, &pState->txn) < 0) {
if (tdbCommit(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
if (tdbPostCommit(pState->db, &pState->txn) < 0) {
if (tdbPostCommit(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
memset(&pState->txn, 0, sizeof(TXN));
if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
0) {
memset(&pState->pTdbState->txn, 0, sizeof(TXN));
if (tdbTxnOpen(&pState->pTdbState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}
if (tdbBegin(pState->db, &pState->txn) < 0) {
if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
return 0;
}

int32_t streamStateAbort(SStreamState* pState) {
if (tdbAbort(pState->db, &pState->txn) < 0) {
if (tdbAbort(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
memset(&pState->txn, 0, sizeof(TXN));
if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
0) {
memset(&pState->pTdbState->txn, 0, sizeof(TXN));
if (tdbTxnOpen(&pState->pTdbState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL,
TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}
if (tdbBegin(pState->db, &pState->txn) < 0) {
if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn) < 0) {
return -1;
}
return 0;
}

int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) {
return tdbTbUpsert(pState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, &pState->txn);
return tdbTbUpsert(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, &pState->pTdbState->txn);
}
int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) {
return tdbTbGet(pState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen);
return tdbTbGet(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen);
}

int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key) {
return tdbTbDelete(pState->pFuncStateDb, key, sizeof(STupleKey), &pState->txn);
return tdbTbDelete(pState->pTdbState->pFuncStateDb, key, sizeof(STupleKey), &pState->pTdbState->txn);
}

// todo refactor
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
SStateKey sKey = {.key = *key, .opNum = pState->number};
return tdbTbUpsert(pState->pStateDb, &sKey, sizeof(SStateKey), value, vLen, &pState->txn);
return tdbTbUpsert(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), value, vLen, &pState->pTdbState->txn);
}

// todo refactor
int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
return tdbTbUpsert(pState->pFillStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
return tdbTbUpsert(pState->pTdbState->pFillStateDb, key, sizeof(SWinKey), value, vLen, &pState->pTdbState->txn);
}

// todo refactor
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
SStateKey sKey = {.key = *key, .opNum = pState->number};
return tdbTbGet(pState->pStateDb, &sKey, sizeof(SStateKey), pVal, pVLen);
return tdbTbGet(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), pVal, pVLen);
}

// todo refactor
int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
return tdbTbGet(pState->pFillStateDb, key, sizeof(SWinKey), pVal, pVLen);
return tdbTbGet(pState->pTdbState->pFillStateDb, key, sizeof(SWinKey), pVal, pVLen);
}

// todo refactor
int32_t streamStateDel(SStreamState* pState, const SWinKey* key) {
SStateKey sKey = {.key = *key, .opNum = pState->number};
return tdbTbDelete(pState->pStateDb, &sKey, sizeof(SStateKey), &pState->txn);
return tdbTbDelete(pState->pTdbState->pStateDb, &sKey, sizeof(SStateKey), &pState->pTdbState->txn);
}

int32_t streamStateClear(SStreamState* pState) {
@@ -288,7 +297,7 @@ void streamStateSetNumber(SStreamState* pState, int32_t number) { pState->number

// todo refactor
int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key) {
return tdbTbDelete(pState->pFillStateDb, key, sizeof(SWinKey), &pState->txn);
return tdbTbDelete(pState->pTdbState->pFillStateDb, key, sizeof(SWinKey), &pState->pTdbState->txn);
}

int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {

@@ -314,7 +323,7 @@ int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pV
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
if (pCur == NULL) return NULL;
tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL);
tdbTbcOpen(pState->pTdbState->pStateDb, &pCur->pCur, NULL);

int32_t c = 0;
SStateKey sKey = {.key = *key, .opNum = pState->number};

@@ -330,7 +339,7 @@ SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* streamStateFillGetCur(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
if (pCur == NULL) return NULL;
tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL);
tdbTbcOpen(pState->pTdbState->pFillStateDb, &pCur->pCur, NULL);

int32_t c = 0;
tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c);

@@ -402,6 +411,7 @@ int32_t streamStateGetFirst(SStreamState* pState, SWinKey* key) {
streamStatePut(pState, &tmp, NULL, 0);
SStreamStateCur* pCur = streamStateSeekKeyNext(pState, &tmp);
int32_t code = streamStateGetKVByCur(pCur, key, NULL, 0);
streamStateFreeCur(pCur);
streamStateDel(pState, &tmp);
return code;
}

@@ -422,7 +432,7 @@ SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key
return NULL;
}
pCur->number = pState->number;
if (tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL) < 0) {
if (tdbTbcOpen(pState->pTdbState->pStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}

@@ -448,7 +458,7 @@ SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey*
if (!pCur) {
return NULL;
}
if (tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL) < 0) {
if (tdbTbcOpen(pState->pTdbState->pFillStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}

@@ -473,7 +483,7 @@ SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey*
if (pCur == NULL) {
return NULL;
}
if (tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL) < 0) {
if (tdbTbcOpen(pState->pTdbState->pFillStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}

@@ -520,7 +530,8 @@ void streamFreeVal(void* val) { tdbFree(val); }

int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen) {
SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
return tdbTbUpsert(pState->pSessionStateDb, &sKey, sizeof(SStateSessionKey), value, vLen, &pState->txn);
return tdbTbUpsert(pState->pTdbState->pSessionStateDb, &sKey, sizeof(SStateSessionKey), value, vLen,
&pState->pTdbState->txn);
}

int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen) {

@@ -543,7 +554,7 @@ int32_t streamStateSessionGet(SStreamState* pState, SSessionKey* key, void** pVa

int32_t streamStateSessionDel(SStreamState* pState, const SSessionKey* key) {
SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
return tdbTbDelete(pState->pSessionStateDb, &sKey, sizeof(SStateSessionKey), &pState->txn);
return tdbTbDelete(pState->pTdbState->pSessionStateDb, &sKey, sizeof(SStateSessionKey), &pState->pTdbState->txn);
}

SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, const SSessionKey* key) {

@@ -552,7 +563,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentPrev(SStreamState* pState, cons
return NULL;
}
pCur->number = pState->number;
if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}

@@ -579,7 +590,7 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentNext(SStreamState* pState, cons
return NULL;
}
pCur->number = pState->number;
if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}

@@ -607,7 +618,7 @@ SStreamStateCur* streamStateSessionSeekKeyNext(SStreamState* pState, const SSess
return NULL;
}
pCur->number = pState->number;
if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}

@@ -674,7 +685,7 @@ int32_t streamStateSessionGetKeyByRange(SStreamState* pState, const SSessionKey*
return -1;
}
pCur->number = pState->number;
if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return -1;
}

@@ -821,13 +832,19 @@ _end:
}

int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) {
tdbTbUpsert(pState->pParNameDb, &groupId, sizeof(int64_t), tbname, TSDB_TABLE_NAME_LEN, &pState->txn);
tdbTbUpsert(pState->pTdbState->pParNameDb, &groupId, sizeof(int64_t), tbname, TSDB_TABLE_NAME_LEN,
&pState->pTdbState->txn);
return 0;
}

int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal) {
int32_t len;
return tdbTbGet(pState->pParNameDb, &groupId, sizeof(int64_t), pVal, &len);
return tdbTbGet(pState->pTdbState->pParNameDb, &groupId, sizeof(int64_t), pVal, &len);
}

void streamStateDestroy(SStreamState* pState) {
taosMemoryFreeClear(pState->pTdbState);
taosMemoryFreeClear(pState);
}

#if 0

@@ -837,7 +854,7 @@ char* streamStateSessionDump(SStreamState* pState) {
return NULL;
}
pCur->number = pState->number;
if (tdbTbcOpen(pState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
if (tdbTbcOpen(pState->pTdbState->pSessionStateDb, &pCur->pCur, NULL) < 0) {
streamStateFreeCur(pCur);
return NULL;
}

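The streamState.c hunks above all apply one refactoring: every raw TDB handle (db env, the per-kind table handles, and the transaction) moves behind a new `pState->pTdbState` backend struct, so callers only ever touch `pState->pTdbState->...`. A minimal sketch of that wrapper pattern, with hypothetical placeholder types rather than the real TDB structs:

```c
#include <stdint.h>
#include <stdlib.h>

typedef struct TDB TDB;   /* opaque database env (placeholder) */
typedef struct TTB TTB;   /* opaque table handle (placeholder) */

typedef struct STdbStateSketch {
  TDB *db;
  TTB *pStateDb;
  TTB *pFillStateDb;
  TTB *pSessionStateDb;
  TTB *pFuncStateDb;
  TTB *pParNameDb;
} STdbStateSketch;

typedef struct SStreamStateSketch {
  STdbStateSketch *pTdbState;  /* all storage-backend handles live here      */
  int32_t          number;     /* non-backend state stays on the outer struct */
} SStreamStateSketch;

/* Allocate the outer state and its backend block, as the new open path does. */
static SStreamStateSketch *streamStateSketchOpen(void) {
  SStreamStateSketch *pState = calloc(1, sizeof(*pState));
  if (pState == NULL) return NULL;
  pState->pTdbState = calloc(1, sizeof(*pState->pTdbState));
  if (pState->pTdbState == NULL) { free(pState); return NULL; }
  return pState;
}
```

Grouping the handles this way keeps the public `SStreamState` surface stable while allowing the on-disk backend to be replaced later without touching every call site.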
@@ -436,8 +436,15 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) {
}

int32_t ret = 0;
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER && pSyncNode->replicaNum > 1) {
SNodeInfo newLeader = (pSyncNode->peersNodeInfo)[0];
if (pSyncNode->peersNum == 2) {
SyncIndex matchIndex0 = syncIndexMgrGetIndex(pSyncNode->pMatchIndex, &(pSyncNode->peersId[0]));
SyncIndex matchIndex1 = syncIndexMgrGetIndex(pSyncNode->pMatchIndex, &(pSyncNode->peersId[1]));
if (matchIndex1 > matchIndex0) {
newLeader = (pSyncNode->peersNodeInfo)[1];
}
}
ret = syncNodeLeaderTransferTo(pSyncNode, newLeader);
}

@@ -944,6 +951,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
SSyncSnapshotSender* pSender = snapshotSenderCreate(pSyncNode, i);
// ASSERT(pSender != NULL);
(pSyncNode->senders)[i] = pSender;
sSTrace(pSender, "snapshot sender create new while open, data:%p", pSender);
}

// snapshot receivers

@@ -971,7 +979,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
atomic_store_64(&pSyncNode->snapshottingIndex, SYNC_INDEX_INVALID);

pSyncNode->isStart = true;
sNTrace(pSyncNode, "sync open");
sNTrace(pSyncNode, "sync open, node:%p", pSyncNode);

return pSyncNode;

@@ -1041,14 +1049,10 @@ void syncNodePreClose(SSyncNode* pSyncNode) {
void syncHbTimerDataFree(SSyncHbTimerData* pData) { taosMemoryFree(pData); }

void syncNodeClose(SSyncNode* pSyncNode) {
if (pSyncNode == NULL) {
return;
}
int32_t ret;
if (pSyncNode == NULL) return;
sNTrace(pSyncNode, "sync close, data:%p", pSyncNode);

sNTrace(pSyncNode, "sync close");

ret = raftStoreClose(pSyncNode->pRaftStore);
int32_t ret = raftStoreClose(pSyncNode->pRaftStore);
ASSERT(ret == 0);
pSyncNode->pRaftStore = NULL;

@@ -1077,6 +1081,7 @@ void syncNodeClose(SSyncNode* pSyncNode) {

for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if ((pSyncNode->senders)[i] != NULL) {
sSTrace((pSyncNode->senders)[i], "snapshot sender destroy while close, data:%p", (pSyncNode->senders)[i]);
snapshotSenderDestroy((pSyncNode->senders)[i]);
(pSyncNode->senders)[i] = NULL;
}

@@ -1398,7 +1403,7 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
// reset sender
bool reset = false;
for (int32_t j = 0; j < TSDB_MAX_REPLICA; ++j) {
if (syncUtilSameId(&(pSyncNode->replicasId)[i], &oldReplicasId[j])) {
if (syncUtilSameId(&(pSyncNode->replicasId)[i], &oldReplicasId[j]) && oldSenders[j] != NULL) {
char host[128];
uint16_t port;
syncUtilU642Addr((pSyncNode->replicasId)[i].addr, host, sizeof(host), &port);

@@ -1415,6 +1420,8 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde

sNTrace(pSyncNode, "snapshot sender udpate replicaIndex from %d to %d, %s:%d, %p, reset:%d", oldreplicaIndex,
i, host, port, (pSyncNode->senders)[i], reset);

break;
}
}
}

@@ -1423,15 +1430,17 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if ((pSyncNode->senders)[i] == NULL) {
(pSyncNode->senders)[i] = snapshotSenderCreate(pSyncNode, i);
sSTrace((pSyncNode->senders)[i], "snapshot sender create new");
sSTrace((pSyncNode->senders)[i], "snapshot sender create new while reconfig, data:%p", (pSyncNode->senders)[i]);
} else {
sSTrace((pSyncNode->senders)[i], "snapshot sender already exist, data:%p", (pSyncNode->senders)[i]);
}
}

// free old
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if (oldSenders[i] != NULL) {
sNTrace(pSyncNode, "snapshot sender destroy old, data:%p replica-index:%d", oldSenders[i], i);
snapshotSenderDestroy(oldSenders[i]);
sNTrace(pSyncNode, "snapshot sender delete old %p replica-index:%d", oldSenders[i], i);
oldSenders[i] = NULL;
}
}
@@ -1946,12 +1955,14 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {

SSyncHbTimerData* pData = syncHbTimerDataAcquire(hbDataRid);
if (pData == NULL) {
sError("hb timer get pData NULL, %" PRId64, hbDataRid);
return;
}

SSyncNode* pSyncNode = syncNodeAcquire(pData->syncNodeRid);
if (pSyncNode == NULL) {
syncHbTimerDataRelease(pData);
sError("hb timer get pSyncNode NULL");
return;
}

@@ -1960,28 +1971,39 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
if (!pSyncNode->isStart) {
syncNodeRelease(pSyncNode);
syncHbTimerDataRelease(pData);
sError("vgId:%d, hb timer sync node already stop", pSyncNode->vgId);
return;
}

if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
syncNodeRelease(pSyncNode);
syncHbTimerDataRelease(pData);
sError("vgId:%d, hb timer sync node not leader", pSyncNode->vgId);
return;
}

if (pSyncNode->pRaftStore == NULL) {
syncNodeRelease(pSyncNode);
syncHbTimerDataRelease(pData);
sError("vgId:%d, hb timer raft store already stop", pSyncNode->vgId);
return;
}

// sNTrace(pSyncNode, "eq peer hb timer");
// sTrace("vgId:%d, eq peer hb timer", pSyncNode->vgId);

if (pSyncNode->replicaNum > 1) {
int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock);
int64_t msgLogicClock = atomic_load_64(&pData->logicClock);

if (pSyncNode->replicaNum > 1) {
if (timerLogicClock == msgLogicClock) {
if (syncIsInit()) {
// sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId);
taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid, syncEnv()->pTimerManager,
&pSyncTimer->pTimer);
} else {
sError("sync env is stop, reset peer hb timer error");
}

SRpcMsg rpcMsg = {0};
(void)syncBuildHeartbeat(&rpcMsg, pSyncNode->vgId);

@@ -1996,16 +2018,9 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
// send msg
syncNodeSendHeartbeat(pSyncNode, &pSyncMsg->destId, &rpcMsg);

if (syncIsInit()) {
taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, pData, syncEnv()->pTimerManager,
&pSyncTimer->pTimer);
} else {
sError("sync env is stop, syncNodeEqHeartbeatTimer");
}

} else {
sTrace("==syncNodeEqPeerHeartbeatTimer== timerLogicClock:%" PRId64 ", msgLogicClock:%" PRId64 "", timerLogicClock,
msgLogicClock);
sTrace("vgId:%d, do not send hb, timerLogicClock:%" PRId64 ", msgLogicClock:%" PRId64 "", pSyncNode->vgId,
timerLogicClock, msgLogicClock);
}
}

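The heartbeat hunks restructure the peer timer callback so a heartbeat is only reset and sent on a multi-replica leader, and only when the timer's logic clock still matches the clock captured in the timer payload. A minimal sketch of that guard, with hypothetical simplified names:

```c
#include <stdbool.h>
#include <stdint.h>

typedef struct { int replicaNum; int64_t timerClock; } Node;

/* A stale payload from an older timer generation (mismatched clock) is
 * silently dropped; single-replica nodes never send peer heartbeats. */
static bool shouldSendPeerHeartbeat(const Node *node, int64_t msgClock) {
  if (node->replicaNum <= 1) return false;
  return node->timerClock == msgClock;
}
```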
@@ -44,7 +44,13 @@ int32_t tmsgSendReq(const SEpSet* epSet, SRpcMsg* pMsg) {
return code;
}

void tmsgSendRsp(SRpcMsg* pMsg) { return (*defaultMsgCb.sendRspFp)(pMsg); }
void tmsgSendRsp(SRpcMsg* pMsg) {
#if 1
rpcSendResponse(pMsg);
#else
return (*defaultMsgCb.sendRspFp)(pMsg);
#endif
}

void tmsgSendRedirectRsp(SRpcMsg* pMsg, const SEpSet* pNewEpSet) { (*defaultMsgCb.sendRedirectRspFp)(pMsg, pNewEpSet); }

@@ -462,8 +462,6 @@ static void uvStartSendResp(SSvrMsg* smsg) {
if (pConn->broken == true) {
// persist by
destroySmsg(smsg);
// transFreeMsg(smsg->msg.pCont);
// taosMemoryFree(smsg);
transUnrefSrvHandle(pConn);
return;
}

@@ -1234,7 +1232,9 @@ int transReleaseSrvHandle(void* handle) {
m->type = Release;

tTrace("%s conn %p start to release", transLabel(pThrd->pTransInst), exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
if (0 != transAsyncSend(pThrd->asyncPool, &m->q)) {
destroySmsg(m);
}

transReleaseExHandle(transGetRefMgt(), refId);
return 0;

@@ -1269,7 +1269,9 @@ int transSendResponse(const STransMsg* msg) {

STraceId* trace = (STraceId*)&msg->info.traceId;
tGTrace("conn %p start to send resp (1/2)", exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
if (0 != transAsyncSend(pThrd->asyncPool, &m->q)) {
destroySmsg(m);
}

transReleaseExHandle(transGetRefMgt(), refId);
return 0;

@@ -1303,7 +1305,9 @@ int transRegisterMsg(const STransMsg* msg) {

STrans* pTransInst = pThrd->pTransInst;
tTrace("%s conn %p start to register brokenlink callback", transLabel(pTransInst), exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
if (0 != transAsyncSend(pThrd->asyncPool, &m->q)) {
destroySmsg(m);
}

transReleaseExHandle(transGetRefMgt(), refId);
return 0;

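The three transport hunks apply the same pattern: check the return code of the async enqueue and free the message when the queue never took ownership, instead of leaking it. A sketch of that pattern with a hypothetical stand-in for the real transport call:

```c
#include <stdlib.h>

typedef struct { void *payload; } Msg;

static int asyncSendSketch(Msg *m);  /* returns 0 on success, non-zero on failure */

static void submitOrDrop(Msg *m) {
  if (asyncSendSketch(m) != 0) {  /* enqueue failed: free here, the queue won't */
    free(m->payload);
    free(m);
  }
}

static int asyncSendSketch(Msg *m) { (void)m; return 0; }  /* trivial stub for the sketch */
```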
@@ -336,6 +336,7 @@ int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen) {
#else
if (realpath(dirname, tmp) != NULL) {
#endif
if (strlen(tmp) < maxlen) {
if (realPath == NULL) {
strncpy(dirname, tmp, maxlen);
} else {

@@ -343,6 +344,7 @@ int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen) {
}
return 0;
}
}

return -1;
}

@@ -344,6 +344,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER, "Table schema is old")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR, "TDB env open error")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_IN_OTHER_STABLE, "Table already exists in other stables")

// query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, "Invalid handle")

@@ -48,7 +48,7 @@
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
,,,script,./test.sh -f tsim/dnode/drop_dnode_force.sim
,,y,script,./test.sh -f tsim/dnode/drop_dnode_force.sim
,,y,script,./test.sh -f tsim/dnode/offline_reason.sim
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim

@@ -56,7 +56,7 @@
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
,,y,script,./test.sh -f tsim/dnode/vnode_clean.sim
,,,script,./test.sh -f tsim/dnode/use_dropped_dnode.sim
,,y,script,./test.sh -f tsim/dnode/use_dropped_dnode.sim
,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica1.sim
,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica3.sim
,,y,script,./test.sh -f tsim/import/basic.sim

@@ -113,7 +113,7 @@
,,y,script,./test.sh -f tsim/parser/first_last.sim
,,y,script,./test.sh -f tsim/parser/fill_stb.sim
,,y,script,./test.sh -f tsim/parser/interp.sim
#,,y,script,./test.sh -f tsim/parser/limit2.sim
,,y,script,./test.sh -f tsim/parser/limit2.sim
,,y,script,./test.sh -f tsim/parser/fourArithmetic-basic.sim
,,y,script,./test.sh -f tsim/parser/function.sim
,,y,script,./test.sh -f tsim/parser/groupby-basic.sim

@@ -136,8 +136,8 @@
,,y,script,./test.sh -f tsim/parser/lastrow.sim
,,y,script,./test.sh -f tsim/parser/lastrow2.sim
,,y,script,./test.sh -f tsim/parser/like.sim
,,,script,./test.sh -f tsim/parser/limit.sim
,,,script,./test.sh -f tsim/parser/limit1.sim
,,y,script,./test.sh -f tsim/parser/limit.sim
,,y,script,./test.sh -f tsim/parser/limit1.sim
,,y,script,./test.sh -f tsim/parser/mixed_blocks.sim
,,y,script,./test.sh -f tsim/parser/nchar.sim
,,y,script,./test.sh -f tsim/parser/nestquery.sim

@@ -163,7 +163,7 @@
,,y,script,./test.sh -f tsim/parser/timestamp.sim
,,y,script,./test.sh -f tsim/parser/top_groupby.sim
,,y,script,./test.sh -f tsim/parser/topbot.sim
,,,script,./test.sh -f tsim/parser/union.sim
,,y,script,./test.sh -f tsim/parser/union.sim
,,y,script,./test.sh -f tsim/parser/union_sysinfo.sim
,,y,script,./test.sh -f tsim/parser/where.sim
,,y,script,./test.sh -f tsim/query/charScalarFunction.sim

@@ -176,11 +176,11 @@
,,y,script,./test.sh -f tsim/query/udf.sim
,,y,script,./test.sh -f tsim/qnode/basic1.sim
,,y,script,./test.sh -f tsim/snode/basic1.sim
,,,script,./test.sh -f tsim/mnode/basic1.sim
,,,script,./test.sh -f tsim/mnode/basic2.sim
,,,script,./test.sh -f tsim/mnode/basic3.sim
,,,script,./test.sh -f tsim/mnode/basic4.sim
,,,script,./test.sh -f tsim/mnode/basic5.sim
,,y,script,./test.sh -f tsim/mnode/basic1.sim
,,y,script,./test.sh -f tsim/mnode/basic2.sim
,,y,script,./test.sh -f tsim/mnode/basic3.sim
,,y,script,./test.sh -f tsim/mnode/basic4.sim
,,y,script,./test.sh -f tsim/mnode/basic5.sim
,,y,script,./test.sh -f tsim/show/basic.sim
,,y,script,./test.sh -f tsim/table/autocreate.sim
,,y,script,./test.sh -f tsim/table/basic1.sim

@@ -213,10 +213,10 @@
,,n,script,./test.sh -f tsim/stream/basic0.sim -g
,,y,script,./test.sh -f tsim/stream/basic1.sim
,,y,script,./test.sh -f tsim/stream/basic2.sim
,,,script,./test.sh -f tsim/stream/drop_stream.sim
,,y,script,./test.sh -f tsim/stream/drop_stream.sim
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic1.sim
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic2.sim
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
,,n,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim
,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
,,y,script,./test.sh -f tsim/stream/distributeSession0.sim

@@ -227,11 +227,11 @@
,,y,script,./test.sh -f tsim/stream/triggerSession0.sim
,,y,script,./test.sh -f tsim/stream/partitionby.sim
,,y,script,./test.sh -f tsim/stream/partitionby1.sim
,,y,script,./test.sh -f tsim/stream/schedSnode.sim
,,,script,./test.sh -f tsim/stream/windowClose.sim
,,n,script,./test.sh -f tsim/stream/schedSnode.sim
,,y,script,./test.sh -f tsim/stream/windowClose.sim
,,y,script,./test.sh -f tsim/stream/ignoreExpiredData.sim
,,y,script,./test.sh -f tsim/stream/sliding.sim
,,y,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
,,n,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
,,y,script,./test.sh -f tsim/stream/partitionbyColumnSession.sim
,,y,script,./test.sh -f tsim/stream/partitionbyColumnState.sim
,,y,script,./test.sh -f tsim/stream/deleteInterval.sim

@@ -278,8 +278,9 @@
,,y,script,./test.sh -f tsim/stable/values.sim
,,y,script,./test.sh -f tsim/stable/vnode3.sim
,,y,script,./test.sh -f tsim/stable/metrics_idx.sim
,,,script,./test.sh -f tsim/sma/drop_sma.sim
,,,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
,,n,script,./test.sh -f tsim/sma/drop_sma.sim
,,n,script,./test.sh -f tsim/sma/sma_leak.sim
,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
,,n,script,./test.sh -f tsim/valgrind/checkError1.sim

@@ -291,7 +292,7 @@
,,n,script,./test.sh -f tsim/valgrind/checkError7.sim
,,n,script,./test.sh -f tsim/valgrind/checkError8.sim
,,n,script,./test.sh -f tsim/valgrind/checkUdf.sim
,,,script,./test.sh -f tsim/vnode/replica3_basic.sim
,,y,script,./test.sh -f tsim/vnode/replica3_basic.sim
,,y,script,./test.sh -f tsim/vnode/replica3_repeat.sim
,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim
,,y,script,./test.sh -f tsim/vnode/replica3_many.sim

@@ -424,6 +425,7 @@
,,,system-test,python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py
,,,system-test,python3 ./test.py -f 1-insert/alter_stable.py
,,,system-test,python3 ./test.py -f 1-insert/alter_table.py
,,,system-test,python3 ./test.py -f 1-insert/boundary.py
,,,system-test,python3 ./test.py -f 1-insert/insertWithMoreVgroup.py
,,,system-test,python3 ./test.py -f 1-insert/table_comment.py
,,,system-test,python3 ./test.py -f 1-insert/time_range_wise.py

@@ -437,31 +439,31 @@
,,,system-test,python3 ./test.py -f 1-insert/database_pre_suf.py
,,,system-test,python3 ./test.py -f 1-insert/InsertFuturets.py
,,,system-test,python3 ./test.py -f 0-others/show.py
,,,system-test,python3 ./test.py -f 2-query/abs.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py
,,,system-test,python3 ./test.py -f 2-query/abs.py -R
,,,system-test,python3 ./test.py -f 2-query/and_or_for_byte.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/and_or_for_byte.py
,,,system-test,python3 ./test.py -f 2-query/and_or_for_byte.py -R
,,,system-test,python3 ./test.py -f 2-query/apercentile.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/apercentile.py
,,,system-test,python3 ./test.py -f 2-query/apercentile.py -R
,,,system-test,python3 ./test.py -f 2-query/arccos.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py
,,,system-test,python3 ./test.py -f 2-query/arccos.py -R
,,,system-test,python3 ./test.py -f 2-query/arcsin.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py
,,,system-test,python3 ./test.py -f 2-query/arcsin.py -R
,,,system-test,python3 ./test.py -f 2-query/arctan.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py
,,,system-test,python3 ./test.py -f 2-query/arctan.py -R
,,,system-test,python3 ./test.py -f 2-query/avg.py
,,,system-test,python3 ./test.py -f 2-query/avg.py -R
,,,system-test,python3 ./test.py -f 2-query/between.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py
,,,system-test,python3 ./test.py -f 2-query/between.py -R
,,,system-test,python3 ./test.py -f 2-query/bottom.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py
,,,system-test,python3 ./test.py -f 2-query/bottom.py -R
,,,system-test,python3 ./test.py -f 2-query/cast.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py
,,,system-test,python3 ./test.py -f 2-query/cast.py -R
,,,system-test,python3 ./test.py -f 2-query/ceil.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ceil.py
,,,system-test,python3 ./test.py -f 2-query/ceil.py -R
,,,system-test,python3 ./test.py -f 2-query/char_length.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py
,,,system-test,python3 ./test.py -f 2-query/char_length.py -R
,,,system-test,python3 ./test.py -f 2-query/check_tsdb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py
,,,system-test,python3 ./test.py -f 2-query/check_tsdb.py -R
,,,system-test,python3 ./test.py -f 2-query/concat.py
,,,system-test,python3 ./test.py -f 2-query/concat.py -R

@@ -469,63 +471,63 @@
,,,system-test,python3 ./test.py -f 2-query/concat_ws.py -R
,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py
,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py -R
,,,system-test,python3 ./test.py -f 2-query/cos.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py
,,,system-test,python3 ./test.py -f 2-query/cos.py -R
,,,system-test,python3 ./test.py -f 2-query/count_partition.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py
,,,system-test,python3 ./test.py -f 2-query/count_partition.py -R
,,,system-test,python3 ./test.py -f 2-query/count.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py
,,,system-test,python3 ./test.py -f 2-query/count.py -R
,,,system-test,python3 ./test.py -f 2-query/countAlwaysReturnValue.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py
,,,system-test,python3 ./test.py -f 2-query/countAlwaysReturnValue.py -R
,,,system-test,python3 ./test.py -f 2-query/db.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/db.py
,,,system-test,python3 ./test.py -f 2-query/db.py -R
,,,system-test,python3 ./test.py -f 2-query/diff.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py
,,,system-test,python3 ./test.py -f 2-query/diff.py -R
,,,system-test,python3 ./test.py -f 2-query/distinct.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py
,,,system-test,python3 ./test.py -f 2-query/distinct.py -R
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_apercentile.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_apercentile.py -R
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_avg.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_avg.py -R
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_count.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_count.py -R
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_max.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_max.py -R
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_min.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_min.py -R
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_spread.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_spread.py -R
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_stddev.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_stddev.py -R
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_sum.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_sum.py -R
,,,system-test,python3 ./test.py -f 2-query/explain.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/explain.py
,,,system-test,python3 ./test.py -f 2-query/explain.py -R
,,,system-test,python3 ./test.py -f 2-query/first.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/first.py
,,,system-test,python3 ./test.py -f 2-query/first.py -R
,,,system-test,python3 ./test.py -f 2-query/floor.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/floor.py
,,,system-test,python3 ./test.py -f 2-query/floor.py -R
,,,system-test,python3 ./test.py -f 2-query/function_null.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py
,,,system-test,python3 ./test.py -f 2-query/function_null.py -R
,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_stateduration.py
,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -R
,,,system-test,python3 ./test.py -f 2-query/histogram.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/histogram.py
,,,system-test,python3 ./test.py -f 2-query/histogram.py -R
,,,system-test,python3 ./test.py -f 2-query/hyperloglog.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py
,,,system-test,python3 ./test.py -f 2-query/hyperloglog.py -R
,,,system-test,python3 ./test.py -f 2-query/interp.py
,,,system-test,python3 ./test.py -f 2-query/interp.py -R
,,,system-test,python3 ./test.py -f 2-query/irate.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py
,,,system-test,python3 ./test.py -f 2-query/irate.py -R
,,,system-test,python3 ./test.py -f 2-query/join.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py
,,,system-test,python3 ./test.py -f 2-query/join.py -R
,,,system-test,python3 ./test.py -f 2-query/last_row.py
,,,system-test,python3 ./test.py -f 2-query/last_row.py -R
,,,system-test,python3 ./test.py -f 2-query/last.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py
,,,system-test,python3 ./test.py -f 2-query/last.py -R
,,,system-test,python3 ./test.py -f 2-query/leastsquares.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py
,,,system-test,python3 ./test.py -f 2-query/leastsquares.py -R
,,,system-test,python3 ./test.py -f 2-query/length.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py
,,,system-test,python3 ./test.py -f 2-query/length.py -R
,,,system-test,python3 ./test.py -f 2-query/log.py
,,,system-test,python3 ./test.py -f 2-query/log.py -R

@@ -533,7 +535,7 @@
,,,system-test,python3 ./test.py -f 2-query/lower.py -R
,,,system-test,python3 ./test.py -f 2-query/ltrim.py
,,,system-test,python3 ./test.py -f 2-query/ltrim.py -R
,,,system-test,python3 ./test.py -f 2-query/mavg.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py
,,,system-test,python3 ./test.py -f 2-query/mavg.py -R
,,,system-test,python3 ./test.py -f 2-query/max_partition.py
,,,system-test,python3 ./test.py -f 2-query/max_partition.py -R

@@ -607,6 +609,8 @@
,,,system-test,python3 ./test.py -f 2-query/upper.py -R
,,,system-test,python3 ./test.py -f 2-query/varchar.py
,,,system-test,python3 ./test.py -f 2-query/varchar.py -R
,,,system-test,python3 ./test.py -f 2-query/case_when.py
,,,system-test,python3 ./test.py -f 2-query/case_when.py -R
,,,system-test,python3 ./test.py -f 1-insert/update_data.py
,,,system-test,python3 ./test.py -f 1-insert/tb_100w_data_order.py
,,,system-test,python3 ./test.py -f 1-insert/delete_stable.py

@@ -813,6 +817,7 @@
,,,system-test,python3 ./test.py -f 2-query/last_row.py -Q 2
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 2
,,,system-test,python3 ./test.py -f 2-query/sml.py -Q 2
,,,system-test,python3 ./test.py -f 2-query/case_when.py -Q 2
,,,system-test,python3 ./test.py -f 2-query/between.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/distinct.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/varchar.py -Q 3

@@ -906,6 +911,7 @@
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/sml.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/interp.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/case_when.py -Q 3
,,,system-test,python3 ./test.py -f 2-query/between.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/distinct.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/varchar.py -Q 4

@@ -998,6 +1004,7 @@
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/sml.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/interp.py -Q 4
,,,system-test,python3 ./test.py -f 2-query/case_when.py -Q 4

#develop test
,,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py

@@ -45,7 +45,7 @@ fi

# Now getting ready to execute Python
# The following is the default of our standard dev env (Ubuntu 20.04), modify/adjust at your own risk
PYTHON_EXEC=python3.8
PYTHON_EXEC=python3

# First we need to set up a path for Python to find our own TAOS modules, so that "import" can work.
# export PYTHONPATH=$(pwd)/../../src/connector/python:$(pwd)

@ -37,6 +37,7 @@ import requests
|
|||
# from guppy import hpy
|
||||
import gc
|
||||
import taos
|
||||
from taos.tmq import *
|
||||
|
||||
|
||||
from .shared.types import TdColumns, TdTags
|
||||
|
@ -419,10 +420,12 @@ class ThreadCoordinator:
|
|||
except threading.BrokenBarrierError as err:
|
||||
self._execStats.registerFailure("Aborted due to worker thread timeout")
|
||||
Logging.error("\n")
|
||||
|
||||
Logging.error("Main loop aborted, caused by worker thread(s) time-out of {} seconds".format(
|
||||
ThreadCoordinator.WORKER_THREAD_TIMEOUT))
|
||||
Logging.error("TAOS related threads blocked at (stack frames top-to-bottom):")
|
||||
ts = ThreadStacks()
|
||||
ts.record_current_time(time.time()) # record thread exit time at current moment
|
||||
ts.print(filterInternal=True)
|
||||
workerTimeout = True
|
||||
|
||||
|
@ -546,7 +549,12 @@ class ThreadCoordinator:
|
|||
|
||||
# pick a task type for current state
|
||||
db = self.pickDatabase()
|
||||
if Dice.throw(2)==1:
|
||||
taskType = db.getStateMachine().pickTaskType() # dynamic name of class
|
||||
else:
|
||||
taskType = db.getStateMachine().balance_pickTaskType() # and an method can get balance task types
|
||||
pass
|
||||
|
||||
return taskType(self._execStats, db) # create a task from it
|
||||
|
||||
def resetExecutedTasks(self):
|
||||
|
@ -674,9 +682,15 @@ class AnyState:
|
|||
# only "under normal circumstances", as we may override it with the -b option
|
||||
CAN_DROP_DB = 2
|
||||
CAN_CREATE_FIXED_SUPER_TABLE = 3
|
||||
CAN_CREATE_STREAM = 3 # super table must exists
|
||||
CAN_CREATE_TOPIC = 3 # super table must exists
|
||||
CAN_CREATE_CONSUMERS = 3
|
||||
CAN_DROP_FIXED_SUPER_TABLE = 4
|
||||
CAN_DROP_TOPIC = 4
|
||||
CAN_DROP_STREAM = 4
|
||||
CAN_ADD_DATA = 5
|
||||
CAN_READ_DATA = 6
|
||||
CAN_DELETE_DATA = 6
|
||||
|
||||
def __init__(self):
|
||||
self._info = self.getInfo()
|
||||
|
@ -727,12 +741,30 @@ class AnyState:
|
|||
return False
|
||||
return self._info[self.CAN_DROP_FIXED_SUPER_TABLE]
|
||||
|
||||
def canCreateTopic(self):
|
||||
return self._info[self.CAN_CREATE_TOPIC]
|
||||
|
||||
def canDropTopic(self):
|
||||
return self._info[self.CAN_DROP_TOPIC]
|
||||
|
||||
def canCreateConsumers(self):
|
||||
return self._info[self.CAN_CREATE_CONSUMERS]
|
||||
|
||||
def canCreateStreams(self):
|
||||
return self._info[self.CAN_CREATE_STREAM]
|
||||
|
||||
def canDropStream(self):
|
||||
return self._info[self.CAN_DROP_STREAM]
|
||||
|
||||
def canAddData(self):
|
||||
return self._info[self.CAN_ADD_DATA]
|
||||
|
||||
def canReadData(self):
|
||||
return self._info[self.CAN_READ_DATA]
|
||||
|
||||
def canDeleteData(self):
|
||||
return self._info[self.CAN_DELETE_DATA]
|
||||
|
||||
def assertAtMostOneSuccess(self, tasks, cls):
|
||||
sCnt = 0
|
||||
for task in tasks:
|
||||
|
@ -902,7 +934,7 @@ class StateHasData(AnyState):
|
|||
): # only if we didn't create one
|
||||
# we shouldn't have dropped it
|
||||
self.assertNoTask(tasks, TaskDropDb)
|
||||
if (not self.hasTask(tasks, TaskCreateSuperTable)
|
||||
if not( self.hasTask(tasks, TaskCreateSuperTable)
|
||||
): # if we didn't create the table
|
||||
# we should not have a task that drops it
|
||||
self.assertNoTask(tasks, TaskDropSuperTable)
|
||||
|
@ -974,14 +1006,21 @@ class StateMechine:
|
|||
# did not do this when openning connection, and this is NOT the worker
|
||||
# thread, which does this on their own
|
||||
dbc.use(dbName)
|
||||
|
||||
if not dbc.hasTables(): # no tables
|
||||
|
||||
Logging.debug("[STT] DB_ONLY found, between {} and {}".format(ts, time.time()))
|
||||
return StateDbOnly()
|
||||
|
||||
# For sure we have tables, which means we must have the super table. # TODO: are we sure?
|
||||
|
||||
sTable = self._db.getFixedSuperTable()
|
||||
|
||||
|
||||
if sTable.hasRegTables(dbc): # no regular tables
|
||||
# print("debug=====*\n"*100)
|
||||
Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time()))
|
||||
|
||||
return StateSuperTableOnly()
|
||||
else: # has actual tables
|
||||
Logging.debug("[STT] HAS_DATA found, between {} and {}".format(ts, time.time()))
|
||||
|
@ -1051,6 +1090,28 @@ class StateMechine:
|
|||
# Logging.debug(" (weighted random:{}/{}) ".format(i, len(taskTypes)))
|
||||
return taskTypes[i]
|
||||
|
||||
def balance_pickTaskType(self):
|
||||
# all the task types we can choose from at curent state
|
||||
BasicTypes = self.getTaskTypes()
|
||||
weightsTypes = BasicTypes.copy()
|
||||
|
||||
# this matrixs can balance the Frequency of TaskTypes
|
||||
balance_TaskType_matrixs = {'TaskDropDb': 5 , 'TaskDropTopics': 20 , 'TaskDropStreams':10 , 'TaskDropStreamTables':10 ,
|
||||
'TaskReadData':50 , 'TaskDropSuperTable':5 , 'TaskAlterTags':3 , 'TaskAddData':10,
|
||||
'TaskDeleteData':10 , 'TaskCreateDb':10 , 'TaskCreateStream': 3, 'TaskCreateTopic' :3,
|
||||
'TaskCreateConsumers':10, 'TaskCreateSuperTable': 10 } # TaskType : balance_matrixs of task
|
||||
|
||||
for task , weights in balance_TaskType_matrixs.items():
|
||||
|
||||
for basicType in BasicTypes:
|
||||
if basicType.__name__ == task:
|
||||
for _ in range(weights):
|
||||
weightsTypes.append(basicType)
|
||||
|
||||
task = random.sample(weightsTypes,1)
|
||||
return task[0]
|
||||
|
||||
|
||||
# ref:
|
||||
# https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/
|
||||
def _weighted_choice_sub(self, weights) -> int:
|
||||
|
@ -1109,6 +1170,7 @@ class Database:
|
|||
return "fs_table"
|
||||
|
||||
def getFixedSuperTable(self) -> TdSuperTable:
|
||||
|
||||
return TdSuperTable(self.getFixedSuperTableName(), self.getName())
|
||||
|
||||
# We aim to create a starting time tick, such that, whenever we run our test here once
|
||||
|
@ -1342,6 +1404,19 @@ class Task():
|
|||
0x2603, # Table does not exist, replaced by 2662 below
|
||||
0x260d, # Tags number not matched
|
||||
0x2662, # Table does not exist #TODO: what about 2603 above?
|
||||
0x2600, # database not specified, SQL: show stables , database droped , and show tables
|
||||
0x032C, # Object is creating
|
||||
0x032D, # Object is dropping
|
||||
0x03D3, # Conflict transaction not completed
|
||||
0x0707, # Query not ready , it always occur at replica 3
|
||||
0x707, # Query not ready
|
||||
0x396, # Database in creating status
|
||||
0x386, # Database in droping status
|
||||
0x03E1, # failed on tmq_subscribe ,topic not exist
|
||||
0x03ed , # Topic must be dropped first, SQL: drop database db_0
|
||||
0x0203 , # Invalid value
|
||||
0x03f0 , # Stream already exist , topic already exists
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -1638,9 +1713,12 @@ class TaskCreateDb(StateTransitionTask):
# numReplica = Dice.throw(Settings.getConfig().max_replicas) + 1 # 1,2 ... N
numReplica = Config.getConfig().num_replicas # fixed, always
repStr = "replica {}".format(numReplica)
updatePostfix = "update 1" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active
updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active; in 3.0 the default is already update 1
vg_nums = random.randint(1, 8)
cache_model = Dice.choice(['none', 'last_row', 'last_value', 'both'])
buffer = random.randint(3, 128)
dbName = self._db.getName()
self.execWtSql(wt, "create database {} {} {} ".format(dbName, repStr, updatePostfix))
self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr, updatePostfix, vg_nums, cache_model, buffer))
if dbName == "db_0" and Config.getConfig().use_shadow_db:
    self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix))
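For reference, the randomized DDL this task ends up issuing can be sketched as a standalone helper; `build_create_db_sql` and `buffer_mb` are illustrative names, not part of the framework:

```python
import random

def build_create_db_sql(db_name: str, num_replicas: int) -> str:
    """Assemble a randomized CREATE DATABASE statement, mirroring the ranges used above."""
    vgroups = random.randint(1, 8)
    cache_model = random.choice(['none', 'last_row', 'last_value', 'both'])
    buffer_mb = random.randint(3, 128)
    return ("create database {} replica {} vgroups {} cachemodel '{}' buffer {}"
            .format(db_name, num_replicas, vgroups, cache_model, buffer_mb))

print(build_create_db_sql("db_0", 1))  # e.g. create database db_0 replica 1 vgroups 3 cachemodel 'none' buffer 57
```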
@ -1654,9 +1732,211 @@ class TaskDropDb(StateTransitionTask):
return state.canDropDb()

def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
    self.execWtSql(wt, "drop database {}".format(self._db.getName()))

    try:
        self.queryWtSql(wt, "drop database {}".format(self._db.getName())) # the drop may fail because a topic still exists
    except taos.error.ProgrammingError as err:
        errno = Helper.convertErrno(err.errno)
        if errno in [0x0203]: # the drop may fail
            pass

    Logging.debug("[OPS] database dropped at {}".format(time.time()))
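The 0x0203 / 0x03ed handling above exists because a database cannot be dropped while a topic still references it. A minimal sketch of the usual ordering, assuming only a generic `execute` callable that runs SQL and returns rows for queries:

```python
def drop_db_with_topics(execute, db_name: str):
    """Drop topics that reference db_name first, then drop the database itself."""
    for row in execute("show topics"):
        topic_name = row[0]
        if db_name in topic_name:
            execute("drop topic {}".format(topic_name))
    execute("drop database {}".format(db_name))
```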
class TaskCreateStream(StateTransitionTask):
|
||||
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
return StateHasData()
|
||||
|
||||
@classmethod
|
||||
def canBeginFrom(cls, state: AnyState):
|
||||
return state.canCreateStreams()
|
||||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
dbname = self._db.getName()
|
||||
|
||||
sub_stream_name = dbname+ '_sub_stream'
|
||||
sub_stream_tb_name = 'stream_tb_sub'
|
||||
super_stream_name = dbname+ '_super_stream'
|
||||
super_stream_tb_name = 'stream_tb_super'
|
||||
if not self._db.exists(wt.getDbConn()):
|
||||
Logging.debug("Skipping task, no DB yet")
|
||||
return
|
||||
|
||||
sTable = self._db.getFixedSuperTable() # type: TdSuperTable
|
||||
# wt.execSql("use db") # should always be in place
|
||||
stbname =sTable.getName()
|
||||
sub_tables = sTable.getRegTables(wt.getDbConn())
|
||||
aggExpr = Dice.choice([
|
||||
'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)','min(speed)', 'max(speed)', 'first(speed)', 'last(speed)',
|
||||
'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)'])
|
||||
|
||||
stream_sql = '' # set default value
|
||||
|
||||
if sub_tables:
|
||||
sub_tbname = sub_tables[0]
|
||||
# create stream with query above sub_table
|
||||
stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\
|
||||
format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname)
|
||||
else:
|
||||
stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\
|
||||
format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname)
|
||||
self.execWtSql(wt, stream_sql)
|
||||
Logging.debug("[OPS] stream is creating at {}".format(time.time()))
|
||||
|
||||
|
||||
class TaskCreateTopic(StateTransitionTask):
|
||||
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
return StateHasData()
|
||||
|
||||
@classmethod
|
||||
def canBeginFrom(cls, state: AnyState):
|
||||
return state.canCreateTopic()
|
||||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
dbname = self._db.getName()
|
||||
|
||||
sub_topic_name = dbname+ '_sub_topic'
|
||||
super_topic_name = dbname+ '_super_topic'
|
||||
stable_topic = dbname+ '_stable_topic'
|
||||
db_topic = 'database_' + dbname+ '_topics'
|
||||
if not self._db.exists(wt.getDbConn()):
|
||||
Logging.debug("Skipping task, no DB yet")
|
||||
return
|
||||
|
||||
sTable = self._db.getFixedSuperTable() # type: TdSuperTable
|
||||
# wt.execSql("use db") # should always be in place
|
||||
# create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1;
|
||||
|
||||
stbname =sTable.getName()
|
||||
sub_tables = sTable.getRegTables(wt.getDbConn())
|
||||
|
||||
scalarExpr = Dice.choice([ '*','speed','color','abs(speed)','acos(speed)','asin(speed)','atan(speed)','ceil(speed)','cos(speed)','cos(speed)',
|
||||
'floor(speed)','log(speed,2)','pow(speed,2)','round(speed)','sin(speed)','sqrt(speed)','char_length(color)','concat(color,color)',
|
||||
'concat_ws(" ", color,color," ")','length(color)', 'lower(color)', 'ltrim(color)','substr(color , 2)','upper(color)','cast(speed as double)',
|
||||
'cast(ts as bigint)'])
|
||||
topic_sql = '' # set default value
|
||||
if Dice.throw(3)==0: # create topic : source data from sub query
|
||||
if sub_tables: # if not empty
|
||||
sub_tbname = sub_tables[0]
|
||||
# create topic : source data from sub query of sub stable
|
||||
topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name,scalarExpr,dbname,sub_tbname)
|
||||
|
||||
else: # create topic : source data from sub query of stable
|
||||
topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name,scalarExpr, dbname,stbname)
|
||||
elif Dice.throw(3)==1: # create topic : source data from super table
|
||||
topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic,dbname,stbname)
|
||||
|
||||
elif Dice.throw(3)==2: # create topic : source data from whole database
|
||||
topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic,dbname)
|
||||
else:
|
||||
pass
|
||||
|
||||
# exec create topics
|
||||
self.execWtSql(wt, "use {}".format(dbname))
|
||||
self.execWtSql(wt, topic_sql)
|
||||
Logging.debug("[OPS] db topic is creating at {}".format(time.time()))
|
||||
|
||||
class TaskDropTopics(StateTransitionTask):
|
||||
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
return StateHasData()
|
||||
|
||||
@classmethod
|
||||
def canBeginFrom(cls, state: AnyState):
|
||||
return state.canDropTopic()
|
||||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
dbname = self._db.getName()
|
||||
|
||||
|
||||
if not self._db.exists(wt.getDbConn()):
|
||||
Logging.debug("Skipping task, no DB yet")
|
||||
return
|
||||
|
||||
sTable = self._db.getFixedSuperTable() # type: TdSuperTable
|
||||
# wt.execSql("use db") # should always be in place
|
||||
tblName = sTable.getName()
|
||||
if sTable.hasTopics(wt.getDbConn()):
|
||||
sTable.dropTopics(wt.getDbConn(),dbname,None) # drop topics of database
|
||||
sTable.dropTopics(wt.getDbConn(),dbname,tblName) # drop topics of stable
|
||||
|
||||
class TaskDropStreams(StateTransitionTask):
|
||||
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
return StateHasData()
|
||||
|
||||
@classmethod
|
||||
def canBeginFrom(cls, state: AnyState):
|
||||
return state.canDropStream()
|
||||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
# dbname = self._db.getName()
|
||||
|
||||
|
||||
if not self._db.exists(wt.getDbConn()):
|
||||
Logging.debug("Skipping task, no DB yet")
|
||||
return
|
||||
|
||||
sTable = self._db.getFixedSuperTable() # type: TdSuperTable
|
||||
# wt.execSql("use db") # should always be in place
|
||||
# tblName = sTable.getName()
|
||||
if sTable.hasStreams(wt.getDbConn()):
|
||||
sTable.dropStreams(wt.getDbConn()) # drop stream of database
|
||||
|
||||
class TaskDropStreamTables(StateTransitionTask):
|
||||
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
return StateHasData()
|
||||
|
||||
@classmethod
|
||||
def canBeginFrom(cls, state: AnyState):
|
||||
return state.canDropStream()
|
||||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
# dbname = self._db.getName()
|
||||
|
||||
|
||||
if not self._db.exists(wt.getDbConn()):
|
||||
Logging.debug("Skipping task, no DB yet")
|
||||
return
|
||||
|
||||
sTable = self._db.getFixedSuperTable() # type: TdSuperTable
|
||||
wt.execSql("use db") # should always be in place
|
||||
# tblName = sTable.getName()
|
||||
if sTable.hasStreamTables(wt.getDbConn()):
|
||||
sTable.dropStreamTables(wt.getDbConn()) # drop stream tables
|
||||
|
||||
class TaskCreateConsumers(StateTransitionTask):
|
||||
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
return StateHasData()
|
||||
|
||||
@classmethod
|
||||
def canBeginFrom(cls, state: AnyState):
|
||||
return state.canCreateConsumers()
|
||||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
|
||||
if Config.getConfig().connector_type == 'native':
|
||||
|
||||
sTable = self._db.getFixedSuperTable() # type: TdSuperTable
|
||||
# wt.execSql("use db") # should always be in place
|
||||
if sTable.hasTopics(wt.getDbConn()):
|
||||
sTable.createConsumer(wt.getDbConn(),random.randint(1,10))
|
||||
pass
|
||||
else:
|
||||
print(" restful not support tmq consumers")
|
||||
return
|
||||
|
||||
|
||||
class TaskCreateSuperTable(StateTransitionTask):
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
|
@ -1688,10 +1968,13 @@ class TdSuperTable:
|
|||
def __init__(self, stName, dbName):
|
||||
self._stName = stName
|
||||
self._dbName = dbName
|
||||
self._consumerLists = {}
|
||||
self._ConsumerInsts = []
|
||||
|
||||
def getName(self):
|
||||
return self._stName
|
||||
|
||||
|
||||
def drop(self, dbc, skipCheck = False):
|
||||
dbName = self._dbName
|
||||
if self.exists(dbc) : # if myself exists
|
||||
|
@ -1712,9 +1995,11 @@ class TdSuperTable:
|
|||
dbName = self._dbName
|
||||
dbc.execute("USE " + dbName)
|
||||
fullTableName = dbName + '.' + self._stName
|
||||
|
||||
if dbc.existsSuperTable(self._stName):
|
||||
if dropIfExists:
|
||||
dbc.execute("DROP TABLE {}".format(fullTableName))
|
||||
|
||||
else: # error
|
||||
raise CrashGenError("Cannot create super table, already exists: {}".format(self._stName))
|
||||
|
||||
|
@ -1730,10 +2015,59 @@ class TdSuperTable:
|
|||
sql += " TAGS (dummy int) "
|
||||
dbc.execute(sql)
|
||||
|
||||
def createConsumer(self, dbc,Consumer_nums):
|
||||
|
||||
def generateConsumer(current_topic_list):
|
||||
conf = TaosTmqConf()
|
||||
conf.set("group.id", "tg2")
|
||||
conf.set("td.connect.user", "root")
|
||||
conf.set("td.connect.pass", "taosdata")
|
||||
conf.set("enable.auto.commit", "true")
|
||||
def tmq_commit_cb_print(tmq, resp, offset, param=None):
|
||||
print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}")
|
||||
conf.set_auto_commit_cb(tmq_commit_cb_print, None)
|
||||
consumer = conf.new_consumer()
|
||||
topic_list = TaosTmqList()
|
||||
for topic in current_topic_list:
|
||||
topic_list.append(topic)
|
||||
try:
|
||||
consumer.subscribe(topic_list)
|
||||
except TmqError as e :
|
||||
pass
|
||||
|
||||
# consumer with random work life
|
||||
time_start = time.time()
|
||||
while 1:
|
||||
res = consumer.poll(1000)
|
||||
if time.time() - time_start >random.randint(5,50) :
|
||||
break
|
||||
try:
|
||||
consumer.unsubscribe()
|
||||
except TmqError as e :
|
||||
pass
|
||||
return
|
||||
|
||||
# multiple consumers
|
||||
current_topic_list = self.getTopicLists(dbc)
|
||||
for i in range(Consumer_nums):
|
||||
consumer_inst = threading.Thread(target=generateConsumer, args=(current_topic_list,))
|
||||
self._ConsumerInsts.append(consumer_inst)
|
||||
|
||||
for ConsumerInst in self._ConsumerInsts:
|
||||
ConsumerInst.start()
|
||||
for ConsumerInst in self._ConsumerInsts:
|
||||
ConsumerInst.join()
|
||||
|
||||
def getTopicLists(self, dbc: DbConn):
|
||||
dbc.query("show topics ")
|
||||
topics = dbc.getQueryResult()
|
||||
topicLists = [v[0] for v in topics]
|
||||
return topicLists
|
||||
|
||||
def getRegTables(self, dbc: DbConn):
|
||||
dbName = self._dbName
|
||||
try:
|
||||
dbc.query("select TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later
|
||||
dbc.query("select distinct TBNAME from {}.{}".format(dbName, self._stName)) # TODO: analyze result set later
|
||||
except taos.error.ProgrammingError as err:
|
||||
errno2 = Helper.convertErrno(err.errno)
|
||||
Logging.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err))
|
||||
|
@ -1743,7 +2077,75 @@ class TdSuperTable:
|
|||
return [v[0] for v in qr] # list transformation, ref: https://stackoverflow.com/questions/643823/python-list-transformation
|
||||
|
||||
def hasRegTables(self, dbc: DbConn):
|
||||
|
||||
if dbc.existsSuperTable(self._stName):
|
||||
|
||||
return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0
|
||||
else:
|
||||
return False
|
||||
|
||||
def hasStreamTables(self,dbc: DbConn):
|
||||
|
||||
return dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) > 0
|
||||
|
||||
def hasStreams(self,dbc: DbConn):
|
||||
return dbc.query("show streams") > 0
|
||||
|
||||
def hasTopics(self,dbc: DbConn):
|
||||
|
||||
return dbc.query("show topics") > 0
|
||||
|
||||
def dropTopics(self,dbc: DbConn , dbname=None,stb_name=None):
|
||||
dbc.query("show topics ")
|
||||
topics = dbc.getQueryResult()
|
||||
|
||||
if dbname !=None and stb_name == None :
|
||||
|
||||
for topic in topics:
|
||||
if dbname in topic[0] and topic[0].startswith("database"):
|
||||
try:
|
||||
dbc.execute('drop topic {}'.format(topic[0]))
|
||||
Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time()))
|
||||
except taos.error.ProgrammingError as err:
|
||||
errno = Helper.convertErrno(err.errno)
|
||||
if errno in [0x03EB]: # Topic subscribed cannot be dropped
|
||||
pass
|
||||
# for subsript in subscriptions:
|
||||
|
||||
else:
|
||||
pass
|
||||
|
||||
pass
|
||||
return True
|
||||
elif dbname !=None and stb_name!= None:
|
||||
for topic in topics:
|
||||
if topic[0].startswith(self._dbName) and topic[0].endswith('topic'):
|
||||
dbc.execute('drop topic {}'.format(topic[0]))
|
||||
Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time()))
|
||||
return True
|
||||
else:
|
||||
return True
|
||||
pass
|
||||
|
||||
def dropStreams(self,dbc:DbConn):
|
||||
dbc.query("show streams ")
|
||||
Streams = dbc.getQueryResult()
|
||||
for Stream in Streams:
|
||||
if Stream[0].startswith(self._dbName):
|
||||
dbc.execute('drop stream {}'.format(Stream[0]))
|
||||
|
||||
return not dbc.query("show streams ") > 0
|
||||
|
||||
def dropStreamTables(self, dbc: DbConn):
|
||||
dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName))
|
||||
|
||||
StreamTables = dbc.getQueryResult()
|
||||
|
||||
for StreamTable in StreamTables:
|
||||
if self.dropStreams(dbc):
|
||||
dbc.execute('drop table {}.{}'.format(self._dbName,StreamTable[0]))
|
||||
|
||||
return not dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName))
|
||||
|
||||
def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str):
|
||||
'''
|
||||
|
@ -1838,10 +2240,46 @@ class TdSuperTable:
|
|||
# Run the query against the regular table first
|
||||
doAggr = (Dice.throw(2) == 0) # 1 in 2 chance
|
||||
if not doAggr: # don't do aggregate query, just simple one
|
||||
commonExpr = Dice.choice([
|
||||
'*',
|
||||
'abs(speed)',
|
||||
'acos(speed)',
|
||||
'asin(speed)',
|
||||
'atan(speed)',
|
||||
'ceil(speed)',
|
||||
'cos(speed)',
|
||||
'cos(speed)',
|
||||
'floor(speed)',
|
||||
'log(speed,2)',
|
||||
'pow(speed,2)',
|
||||
'round(speed)',
|
||||
'sin(speed)',
|
||||
'sqrt(speed)',
|
||||
'char_length(color)',
|
||||
'concat(color,color)',
|
||||
'concat_ws(" ", color,color," ")',
|
||||
'length(color)',
|
||||
'lower(color)',
|
||||
'ltrim(color)',
|
||||
'substr(color , 2)',
|
||||
'upper(color)',
|
||||
'cast(speed as double)',
|
||||
'cast(ts as bigint)',
|
||||
# 'TO_ISO8601(color)',
|
||||
# 'TO_UNIXTIMESTAMP(ts)',
|
||||
'now()',
|
||||
'timediff(ts,now)',
|
||||
'timezone()',
|
||||
'TIMETRUNCATE(ts,1s)',
|
||||
'TIMEZONE()',
|
||||
'TODAY()',
|
||||
'distinct(color)'
|
||||
]
|
||||
)
|
||||
ret.append(SqlQuery( # reg table
|
||||
"select {} from {}.{}".format('*', self._dbName, rTbName)))
|
||||
"select {} from {}.{}".format(commonExpr, self._dbName, rTbName)))
|
||||
ret.append(SqlQuery( # super table
|
||||
"select {} from {}.{}".format('*', self._dbName, self.getName())))
|
||||
"select {} from {}.{}".format(commonExpr, self._dbName, self.getName())))
|
||||
else: # Aggregate query
|
||||
aggExpr = Dice.choice([
|
||||
'count(*)',
|
||||
|
@ -1857,17 +2295,34 @@ class TdSuperTable:
|
|||
'top(speed, 50)', # TODO: not supported?
|
||||
'bottom(speed, 50)', # TODO: not supported?
|
||||
'apercentile(speed, 10)', # TODO: TD-1316
|
||||
# 'last_row(speed)', # TODO: commented out per TD-3231, we should re-create
|
||||
'last_row(*)', # TODO: commented out per TD-3231, we should re-create
|
||||
# Transformation Functions
|
||||
# 'diff(speed)', # TODO: no supported?!
|
||||
'spread(speed)'
|
||||
'spread(speed)',
|
||||
'elapsed(ts)',
|
||||
'mode(speed)',
|
||||
'bottom(speed,1)',
|
||||
'top(speed,1)',
|
||||
'tail(speed,1)',
|
||||
'unique(color)',
|
||||
'csum(speed)',
|
||||
'DERIVATIVE(speed,1s,1)',
|
||||
'diff(speed,1)',
|
||||
'irate(speed)',
|
||||
'mavg(speed,3)',
|
||||
'sample(speed,5)',
|
||||
'STATECOUNT(speed,"LT",1)',
|
||||
'STATEDURATION(speed,"LT",1)',
|
||||
'twa(speed)'
|
||||
|
||||
]) # TODO: add more from 'top'
|
||||
|
||||
|
||||
# if aggExpr not in ['stddev(speed)']: # STDDEV not valid for super tables?! (Done in TD-1049)
|
||||
sql = "select {} from {}.{}".format(aggExpr, self._dbName, self.getName())
|
||||
if Dice.throw(3) == 0: # 1 in X chance
|
||||
sql = sql + ' GROUP BY color'
|
||||
partion_expr = Dice.choice(['color','tbname'])
|
||||
sql = sql + ' partition BY ' + partion_expr + ' order by ' + partion_expr
|
||||
Progress.emit(Progress.QUERY_GROUP_BY)
|
||||
# Logging.info("Executing GROUP-BY query: " + sql)
|
||||
ret.append(SqlQuery(sql))
|
||||
|
@ -1975,6 +2430,7 @@ class TaskDropSuperTable(StateTransitionTask):
|
|||
Logging.debug("[DB] Acceptable error when dropping a table")
|
||||
continue # try to delete next regular table
|
||||
|
||||
|
||||
if (not tickOutput):
|
||||
tickOutput = True # Print only one time
|
||||
if isSuccess:
|
||||
|
@ -1987,6 +2443,8 @@ class TaskDropSuperTable(StateTransitionTask):
|
|||
self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName))
|
||||
|
||||
|
||||
|
||||
|
||||
class TaskAlterTags(StateTransitionTask):
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
|
@ -2234,6 +2692,220 @@ class TaskAddData(StateTransitionTask):
|
|||
|
||||
self.activeTable.discard(i) # not raising an error, unlike remove
|
||||
|
||||
class TaskDeleteData(StateTransitionTask):
|
||||
# Track which table is being actively worked on
|
||||
activeTable: Set[int] = set()
|
||||
|
||||
# We use these two files to record operations to DB, useful for power-off tests
|
||||
fAddLogReady = None # type: Optional[io.TextIOWrapper]
|
||||
fAddLogDone = None # type: Optional[io.TextIOWrapper]
|
||||
|
||||
@classmethod
|
||||
def prepToRecordOps(cls):
|
||||
if Config.getConfig().record_ops:
|
||||
if (cls.fAddLogReady is None):
|
||||
Logging.info(
|
||||
"Recording in a file operations to be performed...")
|
||||
cls.fAddLogReady = open("add_log_ready.txt", "w")
|
||||
if (cls.fAddLogDone is None):
|
||||
Logging.info("Recording in a file operations completed...")
|
||||
cls.fAddLogDone = open("add_log_done.txt", "w")
|
||||
|
||||
@classmethod
|
||||
def getEndState(cls):
|
||||
return StateHasData()
|
||||
|
||||
@classmethod
|
||||
def canBeginFrom(cls, state: AnyState):
|
||||
return state.canDeleteData()
|
||||
|
||||
def _lockTableIfNeeded(self, fullTableName, extraMsg = ''):
|
||||
if Config.getConfig().verify_data:
|
||||
# Logging.info("Locking table: {}".format(fullTableName))
|
||||
self.lockTable(fullTableName)
|
||||
# Logging.info("Table locked {}: {}".format(extraMsg, fullTableName))
|
||||
# print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
|
||||
else:
|
||||
# Logging.info("Skipping locking table")
|
||||
pass
|
||||
|
||||
def _unlockTableIfNeeded(self, fullTableName):
|
||||
if Config.getConfig().verify_data:
|
||||
# Logging.info("Unlocking table: {}".format(fullTableName))
|
||||
self.unlockTable(fullTableName)
|
||||
# Logging.info("Table unlocked: {}".format(fullTableName))
|
||||
else:
|
||||
pass
|
||||
# Logging.info("Skipping unlocking table")
|
||||
|
||||
def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
|
||||
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
|
||||
del_Records = int(numRecords/5)
|
||||
if Dice.throw(2) == 0:
|
||||
for j in range(del_Records): # number of records per table
|
||||
intToWrite = db.getNextInt()
|
||||
nextTick = db.getNextTick()
|
||||
# nextColor = db.getNextColor()
|
||||
if Config.getConfig().record_ops:
|
||||
self.prepToRecordOps()
|
||||
if self.fAddLogReady is None:
|
||||
raise CrashGenError("Unexpected empty fAddLogReady")
|
||||
self.fAddLogReady.write("Ready to delete {} to {}\n".format(intToWrite, regTableName))
|
||||
self.fAddLogReady.flush()
|
||||
os.fsync(self.fAddLogReady.fileno())
|
||||
|
||||
# TODO: too ugly trying to lock the table reliably, refactor...
|
||||
fullTableName = db.getName() + '.' + regTableName
|
||||
self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock
|
||||
|
||||
try:
|
||||
sql = "delete from {} where ts = '{}' ;".format( # removed: tags ('{}', {})
|
||||
fullTableName,
|
||||
# ds.getFixedSuperTableName(),
|
||||
# ds.getNextBinary(), ds.getNextFloat(),
|
||||
nextTick)
|
||||
|
||||
# print(sql)
|
||||
# Logging.info("Adding data: {}".format(sql))
|
||||
dbc.execute(sql)
|
||||
# Logging.info("Data added: {}".format(sql))
|
||||
intWrote = intToWrite
|
||||
|
||||
# Quick hack, attach an update statement here. TODO: create an "update" task
|
||||
if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shadow DB
|
||||
intToUpdate = db.getNextInt() # Updated, but should not succeed
|
||||
# nextColor = db.getNextColor()
|
||||
sql = "delete from {} where ts = '{}' ;".format( # "INSERt" means "update" here
|
||||
fullTableName,
|
||||
nextTick)
|
||||
# sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format(
|
||||
# fullTableName, db.getNextInt(), db.getNextColor(), nextTick)
|
||||
dbc.execute(sql)
|
||||
intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this.
|
||||
|
||||
except: # Any exception at all
|
||||
self._unlockTableIfNeeded(fullTableName)
|
||||
raise
|
||||
|
||||
# Now read it back and verify, we might encounter an error if table is dropped
|
||||
if Config.getConfig().verify_data: # only if command line asks for it
|
||||
try:
|
||||
dbc.query("SELECT * from {}.{} WHERE ts='{}'".
|
||||
format(db.getName(), regTableName, nextTick))
|
||||
result = dbc.getQueryResult()
|
||||
if len(result)==0:
|
||||
# means the data has been deleted
print("D1",end="") # D1 means the delete succeeded (single record)
|
||||
else:
|
||||
print("DF",end="") # DF means delete failed
|
||||
except taos.error.ProgrammingError as err:
|
||||
errno = Helper.convertErrno(err.errno)
|
||||
# if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
|
||||
# print("D1",end="") # D1 means delete data success and only 1 record
|
||||
|
||||
if errno in [0x218, 0x362,0x2662]: # table doesn't exist
|
||||
# do nothing
|
||||
pass
|
||||
else:
|
||||
# Re-throw otherwise
|
||||
raise
|
||||
finally:
|
||||
self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock
|
||||
# Done with read-back verification, unlock the table now
|
||||
# Successfully wrote the data into the DB, let's record it somehow
|
||||
te.recordDataMark(intWrote)
|
||||
else:
|
||||
|
||||
# delete all data and verify; the table is expected to be empty
|
||||
if Config.getConfig().record_ops:
|
||||
self.prepToRecordOps()
|
||||
if self.fAddLogReady is None:
|
||||
raise CrashGenError("Unexpected empty fAddLogReady")
|
||||
self.fAddLogReady.write("Ready to delete {} to {}\n".format(intToWrite, regTableName))
|
||||
self.fAddLogReady.flush()
|
||||
os.fsync(self.fAddLogReady.fileno())
|
||||
|
||||
# TODO: too ugly trying to lock the table reliably, refactor...
|
||||
fullTableName = db.getName() + '.' + regTableName
|
||||
self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock
|
||||
|
||||
try:
|
||||
sql = "delete from {} ;".format( # removed: tags ('{}', {})
|
||||
fullTableName)
|
||||
# Logging.info("Adding data: {}".format(sql))
|
||||
dbc.execute(sql)
|
||||
# Logging.info("Data added: {}".format(sql))
|
||||
|
||||
# Quick hack, attach an update statement here. TODO: create an "update" task
|
||||
if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shadow DB
|
||||
sql = "delete from {} ;".format( # "INSERt" means "update" here
|
||||
fullTableName)
|
||||
dbc.execute(sql)
|
||||
|
||||
except: # Any exception at all
|
||||
self._unlockTableIfNeeded(fullTableName)
|
||||
raise
|
||||
|
||||
# Now read it back and verify, we might encounter an error if table is dropped
|
||||
if Config.getConfig().verify_data: # only if command line asks for it
|
||||
try:
|
||||
dbc.query("SELECT * from {}.{} WHERE ts='{}'".
|
||||
format(db.getName(), regTableName, nextTick))
|
||||
result = dbc.getQueryResult()
|
||||
if len(result)==0:
|
||||
# means the data has been deleted
|
||||
print("DA",end="")
|
||||
else:
|
||||
print("DF",end="") # DF means delete failed
|
||||
except taos.error.ProgrammingError as err:
|
||||
errno = Helper.convertErrno(err.errno)
|
||||
# if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
|
||||
# print("Da",end="") # Da means delete data success and for all datas
|
||||
|
||||
if errno in [0x218, 0x362,0x2662]: # table doesn't exist
|
||||
# do nothing
|
||||
pass
|
||||
else:
|
||||
# Re-throw otherwise
|
||||
raise
|
||||
finally:
|
||||
self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock
|
||||
# Done with read-back verification, unlock the table now
|
||||
|
||||
if Config.getConfig().record_ops:
|
||||
if self.fAddLogDone is None:
|
||||
raise CrashGenError("Unexpected empty fAddLogDone")
|
||||
self.fAddLogDone.write("Wrote {} to {}\n".format(intWrote, regTableName))
|
||||
self.fAddLogDone.flush()
|
||||
os.fsync(self.fAddLogDone.fileno())
|
||||
|
||||
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
|
||||
# ds = self._dbManager # Quite DANGEROUS here, may result in multi-thread client access
|
||||
db = self._db
|
||||
dbc = wt.getDbConn()
|
||||
numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES
|
||||
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
|
||||
tblSeq = list(range(numTables ))
|
||||
random.shuffle(tblSeq) # now we have random sequence
|
||||
for i in tblSeq:
|
||||
if (i in self.activeTable): # wow already active
|
||||
# print("x", end="", flush=True) # concurrent insertion
|
||||
Progress.emit(Progress.CONCURRENT_INSERTION)
|
||||
else:
|
||||
self.activeTable.add(i) # marking it active
|
||||
|
||||
dbName = db.getName()
|
||||
sTable = db.getFixedSuperTable()
|
||||
regTableName = self.getRegTableName(i) # "db.reg_table_{}".format(i)
|
||||
fullTableName = dbName + '.' + regTableName
|
||||
# self._lockTable(fullTableName) # "create table" below. Stop it if the table is "locked"
|
||||
sTable.ensureRegTable(self, wt.getDbConn(), regTableName) # Ensure the table exists
|
||||
# self._unlockTable(fullTableName)
|
||||
|
||||
self._deleteData(db, dbc, regTableName, te)
|
||||
|
||||
self.activeTable.discard(i) # not raising an error, unlike remove
|
||||
|
||||
|
||||
class ThreadStacks: # stack info for all threads
|
||||
def __init__(self):
|
||||
|
@ -2244,6 +2916,9 @@ class ThreadStacks: # stack info for all threads
shortTid = th.native_id % 10000 #type: ignore
self._allStacks[shortTid] = stack # Was using th.native_id

def record_current_time(self, current_time):
    self.current_time = current_time

def print(self, filteredEndName = None, filterInternal = False):
    for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
        lastFrame = stack[-1]

@ -2258,8 +2933,11 @@ class ThreadStacks: # stack info for all threads
continue # ignore
# Now print
print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(shortTid))

lastSqlForThread = DbConn.fetchSqlForThread(shortTid)
print("Last SQL statement attempted from thread {} is: {}".format(shortTid, lastSqlForThread))
last_sql_commit_time = DbConn.get_save_sql_time(shortTid)
# time_cost = DbConn.get_time_cost()
print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, self.current_time - last_sql_commit_time, lastSqlForThread))
stackFrame = 0
for frame in stack: # was using: reversed(stack)
    # print(frame)

@ -2268,6 +2946,8 @@ class ThreadStacks: # stack info for all threads
print(" {}".format(frame.line))
stackFrame += 1
print("-----> End of Thread Info ----->\n")
if self.current_time - last_sql_commit_time > 100: # a deadlock may have occurred
    print("thread {} may be deadlocked".format(shortTid))
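The check above flags a thread as possibly deadlocked when its last recorded SQL timestamp is more than 100 seconds old; the heuristic reduces to a dictionary scan. A minimal sketch, with `last_sql_time_by_tid` standing in for `DbConn.current_time`:

```python
import time

DEADLOCK_THRESHOLD_SEC = 100  # same threshold as the check above

def find_suspect_threads(last_sql_time_by_tid: dict) -> list:
    """Return short thread ids whose last SQL statement was recorded too long ago."""
    now = time.time()
    return [tid for tid, saved_at in last_sql_time_by_tid.items()
            if now - saved_at > DEADLOCK_THRESHOLD_SEC]
```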
class ClientManager:
|
||||
def __init__(self):
|
||||
|
@ -2631,4 +3311,3 @@ class Container():
|
|||
return
|
||||
self._verifyValidProperty(name)
|
||||
self._cargo[name] = value
|
||||
|
||||
|
|
|
@ -27,9 +27,12 @@ class DbConn:
|
|||
TYPE_REST = "rest-api"
|
||||
TYPE_INVALID = "invalid"
|
||||
|
||||
|
||||
|
||||
# class variables
|
||||
lastSqlFromThreads : dict[int, str] = {} # stored by thread id, obtained from threading.current_thread().ident%10000
|
||||
|
||||
spendThreads : dict[int, float] = {} # stored by thread id, obtained from threading.current_thread().ident%10000
|
||||
current_time : dict[int, float] = {} # save current time
|
||||
@classmethod
|
||||
def saveSqlForCurrentThread(cls, sql: str):
|
||||
'''
|
||||
|
@ -37,16 +40,57 @@ class DbConn:
|
|||
run into a dead-lock situation, we can pick out the deadlocked thread, and use
|
||||
that information to find out what SQL statement is stuck.
|
||||
'''
|
||||
|
||||
th = threading.current_thread()
|
||||
shortTid = th.native_id % 10000 #type: ignore
|
||||
cls.lastSqlFromThreads[shortTid] = sql # Save this for later
|
||||
cls.record_save_sql_time()
|
||||
|
||||
@classmethod
|
||||
def fetchSqlForThread(cls, shortTid : int) -> str :
|
||||
|
||||
print("=======================")
|
||||
if shortTid not in cls.lastSqlFromThreads:
|
||||
raise CrashGenError("No last-attempted-SQL found for thread id: {}".format(shortTid))
|
||||
return cls.lastSqlFromThreads[shortTid]
|
||||
|
||||
@classmethod
def get_save_sql_time(cls, shortTid : int):
    '''
    Return the time at which the given thread last recorded a SQL statement,
    so that a stuck (possibly deadlocked) thread can be identified later.
    '''
    return cls.current_time[shortTid]

@classmethod
def record_save_sql_time(cls):
    '''
    Record, for the current thread, the time at which its last SQL statement
    was saved; used later to spot threads that appear to be stuck.
    '''
    th = threading.current_thread()
    shortTid = th.native_id % 10000 #type: ignore
    cls.current_time[shortTid] = float(time.time()) # Save this for later

@classmethod
def sql_exec_spend(cls, cost: float):
    '''
    Record, for the current thread, how long its last SQL statement took to
    execute.
    '''
    th = threading.current_thread()
    shortTid = th.native_id % 10000 #type: ignore
    cls.spendThreads[shortTid] = cost # Save this for later
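These helpers keep per-thread bookkeeping keyed by a shortened native thread id. A minimal sketch of the same pattern, timing a statement and recording its cost for the current thread (`sql_cost_by_tid` and `timed_execute` are illustrative names):

```python
import threading
import time

sql_cost_by_tid = {}  # stand-in for DbConn.spendThreads

def timed_execute(execute, sql: str):
    """Run execute(sql) and record how long it took for the current thread."""
    tid = threading.current_thread().native_id % 10000
    start = time.time()
    try:
        return execute(sql)
    finally:
        sql_cost_by_tid[tid] = time.time() - start
```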
@classmethod
|
||||
def get_time_cost(cls) ->float:
|
||||
th = threading.current_thread()
|
||||
shortTid = th.native_id % 10000 #type: ignore
|
||||
return cls.spendThreads.get(shortTid)
|
||||
|
||||
@classmethod
|
||||
def create(cls, connType, dbTarget):
|
||||
if connType == cls.TYPE_NATIVE:
|
||||
|
@ -61,6 +105,7 @@ class DbConn:
|
|||
def createNative(cls, dbTarget) -> DbConn:
|
||||
return cls.create(cls.TYPE_NATIVE, dbTarget)
|
||||
|
||||
|
||||
@classmethod
|
||||
def createRest(cls, dbTarget) -> DbConn:
|
||||
return cls.create(cls.TYPE_REST, dbTarget)
|
||||
|
@ -75,6 +120,7 @@ class DbConn:
|
|||
return "[DbConn: type={}, target={}]".format(self._type, self._dbTarget)
|
||||
|
||||
def getLastSql(self):
|
||||
|
||||
return self._lastSql
|
||||
|
||||
def open(self):
|
||||
|
@ -184,13 +230,19 @@ class DbConnRest(DbConn):
|
|||
def _doSql(self, sql):
|
||||
self._lastSql = sql # remember this, last SQL attempted
|
||||
self.saveSqlForCurrentThread(sql) # Save in global structure too. #TODO: combine with above
|
||||
time_cost = -1
|
||||
time_start = time.time()
|
||||
try:
|
||||
r = requests.post(self._url,
|
||||
data = sql,
|
||||
auth = HTTPBasicAuth('root', 'taosdata'))
|
||||
except:
|
||||
print("REST API Failure (TODO: more info here)")
|
||||
self.sql_exec_spend(-2)
|
||||
raise
|
||||
finally:
|
||||
time_cost = time.time()- time_start
|
||||
self.sql_exec_spend(time_cost)
|
||||
rj = r.json()
|
||||
# Sanity check for the "Json Result"
|
||||
if ('status' not in rj):
|
||||
|
@ -223,6 +275,8 @@ class DbConnRest(DbConn):
|
|||
"[SQL-REST] Execution Result, nRows = {}, SQL = {}".format(nRows, sql))
|
||||
return nRows
|
||||
|
||||
|
||||
|
||||
def query(self, sql): # return rows affected
|
||||
return self.execute(sql)
|
||||
|
||||
|
@ -336,6 +390,7 @@ class MyTDSql:
|
|||
raise
|
||||
return self.affectedRows
|
||||
|
||||
|
||||
class DbTarget:
|
||||
def __init__(self, cfgPath, hostAddr, port):
|
||||
self.cfgPath = cfgPath
|
||||
|
@ -355,6 +410,7 @@ class DbConnNative(DbConn):
|
|||
# _connInfoDisplayed = False # TODO: find another way to display this
|
||||
totalConnections = 0 # Not private
|
||||
totalRequests = 0
|
||||
time_cost = -1
|
||||
|
||||
def __init__(self, dbTarget):
|
||||
super().__init__(dbTarget)
|
||||
|
@ -413,8 +469,18 @@ class DbConnNative(DbConn):
|
|||
"Cannot exec SQL unless db connection is open", CrashGenError.DB_CONNECTION_NOT_OPEN)
|
||||
Logging.debug("[SQL] Executing SQL: {}".format(sql))
|
||||
self._lastSql = sql
|
||||
time_cost = -1
|
||||
nRows = 0
|
||||
time_start = time.time()
|
||||
self.saveSqlForCurrentThread(sql) # Save in global structure too. #TODO: combine with above
|
||||
nRows = self._tdSql.execute(sql)
|
||||
try:
|
||||
nRows= self._tdSql.execute(sql)
|
||||
except Exception as e:
|
||||
self.sql_exec_spend(-2)
|
||||
finally:
|
||||
time_cost = time.time() - time_start
|
||||
self.sql_exec_spend(time_cost)
|
||||
|
||||
cls = self.__class__
|
||||
cls.totalRequests += 1
|
||||
Logging.debug(
|
||||
|
@ -494,4 +560,3 @@ class DbManager():
|
|||
self._dbConn.close()
|
||||
self._dbConn = None
|
||||
Logging.debug("DbManager closed DB connection...")
|
||||
|
||||
|
|
|
@ -116,6 +116,7 @@ class TDDnode:
|
|||
self.deployed = 0
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
self.asan = False
|
||||
self.remoteIP = ""
|
||||
self.cfgDict = {
|
||||
"monitor": "0",
|
||||
|
@ -158,6 +159,15 @@ class TDDnode:
|
|||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def setAsan(self, value):
|
||||
self.asan = value
|
||||
if value:
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
if ("community" in selfPath):
|
||||
self.execPath = os.path.abspath(self.path + "/community/tests/script/sh/exec.sh")
|
||||
else:
|
||||
self.execPath = os.path.abspath(self.path + "/tests/script/sh/exec.sh")
|
||||
|
||||
def getDataSize(self):
|
||||
totalSize = 0
|
||||
|
||||
|
@ -382,6 +392,12 @@ class TDDnode:
|
|||
if platform.system().lower() == 'windows':
|
||||
cmd = "mintty -h never %s -c %s" % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
if self.asan:
|
||||
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||
self.path, self.index)
|
||||
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||
binPath, self.cfgDir, asanDir)
|
||||
else:
|
||||
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
|
@ -444,6 +460,12 @@ class TDDnode:
|
|||
tdLog.exit("dnode:%d is not deployed" % (self.index))
|
||||
|
||||
if self.valgrind == 0:
|
||||
if self.asan:
|
||||
asanDir = "%s/sim/asan/dnode%d.asan" % (
|
||||
self.path, self.index)
|
||||
cmd = "nohup %s -c %s > /dev/null 2> %s & " % (
|
||||
binPath, self.cfgDir, asanDir)
|
||||
else:
|
||||
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
|
||||
binPath, self.cfgDir)
|
||||
else:
|
||||
|
@ -464,6 +486,12 @@ class TDDnode:
|
|||
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
|
||||
|
||||
def stop(self):
|
||||
if self.asan:
|
||||
stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
|
||||
tdLog.info("execute script: " + stopCmd)
|
||||
os.system(stopCmd)
|
||||
return
|
||||
|
||||
if (not self.remoteIP == ""):
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
|
||||
tdLog.info("stop dnode%d"%self.index)
|
||||
|
@ -501,6 +529,12 @@ class TDDnode:
|
|||
|
||||
|
||||
def stoptaosd(self):
|
||||
if self.asan:
|
||||
stopCmd = "%s -s stop -n dnode%d" % (self.execPath, self.index)
|
||||
tdLog.info("execute script: " + stopCmd)
|
||||
os.system(stopCmd)
|
||||
return
|
||||
|
||||
if (not self.remoteIP == ""):
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].stop()"%(self.index-1,self.index-1))
|
||||
tdLog.info("stop dnode%d"%self.index)
|
||||
|
@ -534,6 +568,13 @@ class TDDnode:
|
|||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
|
||||
def forcestop(self):
|
||||
if self.asan:
|
||||
stopCmd = "%s -s stop -n dnode%d -x SIGKILL" + \
|
||||
(self.execPath, self.index)
|
||||
tdLog.info("execute script: " + stopCmd)
|
||||
os.system(stopCmd)
|
||||
return
|
||||
|
||||
if (not self.remoteIP == ""):
|
||||
self.remoteExec(self.cfgDict, "tdDnodes.dnodes[%d].running=1\ntdDnodes.dnodes[%d].forcestop()"%(self.index-1,self.index-1))
|
||||
return
|
||||
|
@ -606,6 +647,7 @@ class TDDnodes:
|
|||
self.simDeployed = False
|
||||
self.testCluster = False
|
||||
self.valgrind = 0
|
||||
self.asan = False
|
||||
self.killValgrind = 1
|
||||
|
||||
def init(self, path, remoteIP = ""):
|
||||
|
@ -629,6 +671,18 @@ class TDDnodes:
|
|||
def setValgrind(self, value):
|
||||
self.valgrind = value
|
||||
|
||||
def setAsan(self, value):
|
||||
self.asan = value
|
||||
if value:
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
if ("community" in selfPath):
|
||||
self.stopDnodesPath = os.path.abspath(self.path + "/community/tests/script/sh/stop_dnodes.sh")
|
||||
self.stopDnodesSigintPath = os.path.abspath(self.path + "/community/tests/script/sh/sigint_stop_dnodes.sh")
|
||||
else:
|
||||
self.stopDnodesPath = os.path.abspath(self.path + "/tests/script/sh/stop_dnodes.sh")
|
||||
self.stopDnodesSigintPath = os.path.abspath(self.path + "/tests/script/sh/sigint_stop_dnodes.sh")
|
||||
tdLog.info("run in address sanitizer mode")
|
||||
|
||||
def setKillValgrind(self, value):
|
||||
self.killValgrind = value
|
||||
|
||||
|
@ -642,6 +696,7 @@ class TDDnodes:
|
|||
self.check(index)
|
||||
self.dnodes[index - 1].setTestCluster(self.testCluster)
|
||||
self.dnodes[index - 1].setValgrind(self.valgrind)
|
||||
self.dnodes[index - 1].setAsan(self.asan)
|
||||
self.dnodes[index - 1].deploy(updatecfgDict)
|
||||
|
||||
def cfg(self, index, option, value):
|
||||
|
@ -692,8 +747,22 @@ class TDDnodes:
|
|||
if index < 1 or index > 10:
|
||||
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
|
||||
|
||||
def StopAllSigint(self):
|
||||
tdLog.info("stop all dnodes sigint")
|
||||
if self.asan:
|
||||
tdLog.info("execute script: %s" % self.stopDnodesSigintPath)
|
||||
os.system(self.stopDnodesSigintPath)
|
||||
tdLog.info("execute finished")
|
||||
return
|
||||
|
||||
def stopAll(self):
|
||||
tdLog.info("stop all dnodes")
|
||||
if self.asan:
|
||||
tdLog.info("execute script: %s" % self.stopDnodesPath)
|
||||
os.system(self.stopDnodesPath)
|
||||
tdLog.info("execute finished")
|
||||
return
|
||||
|
||||
if (not self.dnodes[0].remoteIP == ""):
|
||||
self.dnodes[0].remoteExec(self.dnodes[0].cfgDict, "for i in range(len(tdDnodes.dnodes)):\n tdDnodes.dnodes[i].running=1\ntdDnodes.stopAll()")
|
||||
return
|
||||
|
|
|
@ -36,10 +36,10 @@ class TDLog:
|
|||
printf("\033[1;32m%s %s\033[0m" % (datetime.datetime.now(), info))
|
||||
|
||||
def notice(self, err):
|
||||
printf("\033[1;33m%s %s\033[0m" % (datetime.datetime.now(), err))
|
||||
print("\033[1;33m%s %s\033[0m" % (datetime.datetime.now(), err))
|
||||
|
||||
def exit(self, err):
|
||||
printf("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
|
||||
print("\033[1;31m%s %s\033[0m" % (datetime.datetime.now(), err))
|
||||
sys.exit(1)
|
||||
|
||||
def printNoPrefix(self, info):
|
||||
|
|
|
@ -73,8 +73,15 @@ class TDSql:
|
|||
expectErrNotOccured = True
|
||||
try:
|
||||
self.cursor.execute(sql)
|
||||
except BaseException:
|
||||
except BaseException as e:
|
||||
expectErrNotOccured = False
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
self.error_info = repr(e)
|
||||
# print(error_info)
|
||||
# self.error_info = error_info[error_info.index('(')+1:-1].split(",")[0].replace("'","")
|
||||
# self.error_info = (','.join(error_info.split(",")[:-1]).split("(",1)[1:][0]).replace("'","")
|
||||
# print("!!!!!!!!!!!!!!",self.error_info)
|
||||
|
||||
if expectErrNotOccured:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql))
|
||||
|
@ -83,6 +90,8 @@ class TDSql:
|
|||
self.queryCols = 0
|
||||
self.queryResult = None
|
||||
tdLog.info("sql:%s, expect error occured" % (sql))
|
||||
return self.error_info
|
||||
|
||||
|
||||
def query(self, sql, row_tag=None,queryTimes=10):
|
||||
self.sql = sql
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
set +e
|
||||
#set -x
|
||||
|
||||
unset LD_PRELOAD
|
||||
SCRIPT_DIR=`dirname $0`
|
||||
cd $SCRIPT_DIR/../
|
||||
SCRIPT_DIR=`pwd`
|
||||
|
@ -15,25 +16,37 @@ else
|
|||
fi
|
||||
|
||||
TAOS_DIR=`pwd`
|
||||
LOG_DIR=$TAOS_DIR/sim/tsim/asan
|
||||
LOG_DIR=$TAOS_DIR/sim/asan
|
||||
|
||||
error_num=`cat ${LOG_DIR}/*.asan | grep "ERROR" | wc -l`
|
||||
memory_leak=`cat ${LOG_DIR}/*.asan | grep "Direct leak" | wc -l`
|
||||
indirect_leak=`cat ${LOG_DIR}/*.asan | grep "Indirect leak" | wc -l`
|
||||
runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | wc -l`
|
||||
|
||||
# ignore
|
||||
# /root/TDengine/source/libs/scalar/src/sclfunc.c:735:11: runtime error: 4.75783e+11 is outside the range of representable values of type 'signed char'
|
||||
# /root/TDengine/source/libs/scalar/src/sclfunc.c:790:11: runtime error: 3.4e+38 is outside the range of representable values of type 'long int'
|
||||
# /root/TDengine/source/libs/scalar/src/sclfunc.c:772:11: runtime error: 3.52344e+09 is outside the range of representable values of type 'int'
|
||||
# /root/TDengine/source/libs/scalar/src/sclfunc.c:753:11: runtime error: 4.75783e+11 is outside the range of representable values of type 'short int'
|
||||
runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type" | wc -l`
|
||||
|
||||
python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l`
|
||||
|
||||
echo -e "\033[44;32;1m"asan error_num: $error_num"\033[0m"
|
||||
echo -e "\033[44;32;1m"asan memory_leak: $memory_leak"\033[0m"
|
||||
echo -e "\033[44;32;1m"asan indirect_leak: $indirect_leak"\033[0m"
|
||||
echo -e "\033[44;32;1m"asan runtime error: $runtime_error"\033[0m"
|
||||
echo -e "\033[44;32;1m"asan python error: $python_error"\033[0m"
|
||||
|
||||
let "errors=$error_num+$memory_leak+$indirect_leak+$runtime_error"
|
||||
let "errors=$error_num+$memory_leak+$indirect_leak+$runtime_error+$python_error"
|
||||
|
||||
if [ $errors -eq 0 ]; then
|
||||
echo -e "\033[44;32;1m"no asan errors"\033[0m"
|
||||
exit 0
|
||||
else
|
||||
echo -e "\033[44;31;1m"asan total errors: $errors"\033[0m"
|
||||
if [ $python_error -ne 0 ]; then
|
||||
cat ${LOG_DIR}/*.info
|
||||
fi
|
||||
cat ${LOG_DIR}/*.asan
|
||||
exit 1
|
||||
fi
|
|
@ -11,6 +11,7 @@
|
|||
set +e
|
||||
#set -x
|
||||
|
||||
unset LD_PRELOAD
|
||||
UNAME_BIN=`which uname`
|
||||
OS_TYPE=`$UNAME_BIN`
|
||||
|
||||
|
@ -80,7 +81,7 @@ LOG_DIR=$NODE_DIR/log
|
|||
DATA_DIR=$NODE_DIR/data
|
||||
MGMT_DIR=$NODE_DIR/data/mgmt
|
||||
TSDB_DIR=$NODE_DIR/data/tsdb
|
||||
ASAN_DIR=$SIM_DIR/tsim/asan
|
||||
ASAN_DIR=$SIM_DIR/asan
|
||||
TAOS_CFG=$NODE_DIR/cfg/taos.cfg
|
||||
|
||||
echo ------------ $EXEC_OPTON $NODE_NAME
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
#!/bin/sh
|
||||
|
||||
set +e
|
||||
#set -x
|
||||
|
||||
unset LD_PRELOAD
|
||||
UNAME_BIN=`which uname`
|
||||
OS_TYPE=`$UNAME_BIN`
|
||||
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
echo "Killing taosd processes " $PID
|
||||
while [ -n "$PID" ]; do
|
||||
#echo "Killing taosd processes " $PID
|
||||
kill $PID
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
|
@ -1,5 +1,9 @@
|
|||
#!/bin/sh
|
||||
|
||||
set +e
|
||||
#set -x
|
||||
|
||||
unset LD_PRELOAD
|
||||
UNAME_BIN=`which uname`
|
||||
OS_TYPE=`$UNAME_BIN`
|
||||
|
||||
|
@ -22,16 +26,3 @@ while [ -n "$PID" ]; do
|
|||
fi
|
||||
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'`
|
||||
while [ -n "$PID" ]; do
|
||||
echo kill -9 $PID
|
||||
pkill -9 tarbitrator
|
||||
if [ "$OS_TYPE" != "Darwin" ]; then
|
||||
fuser -k -n tcp 6040
|
||||
else
|
||||
lsof -nti:6040 | xargs kill -9
|
||||
fi
|
||||
PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'`
|
||||
done
|
||||
|
||||
|
|
|
@ -66,16 +66,13 @@ else
|
|||
fi
|
||||
|
||||
declare -x BUILD_DIR=$TOP_DIR/$BIN_DIR
|
||||
|
||||
declare -x SIM_DIR=$TOP_DIR/sim
|
||||
|
||||
PROGRAM=$BUILD_DIR/build/bin/tsim
|
||||
|
||||
PRG_DIR=$SIM_DIR/tsim
|
||||
CFG_DIR=$PRG_DIR/cfg
|
||||
LOG_DIR=$PRG_DIR/log
|
||||
DATA_DIR=$PRG_DIR/data
|
||||
ASAN_DIR=$PRG_DIR/asan
|
||||
ASAN_DIR=$SIM_DIR/asan
|
||||
|
||||
chmod -R 777 $PRG_DIR
|
||||
echo "------------------------------------------------------------------------"
|
||||
|
@ -141,11 +138,15 @@ if [ -n "$FILE_NAME" ]; then
|
|||
echo "AsanDir:" $ASAN_DIR/tsim.asan
|
||||
eval $PROGRAM -c $CFG_DIR -f $FILE_NAME 2> $ASAN_DIR/tsim.asan
|
||||
result=$?
|
||||
echo "Execute result: " $result
|
||||
echo "Execute result:" $result
|
||||
|
||||
if [ $result -eq 0 ]; then
|
||||
$CODE_DIR/sh/sigint_stop_dnodes.sh
|
||||
$CODE_DIR/sh/checkAsan.sh
|
||||
else
|
||||
echo "TSIM has asan errors"
|
||||
sleep 1
|
||||
$CODE_DIR/sh/checkAsan.sh
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
|
|
@ -138,10 +138,10 @@ while $i < 10
|
|||
if $data[0][4] != leader then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][6] != follower then
|
||||
if $data[0][6] == leader then
|
||||
return -1
|
||||
endi
|
||||
if $data[0][8] != follower then
|
||||
if $data[0][8] == leader then
|
||||
return -1
|
||||
endi
|
||||
endw
|
||||
|
|
|
@ -96,7 +96,7 @@ sql_error drop mnode on dnode 4
|
|||
sql_error drop mnode on dnode 5
|
||||
sql_error drop mnode on dnode 6
|
||||
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGKILL
|
||||
system sh/exec.sh -n dnode2 -s stop
|
||||
$x = 0
|
||||
step5:
|
||||
$x = $x + 1
|
||||
|
@ -151,7 +151,7 @@ if $data(4)[4] != ready then
|
|||
endi
|
||||
|
||||
print =============== step6: stop mnode1
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGKILL
|
||||
system sh/exec.sh -n dnode1 -s stop
|
||||
# sql_error drop mnode on dnode 1
|
||||
|
||||
$x = 0
|
||||
|
@ -205,7 +205,7 @@ if $data(4)[4] != ready then
|
|||
endi
|
||||
|
||||
print =============== step8: stop mnode1 and drop it
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGKILL
|
||||
system sh/exec.sh -n dnode1 -s stop
|
||||
|
||||
$x = 0
|
||||
step81:
|
||||
|
|
|
@ -330,7 +330,7 @@ if $data11 != -1 then
|
|||
endi
|
||||
|
||||
# fill_char_values_to_arithmetic_fields
|
||||
sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
|
||||
sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
|
||||
|
||||
# fill_multiple_columns
|
||||
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
|
||||
|
@ -355,25 +355,25 @@ endi
|
|||
|
||||
# fill_into_nonarithmetic_fieds
|
||||
print select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
|
||||
sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
|
||||
sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
|
||||
|
||||
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
|
||||
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
|
||||
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
|
||||
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
|
||||
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
|
||||
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
|
||||
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
|
||||
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
|
||||
# fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24
|
||||
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
|
||||
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
|
||||
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
|
||||
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
|
||||
|
||||
|
||||
# fill nonarithmetic values into arithmetic fields
|
||||
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
|
||||
sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
|
||||
sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
|
||||
|
||||
print select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
|
||||
sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
|
||||
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
|
||||
|
||||
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
|
||||
if $rows != 9 then
|
||||
|
@ -383,7 +383,7 @@ if $data01 != 1 then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
|
||||
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
|
||||
|
||||
## linear fill
|
||||
# feature currently switched off 2018/09/29
|
||||
|
|
|
@ -170,7 +170,7 @@ endi
|
|||
sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
|
||||
|
||||
# fill_char_values_to_arithmetic_fields
|
||||
sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
|
||||
sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
|
||||
|
||||
# fill_multiple_columns
|
||||
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
|
||||
|
@ -240,10 +240,10 @@ sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <=
|
|||
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
|
||||
|
||||
# fill nonarithmetic values into arithmetic fields
|
||||
sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'abc');
|
||||
sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
|
||||
sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'abc');
|
||||
sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
|
||||
|
||||
sql_error select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
|
||||
sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
|
||||
|
||||
sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20);
|
||||
if $rows != $val then
|
||||
|
@ -354,7 +354,7 @@ endi
|
|||
|
||||
## NULL fill
|
||||
print fill(NULL)
|
||||
print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5
|
||||
print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) limit 5
|
||||
sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(NULL) limit 5
|
||||
if $rows != 25 then
|
||||
return -1
|
||||
|
|
|
@ -332,7 +332,7 @@ if $data11 != -1 then
endi

# fill_char_values_to_arithmetic_fields
sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')

# fill_multiple_columns
sql_error select _wstart, sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)

@ -358,24 +358,24 @@ endi

# fill_into_nonarithmetic_fieds
sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)

sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to null automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')

# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');

sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');

sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then

@ -385,7 +385,7 @@ if $data01 != 1 then
return -1
endi

sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');

## linear fill
# feature currently switched off 2018/09/29
@ -321,55 +321,41 @@ endi

### [TBASE-350]
## stb + interval + fill + group by + limit offset
sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') limit 2 offset 10
sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') limit 2 offset 10
if $rows != 2 then
return -1
endi
#add one more test case
sql select max(c1), last(c8) from lm2_db0.lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(linear) limit 10 offset 4089;"

$limit = 5
$offset = $rowNum * 2
$offset = $offset - 2
sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') order by t1 limit $limit offset $offset
if $rows != $tbNum then
sql select max(c1), min(c2), sum(c3), avg(c4), first(c7), last(c8), first(c9) from lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 partition by t1 interval(5m) fill(value, -1, -2, -3, -4 ,-7 ,'-8', '-9') order by t1 limit $limit offset $offset
if $rows != 1 then
return -1
endi
if $data00 != @18-11-25 19:30:00.000@ then
if $data00 != 9 then
return -1
endi
if $data01 != 9 then
return -1
endi
if $data12 != 9 then
if $data02 != 9.000000000 then
return -1
endi
if $data23 != 9.000000000 then
if $data03 != 9.000000000 then
return -1
endi
if $data34 != 9.000000000 then
if $data04 != 1 then
return -1
endi
if $data45 != 1 then
if $data05 != binary9 then
return -1
endi
if $data56 != binary9 then
return -1
endi
if $data68 != 6 then
return -1
endi
if $data72 != -2 then
return -1
endi
if $data84 != -2.000000000 then
return -1
endi
if $data98 != 9 then
if $data06 != nchar9 then
return -1
endi

#add one more test case
sql select max(c1), last(c8) from lm2_db0.lm2_tb0 where ts >= 1537146000000 and ts <= 1543145400000 interval(5m) fill(linear) limit 10 offset 4089;"
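The tsim hunks above all probe the same query shape: an aggregation over a time range, bucketed with INTERVAL and padded with FILL(VALUE, ...) constants, where several inputs that were previously rejected (`sql_error`) are now accepted (`sql`). A minimal sketch of that shape, with a hypothetical table and column that are not part of this commit:

```sql
-- hypothetical table: power_meter(ts TIMESTAMP, current FLOAT); illustrative only
-- one row per 5-minute window; windows without data are padded with the constant 0
SELECT _wstart, AVG(current)
FROM power_meter
WHERE ts >= '2022-01-01 00:00:00' AND ts <= '2022-01-01 06:00:00'
INTERVAL(5m)
FILL(VALUE, 0);
```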
@ -0,0 +1,154 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
system sh/cfg.sh -n dnode2 -c supportVnodes -v 4
system sh/cfg.sh -n dnode3 -c supportVnodes -v 4

print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect

print ========== step2
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start

$x = 0
step2:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not ready!
return -1
endi
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 3 then
return -1
endi
if $data(1)[4] != ready then
goto step2
endi
if $data(2)[4] != ready then
goto step2
endi
if $data(3)[4] != ready then
goto step2
endi

print ========== step3
sql create database d1 vgroups 1
sql use d1;

print --> create stb
sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);

print --> create sma
sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);

return

print --> show sma
sql show indexes from stb from d1;
if $rows != 1 then
return -1
endi
if $data[0][0] != sma_index_name1 then
return -1
endi
if $data[0][1] != d1 then
return -1
endi
if $data[0][2] != stb then
return -1
endi

print --> drop stb
sql drop table stb;

print ========== step4 repeat

print --> create stb
sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);

print --> create sma
sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);

print --> show sma
sql show indexes from stb from d1;
if $rows != 1 then
return -1
endi
if $data[0][0] != sma_index_name1 then
return -1
endi
if $data[0][1] != d1 then
return -1
endi
if $data[0][2] != stb then
return -1
endi

print --> drop stb
sql drop table stb;

print ========== step5
sql drop database if exists db;
sql create database db duration 300;
sql use db;
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint, c_float float, c_double double, c_bool bool, c_binary binary(16), c_nchar nchar(32), c_ts timestamp, c_tint_un tinyint unsigned, c_sint_un smallint unsigned, c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;

print ========== step6 repeat
sql drop database if exists db;
sql create database db duration 300;
sql use db;
sql create table stb1(ts timestamp, c_int int, c_bint bigint ) tags (t_int int);
sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;

print ========== step7
sql drop database if exists db;
sql create database db duration 300;
sql use db;
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);

sql create table ct1 using stb1 tags ( 1 );
sql create table ct2 using stb1 tags ( 2 );
sql create table ct3 using stb1 tags ( 3 );
sql create table ct4 using stb1 tags ( 4 );

sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
sql CREATE SMA INDEX sma_index_2 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) max_delay 6m;
sql CREATE SMA INDEX sma_index_3 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) watermark 5s max_delay 6m;

sql DROP INDEX sma_index_1 ;
sql DROP INDEX sma_index_2 ;
sql DROP INDEX sma_index_3 ;

print ========== step8
sql drop database if exists db;
sql create database db duration 300;
sql use db;
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);

sql create table ct1 using stb1 tags ( 1 );
sql create table ct2 using stb1 tags ( 2 );
sql create table ct3 using stb1 tags ( 3 );
sql create table ct4 using stb1 tags ( 4 );

sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
sql CREATE SMA INDEX sma_index_2 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) max_delay 6m;
sql CREATE SMA INDEX sma_index_3 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) watermark 5s max_delay 6m;

sql DROP INDEX sma_index_1 ;
sql DROP INDEX sma_index_2 ;
sql DROP INDEX sma_index_3 ;

system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
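Condensed from the statements the new script above issues, the SMA index lifecycle it drives looks roughly like the sketch below; this is a summary of the test, not a complete syntax reference, and it merges names from different steps of the script:

```sql
CREATE DATABASE d1 VGROUPS 1;
USE d1;
CREATE TABLE stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 DOUBLE) TAGS (t1 INT UNSIGNED);
-- time-range SMA index; later steps also attach WATERMARK and MAX_DELAY options
CREATE SMA INDEX sma_index_name1 ON stb FUNCTION(MAX(c1), MAX(c2), MIN(c1)) INTERVAL(6m, 10s) SLIDING(6m);
SHOW INDEXES FROM stb FROM d1;   -- the script expects exactly one row here
DROP INDEX sma_index_name1;      -- later steps drop indexes explicitly ...
DROP TABLE stb;                  -- ... and also drop the indexed table, then repeat
```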
@ -4,7 +4,7 @@ looptest:
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
#==system sh/exec.sh -n dnode1 -s start -v

sleep 200
sql connect
@ -201,31 +201,39 @@ system sh/exec.sh -n dnode2 -s start
sleep 3000

print ======== step6
sql select count(*) from db1.tb1
$y = 0
step6:
$y = $y + 1
sleep 1000
if $y == 50 then
return -1
endi

sql select count(*) from db1.tb1 -x step6
print select count(*) from db1.tb1 ==> $data00 $lastRows1
if $data00 <= $lastRows1 then
return -1
goto step6
endi
$lastRows1 = $data00

sql select count(*) from db2.tb2
sql select count(*) from db2.tb2 -x step6
print select count(*) from db2.tb2 ==> $data00 $lastRows2
if $data00 <= $lastRows2 then
return -1
goto step6
endi
$lastRows2 = $data00

sql select count(*) from db3.tb3
sql select count(*) from db3.tb3 -x step6
print select count(*) from db3.tb3 ==> $data00 $lastRows3
if $data00 <= $lastRows3 then
return -1
goto step6
endi
$lastRows3 = $data00

sql select count(*) from db4.tb4
sql select count(*) from db4.tb4 -x step6
print select count(*) from db4.tb4 ==> $data00 $lastRows4
if $data00 <= $lastRows4 then
return -1
goto step6
endi
$lastRows4 = $data00
@ -0,0 +1,183 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
import math
|
||||
from random import randint
|
||||
from util.log import *
|
||||
from util.cases import *
|
||||
from util.sql import *
|
||||
from util.common import *
|
||||
from util.sqlset import *
|
||||
from util.boundary import *
|
||||
|
||||
class TDTestCase:
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor())
|
||||
self.boundary = DataBoundary()
|
||||
self.dbname_length_boundary = self.boundary.DBNAME_MAX_LENGTH
|
||||
self.tbname_length_boundary = self.boundary.TBNAME_MAX_LENGTH
|
||||
self.stbname_length_boundary = self.boundary.STBNAME_MAX_LENGTH
|
||||
self.colname_length_boundary = self.boundary.COL_KEY_MAX_LENGTH
|
||||
self.tagname_length_boundary = self.boundary.TAG_KEY_MAX_LENGTH
|
||||
self.username_length_boundary = 23
|
||||
self.password_length_boundary = 128
|
||||
def dbname_length_check(self):
|
||||
dbname_length = randint(1,self.dbname_length_boundary-1)
|
||||
for dbname in [tdCom.get_long_name(self.dbname_length_boundary),tdCom.get_long_name(dbname_length)]:
|
||||
tdSql.execute(f'create database if not exists {dbname}')
|
||||
tdSql.query(f'select name from information_schema.ins_databases where name = "{dbname}"')
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],dbname)
|
||||
tdSql.execute(f'drop database if exists {dbname}')
|
||||
dbname = tdCom.get_long_name(self.dbname_length_boundary+1)
|
||||
tdSql.error(f'create database if not exists {dbname}')
|
||||
if "Invalid identifier name" in tdSql.error_info:
|
||||
tdLog.info("error info is true!")
|
||||
else:
|
||||
tdLog.exit("error info is not true")
|
||||
|
||||
def tbname_length_check(self):
|
||||
tdSql.prepare()
|
||||
tdSql.execute('use db')
|
||||
tbname_length = randint(1,self.tbname_length_boundary-1)
|
||||
tdSql.execute(f'create table stb (ts timestamp,c0 int) tags(t0 int)')
|
||||
for tbname in [tdCom.get_long_name(self.tbname_length_boundary),tdCom.get_long_name(tbname_length)]:
|
||||
tdSql.execute(f'create table {tbname} using stb tags(1)')
|
||||
tdSql.query(f'select table_name from information_schema.ins_tables where table_name = "{tbname}"')
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],tbname)
|
||||
tdSql.execute(f'drop table {tbname}')
|
||||
tbname = tdCom.get_long_name(self.tbname_length_boundary+1)
|
||||
tdSql.error(f'create table {tbname} using stb tags(1)')
|
||||
if "Invalid identifier name" in tdSql.error_info:
|
||||
tdLog.info("error info is true!")
|
||||
else:
|
||||
tdLog.exit("error info is not true")
|
||||
stbname_length = randint(1,self.stbname_length_boundary-1)
|
||||
for stbname in [tdCom.get_long_name(self.stbname_length_boundary),tdCom.get_long_name(stbname_length)]:
|
||||
tdSql.execute(f'create table {stbname} (ts timestamp,c0 int) tags(t0 int)')
|
||||
tdSql.query(f'select stable_name from information_schema.ins_stables where stable_name = "{stbname}"')
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0],stbname)
|
||||
tdSql.execute(f'drop table {stbname}')
|
||||
stbname = tdCom.get_long_name(self.stbname_length_boundary+1)
|
||||
tdSql.error(f'create table {stbname} (ts timestamp,c0 int) tags(t0 int)')
|
||||
print(tdSql.error_info)
|
||||
if "Invalid identifier name" in tdSql.error_info:
|
||||
tdLog.info("error info is true!")
|
||||
else:
|
||||
tdLog.exit("error info is not true")
|
||||
tdSql.execute('drop database db')
|
||||
|
||||
def colname_length_check(self):
|
||||
tdSql.prepare()
|
||||
tdSql.execute('use db')
|
||||
column_name_length = randint(1,self.colname_length_boundary-1)
|
||||
for colname in [tdCom.get_long_name(column_name_length),tdCom.get_long_name(self.colname_length_boundary)]:
|
||||
stbname = tdCom.get_long_name(3)
|
||||
ntbname = tdCom.get_long_name(4)
|
||||
tdSql.execute(f'create table {stbname} (ts timestamp,{colname} int) tags(t0 int)')
|
||||
tdSql.query(f'describe {stbname}')
|
||||
tdSql.checkEqual(tdSql.queryResult[1][0],colname)
|
||||
tdSql.execute(f'create table {ntbname} (ts timestamp,{colname} int)')
|
||||
tdSql.query(f'describe {ntbname}')
|
||||
tdSql.checkEqual(tdSql.queryResult[1][0],colname)
|
||||
colname = tdCom.get_long_name(self.colname_length_boundary+1)
|
||||
tdSql.error(f'create table stb (ts timestamp,{colname} int) tags(t0 int)')
|
||||
if "Invalid identifier name" in tdSql.error_info:
|
||||
tdLog.info("error info is true!")
|
||||
else:
|
||||
tdLog.exit("error info is not true")
|
||||
tdSql.execute('drop database db')
|
||||
|
||||
def tagname_length_check(self):
|
||||
tdSql.prepare()
|
||||
tdSql.execute('use db')
|
||||
tag_name_length = randint(1,self.tagname_length_boundary-1)
|
||||
for tagname in (tdCom.get_long_name(tag_name_length),tdCom.get_long_name(self.tagname_length_boundary)):
|
||||
stbname = tdCom.get_long_name(3)
|
||||
tdSql.execute(f'create table {stbname} (ts timestamp,c0 int) tags({tagname} int)')
|
||||
tdSql.query(f'describe {stbname}')
|
||||
tdSql.checkEqual(tdSql.queryResult[-1][0],tagname)
|
||||
tagname = tdCom.get_long_name(self.tagname_length_boundary+1)
|
||||
tdSql.error(f'create table {stbname} (ts timestamp,c0 int) tags({tagname} int)')
|
||||
if "Invalid identifier name" in tdSql.error_info:
|
||||
tdLog.info("error info is true!")
|
||||
else:
|
||||
tdLog.exit("error info is not true")
|
||||
tdSql.execute('drop database db')
|
||||
|
||||
def username_length_check(self):
|
||||
username_length = randint(1,self.username_length_boundary-1)
|
||||
for username in [tdCom.get_long_name(username_length),tdCom.get_long_name(self.username_length_boundary)]:
|
||||
tdSql.execute(f'create user {username} pass "123"')
|
||||
tdSql.query('show users')
|
||||
for user in tdSql.queryResult:
|
||||
if user[0].lower() != 'root':
|
||||
tdSql.checkEqual(user[0],username)
|
||||
tdSql.execute(f'drop user {username}')
|
||||
username = tdCom.get_long_name(self.username_length_boundary+1)
|
||||
tdSql.error(f'create user {username} pass "123"')
|
||||
if "Name or password too long" in tdSql.error_info:
|
||||
tdLog.info("error info is true!")
|
||||
else:
|
||||
tdLog.exit("error info is not true")
|
||||
|
||||
def password_length_check(self):
|
||||
password_length = randint(1,self.password_length_boundary-1)
|
||||
for password in [tdCom.get_long_name(password_length),tdCom.get_long_name(self.password_length_boundary)]:
|
||||
username = tdCom.get_long_name(3)
|
||||
tdSql.execute(f'create user {username} pass "{password}"')
|
||||
password = tdCom.get_long_name(self.password_length_boundary+1)
|
||||
tdSql.error(f'create user {username} pass "{password}"')
|
||||
if "Name or password too long" in tdSql.error_info:
|
||||
tdLog.info("error info is true!")
|
||||
else:
|
||||
tdLog.exit("error info is not true")
|
||||
def sql_length_check(self):
|
||||
insert_rows = 1021
|
||||
tdSql.prepare()
|
||||
tdSql.execute('use db')
|
||||
tdSql.execute('create table ntb (ts timestamp,c0 binary(1013))')
|
||||
values_sql = ''
|
||||
value = tdCom.get_long_name(1013)
|
||||
for num in range(insert_rows):
|
||||
values_sql += f' (now+{num}s,"{value}")'
|
||||
value = tdCom.get_long_name(65)
|
||||
values_sql += f"(now-1s,'{value}')"
|
||||
tdSql.execute(f'insert into ntb values{values_sql}')
|
||||
tdSql.query('select * from ntb')
|
||||
tdSql.checkRows(insert_rows+1)
|
||||
tdSql.execute('create table ntb1 (ts timestamp,c0 binary(1013))')
|
||||
tdSql.error(f'insert into ntb1 values{values_sql};')
|
||||
print(tdSql.error_info)
|
||||
if "SQL statement too long" in tdSql.error_info:
|
||||
tdLog.info("error info is true!")
|
||||
else:
|
||||
tdLog.exit("error info is not true")
|
||||
tdSql.execute('drop database db')
|
||||
def run(self):
|
||||
self.dbname_length_check()
|
||||
self.tbname_length_check()
|
||||
self.colname_length_check()
|
||||
self.tagname_length_check()
|
||||
self.username_length_check()
|
||||
self.password_length_check()
|
||||
self.sql_length_check()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success("%s successfully executed" % __file__)
|
||||
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
|
@ -23,7 +23,7 @@ class TDTestCase:
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(),logSql)
        tdSql.init(conn.cursor())
        self.setsql = TDSetSql()
        self.dbname = 'db_test'
        self.ntbname = 'ntb'
@ -204,18 +204,12 @@ class TDTestCase:
                row_check.append(elem)
            auto_result.append(row_check)

        check_status = True

        tdSql.query(abs_query)
        for row_index, row in enumerate(abs_result):
            for col_index, elem in enumerate(row):
                if auto_result[row_index][col_index] != elem:
                    check_status = False
        if not check_status:
            tdLog.notice(
                "abs function value has not as expected , sql is \"%s\" " % abs_query)
            sys.exit(1)
        else:
            tdLog.info(
                "abs value check pass , it work as expected ,sql is \"%s\" " % abs_query)
                tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])

    def test_errors(self):
        dbname = "testdb"
@ -466,19 +460,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto(f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ",
|
||||
f"select abs(c1), abs(c2) ,abs(c3), abs(c4), abs(c5) ,abs(c6) from {dbname}.sub1_bound")
|
||||
|
|
|
@ -426,19 +426,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_function("&", False , f"{dbname}.sub1_bound" ,"c1","c2","c3","c4","c5","c6" )
|
||||
self.check_function("&", False , f"{dbname}.sub1_bound","abs(c1)","abs(c2)","abs(c3)","abs(c4)","abs(c5)","abs(c6)" )
|
||||
|
|
|
@ -86,21 +86,12 @@ class TDTestCase:
|
|||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
|
||||
check_status = True
|
||||
|
||||
tdSql.query(pow_query)
|
||||
for row_index , row in enumerate(pow_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
|
||||
check_status = False
|
||||
elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
|
||||
check_status = False
|
||||
else:
|
||||
pass
|
||||
if not check_status:
|
||||
tdLog.notice("acos function value has not as expected , sql is \"%s\" "%pow_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("acos value check pass , it work as expected ,sql is \"%s\" "%pow_query )
|
||||
|
||||
tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
|
||||
|
||||
|
||||
def test_errors(self, dbname="db"):
|
||||
error_sql_lists = [
|
||||
|
@ -414,19 +405,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_acos( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select acos(abs(c1)), acos(abs(c2)) ,acos(abs(c3)), acos(abs(c4)), acos(abs(c5)) from {dbname}.sub1_bound")
|
||||
|
||||
|
|
|
@ -86,21 +86,13 @@ class TDTestCase:
|
|||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
|
||||
check_status = True
|
||||
|
||||
tdSql.query(pow_query)
|
||||
for row_index , row in enumerate(pow_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
|
||||
check_status = False
|
||||
elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
|
||||
check_status = False
|
||||
else:
|
||||
pass
|
||||
if not check_status:
|
||||
tdLog.notice("asin function value has not as expected , sql is \"%s\" "%pow_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("asin value check pass , it work as expected ,sql is \"%s\" "%pow_query )
|
||||
tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
|
||||
|
||||
|
||||
|
||||
def test_errors(self, dbname="db"):
|
||||
error_sql_lists = [
|
||||
|
@ -414,19 +406,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_asin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select asin(abs(c1)), asin(abs(c2)) ,asin(abs(c3)), asin(abs(c4)), asin(abs(c5)) from {dbname}.sub1_bound")
|
||||
|
||||
|
|
|
@ -84,21 +84,11 @@ class TDTestCase:
|
|||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
|
||||
check_status = True
|
||||
|
||||
tdSql.query(pow_query)
|
||||
for row_index , row in enumerate(pow_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] == None and elem:
|
||||
check_status = False
|
||||
elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
|
||||
check_status = False
|
||||
else:
|
||||
pass
|
||||
if not check_status:
|
||||
tdLog.notice("atan function value has not as expected , sql is \"%s\" "%pow_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("atan value check pass , it work as expected ,sql is \"%s\" "%pow_query )
|
||||
tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
|
||||
|
||||
|
||||
def test_errors(self, dbname="db"):
|
||||
error_sql_lists = [
|
||||
|
@ -412,19 +402,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_atan( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select atan(abs(c1)), atan(abs(c2)) ,atan(abs(c3)), atan(abs(c4)), atan(abs(c5)) from {dbname}.sub1_bound")
|
||||
|
||||
|
|
|
@ -114,16 +114,10 @@ class TDTestCase:
|
|||
avg_result = tdSql.getResult(origin_query)
|
||||
origin_result = tdSql.getResult(check_query)
|
||||
|
||||
check_status = True
|
||||
tdSql.query(origin_query)
|
||||
for row_index , row in enumerate(avg_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if avg_result[row_index][col_index] != origin_result[row_index][col_index]:
|
||||
check_status = False
|
||||
if not check_status:
|
||||
tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
|
||||
tdSql.checkData(row_index,col_index,origin_result[row_index][col_index])
|
||||
|
||||
def test_errors(self, dbname="db"):
|
||||
error_sql_lists = [
|
||||
|
@ -378,33 +372,33 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+15s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+20s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
#self.check_avg(f"select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from {dbname}.sub1_bound " , f" select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from {dbname}.sub1_bound ")
|
||||
|
||||
|
|
|
@ -0,0 +1,338 @@
|
|||
###################################################################
|
||||
# Copyright (c) 2016 by TAOS Technologies, Inc.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This file is proprietary and confidential to TAOS Technologies.
|
||||
# No part of this file may be reproduced, stored, transmitted,
|
||||
# disclosed or used in any form or by any means other than as
|
||||
# expressly provided by the written permission from Jianhui Tao
|
||||
#
|
||||
###################################################################
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
import random
|
||||
import os
|
||||
import time
|
||||
import taos
|
||||
import subprocess
|
||||
from faker import Faker
|
||||
from util.log import tdLog
|
||||
from util.cases import tdCases
|
||||
from util.sql import tdSql
|
||||
from util.dnodes import tdDnodes
|
||||
from util.dnodes import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"querySmaOptimize":1}
|
||||
|
||||
def init(self, conn, logSql, replicaVar):
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
self.testcasePath = os.path.split(__file__)[0]
|
||||
self.testcaseFilename = os.path.split(__file__)[-1]
|
||||
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
|
||||
|
||||
self.db = "case_when"
|
||||
|
||||
def dropandcreateDB_random(self,database,n):
|
||||
ts = 1630000000000
|
||||
num_random = 10
|
||||
fake = Faker('zh_CN')
|
||||
tdSql.execute('''drop database if exists %s ;''' %database)
|
||||
tdSql.execute('''create database %s keep 36500 ;'''%(database))
|
||||
tdSql.execute('''use %s;'''%database)
|
||||
|
||||
tdSql.execute('''create stable %s.stable_1 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
|
||||
q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
|
||||
tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
|
||||
tdSql.execute('''create stable %s.stable_2 (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp , \
|
||||
q_int_null int , q_bigint_null bigint , q_smallint_null smallint , q_tinyint_null tinyint, q_float_null float , q_double_null double , q_bool_null bool , q_binary_null binary(20) , q_nchar_null nchar(20) , q_ts_null timestamp) \
|
||||
tags(loc nchar(100) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, t_bool bool , t_binary binary(100) , t_nchar nchar(100) ,t_float float , t_double double , t_ts timestamp);'''%database)
|
||||
|
||||
for i in range(num_random):
|
||||
tdSql.execute('''create table %s.table_%d \
|
||||
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint , q_float float , q_double double , q_bool bool , q_binary binary(100) , q_nchar nchar(100) , q_ts timestamp ) ;'''%(database,i))
|
||||
tdSql.execute('''create table %s.stable_1_%d using %s.stable_1 tags('stable_1_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
|
||||
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
|
||||
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
|
||||
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
|
||||
|
||||
tdSql.execute('''create table %s.stable_%d_a using %s.stable_2 tags('stable_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
|
||||
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
|
||||
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
|
||||
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
|
||||
tdSql.execute('''create table %s.stable_%d_b using %s.stable_2 tags('stable_2_%d', '%d' , '%d', '%d' , '%d' , 1 , 'binary1.%s' , 'nchar1.%s' , '%f', '%f' ,'%d') ;'''
|
||||
%(database,i,database,i,fake.random_int(min=-2147483647, max=2147483647, step=1), fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
|
||||
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
|
||||
fake.pystr() ,fake.pystr() ,fake.pyfloat(),fake.pyfloat(),fake.random_int(min=-2147483647, max=2147483647, step=1)))
|
||||
|
||||
# insert data
|
||||
for i in range(num_random):
|
||||
for j in range(n):
|
||||
tdSql.execute('''insert into %s.stable_1_%d (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts)\
|
||||
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d) ;'''
|
||||
% (database,i,ts + i*1000 + j, fake.random_int(min=-2147483647, max=2147483647, step=1),
|
||||
fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
|
||||
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
|
||||
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i))
|
||||
|
||||
tdSql.execute('''insert into %s.table_%d (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double , q_bool , q_binary , q_nchar, q_ts) \
|
||||
values(%d, %d, %d, %d, %d, %f, %f, 0, 'binary.%s', 'nchar.%s', %d) ;'''
|
||||
% (database,i,ts + i*1000 + j, fake.random_int(min=-2147483647, max=2147483647, step=1),
|
||||
fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1),
|
||||
fake.random_int(min=-32767, max=32767, step=1) , fake.random_int(min=-127, max=127, step=1) ,
|
||||
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i ))
|
||||
|
||||
tdSql.execute('''insert into %s.stable_%d_a (ts , q_int , q_bigint , q_smallint , q_tinyint , q_float , q_double, q_bool , q_binary , q_nchar, q_ts)\
|
||||
values(%d, %d, %d, %d, %d, %f, %f, 1, 'binary.%s', 'nchar.%s', %d) ;'''
|
||||
% (database,i,ts + i*1000 + j, fake.random_int(min=0, max=2147483647, step=1),
|
||||
fake.random_int(min=0, max=9223372036854775807, step=1),
|
||||
fake.random_int(min=0, max=32767, step=1) , fake.random_int(min=0, max=127, step=1) ,
|
||||
fake.pyfloat() , fake.pyfloat() , fake.pystr() , fake.pystr() , ts + i))
|
||||
|
||||
tdSql.query("select count(*) from %s.stable_1;" %database)
|
||||
tdSql.checkData(0,0,num_random*n)
|
||||
tdSql.query("select count(*) from %s.table_0;"%database)
|
||||
tdSql.checkData(0,0,n)
|
||||
|
||||
|
||||
def users_bug(self,database):
|
||||
sql1 = "select (case when `q_smallint` >0 then 'many--' when `q_smallint`<0 then 'little' end),q_int,loc from %s.stable_1 where tbname = 'stable_1_1' limit 100;" %database
|
||||
sql2 = "select (case when `q_smallint` >0 then 'many--' when `q_smallint`<0 then 'little' end),q_int,loc from %s.stable_1_1 limit 100;" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
|
||||
sql1 = "select (case when `q_smallint` >0 then 'many![;;][][]]' when `q_smallint`<0 then 'little' end),q_int,loc from %s.stable_1 where tbname = 'stable_1_1' limit 100;" %database
|
||||
sql2 = "select (case when `q_smallint` >0 then 'many![;;][][]]' when `q_smallint`<0 then 'little' end),q_int,loc from %s.stable_1_1 limit 100;" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
|
||||
sql1 = "select (case when sum(q_smallint)=0 then null else sum(q_smallint) end) from %s.stable_1 where tbname = 'stable_1_1' limit 100;" %database
|
||||
sql2 = "select (case when sum(q_smallint)=0 then null else sum(q_smallint) end) from %s.stable_1_1 limit 100;" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
|
||||
sql1 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1 where tbname = 'stable_1_1' and ts < now state_window(case when q_smallint <0 then 1 else 0 end);" %database
|
||||
sql2 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1_1 where ts < now state_window(case when q_smallint <0 then 1 else 0 end);" %database
|
||||
self.constant_check(database,sql1,sql2,0)
|
||||
self.constant_check(database,sql1,sql2,1)
|
||||
self.constant_check(database,sql1,sql2,2)
|
||||
|
||||
def casewhen_list(self):
|
||||
a1,a2,a3 = random.randint(-2147483647,2147483647),random.randint(-2147483647,2147483647),random.randint(-2147483647,2147483647)
|
||||
casewhen_lists = ['first case when %d then %d end last' %(a1,a2) , #'first case when 3 then 4 end last' ,
|
||||
'first case when 0 then %d end last' %(a1), #'first case when 0 then 4 end last' ,
|
||||
'first case when null then %d end last' %(a1) , #'first case when null then 4 end last' ,
|
||||
'first case when 1 then %d+(%d) end last' %(a1,a2) , #'first case when 1 then 4+1 end last' ,
|
||||
'first case when %d-(%d) then 0 end last' %(a1,a1) , #'first case when 1-1 then 0 end last' ,
|
||||
'first case when %d+(%d) then 0 end last' %(a1,a1), #'first case when 1+1 then 0 end last' ,
|
||||
'first case when 1 then %d-(%d)+(%d) end last' %(a1,a1,a2), #'first case when 1 then 1-1+2 end last' ,
|
||||
'first case when %d > 0 then %d < %d end last' %(a1,a1,a2), #'first case when 1 > 0 then 1 < 2 end last' ,
|
||||
'first case when %d > %d then %d < %d end last' %(a1,a2,a1,a2), #'first case when 1 > 2 then 1 < 2 end last' ,
|
||||
'first case when abs(%d) then abs(-(%d)) end last' %(a1,a2) ,#'first case when abs(3) then abs(-1) end last' ,
|
||||
'first case when abs(%d+(%d)) then abs(-(%d))+abs(%d) end last' %(a1,a2,a1,a2) , #'first case when abs(1+1) then abs(-1)+abs(3) end last' ,
|
||||
'first case when 0 then %d else %d end last' %(a1,a2), #'first case when 0 then 1 else 3 end last' ,
|
||||
'first case when 0 then %d when 1 then %d else %d end last' %(a1,a1,a3), #'first case when 0 then 1 when 1 then 0 else 3 end last' ,
|
||||
'first case when 0 then %d when 1 then %d when 2 then %d end last' %(a1,a1,a3), #'first case when 0 then 1 when 1 then 0 when 2 then 3 end last' ,
|
||||
'first case when \'a\' then \'b\' when null then 0 end last' , #'first case when \'a\' then \'b\' when null then 0 end last' ,
|
||||
'first case when \'%d\' then \'b\' when null then %d end last' %(a1,a2), #'first case when \'2\' then \'b\' when null then 0 end last' ,
|
||||
'first case when \'%d\' then \'b\' else null end last' %(a1), #'first case when \'0\' then \'b\' else null end last',
|
||||
'first case when \'%d\' then \'b\' else %d end last' %(a1,a2), #'first case when \'0\' then \'b\' else 2 end last',
|
||||
'first case when sum(%d) then sum(%d)-sum(%d) end last' %(a1,a1,a3), #'first case when sum(2) then sum(2)-sum(1) end last' ,
|
||||
'first case when sum(%d) then abs(-(%d)) end last' %(a1,a2), #'first case when sum(2) then abs(-2) end last' ,
|
||||
'first case when q_int then ts end last' ,
|
||||
'first case when q_int then q_int when q_int + (%d) then q_int + (%d) else q_int is null end last' %(a1,a2) , #'first case when q_int then q_int when q_int + 1 then q_int + 1 else q_int is null end last' ,
|
||||
'first case when q_int then %d when ts then ts end last' %(a1), #'first case when q_int then 3 when ts then ts end last' ,
|
||||
'first case when %d then q_int end last' %(a1), #'first case when 3 then q_int end last' ,
|
||||
'first case when q_int then %d when %d then %d end last' %(a1,a1,a3), #'first case when q_int then 3 when 1 then 2 end last' ,
|
||||
'first case when sum(q_int) then sum(q_int)-abs(-(%d)) end last' %(a1), #'first case when sum(q_int) then sum(q_int)-abs(-1) end last' ,
|
||||
'first case when q_int < %d then %d when q_int >= %d then %d else %d end last' %(a1,a2,a1,a2,a3), #'first case when q_int < 3 then 1 when q_int >= 3 then 2 else 3 end last' ,
|
||||
'first cast(case q_int when q_int then q_int + (%d) else q_int is null end as double) last' %(a1), #'first cast(case q_int when q_int then q_int + 1 else q_int is null end as double) last' ,
|
||||
'first sum(case q_int when q_int then q_int + (%d) else q_int is null end + (%d)) last' %(a1,a2), #'first sum(case q_int when q_int then q_int + 1 else q_int is null end + 1) last' ,
|
||||
'first case when q_int is not null then case when q_int <= %d then q_int else q_int * (%d) end else -(%d) end last' %(a1,a1,a3), #'first case when q_int is not null then case when q_int <= 0 then q_int else q_int * 10 end else -1 end last' ,
|
||||
'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 3 then 4 end last' ,
|
||||
'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 1 then 4 end last' ,
|
||||
'first case %d when %d then %d else %d end last' %(a1,a1,a2,a3), # 'first case 3 when 1 then 4 else 2 end last' ,
|
||||
'first case %d when null then %d when \'%d\' then %d end last' %(a1,a1,a2,a3) , # 'first case 3 when null then 4 when \'3\' then 1 end last' ,
|
||||
'first case \'%d\' when null then %d when %d then %d end last' %(a1,a1,a2,a3), # 'first case \'3\' when null then 4 when 3 then 1 end last' ,
|
||||
'first case null when null then %d when %d then %d end last' %(a1,a2,a3), # 'first case null when null then 4 when 3 then 1 end last' ,
|
||||
'first case %d.0 when null then %d when \'%d\' then %d end last' %(a1,a1,a2,a3) , # 'first case 3.0 when null then 4 when \'3\' then 1 end last' ,
|
||||
'first case q_double when \'a\' then %d when \'%d\' then %d end last' %(a1,a2,a3) , # 'first case q_double when \'a\' then 4 when \'0\' then 1 end last' ,
|
||||
'first case q_double when q_int then q_int when q_int - (%d) then q_int else %d end last' %(a1,a2), # 'first case q_double when q_int then q_int when q_int - 1 then q_int else 99 end last' ,
|
||||
'first case cast(q_double as int) when %d then q_double when q_int then %d else ts end last' %(a1,a2), #'first case cast(q_double as int) when 0 then q_double when q_int then 11 else ts end last' ,
|
||||
'first case q_int + (%d) when %d then %d when %d then %d else %d end last' %(a1,a2,a3,a1,a2,a3), #'first case q_int + 1 when 1 then 1 when 2 then 2 else 3 end last' ,
|
||||
'first case when \'a\' then \'b\' when null then %d end last' %(a1), # 'first case when \'a\' then \'b\' when null then 0 end last' ,
|
||||
'first case when \'%d\' then \'b\' when null then %d end last' %(a1,a2), # 'first case when \'2\' then \'b\' when null then 0 end last' ,
|
||||
'first case when %d then \'b\' else null end last' %(a1), # 'first case when 0 then \'b\' else null end last' ,
|
||||
'first case when %d then \'b\' else %d+abs(%d) end last' %(a1,a2,a3), # 'first case when 0 then \'b\' else 2+abs(-2) end last' ,
|
||||
'first case when %d then %d end last' %(a1,a2), # 'first case when 3 then 4 end last' ,
|
||||
'first case when %d then %d end last' %(a1,a2), # 'first case when 0 then 4 end last' ,
|
||||
'first case when null then %d end last' %(a1), # 'first case when null then 4 end last' ,
|
||||
'first case when %d then %d+(%d) end last' %(a1,a2,a3), # 'first case when 1 then 4+1 end last' ,
|
||||
'first case when %d-(%d) then %d end last' %(a1,a2,a3), # 'first case when 1-1 then 0 end last' ,
|
||||
'first case when %d+(%d) then %d end last' %(a1,a2,a3), # 'first case when 1+1 then 0 end last' ,
|
||||
'first case when abs(%d) then abs(%d) end last' %(a1,a2), # 'first case when abs(3) then abs(-1) end last' ,
|
||||
'first case when abs(%d+(%d)) then abs(%d)+abs(%d) end last' %(a1,a2,a3,a1), # 'first case when abs(1+1) then abs(-1)+abs(3) end last' ,
|
||||
'first case when %d then %d else %d end last' %(a1,a2,a3), # 'first case when 0 then 1 else 3 end last' ,
|
||||
'first case when %d then %d when %d then %d else %d end last' %(a1,a2,a3,a1,a2), # 'first case when 0 then 1 when 1 then 0 else 3 end last' ,
|
||||
'first case when %d then %d when %d then %d when %d then %d end last' %(a1,a2,a3,a1,a2,a3), # 'first case when 0 then 1 when 1 then 0 when 2 then 3 end last' ,
|
||||
'first case %d when %d then %d end last' %(a1,a1,a3), # 'first case 3 when 3 then 4 end last' ,
|
||||
'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 1 then 4 end last' ,
|
||||
'first case %d when %d then %d else %d end last' %(a1,a2,a3,a1), # 'first case 3 when 1 then 4 else 2 end last' ,
|
||||
'first case %d when null then %d when \'%d\' then %d end last' %(a1,a2,a1,a3), # 'first case 3 when null then 4 when \'3\' then 1 end last' ,
|
||||
'first case null when null then %d when %d then %d end last' %(a1,a2,a3), # 'first case null when null then 4 when 3 then 1 end last' ,
|
||||
'first case %d.0 when null then %d when \'%d\' then %d end last' %(a1,a2,a1,a3), # 'first case 3.0 when null then 4 when \'3\' then 1 end last' ,
|
||||
'first q_double,case q_double when \'a\' then %d when \'%d\' then %d end last' %(a1,a2,a3), #'first q_double,case q_double when \'a\' then 4 when \'0\' then 1 end last' ,
|
||||
'first case null when null then %d when %d then %d end last' %(a1,a2,a3), #'first case null when null then 4 when 3 then 1 end last' ,
|
||||
'first q_double,q_int,case q_double when q_int then q_int when q_int - (%d ) then q_int else %d end last' %(a1,a2), # 'first q_double,q_int,case q_double when q_int then q_int when q_int - 1 then q_int else 99 end last' ,
|
||||
'first case cast(q_double as int) when %d then q_double when q_int then %d else ts end last' %(a1,a2), # 'first case cast(q_double as int) when 0 then q_double when q_int then 11 else ts end last' ,
|
||||
'first q_int, case q_int + (%d) when %d then %d when %d then %d else %d end last' %(a1,a1,a1,a2,a2,a3), #'first q_int, case q_int + 1 when 1 then 1 when 2 then 2 else 3 end last' ,
|
||||
'first distinct loc, case t_int when t_bigint then t_ts else t_smallint + (%d) end last' %(a1), #'first distinct loc, case t_int when t_bigint then t_ts else t_smallint + 100 end last' ,
|
||||
]
#num = len(casewhen_lists)

casewhen_list = str(random.sample(casewhen_lists,50)).replace("[","").replace("]","").replace("'first","").replace("last'","").replace("\"first","").replace("last\"","")

return casewhen_list

def base_case(self,database):

for i in range(30):
cs = self.casewhen_list().split(',')[i]
sql1 = "select %s from %s.stable_1 where tbname = 'stable_1_1';" % (cs ,database)
sql2 = "select %s from %s.stable_1_1 ;" % (cs ,database)
self.constant_check(database,sql1,sql2,0)


def state_window_list(self):

a1,a2,a3 = random.randint(-2147483647,2147483647),random.randint(-2147483647,2147483647),random.randint(-2147483647,2147483647)
state_window_lists = ['first case when %d then %d end last' %(a1,a2) , #'first case when 3 then 4 end last' ,
|
||||
'first case when 0 then %d end last' %(a1), #'first case when 0 then 4 end last' ,
|
||||
'first case when null then %d end last' %(a1) , #'first case when null then 4 end last' ,
|
||||
'first case when %d-(%d) then 0 end last' %(a1,a1) , #'first case when 1-1 then 0 end last' ,
|
||||
'first case when %d+(%d) then 0 end last' %(a1,a1), #'first case when 1+1 then 0 end last' ,
|
||||
'first case when %d > 0 then %d < %d end last' %(a1,a1,a2), #'first case when 1 > 0 then 1 < 2 end last' ,
|
||||
'first case when %d > %d then %d < %d end last' %(a1,a2,a1,a2), #'first case when 1 > 2 then 1 < 2 end last' ,
|
||||
'first case when abs(%d) then abs(-(%d)) end last' %(a1,a2) ,#'first case when abs(3) then abs(-1) end last' ,
|
||||
'first case when 0 then %d else %d end last' %(a1,a2), #'first case when 0 then 1 else 3 end last' ,
|
||||
'first case when 0 then %d when 1 then %d else %d end last' %(a1,a1,a3), #'first case when 0 then 1 when 1 then 0 else 3 end last' ,
|
||||
'first case when 0 then %d when 1 then %d when 2 then %d end last' %(a1,a1,a3), #'first case when 0 then 1 when 1 then 0 when 2 then 3 end last' ,
|
||||
'first case when \'a\' then \'b\' when null then 0 end last' , #'first case when \'a\' then \'b\' when null then 0 end last' ,
|
||||
'first case when \'%d\' then \'b\' when null then %d end last' %(a1,a2) , #'first case when \'2\' then \'b\' when null then 0 end last' ,
|
||||
'first case when \'%d\' then \'b\' else null end last' %(a1), #'first case when \'0\' then \'b\' else null end last',
|
||||
'first case when \'%d\' then \'b\' else %d end last' %(a1,a2), #'first case when \'0\' then \'b\' else 2 end last',
|
||||
'first case when q_int then q_int when q_int + (%d) then q_int + (%d) else q_int is null end last' %(a1,a2) , #'first case when q_int then q_int when q_int + 1 then q_int + 1 else q_int is null end last' ,
|
||||
'first case when q_int then %d when ts then ts end last' %(a1), #'first case when q_int then 3 when ts then ts end last' ,
|
||||
'first case when %d then q_int end last' %(a1), #'first case when 3 then q_int end last' ,
|
||||
'first case when q_int then %d when %d then %d end last' %(a1,a1,a3), #'first case when q_int then 3 when 1 then 2 end last' ,
|
||||
'first case when q_int < %d then %d when q_int >= %d then %d else %d end last' %(a1,a2,a1,a2,a3), #'first case when q_int < 3 then 1 when q_int >= 3 then 2 else 3 end last' ,
|
||||
'first case when q_int is not null then case when q_int <= %d then q_int else q_int * (%d) end else -(%d) end last' %(a1,a1,a3), #'first case when q_int is not null then case when q_int <= 0 then q_int else q_int * 10 end else -1 end last' ,
|
||||
'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 3 then 4 end last' ,
|
||||
'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 1 then 4 end last' ,
|
||||
'first case %d when %d then %d else %d end last' %(a1,a1,a2,a3), # 'first case 3 when 1 then 4 else 2 end last' ,
|
||||
'first case %d when null then %d when \'%d\' then %d end last' %(a1,a1,a2,a3) , # 'first case 3 when null then 4 when \'3\' then 1 end last' ,
|
||||
'first case \'%d\' when null then %d when %d then %d end last' %(a1,a1,a2,a3), # 'first case \'3\' when null then 4 when 3 then 1 end last' ,
|
||||
'first case null when null then %d when %d then %d end last' %(a1,a2,a3), # 'first case null when null then 4 when 3 then 1 end last' ,
|
||||
'first case %d.0 when null then %d when \'%d\' then %d end last' %(a1,a1,a2,a3) , # 'first case 3.0 when null then 4 when \'3\' then 1 end last' ,
|
||||
'first case q_double when \'a\' then %d when \'%d\' then %d end last' %(a1,a2,a3) , # 'first case q_double when \'a\' then 4 when \'0\' then 1 end last' ,
|
||||
'first case q_double when q_int then q_int when q_int - (%d) then q_int else %d end last' %(a1,a2), # 'first case q_double when q_int then q_int when q_int - 1 then q_int else 99 end last' ,
|
||||
'first case q_int + (%d) when %d then %d when %d then %d else %d end last' %(a1,a2,a3,a1,a2,a3), #'first case q_int + 1 when 1 then 1 when 2 then 2 else 3 end last' ,
|
||||
'first case when \'a\' then \'b\' when null then %d end last' %(a1), # 'first case when \'a\' then \'b\' when null then 0 end last' ,
|
||||
'first case when \'%d\' then \'b\' when null then %d end last' %(a1,a2), # 'first case when \'2\' then \'b\' when null then 0 end last' ,
|
||||
'first case when %d then \'b\' else null end last' %(a1), # 'first case when 0 then \'b\' else null end last' ,
|
||||
'first case when %d then \'b\' else %d+abs(%d) end last' %(a1,a2,a3), # 'first case when 0 then \'b\' else 2+abs(-2) end last' ,
|
||||
'first case when %d then %d end last' %(a1,a2), # 'first case when 3 then 4 end last' ,
|
||||
'first case when %d then %d end last' %(a1,a2), # 'first case when 0 then 4 end last' ,
|
||||
'first case when null then %d end last' %(a1), # 'first case when null then 4 end last' ,
|
||||
#'first case when %d then %d+(%d) end last' %(a1,a2,a3), # 'first case when 1 then 4+1 end last' ,
|
||||
'first case when %d-(%d) then %d end last' %(a1,a2,a3), # 'first case when 1-1 then 0 end last' ,
|
||||
'first case when %d+(%d) then %d end last' %(a1,a2,a3), # 'first case when 1+1 then 0 end last' ,
|
||||
'first case when abs(%d) then abs(%d) end last' %(a1,a2), # 'first case when abs(3) then abs(-1) end last' ,
|
||||
#'first case when abs(%d+(%d)) then abs(%d)+abs(%d) end last' %(a1,a2,a3,a1), # 'first case when abs(1+1) then abs(-1)+abs(3) end last' ,
|
||||
'first case when %d then %d else %d end last' %(a1,a2,a3), # 'first case when 0 then 1 else 3 end last' ,
|
||||
'first case when %d then %d when %d then %d else %d end last' %(a1,a2,a3,a1,a2), # 'first case when 0 then 1 when 1 then 0 else 3 end last' ,
|
||||
'first case when %d then %d when %d then %d when %d then %d end last' %(a1,a2,a3,a1,a2,a3), # 'first case when 0 then 1 when 1 then 0 when 2 then 3 end last' ,
|
||||
'first case %d when %d then %d end last' %(a1,a1,a3), # 'first case 3 when 3 then 4 end last' ,
|
||||
'first case %d when %d then %d end last' %(a1,a2,a3), # 'first case 3 when 1 then 4 end last' ,
|
||||
'first case %d when %d then %d else %d end last' %(a1,a2,a3,a1), # 'first case 3 when 1 then 4 else 2 end last' ,
|
||||
'first case %d when null then %d when \'%d\' then %d end last' %(a1,a2,a1,a3), # 'first case 3 when null then 4 when \'3\' then 1 end last' ,
|
||||
'first case null when null then %d when %d then %d end last' %(a1,a2,a3), # 'first case null when null then 4 when 3 then 1 end last' ,
|
||||
'first case %d.0 when null then %d when \'%d\' then %d end last' %(a1,a2,a1,a3), # 'first case 3.0 when null then 4 when \'3\' then 1 end last' ,
|
||||
'first case null when null then %d when %d then %d end last' %(a1,a2,a3), #'first case null when null then 4 when 3 then 1 end last' ,
|
||||
'first q_int, case q_int + (%d) when %d then %d when %d then %d else %d end last' %(a1,a1,a1,a2,a2,a3), #'first q_int, case q_int + 1 when 1 then 1 when 2 then 2 else 3 end last' ,
|
||||
]

state_window_list = str(random.sample(state_window_lists,50)).replace("[","").replace("]","").replace("'first","").replace("last'","").replace("\"first","").replace("last\"","")

return state_window_list

def state_window_case(self,database):

for i in range(30):
cs = self.state_window_list().split(',')[i]
sql1 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1 where tbname = 'stable_1_1' state_window(%s);" % (database,cs)
sql2 = "select _wstart,avg(q_int),min(q_smallint) from %s.stable_1_1 state_window(%s) ;" % (database,cs)
self.constant_check(database,sql1,sql2,0)
self.constant_check(database,sql1,sql2,1)
self.constant_check(database,sql1,sql2,2)


def constant_check(self,database,sql1,sql2,column):
#column = 0 refers to column 0, column = n refers to column n-1
tdLog.info("\n=============sql1:(%s)___sql2:(%s) ====================\n" %(sql1,sql2))
tdSql.query(sql1)
queryRows = len(tdSql.queryResult)

for i in range(queryRows):
tdSql.query(sql1)
sql1_value = tdSql.getData(i,column)
tdSql.execute(" flush database %s;" %database)
tdSql.query(sql2)
sql2_value = tdSql.getData(i,column)
self.value_check(sql1_value,sql2_value)

def value_check(self,base_value,check_value):
if base_value==check_value:
tdLog.info(f"checkEqual success, base_value={base_value},check_value={check_value}")
else:
tdLog.exit(f"checkEqual error, base_value={base_value},check_value={check_value}")

def run(self):
fake = Faker('zh_CN')
fake_data = fake.random_int(min=-9223372036854775807, max=9223372036854775807, step=1)
fake_float = fake.pyfloat()
fake_str = fake.pystr()

startTime = time.time()

os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))

self.dropandcreateDB_random("%s" %self.db, 10)

self.users_bug("%s" %self.db)

self.base_case("%s" %self.db)

self.state_window_case("%s" %self.db)


#taos -f sql
print("taos -f sql start!")
taos_cmd1 = "taos -f %s/%s.sql" % (self.testcasePath,self.testcaseFilename)
_ = subprocess.check_output(taos_cmd1, shell=True)
print("taos -f sql over!")

endTime = time.time()
print("total time %ds" % (endTime - startTime))


def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
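The casewhen_list and state_window_list helpers above both rely on the same string trick: every template is wrapped in first ... last markers, a random sample of the list is stringified, and the brackets plus markers are stripped so that what remains is a comma-separated run of CASE WHEN fragments ready to splice into a SELECT. A standalone sketch of that mechanism, with two placeholder templates standing in for the full lists:

```python
import random

# Minimal sketch of the expression-fragment generation used above; the two
# templates are placeholders, the real lists hold dozens of CASE WHEN variants.
a1, a2 = random.randint(-2147483647, 2147483647), random.randint(-2147483647, 2147483647)
templates = [
    'first case when %d then %d end last' % (a1, a2),
    'first case when q_int < %d then %d else %d end last' % (a1, a2, a1),
]
# str() of the sampled list gives "['first ... last', 'first ... last']";
# stripping the brackets and the quoted first/last markers leaves plain
# comma-separated CASE WHEN fragments.
fragments = str(random.sample(templates, 2)) \
    .replace("[", "").replace("]", "") \
    .replace("'first", "").replace("last'", "")
print(fragments)
```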
@ -85,16 +85,11 @@ class TDTestCase:
|
|||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
|
||||
check_status = True
|
||||
tdSql.query(ceil_query)
|
||||
for row_index , row in enumerate(ceil_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] != elem:
|
||||
check_status = False
|
||||
if not check_status:
|
||||
tdLog.notice("ceil function value has not as expected , sql is \"%s\" "%ceil_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("ceil value check pass , it work as expected ,sql is \"%s\" "%ceil_query )
|
||||
tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
|
||||
|
||||
|
||||
def test_errors(self, dbname="db"):
|
||||
error_sql_lists = [
|
||||
|
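Judging by the shrinking line counts in the hunk header above, these hunks appear to drop the manual check_status/sys.exit bookkeeping in favor of a direct per-cell tdSql.checkData assertion. A minimal stand-in for that per-cell check, with names of our own choosing rather than the framework's:

```python
# Illustrative per-cell assertion, not the framework's tdSql.checkData itself:
# compare one expected cell against the queried result and fail with a
# descriptive message.
def check_cell(result_rows, row, col, expected):
    actual = result_rows[row][col]
    assert actual == expected, f"row {row}, col {col}: expected {expected!r}, got {actual!r}"

check_cell([[1.0, 2.0]], 0, 1, 2.0)  # passes silently
```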
@ -377,10 +372,10 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
|
@ -388,15 +383,15 @@ class TDTestCase:
|
|||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select ceil(c1), ceil(c2) ,ceil(c3), ceil(c4), ceil(c5) ,ceil(c6) from {dbname}.sub1_bound")
|
||||
self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select ceil(c1), ceil(c2) ,ceil(c3), ceil(c3), ceil(c2) ,ceil(c1) from {dbname}.sub1_bound")
|
||||
|
|
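The recurring change in these boundary-value hunks, here and in the sibling test files below, is to give every inserted row its own offset from now() (now()-10s, now()-5s, now()+5s, ...) instead of reusing now() or now()-1s. The first column of a TDengine table is its timestamp primary key, so rows written with identical timestamps overwrite or update one another rather than accumulating, which would silently shrink the boundary data set. A small, purely illustrative generator for such staggered statements (only the timestamp and two columns are shown; the real statements fill every column of sub1_bound):

```python
# Hypothetical generator for staggered boundary-value inserts; the table and
# column names are taken from the surrounding diff, the column subset is ours.
dbname = "bound_test"
boundary_rows = [
    ("now()-10s", 2147483647, 9223372036854775807),
    ("now()-5s", 2147483646, 9223372036854775806),
    ("now()+5s", 2147483643, 9223372036854775803),
    ("now()+10s", -2147483643, -9223372036854775803),
]
for ts, c1, c2 in boundary_rows:
    print(f"insert into {dbname}.sub1_bound (ts, c1, c2) values ( {ts}, {c1}, {c2} )")
```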
|
@ -84,26 +84,10 @@ class TDTestCase:
|
|||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
|
||||
check_status = True
|
||||
print("========",pow_query, origin_query )
|
||||
|
||||
tdSql.query(pow_query)
|
||||
for row_index , row in enumerate(pow_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] == None and elem:
|
||||
check_status = False
|
||||
elif auto_result[row_index][col_index] != None and ((auto_result[row_index][col_index] != elem) and (str(auto_result[row_index][col_index])[:6] != str(elem)[:6] )):
|
||||
# elif auto_result[row_index][col_index] != None and (abs(auto_result[row_index][col_index] - elem) > 0.000001):
|
||||
print("=====")
|
||||
print(row_index, col_index)
|
||||
print(auto_result[row_index][col_index], elem, origin_result[row_index][col_index])
|
||||
check_status = False
|
||||
else:
|
||||
pass
|
||||
if not check_status:
|
||||
tdLog.notice("cos function value has not as expected , sql is \"%s\" "%pow_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("cos value check pass , it work as expected ,sql is \"%s\" "%pow_query )
|
||||
tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
|
||||
|
||||
def test_errors(self, dbname="db"):
|
||||
error_sql_lists = [
|
||||
|
@ -413,16 +397,16 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
# self.check_result_auto_cos( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select cos(abs(c1)), cos(abs(c2)) ,cos(abs(c3)), cos(abs(c4)), cos(abs(c5)) from {dbname}.sub1_bound")
|
||||
|
||||
|
|
|
@ -85,16 +85,11 @@ class TDTestCase:
|
|||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
|
||||
check_status = True
|
||||
tdSql.query(floor_query)
|
||||
for row_index , row in enumerate(floor_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] != elem:
|
||||
check_status = False
|
||||
if not check_status:
|
||||
tdLog.notice("floor function value has not as expected , sql is \"%s\" "%floor_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("floor value check pass , it work as expected ,sql is \"%s\" "%floor_query )
|
||||
tdSql.checkData(row_index,col_index,auto_result[row_index][col_index])
|
||||
|
||||
|
||||
def test_errors(self, dbname=DBNAME):
|
||||
error_sql_lists = [
|
||||
|
@ -388,10 +383,10 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
|
@ -399,15 +394,15 @@ class TDTestCase:
|
|||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select floor(c1), floor(c2) ,floor(c3), floor(c4), floor(c5) ,floor(c6) from {dbname}.sub1_bound")
|
||||
self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select floor(c1), floor(c2) ,floor(c3), floor(c3), floor(c2) ,floor(c1) from {dbname}.sub1_bound")
|
||||
|
|
|
@ -364,10 +364,10 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
|
@ -375,15 +375,15 @@ class TDTestCase:
|
|||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.query(f"select stateduration(c1,'GT',1,1s) from {dbname}.sub1_bound")
|
||||
|
|
File diff suppressed because it is too large
|
@ -572,19 +572,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
# check basic elem for table per row
|
||||
|
|
|
@ -91,7 +91,6 @@ class TDTestCase:
|
|||
elem = math.log(elem , base)
|
||||
elif elem <=0:
|
||||
elem = None
|
||||
|
||||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
|
||||
|
@ -519,19 +518,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_log(None , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
|
||||
self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
|
||||
|
|
|
@ -507,19 +507,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_pow(2, f"select c1, c3 , c4, c5 from {dbname}.sub1_bound ", f"select pow(c1,2), pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.sub1_bound")
|
||||
self.check_result_auto_pow(3, f"select c1, c3 , c4, c5 from {dbname}.sub1_bound ", f"select pow(c1,3), pow(c3,3), pow(c4,3), pow(c5,3) from {dbname}.sub1_bound")
|
||||
|
|
|
@ -81,16 +81,12 @@ class TDTestCase:
|
|||
row_check.append(elem)
|
||||
auto_result.append(row_check)
|
||||
|
||||
check_status = True
|
||||
tdSql.query(round_query)
|
||||
for row_index , row in enumerate(round_result):
|
||||
for col_index , elem in enumerate(row):
|
||||
if auto_result[row_index][col_index] != elem:
|
||||
check_status = False
|
||||
if not check_status:
|
||||
tdLog.notice("round function value has not as expected , sql is \"%s\" "%round_query )
|
||||
sys.exit(1)
|
||||
else:
|
||||
tdLog.info("round value check pass , it work as expected ,sql is \"%s\" "%round_query )
|
||||
tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index])
|
||||
|
||||
|
||||
|
||||
def test_errors(self, dbname="db"):
|
||||
error_sql_lists = [
|
||||
|
@ -388,10 +384,10 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
|
@ -399,15 +395,15 @@ class TDTestCase:
|
|||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from {dbname}.sub1_bound")
|
||||
self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from {dbname}.sub1_bound")
|
||||
|
|
|
@ -394,19 +394,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)) from {dbname}.sub1_bound")
|
||||
|
||||
|
|
|
@ -443,19 +443,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.sub1_bound")
|
||||
|
||||
|
|
|
@ -451,10 +451,10 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
|
@ -462,15 +462,15 @@ class TDTestCase:
|
|||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.sub1_bound")
|
||||
|
|
|
@ -421,10 +421,10 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
|
@ -432,20 +432,20 @@ class TDTestCase:
|
|||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+15s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.query(f"select tail(c2,2) from {dbname}.sub1_bound order by 1 desc")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0,0,9223372036854775803)
|
||||
tdSql.query(f"select tail(c2,1) from {dbname}.sub1_bound order by 1 desc")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,0,-9223372036854775803)
|
||||
|
||||
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
|
||||
tdSql.prepare()
|
||||
|
|
|
@ -391,19 +391,19 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.sub1_bound")
|
||||
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
abs.py: def check_boundary_values(self):
|
||||
abs.py: self.check_boundary_values()
|
||||
and_or_for_byte.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
and_or_for_byte.py: self.check_boundary_values()
|
||||
arccos.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
arccos.py: self.check_boundary_values()
|
||||
arcsin.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
arcsin.py: self.check_boundary_values()
|
||||
arctan.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
arctan.py: self.check_boundary_values()
|
||||
avg.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
avg.py: self.check_boundary_values()
|
||||
ceil.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
ceil.py: self.check_boundary_values()
|
||||
cos.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
cos.py: self.check_boundary_values()
|
||||
floor.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
floor.py: self.check_boundary_values()
|
||||
function_stateduration.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
function_stateduration.py: self.check_boundary_values()
|
||||
last_row.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
last_row.py: self.check_boundary_values()
|
||||
log.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
log.py: self.check_boundary_values()
|
||||
pow.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
pow.py: self.check_boundary_values()
|
||||
round.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
round.py: self.check_boundary_values()
|
||||
sin.py: def check_boundary_values(self, dbname="testdb"):
|
||||
sin.py: self.check_boundary_values()
|
||||
sqrt.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
sqrt.py: self.check_boundary_values()
|
||||
statecount.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
statecount.py: self.check_boundary_values()
|
||||
tail.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
tail.py: self.check_boundary_values()
|
||||
tan.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
tan.py: self.check_boundary_values()
|
||||
unique.py: def check_boundary_values(self, dbname="bound_test"):
|
||||
unique.py: self.check_boundary_values()
|
|
@ -451,26 +451,26 @@ class TDTestCase:
|
|||
)
|
||||
tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-10s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()-5s, 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+5s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+10s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.execute(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+20s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.error(
|
||||
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
f"insert into {dbname}.sub1_bound values ( now()+30s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
|
||||
)
|
||||
|
||||
tdSql.query(f"select unique(c2) from {dbname}.sub1_bound order by 1 desc")
|
||||
|
|
|
@ -0,0 +1,99 @@
#!/bin/bash

##################################################
#
# Do simulation test
#
##################################################

set +e
#set -x

UNAME_BIN=`which uname`
OS_TYPE=`$UNAME_BIN`

cd .

# Get responsible directories
CODE_DIR=`dirname $0`
CODE_DIR=`pwd`

IN_TDINTERNAL="community"
if [[ "$CODE_DIR" == *"$IN_TDINTERNAL"* ]]; then
cd ../../..
else
cd ../../
fi

TOP_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`

if [[ "$OS_TYPE" != "Darwin" ]]; then
cut_opt="--field="
else
cut_opt="-f "
fi

if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3`
else
BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2`
fi

declare -x BUILD_DIR=$TOP_DIR/$BIN_DIR
declare -x SIM_DIR=$TOP_DIR/sim
PROGRAM=$BUILD_DIR/build/bin/tsim
PRG_DIR=$SIM_DIR/tsim
ASAN_DIR=$SIM_DIR/asan

chmod -R 777 $PRG_DIR
echo "------------------------------------------------------------------------"
echo "Start TDengine Testing Case ..."
echo "BUILD_DIR: $BUILD_DIR"
echo "SIM_DIR : $SIM_DIR"
echo "CODE_DIR : $CODE_DIR"
echo "ASAN_DIR : $ASAN_DIR"

rm -rf $SIM_DIR/*

mkdir -p $PRG_DIR
mkdir -p $ASAN_DIR

cd $CODE_DIR
ulimit -n 600000
ulimit -c unlimited

#sudo sysctl -w kernel.core_pattern=$TOP_DIR/core.%p.%e

echo "ExcuteCmd:" $*
AsanFile=$ASAN_DIR/psim.info
echo "AsanFile:" $AsanFile

unset LD_PRELOAD
#export LD_PRELOAD=libasan.so.5
export LD_PRELOAD=`gcc -print-file-name=libasan.so`
echo "Preload AsanSo:" $?

$* -a 2> $AsanFile

unset LD_PRELOAD
for ((i=1;i<=20;i++))
do
AsanFileLen=`cat $AsanFile | wc -l`
echo "AsanFileLen:" $AsanFileLen
if [ $AsanFileLen -gt 10 ]; then
break
fi
done

AsanFileSuccessLen=`grep -w successfully $AsanFile | wc -l`
echo "AsanFileSuccessLen:" $AsanFileSuccessLen

if [ $AsanFileSuccessLen -gt 0 ]; then
echo "Execute script successfully and check asan"
$CODE_DIR/../script/sh/checkAsan.sh
else
echo "Execute script failure"
exit 1
fi
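The new wrapper script above preloads libasan through LD_PRELOAD, reruns the given tsim command with stderr redirected to $ASAN_DIR/psim.info, polls that file until it has content, and greps it for 'successfully' before handing off to checkAsan.sh. A rough Python equivalent of the poll-and-check step, for illustration only (the file name and thresholds mirror the script; the helper itself is not part of the commit):

```python
import time
from pathlib import Path

def asan_run_succeeded(asan_file: str, min_lines: int = 10, retries: int = 20) -> bool:
    """Wait until the redirected stderr log has some content, then report
    whether the simulator printed 'successfully'. Illustrative helper only;
    the real check lives in the shell script above."""
    path = Path(asan_file)
    for _ in range(retries):
        if path.exists() and len(path.read_text().splitlines()) > min_lines:
            break
        time.sleep(1)  # the shell version busy-loops; a short sleep is friendlier
    return path.exists() and "successfully" in path.read_text()
```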
@ -73,8 +73,9 @@ if __name__ == "__main__":
createDnodeNums = 1
restful = False
replicaVar = 1
asan = False
independentMnode = True
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:i:', [
opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:i:a', [
'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','adaptercfgupdate','replicaVar','independentMnode'])
for key, value in opts:
if key in ['-h', '--help']:
@ -99,6 +100,7 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-D taosadapter update cfg dict ')
tdLog.printNoPrefix('-n the number of replicas')
tdLog.printNoPrefix('-i independentMnode Mnode')
tdLog.printNoPrefix('-a address sanitizer mode')

sys.exit(0)
@ -167,6 +169,9 @@ if __name__ == "__main__":
if key in ['-R', '--restful']:
restful = True

if key in ['-a', '--asan']:
asan = True

if key in ['-D', '--adaptercfgupdate']:
try:
adaptercfgupdate = eval(base64.b64decode(value.encode()).decode())
@ -387,6 +392,7 @@ if __name__ == "__main__":
tdDnodes.init(deployPath, masterIp)
tdDnodes.setTestCluster(testCluster)
tdDnodes.setValgrind(valgrind)
tdDnodes.setAsan(asan)
tdDnodes.stopAll()
is_test_framework = 0
key_word = 'tdCases.addLinux'
@ -547,4 +553,7 @@ if __name__ == "__main__":

if conn is not None:
conn.close()
if asan:
tdDnodes.StopAllSigint()
tdLog.info("Address sanitizer mode finished")
sys.exit(0)
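The test.py hunks above add a -a switch that flips an asan flag, pass it to the dnode manager through tdDnodes.setAsan(asan), and stop the dnodes with StopAllSigint() on exit in that mode. A minimal, self-contained sketch of the option-parsing part (the short-option string is trimmed to what the example needs; the real driver accepts many more flags):

```python
import getopt
import sys

# Minimal sketch of the new "-a"/"--asan" switch; only this flag and -f are
# parsed here, unlike the full option string in test.py.
asan = False
opts, _args = getopt.gnu_getopt(sys.argv[1:], 'af:', ['asan', 'file='])
for key, value in opts:
    if key in ['-a', '--asan']:
        asan = True

if asan:
    print("address sanitizer mode enabled")
```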