Merge branch 'main' into fix/3_liaohj
commit 55ac835294
@@ -315,7 +315,9 @@ def pre_test_build_win() {
python.exe -m pip install --upgrade pip
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.10
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
python -m pip uninstall taos-ws-py -y
python -m pip install taos-ws-py==0.2.8
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
}
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.2.0.0.alpha")
SET(TD_VER_NUMBER "3.1.2.0.alpha")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@@ -1,7 +1,7 @@
# cos
ExternalProject_Add(mxml
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
GIT_TAG release-2.12
GIT_TAG v2.12
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
@@ -9,7 +9,7 @@ description: This document describes how to query data in TDengine.
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }

SELECT [hints] [DISTINCT] [TAGS] select_list
SELECT [hints] [DISTINCT] select_list
from_clause
[WHERE condition]
[partition_by_clause]
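A minimal usage sketch of the first form above (added for illustration, not part of this commit; it assumes only a running TDengine instance and needs no table):

```sql
-- each pseudo-function can be queried on its own, without a FROM clause
SELECT SERVER_VERSION();
SELECT NOW();
```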
@@ -225,14 +225,6 @@ The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolu
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```

### TAGS Query

The TAGS keyword returns only tag columns from all child tables when only tag columns are specified. One row containing tag columns is returned for each child table.

```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```

## Query Objects

`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
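An illustrative query for the TAGS form described above (not from this commit; `meters`, `location`, and `groupid` are assumed names taken from the common TDengine sample schema):

```sql
-- one row of tag values is returned per child table of the supertable
SELECT TAGS location, groupid FROM meters;
```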
@@ -292,11 +292,11 @@ CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
LENGTH(expr)
```

**Description**: The length in bytes of a string
**Description**: The length in bytes

**Return value type**: Bigint

**Applicable data types**: VARCHAR and NCHAR fields or columns
**Applicable data types**: VARCHAR and NCHAR and VARBINARY

**Nested query**: It can be used in both the outer query and inner query in a nested query.
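A small sketch of the documented behaviour (illustrative only; `tb` and its BINARY column `c2` are the names used by the sim test added later in this diff, reused here as an assumption):

```sql
-- LENGTH counts bytes; for the BINARY column c2 this equals the stored byte length
SELECT LENGTH(c2) FROM tb;
```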
@@ -10,7 +10,7 @@ TDengine is an open-source, high-performance, cloud-native [time-series database](https://tde

## Main Products

TDengine has three main products: TDengine Pro (i.e., TDengine Enterprise Edition), TDengine Cloud, and TDengine OSS; for their specific definitions, see
TDengine has three main products: TDengine Enterprise (i.e., TDengine Enterprise Edition), TDengine Cloud, and TDengine OSS; for their specific definitions, see
- [TDengine Enterprise Edition](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
@@ -1004,7 +1004,7 @@ TaosConsumer consumer = new TaosConsumer<>(config);
- httpConnectTimeout: connection-creation timeout in ms, default 5000 ms. Effective only for WebSocket connections.
- messageWaitTimeout: data-transfer timeout in ms, default 10000 ms. Effective only for WebSocket connections.
- httpPoolSize: maximum number of parallel requests over a single connection. Effective only for WebSocket connections.
For other parameters, see: [Consumer parameter list](../../../develop/tmq#创建-consumer-以及consumer-group)
For other parameters, see: [Consumer parameter list](../../develop/tmq#创建-consumer-以及consumer-group)

#### Subscribe and consume data
@@ -1082,7 +1082,7 @@ consumer.unsubscribe();
consumer.close()
```

For details, see: [Data Subscription](../../../develop/tmq)
For details, see: [Data Subscription](../../develop/tmq)

#### Complete example
@@ -1373,7 +1373,7 @@ public static void main(String[] args) throws Exception {

**Solution**: switch to taos-jdbcdriver version 3.0.2 or later.

For other questions, see the [FAQ](../../../train-faq/faq)
For other questions, see the [FAQ](../../train-faq/faq)

## API Reference
@@ -352,7 +352,7 @@ client.put(&sml_data)?

### Data Subscription

TDengine starts a subscription through the message queue [TMQ](../../../taos-sql/tmq/).
TDengine starts a subscription through the message queue [TMQ](../../taos-sql/tmq/).

#### Create a Topic
@@ -491,7 +491,7 @@ let taos = pool.get()?;

## FAQ

See the [FAQ](../../../train-faq/faq)
See the [FAQ](../../train-faq/faq)

## API Reference
@@ -9,7 +9,7 @@ description: Detailed syntax for querying data
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }

SELECT [hints] [DISTINCT] [TAGS] select_list
SELECT [hints] [DISTINCT] select_list
from_clause
[WHERE condition]
[partition_by_clause]
@@ -160,16 +160,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;

:::

### Tag Query

When the queried columns are only tag columns, the `TAGS` keyword can be used to return the tag columns of all child tables. Only one row of tag columns is returned per child table.

Return the tag columns of all child tables:

```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```

### Result Set Column Names

In the `SELECT` clause, if no column names are specified for the result set, the expression names in the `SELECT` clause are used as the default column names. Users can also rename result-set columns with `AS`. For example:
@@ -292,11 +292,11 @@ CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
LENGTH(expr)
```

**Description**: string length counted in bytes.
**Description**: length counted in bytes.

**Return type**: BIGINT.

**Applicable data types**: the input parameter is a string or column of VARCHAR or NCHAR type.
**Applicable data types**: VARCHAR, NCHAR, VARBINARY.

**Nested query support**: can be used in both inner and outer queries.
@@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products

taosBenchmark can be installed in two ways:

- taosBenchmark is installed automatically when the official TDengine package is installed; see [TDengine installation](../../operation/pkg-install) for details.
- taosBenchmark is installed automatically when the official TDengine package is installed; see [TDengine installation](../../get-started/) for details.

- Build and install taos-tools separately; see the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@@ -16,7 +16,7 @@ taosKeeper is the exporter of TDengine 3.0 monitoring metrics; through simple
taosKeeper can be installed in two ways:
taosKeeper installation:

- taosKeeper is installed automatically when the official TDengine package is installed; see [TDengine installation](../../operation/pkg-install) for details.
- taosKeeper is installed automatically when the official TDengine package is installed; see [TDengine installation](../../get-started/) for details.

- Build and install taosKeeper separately; see the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
@@ -23,7 +23,7 @@ TDengine Source Connector is used to read data out of TDengine in real time and send
1. A Linux operating system
2. Java 8 and Maven installed
3. Git, curl, and vi installed
4. TDengine installed and started. If not yet installed, see [Install and Uninstall](../../operation/pkg-install)
4. TDengine installed and started. If not yet installed, see [Install and Uninstall](../../get-started/)

## Install Kafka
@@ -75,7 +75,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);

int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision, int32_t order);

int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision);
@@ -338,7 +338,7 @@ if [ "$verMode" == "cluster" ]; then
tmp_pwd=`pwd`
cd ${install_dir}/connector
if [ ! -d taos-connector-jdbc ];then
git clone -b 3.2.1 --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
git clone -b main --depth=1 https://github.com/taosdata/taos-connector-jdbc.git ||:
fi
cd taos-connector-jdbc
mvn clean package -Dmaven.test.skip=true
@@ -380,8 +380,7 @@ void destroySubRequests(SRequestObj *pRequest) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId,
pTmp->requestId);
tscError("prev req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -398,7 +397,7 @@ void destroySubRequests(SRequestObj *pRequest) {
removeRequest(pTmp->self);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
tscError("next req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}
@@ -492,8 +491,7 @@ void stopAllQueries(SRequestObj *pRequest) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId,
pTmp->requestId);
tscError("prev req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -512,7 +510,7 @@ void stopAllQueries(SRequestObj *pRequest) {
taosStopQueryImpl(pTmp);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
tscError("next req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}
@@ -874,8 +874,13 @@ void handleSubQueryFromAnalyse(SSqlCallbackWrapper *pWrapper, SMetaData *pResult
if (TSDB_CODE_SUCCESS == code) {
code = cloneCatalogReq(&pNewWrapper->pCatalogReq, pWrapper->pCatalogReq);
}
doAsyncQueryFromAnalyse(pResultMeta, pNewWrapper, code);
nodesDestroyNode(pRoot);
if (TSDB_CODE_SUCCESS == code) {
doAsyncQueryFromAnalyse(pResultMeta, pNewWrapper, code);
nodesDestroyNode(pRoot);
} else {
handleQueryAnslyseRes(pWrapper, pResultMeta, code);
return;
}
}

void handleQueryAnslyseRes(SSqlCallbackWrapper *pWrapper, SMetaData *pResultMeta, int32_t code) {
@@ -1148,8 +1153,7 @@ void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
pReqList[++reqIdx] = pTmp;
releaseRequest(tmpRefId);
} else {
tscError("0x%" PRIx64 ", prev req ref 0x%" PRIx64 " is not there, reqId:0x%" PRIx64, pTmp->self, tmpRefId,
pTmp->requestId);
tscError("prev req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}

@@ -1162,7 +1166,7 @@ void restartAsyncQuery(SRequestObj *pRequest, int32_t code) {
removeRequest(pTmp->self);
releaseRequest(pTmp->self);
} else {
tscError("0x%" PRIx64 " is not there", tmpRefId);
tscError("next req ref 0x%" PRIx64 " is not there", tmpRefId);
break;
}
}
@@ -692,34 +692,67 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
return (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision) + fraction);
}

int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
/**
 * @brief calc how many windows after filling between skey and ekey
 * @notes for asc order
 *     skey ---> ekey
 *       ^        ^
 * _____!_____.........._____|_____..
 *   |__1__)
 *         |__2__)...-->|_ret+1_)
 *       skey + ret * interval <= ekey
 *       skey + ret * interval + interval > ekey
 * ======> (ekey - skey - interval) / interval < ret <= (ekey - skey) / interval
 * For keys from blocks which do not need filling, skey + ret * interval == ekey.
 * For keys need filling, skey + ret * interval <= ekey.
 * Total num of windows is ret + 1(the last window)
 *
 *        for desc order
 *     skey <--- ekey
 *       ^        ^
 * _____|____..........______!____...
 *                       |_first_)
 *                 |__1__)
 * |_ret_)<--...|__2__)
 *      skey >= ekey - ret * interval
 *      skey < ekey - ret * interval + interval
 *=======> (ekey - skey) / interval <= ret < (ekey - skey + interval) / interval
 * For keys from blocks which do not need filling, skey == ekey - ret * interval.
 * For keys need filling, skey >= ekey - ret * interval.
 * Total num of windows is ret + 1(the first window)
 */
int32_t taosTimeCountIntervalForFill(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision,
int32_t order) {
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
int32_t ret;

if (unit != 'n' && unit != 'y') {
return (int32_t)((ekey - skey) / interval);
ret = (int32_t)((ekey - skey) / interval);
if (order == TSDB_ORDER_DESC && ret * interval < (ekey - skey)) ret += 1;
} else {
skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));

struct tm tm;
time_t t = (time_t)skey;
taosLocalTime(&t, &tm, NULL);
int32_t smon = tm.tm_year * 12 + tm.tm_mon;

t = (time_t)ekey;
taosLocalTime(&t, &tm, NULL);
int32_t emon = tm.tm_year * 12 + tm.tm_mon;

if (unit == 'y') {
interval *= 12;
}
ret = (emon - smon) / (int32_t)interval;
if (order == TSDB_ORDER_DESC && ret * interval < (smon - emon)) ret += 1;
}

skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));

struct tm tm;
time_t t = (time_t)skey;
taosLocalTime(&t, &tm, NULL);
int32_t smon = tm.tm_year * 12 + tm.tm_mon;

t = (time_t)ekey;
taosLocalTime(&t, &tm, NULL);
int32_t emon = tm.tm_year * 12 + tm.tm_mon;

if (unit == 'y') {
interval *= 12;
}

return (emon - smon) / (int32_t)interval;
return ret + 1;
}

int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) {
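As a quick numeric check of the comment above (numbers chosen for illustration, not taken from the commit): with skey = 0, ekey = 10 and interval = 3 in ascending order, ret = (10 - 0) / 3 = 3, and the function returns ret + 1 = 4 windows — [0,3), [3,6), [6,9) and [9,12) — exactly the windows needed to cover both keys after filling.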
@@ -2342,9 +2342,9 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) {
alterReq.alterType, alterReq.numOfFields, alterReq.ttl);

SName name = {0};
tNameFromString(&name, pDb->name, T_NAME_ACCT | T_NAME_DB);
tNameFromString(&name, alterReq.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);

auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, alterReq.name, detail);
auditRecord(pReq, pMnode->clusterId, "alterStb", name.dbname, name.tname, detail);

_OVER:
if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
@@ -51,7 +51,7 @@ int32_t streamStateSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, SStreamS

SStreamSnapReader* pSnapReader = NULL;

if (streamSnapReaderOpen(pTq, sver, chkpId, pTq->path, &pSnapReader) == 0) {
if (streamSnapReaderOpen(meta, sver, chkpId, pTq->path, &pSnapReader) == 0) {
pReader->complete = 1;
} else {
code = -1;
@@ -248,10 +248,14 @@ int32_t streamTaskSnapWrite(SStreamTaskWriter* pWriter, uint8_t* pData, uint32_t
tDecoderClear(&decoder);
// tdbTbInsert(TTB *pTb, const void *pKey, int keyLen, const void *pVal, int valLen, TXN *pTxn)
int64_t key[2] = {task.streamId, task.taskId};

taosWLockLatch(&pTq->pStreamMeta->lock);
if (tdbTbUpsert(pTq->pStreamMeta->pTaskDb, key, sizeof(int64_t) << 1, (uint8_t*)pData + sizeof(SSnapDataHdr),
nData - sizeof(SSnapDataHdr), pWriter->txn) < 0) {
taosWUnLockLatch(&pTq->pStreamMeta->lock);
return -1;
}
taosWUnLockLatch(&pTq->pStreamMeta->lock);
} else if (pHdr->type == SNAP_DATA_STREAM_TASK_CHECKPOINT) {
// do nothing
}
@@ -980,9 +980,7 @@ static int32_t tsdbDataFileDoWriteTableOldData(SDataFileWriter *writer, const TS
writer->ctx->brinBlkArray = NULL;
writer->ctx->tbHasOldData = false;
goto _exit;
}

for (; writer->ctx->brinBlkArrayIdx < TARRAY2_SIZE(writer->ctx->brinBlkArray); writer->ctx->brinBlkArrayIdx++) {
} else {
const SBrinBlk *brinBlk = TARRAY2_GET_PTR(writer->ctx->brinBlkArray, writer->ctx->brinBlkArrayIdx);

if (brinBlk->minTbid.uid != writer->ctx->tbid->uid) {

@@ -995,7 +993,6 @@ static int32_t tsdbDataFileDoWriteTableOldData(SDataFileWriter *writer, const TS

writer->ctx->brinBlockIdx = 0;
writer->ctx->brinBlkArrayIdx++;
break;
}
}

@@ -1110,9 +1107,7 @@ static int32_t tsdbDataFileWriteTableDataBegin(SDataFileWriter *writer, const TA
if (writer->ctx->brinBlkArrayIdx >= TARRAY2_SIZE(writer->ctx->brinBlkArray)) {
writer->ctx->brinBlkArray = NULL;
break;
}

for (; writer->ctx->brinBlkArrayIdx < TARRAY2_SIZE(writer->ctx->brinBlkArray); writer->ctx->brinBlkArrayIdx++) {
} else {
const SBrinBlk *brinBlk = TARRAY2_GET_PTR(writer->ctx->brinBlkArray, writer->ctx->brinBlkArrayIdx);

code = tsdbDataFileReadBrinBlock(writer->ctx->reader, brinBlk, writer->ctx->brinBlock);

@@ -1120,7 +1115,6 @@ static int32_t tsdbDataFileWriteTableDataBegin(SDataFileWriter *writer, const TA

writer->ctx->brinBlockIdx = 0;
writer->ctx->brinBlkArrayIdx++;
break;
}
}

@@ -1251,9 +1245,7 @@ static int32_t tsdbDataFileDoWriteTombRecord(SDataFileWriter *writer, const STom
if (writer->ctx->tombBlkArrayIdx >= TARRAY2_SIZE(writer->ctx->tombBlkArray)) {
writer->ctx->hasOldTomb = false;
break;
}

for (; writer->ctx->tombBlkArrayIdx < TARRAY2_SIZE(writer->ctx->tombBlkArray); ++writer->ctx->tombBlkArrayIdx) {
} else {
const STombBlk *tombBlk = TARRAY2_GET_PTR(writer->ctx->tombBlkArray, writer->ctx->tombBlkArrayIdx);

code = tsdbDataFileReadTombBlock(writer->ctx->reader, tombBlk, writer->ctx->tombBlock);

@@ -1261,7 +1253,6 @@ static int32_t tsdbDataFileDoWriteTombRecord(SDataFileWriter *writer, const STom

writer->ctx->tombBlockIdx = 0;
writer->ctx->tombBlkArrayIdx++;
break;
}
}
@@ -174,11 +174,17 @@ int32_t save_fs(const TFileSetArray *arr, const char *fname) {

// fset
cJSON *ajson = cJSON_AddArrayToObject(json, "fset");
if (!ajson) TSDB_CHECK_CODE(code = TSDB_CODE_OUT_OF_MEMORY, lino, _exit);
if (!ajson) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
const STFileSet *fset;
TARRAY2_FOREACH(arr, fset) {
cJSON *item = cJSON_CreateObject();
if (!item) TSDB_CHECK_CODE(code = TSDB_CODE_OUT_OF_MEMORY, lino, _exit);
if (!item) {
code = TSDB_CODE_OUT_OF_MEMORY;
TSDB_CHECK_CODE(code, lino, _exit);
}
cJSON_AddItemToArray(ajson, item);

code = tsdbTFileSetToJson(fset, item);

@@ -231,7 +237,8 @@ static int32_t load_fs(STsdb *pTsdb, const char *fname, TFileSetArray *arr) {
TSDB_CHECK_CODE(code, lino, _exit);
}
} else {
TSDB_CHECK_CODE(code = TSDB_CODE_FILE_CORRUPTED, lino, _exit);
code = TSDB_CODE_FILE_CORRUPTED;
TSDB_CHECK_CODE(code, lino, _exit);
}

_exit:

@@ -312,7 +319,8 @@ static int32_t commit_edit(STFileSystem *fs) {
int32_t code;
int32_t lino;
if ((code = taosRenameFile(current_t, current))) {
TSDB_CHECK_CODE(code = TAOS_SYSTEM_ERROR(code), lino, _exit);
code = TAOS_SYSTEM_ERROR(code);
TSDB_CHECK_CODE(code, lino, _exit);
}

code = apply_commit(fs);

@@ -345,7 +353,8 @@ static int32_t abort_edit(STFileSystem *fs) {
int32_t code;
int32_t lino;
if ((code = taosRemoveFile(fname))) {
TSDB_CHECK_CODE(code = TAOS_SYSTEM_ERROR(code), lino, _exit);
code = TAOS_SYSTEM_ERROR(code);
TSDB_CHECK_CODE(code, lino, _exit);
}

code = apply_abort(fs);

@@ -398,7 +407,7 @@ static int32_t tsdbFSAddEntryToFileObjHash(STFileHash *hash, const char *fname)
STFileHashEntry *entry = taosMemoryMalloc(sizeof(*entry));
if (entry == NULL) return TSDB_CODE_OUT_OF_MEMORY;

strcpy(entry->fname, fname);
strncpy(entry->fname, fname, TSDB_FILENAME_LEN);

uint32_t idx = MurmurHash3_32(fname, strlen(fname)) % hash->numBucket;

@@ -873,7 +882,7 @@ int32_t tsdbFSCreateCopySnapshot(STFileSystem *fs, TFileSetArray **fsetArr) {
STFileSet *fset1;

fsetArr[0] = taosMemoryMalloc(sizeof(TFileSetArray));
if (fsetArr == NULL) return TSDB_CODE_OUT_OF_MEMORY;
if (fsetArr[0] == NULL) return TSDB_CODE_OUT_OF_MEMORY;

TARRAY2_INIT(fsetArr[0]);
@@ -46,8 +46,8 @@ static int32_t tsdbSttLvlInitEx(STsdb *pTsdb, const SSttLvl *lvl1, SSttLvl **lvl
return code;
}

TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
code = TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
if (code) return code;
}
return 0;
}

@@ -185,7 +186,8 @@ static int32_t tsdbJsonToSttLvl(STsdb *pTsdb, const cJSON *json, SSttLvl **lvl)
return code;
}

TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
code = TARRAY2_APPEND(lvl[0]->fobjArr, fobj);
if (code) return code;
}
return 0;
}

@@ -263,7 +265,8 @@ int32_t tsdbJsonToTFileSet(STsdb *pTsdb, const cJSON *json, STFileSet **fset) {
return code;
}

TARRAY2_APPEND((*fset)->lvlArr, lvl);
code = TARRAY2_APPEND((*fset)->lvlArr, lvl);
if (code) return code;
}
} else {
return TSDB_CODE_FILE_CORRUPTED;

@@ -326,11 +329,12 @@ int32_t tsdbTFileSetEdit(STsdb *pTsdb, STFileSet *fset, const STFileOp *op) {

STFileObj tfobj = {.f[0] = {.cid = op->of.cid}}, *tfobjp = &tfobj;
STFileObj **fobjPtr = TARRAY2_SEARCH(lvl->fobjArr, &tfobjp, tsdbTFileObjCmpr, TD_EQ);
tfobjp = (fobjPtr ? *fobjPtr : NULL);

ASSERT(tfobjp);

tfobjp->f[0] = op->nf;
if (fobjPtr) {
tfobjp = *fobjPtr;
tfobjp->f[0] = op->nf;
} else {
tsdbError("file not found, cid:%" PRId64, op->of.cid);
}
} else {
fset->farr[op->nf.type]->f[0] = op->nf;
}
|
|||
};
|
||||
|
||||
void remove_file(const char *fname) {
|
||||
taosRemoveFile(fname);
|
||||
tsdbInfo("file:%s is removed", fname);
|
||||
int32_t code = taosRemoveFile(fname);
|
||||
if (code) {
|
||||
tsdbError("file:%s remove failed", fname);
|
||||
} else {
|
||||
tsdbInfo("file:%s is removed", fname);
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t tfile_to_json(const STFile *file, cJSON *json) {
|
||||
|
|
|
@@ -356,7 +356,8 @@ static int32_t tsdbSttIterOpen(STsdbIter *iter) {
}

iter->sttData->sttBlkArrayIdx = 0;
tBlockDataCreate(iter->sttData->blockData);
code = tBlockDataCreate(iter->sttData->blockData);
if (code) return code;
iter->sttData->blockDataIdx = 0;

return tsdbSttIterNext(iter, NULL);

@@ -381,7 +382,8 @@ static int32_t tsdbDataIterOpen(STsdbIter *iter) {
iter->dataData->brinBlockIdx = 0;

// SBlockData
tBlockDataCreate(iter->dataData->blockData);
code = tBlockDataCreate(iter->dataData->blockData);
if (code) return code;
iter->dataData->blockDataIdx = 0;

return tsdbDataIterNext(iter, NULL);
@@ -308,10 +308,11 @@ static void setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbName, ch

if (retentions) {
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, " RETENTIONS %s", retentions);
taosMemoryFree(retentions);
}
}

taosMemoryFree(retentions);

(varDataLen(buf2)) = len;

colDataSetVal(pCol2, 0, buf2, false);
@@ -446,7 +446,7 @@ int32_t createDataInserter(SDataSinkManager* pManager, const SDataSinkNode* pDat
taosThreadMutexInit(&inserter->mutex, NULL);
if (NULL == inserter->pDataBlocks) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
goto _return;
}

inserter->fullOrderColList = pInserterNode->pCols->length == inserter->pSchema->numOfCols;
@@ -151,14 +151,21 @@ static void updatePostJoinCurrTableInfo(SStbJoinDynCtrlInfo* pStbJoin)
static int32_t buildGroupCacheOperatorParam(SOperatorParam** ppRes, int32_t downstreamIdx, int32_t vgId, int64_t tbUid, bool needCache, SOperatorParam* pChild) {
*ppRes = taosMemoryMalloc(sizeof(SOperatorParam));
if (NULL == *ppRes) {
freeOperatorParam(pChild, OP_GET_PARAM);
return TSDB_CODE_OUT_OF_MEMORY;
}
if (pChild) {
(*ppRes)->pChildren = taosArrayInit(1, POINTER_BYTES);
if (NULL == *ppRes) {
if (NULL == (*ppRes)->pChildren) {
freeOperatorParam(pChild, OP_GET_PARAM);
freeOperatorParam(*ppRes, OP_GET_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}
if (NULL == taosArrayPush((*ppRes)->pChildren, &pChild)) {
freeOperatorParam(pChild, OP_GET_PARAM);
freeOperatorParam(*ppRes, OP_GET_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}
} else {

@@ -167,6 +174,8 @@ static int32_t buildGroupCacheOperatorParam(SOperatorParam** ppRes, int32_t down

SGcOperatorParam* pGc = taosMemoryMalloc(sizeof(SGcOperatorParam));
if (NULL == pGc) {
freeOperatorParam(*ppRes, OP_GET_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}

@@ -193,6 +202,7 @@ static int32_t buildGroupCacheNotifyOperatorParam(SOperatorParam** ppRes, int32_

SGcNotifyOperatorParam* pGc = taosMemoryMalloc(sizeof(SGcNotifyOperatorParam));
if (NULL == pGc) {
freeOperatorParam(*ppRes, OP_NOTIFY_PARAM);
return TSDB_CODE_OUT_OF_MEMORY;
}

@@ -248,6 +258,7 @@ static int32_t buildBatchExchangeOperatorParam(SOperatorParam** ppRes, int32_t d

SExchangeOperatorBatchParam* pExc = taosMemoryMalloc(sizeof(SExchangeOperatorBatchParam));
if (NULL == pExc) {
taosMemoryFreeClear(*ppRes);
return TSDB_CODE_OUT_OF_MEMORY;
}

@@ -255,6 +266,7 @@ static int32_t buildBatchExchangeOperatorParam(SOperatorParam** ppRes, int32_t d
pExc->pBatchs = tSimpleHashInit(tSimpleHashGetSize(pVg), taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT));
if (NULL == pExc->pBatchs) {
taosMemoryFree(pExc);
taosMemoryFreeClear(*ppRes);
return TSDB_CODE_OUT_OF_MEMORY;
}
tSimpleHashSetFreeFp(pExc->pBatchs, freeExchangeGetBasicOperatorParam);

@@ -288,21 +300,36 @@ static int32_t buildBatchExchangeOperatorParam(SOperatorParam** ppRes, int32_t d
static int32_t buildMergeJoinOperatorParam(SOperatorParam** ppRes, bool initParam, SOperatorParam* pChild0, SOperatorParam* pChild1) {
*ppRes = taosMemoryMalloc(sizeof(SOperatorParam));
if (NULL == *ppRes) {
freeOperatorParam(pChild0, OP_GET_PARAM);
freeOperatorParam(pChild1, OP_GET_PARAM);
return TSDB_CODE_OUT_OF_MEMORY;
}
(*ppRes)->pChildren = taosArrayInit(2, POINTER_BYTES);
if (NULL == *ppRes) {
freeOperatorParam(pChild0, OP_GET_PARAM);
freeOperatorParam(pChild1, OP_GET_PARAM);
freeOperatorParam(*ppRes, OP_GET_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}
if (NULL == taosArrayPush((*ppRes)->pChildren, &pChild0)) {
freeOperatorParam(pChild0, OP_GET_PARAM);
freeOperatorParam(pChild1, OP_GET_PARAM);
freeOperatorParam(*ppRes, OP_GET_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}
if (NULL == taosArrayPush((*ppRes)->pChildren, &pChild1)) {
freeOperatorParam(pChild1, OP_GET_PARAM);
freeOperatorParam(*ppRes, OP_GET_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}

SSortMergeJoinOperatorParam* pJoin = taosMemoryMalloc(sizeof(SSortMergeJoinOperatorParam));
if (NULL == pJoin) {
freeOperatorParam(*ppRes, OP_GET_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}

@@ -318,16 +345,28 @@ static int32_t buildMergeJoinOperatorParam(SOperatorParam** ppRes, bool initPara
static int32_t buildMergeJoinNotifyOperatorParam(SOperatorParam** ppRes, SOperatorParam* pChild0, SOperatorParam* pChild1) {
*ppRes = taosMemoryMalloc(sizeof(SOperatorParam));
if (NULL == *ppRes) {
freeOperatorParam(pChild0, OP_NOTIFY_PARAM);
freeOperatorParam(pChild1, OP_NOTIFY_PARAM);
return TSDB_CODE_OUT_OF_MEMORY;
}
(*ppRes)->pChildren = taosArrayInit(2, POINTER_BYTES);
if (NULL == *ppRes) {
taosMemoryFreeClear(*ppRes);
freeOperatorParam(pChild0, OP_NOTIFY_PARAM);
freeOperatorParam(pChild1, OP_NOTIFY_PARAM);
return TSDB_CODE_OUT_OF_MEMORY;
}
if (pChild0 && NULL == taosArrayPush((*ppRes)->pChildren, &pChild0)) {
freeOperatorParam(*ppRes, OP_NOTIFY_PARAM);
freeOperatorParam(pChild0, OP_NOTIFY_PARAM);
freeOperatorParam(pChild1, OP_NOTIFY_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}
if (pChild1 && NULL == taosArrayPush((*ppRes)->pChildren, &pChild1)) {
freeOperatorParam(*ppRes, OP_NOTIFY_PARAM);
freeOperatorParam(pChild1, OP_NOTIFY_PARAM);
*ppRes = NULL;
return TSDB_CODE_OUT_OF_MEMORY;
}

@@ -420,13 +459,34 @@ static int32_t buildSeqStbJoinOperatorParam(SDynQueryCtrlOperatorInfo* pInfo, SS

if (TSDB_CODE_SUCCESS == code) {
code = buildGroupCacheOperatorParam(&pGcParam0, 0, *leftVg, *leftUid, pPost->leftNeedCache, pSrcParam0);
pSrcParam0 = NULL;
}
if (TSDB_CODE_SUCCESS == code) {
code = buildGroupCacheOperatorParam(&pGcParam1, 1, *rightVg, *rightUid, pPost->rightNeedCache, pSrcParam1);
pSrcParam1 = NULL;
}
if (TSDB_CODE_SUCCESS == code) {
code = buildMergeJoinOperatorParam(ppParam, pSrcParam0 ? true : false, pGcParam0, pGcParam1);
}
if (TSDB_CODE_SUCCESS != code) {
if (pSrcParam0) {
freeOperatorParam(pSrcParam0, OP_GET_PARAM);
}
if (pSrcParam1) {
freeOperatorParam(pSrcParam1, OP_GET_PARAM);
}
if (pGcParam0) {
freeOperatorParam(pGcParam0, OP_GET_PARAM);
}
if (pGcParam1) {
freeOperatorParam(pGcParam1, OP_GET_PARAM);
}
if (*ppParam) {
freeOperatorParam(*ppParam, OP_GET_PARAM);
*ppParam = NULL;
}
}

return code;
}

@@ -488,7 +548,7 @@ static void handleSeqJoinCurrRetrieveEnd(SOperatorInfo* pOperator, SStbJoinDynCt

if (pPost->leftNeedCache) {
uint32_t* num = tSimpleHashGet(pStbJoin->ctx.prev.leftCache, &pPost->leftCurrUid, sizeof(pPost->leftCurrUid));
if (--(*num) <= 0) {
if (num && --(*num) <= 0) {
tSimpleHashRemove(pStbJoin->ctx.prev.leftCache, &pPost->leftCurrUid, sizeof(pPost->leftCurrUid));
notifySeqJoinTableCacheEnd(pOperator, pPost, true);
}
@@ -277,7 +277,7 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p
SFilterColumnParam param2 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock};
code = filterSetDataFromSlotId(pInfo->pEndCondInfo, &param2);
if (code != TSDB_CODE_SUCCESS) {
return code;
goto _return;
}

int32_t status2 = 0;

@@ -331,10 +331,12 @@ int32_t eventWindowAggImpl(SOperatorInfo* pOperator, SEventWindowOperatorInfo* p
}
}

_return:

colDataDestroy(ps);
taosMemoryFree(ps);
colDataDestroy(pe);
taosMemoryFree(pe);

return TSDB_CODE_SUCCESS;
return code;
}
@@ -223,6 +223,9 @@ static int32_t acquireFdFromFileCtx(SGcFileCacheCtx* pFileCtx, int32_t fileId, S
SGroupCacheFileInfo newFile = {0};
taosHashPut(pFileCtx->pCacheFile, &fileId, sizeof(fileId), &newFile, sizeof(newFile));
pTmp = taosHashGet(pFileCtx->pCacheFile, &fileId, sizeof(fileId));
if (NULL == pTmp) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}

if (pTmp->deleted) {

@@ -287,7 +290,7 @@ static int32_t saveBlocksToDisk(SGroupCacheOperatorInfo* pGCache, SGcDownstreamC

if (deleted) {
qTrace("FileId:%d-%d-%d already be deleted, skip write",
pCtx->id, pGroup->vgId, pHead->basic.fileId);
pCtx->id, pGroup ? pGroup->vgId : GROUP_CACHE_DEFAULT_VGID, pHead->basic.fileId);

int64_t blkId = pHead->basic.blkId;
pHead = pHead->next;

@@ -337,7 +340,9 @@ static int32_t addBlkToDirtyBufList(SGroupCacheOperatorInfo* pGCache, SGcDownstr
return TSDB_CODE_OUT_OF_MEMORY;
}
pBufInfo = taosHashGet(pCache->pDirtyBlk, &pBufInfo->basic.blkId, sizeof(pBufInfo->basic.blkId));

if (NULL == pBufInfo) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t code = TSDB_CODE_SUCCESS;
SGcBlkBufInfo* pWriteHead = NULL;

@@ -378,6 +383,10 @@ static int32_t addBlkToDirtyBufList(SGroupCacheOperatorInfo* pGCache, SGcDownstr

static FORCE_INLINE void chkRemoveVgroupCurrFile(SGcFileCacheCtx* pFileCtx, int32_t downstreamIdx, int32_t vgId) {
SGroupCacheFileInfo* pFileInfo = taosHashGet(pFileCtx->pCacheFile, &pFileCtx->fileId, sizeof(pFileCtx->fileId));
if (NULL == pFileInfo) {
return;
}

if (0 == pFileInfo->groupNum) {
removeGroupCacheFile(pFileInfo);

@@ -711,6 +720,9 @@ static int32_t addFileRefTableNum(SGcFileCacheCtx* pFileCtx, int32_t fileId, int
newFile.groupNum = 1;
taosHashPut(pFileCtx->pCacheFile, &fileId, sizeof(fileId), &newFile, sizeof(newFile));
pTmp = taosHashGet(pFileCtx->pCacheFile, &fileId, sizeof(fileId));
if (NULL == pTmp) {
return TSDB_CODE_OUT_OF_MEMORY;
}
} else {
pTmp->groupNum++;
}

@@ -786,6 +798,9 @@ static int32_t addNewGroupData(struct SOperatorInfo* pOperator, SOperatorParam*
}

*ppGrp = taosHashGet(pGrpHash, &uid, sizeof(uid));
if (NULL == *ppGrp) {
return TSDB_CODE_OUT_OF_MEMORY;
}
initNewGroupData(pCtx, *ppGrp, pParam->downstreamIdx, vgId, pGCache->batchFetch, pGcParam->needCache);

qError("new group %" PRIu64 " initialized, downstreamIdx:%d, vgId:%d, needCache:%d", uid, pParam->downstreamIdx, vgId, pGcParam->needCache);
@@ -636,12 +636,14 @@ static int32_t addRowToHashImpl(SHJoinOperatorInfo* pJoin, SGroupData* pGroup, S

int32_t code = getValBufFromPages(pJoin->pRowBufs, getHJoinValBufSize(pTable, rowIdx), &pTable->valData, pRow);
if (code) {
taosMemoryFree(pRow);
return code;
}

if (NULL == pGroup) {
pRow->next = NULL;
if (tSimpleHashPut(pJoin->pKeyHash, pTable->keyData, keyLen, &group, sizeof(group))) {
taosMemoryFree(pRow);
return TSDB_CODE_OUT_OF_MEMORY;
}
} else {
@@ -711,6 +711,11 @@ static bool mergeJoinGetNextTimestamp(SOperatorInfo* pOperator, int64_t* pLeftTs
}
}
}

if (NULL == pJoinInfo->pLeft || NULL == pJoinInfo->pRight) {
setMergeJoinDone(pOperator);
return false;
}

// only the timestamp match support for ordinary table
SColumnInfoData* pLeftCol = taosArrayGet(pJoinInfo->pLeft->pDataBlock, pJoinInfo->leftCol.slotId);
@@ -2883,7 +2883,7 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) {
}


static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI, STagScanInfo* pInfo) {
static int32_t tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI, STagScanInfo* pInfo) {
int32_t code = 0;
int32_t numOfTables = taosArrayGetSize(aUidTags);

@@ -2894,9 +2894,15 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aF
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};

SScalarParam output = {0};
tagScanCreateResultData(&type, numOfTables, &output);
code = tagScanCreateResultData(&type, numOfTables, &output);
if (TSDB_CODE_SUCCESS != code) {
return code;
}

scalarCalculate(pTagCond, pBlockList, &output);
code = scalarCalculate(pTagCond, pBlockList, &output);
if (TSDB_CODE_SUCCESS != code) {
return code;
}

bool* result = (bool*)output.columnData->pData;
for (int32_t i = 0 ; i < numOfTables; ++i) {

@@ -2911,7 +2917,7 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aF
blockDataDestroy(pResBlock);
taosArrayDestroy(pBlockList);


return TSDB_CODE_SUCCESS;
}

static void tagScanFillOneCellWithTag(SOperatorInfo* pOperator, const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) {

@@ -3024,7 +3030,11 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) {
bool ignoreFilterIdx = true;
if (pInfo->pTagCond != NULL) {
ignoreFilterIdx = false;
tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo);
int32_t code = tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo);
if (TSDB_CODE_SUCCESS != code) {
pOperator->pTaskInfo->code = code;
T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
} else {
ignoreFilterIdx = true;
}
@@ -669,8 +669,13 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle*
p->info.id.groupId = tupleGroupId;
pInfo->groupId = tupleGroupId;
} else {
pInfo->prefetchedTuple = pTupleHandle;
break;
if (p->info.rows == 0) {
appendOneRowToDataBlock(p, pTupleHandle);
p->info.id.groupId = pInfo->groupId = tupleGroupId;
} else {
pInfo->prefetchedTuple = pTupleHandle;
break;
}
}
} else {
appendOneRowToDataBlock(p, pTupleHandle);

@@ -715,14 +720,9 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
resetLimitInfoForNextGroup(&pInfo->limitInfo);
}

bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);
// if limit is reached within a group, do not clear limiInfo otherwise the next block
// will be processed.
if (newgroup && limitReached) {
resetLimitInfoForNextGroup(&pInfo->limitInfo);
}
applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);

if (p->info.rows > 0 || limitReached) {
if (p->info.rows > 0) {
break;
}
}
@@ -578,9 +578,8 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma
SColumnInfoData* pCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, pFillInfo->srcTsSlotId);
int64_t* tsList = (int64_t*)pCol->pData;
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
numOfRes = taosTimeCountInterval(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);
numOfRes += 1;
numOfRes = taosTimeCountIntervalForFill(lastKey, pFillInfo->currentKey, pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision, pFillInfo->order);
ASSERT(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->currentKey && FILL_IS_ASC_FILL(pFillInfo)) ||

@@ -588,9 +587,8 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma
return 0;
}

numOfRes = taosTimeCountInterval(ekey1, pFillInfo->currentKey, pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);
numOfRes += 1;
numOfRes = taosTimeCountIntervalForFill(ekey1, pFillInfo->currentKey, pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision, pFillInfo->order);
}

return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
@@ -904,6 +904,7 @@ static int32_t getPageBufIncForRow(SSDataBlock* blk, int32_t row, int32_t rowIdx
}

static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockOrderInfo* order, SArray* aExtSrc) {
int32_t code = TSDB_CODE_SUCCESS;
int pgHeaderSz = sizeof(int32_t) + sizeof(int32_t) * taosArrayGetSize(pHandle->pDataBlock->pDataBlock);
int32_t rowCap = blockDataGetCapacityInRow(pHandle->pDataBlock, pHandle->pageSize, pgHeaderSz);
blockDataEnsureCapacity(pHandle->pDataBlock, rowCap);

@@ -930,7 +931,13 @@ static int32_t sortBlocksToExtSource(SSortHandle* pHandle, SArray* aBlk, SBlockO
SArray* aPgId = taosArrayInit(8, sizeof(int32_t));

SMultiwayMergeTreeInfo* pTree = NULL;
tMergeTreeCreate(&pTree, taosArrayGetSize(aBlk), &sup, blockCompareTsFn);
code = tMergeTreeCreate(&pTree, taosArrayGetSize(aBlk), &sup, blockCompareTsFn);
if (TSDB_CODE_SUCCESS != code) {
taosMemoryFree(sup.aRowIdx);
taosMemoryFree(sup.aTs);

return code;
}
int32_t nRows = 0;
int32_t nMergedRows = 0;
bool mergeLimitReached = false;

@@ -1054,7 +1061,14 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) {
tSimpleHashClear(mUidBlk);

int64_t p = taosGetTimestampUs();
sortBlocksToExtSource(pHandle, aBlkSort, pOrder, aExtSrc);
code = sortBlocksToExtSource(pHandle, aBlkSort, pOrder, aExtSrc);
if (code != TSDB_CODE_SUCCESS) {
tSimpleHashCleanup(mUidBlk);
taosArrayDestroy(aBlkSort);
taosArrayDestroy(aExtSrc);
return code;
}

int64_t el = taosGetTimestampUs() - p;
pHandle->sortElapsed += el;
@@ -1839,10 +1839,6 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}

if (TSDB_DATA_TYPE_VARBINARY == ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}

pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT};
return TSDB_CODE_SUCCESS;
}
@@ -69,7 +69,7 @@ const char *udfdCPluginUdfInitLoadInitDestoryFuncs(SUdfCPluginCtx *udfCtx, const

void udfdCPluginUdfInitLoadAggFuncs(SUdfCPluginCtx *udfCtx, const char *udfName) {
char processFuncName[TSDB_FUNC_NAME_LEN] = {0};
strncpy(processFuncName, udfName, sizeof(processFuncName));
snprintf(processFuncName, sizeof(processFuncName), "%s", udfName);
uv_dlsym(&udfCtx->lib, processFuncName, (void **)(&udfCtx->aggProcFunc));

char startFuncName[TSDB_FUNC_NAME_LEN + 7] = {0};

@@ -103,7 +103,7 @@ int32_t udfdCPluginUdfInit(SScriptUdfInfo *udf, void **pUdfCtx) {

if (udf->funcType == UDF_FUNC_TYPE_SCALAR) {
char processFuncName[TSDB_FUNC_NAME_LEN] = {0};
strncpy(processFuncName, udfName, sizeof(processFuncName));
snprintf(processFuncName, sizeof(processFuncName), "%s", udfName);
uv_dlsym(&udfCtx->lib, processFuncName, (void **)(&udfCtx->scalarProcFunc));
} else if (udf->funcType == UDF_FUNC_TYPE_AGG) {
udfdCPluginUdfInitLoadAggFuncs(udfCtx, udfName);
@@ -2902,7 +2902,7 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
taosCreateMD5Hash(buf, len);
strncpy(pFunc->node.aliasName, buf, TSDB_COL_NAME_LEN - 1);
len = snprintf(buf, sizeof(buf) - 1, "%s(%s)", pSrcFunc->functionName, pCol->colName);
taosCreateMD5Hash(buf, len);
// note: userAlias could be truncated here
strncpy(pFunc->node.userAlias, buf, TSDB_COL_NAME_LEN - 1);
}
} else {

@@ -2910,7 +2910,7 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
taosCreateMD5Hash(buf, len);
strncpy(pFunc->node.aliasName, buf, TSDB_COL_NAME_LEN - 1);
len = snprintf(buf, sizeof(buf) - 1, "%s(%s)", pSrcFunc->functionName, pExpr->userAlias);
taosCreateMD5Hash(buf, len);
// note: userAlias could be truncated here
strncpy(pFunc->node.userAlias, buf, TSDB_COL_NAME_LEN - 1);
}
@@ -46,8 +46,8 @@ static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol, bool isPartit
pCol->colType = COLUMN_TYPE_TBNAME;
SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 0);
if (pVal) {
strcpy(pCol->tableName, pVal->literal);
strcpy(pCol->tableAlias, pVal->literal);
snprintf(pCol->tableName, sizeof(pCol->tableName), "%s", pVal->literal);
snprintf(pCol->tableAlias, sizeof(pCol->tableAlias), "%s", pVal->literal);
}
break;
case FUNCTION_TYPE_WSTART:

@@ -531,6 +531,9 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = nodesListStrictAppend(pJoin->node.pChildren, (SNode*)pLeft);
}
if (TSDB_CODE_SUCCESS != code) {
pLeft = NULL;
}
}

SLogicNode* pRight = NULL;

@@ -584,7 +587,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
}
}

if (NULL == pJoin->node.pTargets) {
if (NULL == pJoin->node.pTargets && NULL != pLeft) {
pJoin->node.pTargets = nodesCloneList(pLeft->pTargets);
if (NULL == pJoin->node.pTargets) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -2200,10 +2200,15 @@ int tdbBtcDelete(SBTC *pBtc) {
tdbOsFree(pCell);

if (pPage->nOverflow > 0) {
tdbDebug("tdb/btc-delete: btree balance after update cell, pPage/nOverflow: %p/%d.", pPage,
pPage->nOverflow);
tdbDebug("tdb/btc-delete: btree balance after update cell, pPage/nOverflow/pgno: %p/%d/%" PRIu32 ".", pPage,
pPage->nOverflow, TDB_PAGE_PGNO(pPage));

pBtc->iPage = iPage;
tdbPagerReturnPage(pBtc->pBt->pPager, pBtc->pPage, pBtc->pTxn);
while (--pBtc->iPage != iPage) {
tdbPagerReturnPage(pBtc->pBt->pPager, pBtc->pgStack[pBtc->iPage], pBtc->pTxn);
}

// pBtc->iPage = iPage;
pBtc->pPage = pPage;
ret = tdbBtreeBalance(pBtc);
if (ret < 0) {
@@ -295,7 +295,7 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn)
}

// 1. pPage == NULL
// 2. pPage && pPage->isLocal == 0 && !TDB_TXN_IS_WRITE(pTxn)
// 2. pPage && !pPage->isLocal == 0 && !TDB_TXN_IS_WRITE(pTxn)
pPageH = pPage;
pPage = NULL;
@@ -6,6 +6,21 @@
,,y,unit-test,bash test.sh

#system test
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/scalar_function.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_session.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_state_window.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_state_window.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_session.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/at_once_interval_ext.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/max_delay_interval_ext.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/window_close_session_ext.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/partition_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 8-stream/pause_resume_test.py

,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stbJoin.py -Q 3

@@ -24,7 +39,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/columnLenUpdated.py -Q 4

@@ -44,6 +59,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/ins_topics_test.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqMaxTopic.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqParamsTest.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqClientConsLog.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqMaxGroupIds.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqConsumeDiscontinuousData.py
@@ -81,6 +81,11 @@ pip3 list|grep taospy
pip3 uninstall taospy -y
pip3 install --default-timeout=120 taospy==2.7.10

#define taos-ws-py 0.2.8
pip3 list|grep taos-ws-py
pip3 uninstall taos-ws-py -y
pip3 install --default-timeout=120 taos-ws-py==0.2.8

$TIMEOUT_CMD $cmd
RET=$?
echo "cmd exit code: $RET"

(File diff suppressed because it is too large.)
@@ -668,7 +668,7 @@ class TDDnodes:
self.testCluster = False
self.valgrind = 0
self.asan = False
self.killValgrind = 1
self.killValgrind = 0

def init(self, path, remoteIP = ""):
binPath = self.dnodes[0].getPath() + "/../../../"

@@ -775,9 +775,41 @@ class TDDnodes:
tdLog.info("execute finished")
return

def killProcesser(self, processerName):
if platform.system().lower() == 'windows':
killCmd = ("wmic process where name=\"%s.exe\" call terminate > NUL 2>&1" % processerName)
psCmd = ("wmic process where name=\"%s.exe\" | findstr \"%s.exe\"" % (processerName, processerName))
else:
killCmd = (
"ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1"
% processerName
)
psCmd = ("ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % processerName)

processID = ""

try:
processID = subprocess.check_output(psCmd, shell=True)
while processID:
os.system(killCmd)
time.sleep(1)
try:
processID = subprocess.check_output(psCmd, shell=True)
except Exception as err:
processID = ""
tdLog.debug(f'**** kill pid warn: {err}')
except Exception as err:
processID = ""
tdLog.debug(f'**** find pid warn: {err}')


def stopAll(self):
tdLog.info("stop all dnodes, asan:%d" % self.asan)
distro_id = distro.id()
if platform.system().lower() != 'windows':
distro_id = distro.id()
else:
distro_id = "not alpine"
if self.asan and distro_id != "alpine":
tdLog.info("execute script: %s" % self.stopDnodesPath)
os.system(self.stopDnodesPath)

@@ -792,7 +824,6 @@ class TDDnodes:

if (distro_id == "alpine"):
print(distro_id)
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):

@@ -803,36 +834,9 @@ class TDDnodes:
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()
elif platform.system().lower() == 'windows':
psCmd = "for /f %a in ('wmic process where \"name='taosd.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):
print(f"pid of taosd.exe:{processID}")
killCmd = "kill -9 %s > nul 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()

psCmd = "for /f %a in ('wmic process where \"name='tmq_sim.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):
print(f"pid of tmq_sim.exe:{processID}")
killCmd = "kill -9 %s > nul 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()

psCmd = "for /f %a in ('wmic process where \"name='taosBenchmark.exe'\" get processId ^| xargs echo ^| awk '{print $2}' ^&^& echo aa') do @(ps | grep %a | awk '{print $1}' | xargs)"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):
print(f"pid of taosBenchmark.exe:{processID}")
killCmd = "kill -9 %s > nul 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()

self.killProcesser("taosd")
self.killProcesser("tmq_sim")
self.killProcesser("taosBenchmark")
else:
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()

@@ -849,7 +853,6 @@ class TDDnodes:
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8").strip()

if self.killValgrind == 1:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
@ -111,7 +111,7 @@ class TDSql:
|
|||
return self.error_info
|
||||
|
||||
|
||||
def query(self, sql, row_tag=None,queryTimes=10):
|
||||
def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None):
|
||||
self.sql = sql
|
||||
i=1
|
||||
while i <= queryTimes:
|
||||
|
@ -120,6 +120,17 @@ class TDSql:
|
|||
self.queryResult = self.cursor.fetchall()
|
||||
self.queryRows = len(self.queryResult)
|
||||
self.queryCols = len(self.cursor.description)
|
||||
|
||||
if count_expected_res is not None:
|
||||
counter = 0
|
||||
while count_expected_res != self.queryResult[0][0]:
|
||||
self.cursor.execute(sql)
|
||||
self.queryResult = self.cursor.fetchall()
|
||||
if counter < queryTimes:
|
||||
counter += 0.5
|
||||
time.sleep(0.5)
|
||||
else:
|
||||
return False
|
||||
if row_tag:
|
||||
return self.queryResult
|
||||
return self.queryRows
|
||||
|
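The new optional count_expected_res argument turns query() into a polling call: the statement is re-executed every half second until the first cell of the first row equals the expected value, giving up after roughly queryTimes seconds. A hedged usage sketch (the table name and expected count are illustrative):
```python
# illustrative usage of the new parameter; table and count are made up for the example
tdSql.execute("insert into db.ct1 values (now, 1)")
# re-run the count until the freshly written row becomes visible, or time out
tdSql.query("select count(*) from db.ct1", count_expected_res=1)
tdSql.checkData(0, 0, 1)
```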
@@ -501,7 +512,8 @@ class TDSql:
|
|||
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||
# tdLog.info("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||
raise Exception("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
|
||||
|
||||
def checkNotEqual(self, elm, expect_elm):
|
||||
if elm != expect_elm:
|
||||
|
@@ -509,7 +521,8 @@ class TDSql:
|
|||
else:
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
|
||||
tdLog.exit("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
|
||||
tdLog.info("%s(%d) failed: sql:%s, elm:%s == expect_elm:%s" % args)
|
||||
raise Exception
|
||||
|
||||
def get_times(self, time_str, precision="ms"):
|
||||
caller = inspect.getframeinfo(inspect.stack()[1][0])
|
||||
|
|
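checkEqual() and checkNotEqual() now log the mismatch and raise instead of terminating the process through tdLog.exit, so a caller can trap a failed comparison. A minimal sketch of what that enables (the retry loop is illustrative, not taken from the suite):
```python
import time

# illustrative only: because the check raises rather than exiting, a case can
# catch the failure, wait a moment, and re-check a result that may simply be late
for _ in range(3):
    try:
        tdSql.query("select count(*) from db.ct1")
        tdSql.checkEqual(tdSql.queryResult[0][0], 10)
        break
    except Exception:
        time.sleep(1)
```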
|
@@ -0,0 +1,20 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
||||
sql create database test
|
||||
sql use test
|
||||
sql CREATE TABLE `tb` (`ts` TIMESTAMP, `c0` INT, `c1` FLOAT, `c2` BINARY(10))
|
||||
|
||||
|
||||
sql insert into tb values("2022-05-15 00:01:08.000", 1, 1.0, "abc")
|
||||
sql insert into tb values("2022-05-16 00:01:08.000", 2, 2.0, "bcd")
|
||||
sql insert into tb values("2022-05-17 00:01:08.000", 3, 3.0, "cde")
|
||||
|
||||
|
||||
#sleep 10000000
|
||||
system taos -P7100 -s 'source tsim/query/t/multires_func.sql' | grep -v 'Query OK' | grep -v 'Client Version' > /tmp/multires_func.result
|
||||
system echo ----------------------diff start-----------------------
|
||||
system git diff --exit-code --color tsim/query/r/multires_func.result /tmp/multires_func.result
|
||||
system echo ----------------------diff succeed-----------------------
|
|
@@ -0,0 +1,31 @@
|
|||
Copyright (c) 2022 by TDengine, all rights reserved.
|
||||
|
||||
taos> source tsim/query/t/multires_func.sql
|
||||
taos> use test;
|
||||
Database changed.
|
||||
|
||||
taos> select count(*) from tb\G;
|
||||
*************************** 1.row ***************************
|
||||
count(*): 3
|
||||
|
||||
taos> select last(*) from tb\G;
|
||||
*************************** 1.row ***************************
|
||||
ts: 2022-05-17 00:01:08.000
|
||||
c0: 3
|
||||
c1: 3.0000000
|
||||
c2: cde
|
||||
|
||||
taos> select last_row(*) from tb\G;
|
||||
*************************** 1.row ***************************
|
||||
ts: 2022-05-17 00:01:08.000
|
||||
c0: 3
|
||||
c1: 3.0000000
|
||||
c2: cde
|
||||
|
||||
taos> select first(*) from tb\G;
|
||||
*************************** 1.row ***************************
|
||||
ts: 2022-05-15 00:01:08.000
|
||||
c0: 1
|
||||
c1: 1.0000000
|
||||
c2: abc
|
||||
|
|
@@ -0,0 +1,5 @@
|
|||
use test;
|
||||
select count(*) from tb\G;
|
||||
select last(*) from tb\G;
|
||||
select last_row(*) from tb\G;
|
||||
select first(*) from tb\G;
|
|
@@ -198,7 +198,7 @@ class TDTestCase:
|
|||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
seed = time.clock_gettime(time.CLOCK_REALTIME)
|
||||
seed = time.time() % 10000
|
||||
random.seed(seed)
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
|
|
|
@@ -210,7 +210,7 @@ class TDTestCase:
|
|||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
seed = time.clock_gettime(time.CLOCK_REALTIME)
|
||||
seed = time.time() % 10000
|
||||
random.seed(seed)
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
|
|
|
@@ -220,7 +220,7 @@ class TDTestCase:
|
|||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
seed = time.clock_gettime(time.CLOCK_REALTIME)
|
||||
seed = time.time() % 10000
|
||||
random.seed(seed)
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
|
|
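Each of these init() hunks swaps time.clock_gettime(time.CLOCK_REALTIME) for time.time() % 10000 when seeding the RNG, presumably because clock_gettime is only exposed on Unix-like Python builds while time.time() works everywhere; the modulo merely keeps the seed small. A quick sketch of the portable form:
```python
import random
import time

# portable on Linux, macOS and Windows
seed = time.time() % 10000
random.seed(seed)

# the replaced call is Unix-only:
# seed = time.clock_gettime(time.CLOCK_REALTIME)
```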
|
@@ -137,6 +137,12 @@ class TDTestCase:
|
|||
sql = "select _wstart, _wend, count(ts), sum(c1) from meters where ts > '2018-11-25 00:00:00.000' and ts < '2018-11-26 00:00:00.00' interval(1d) fill(NULL) order by _wstart desc"
|
||||
tdSql.query(sql)
|
||||
tdSql.checkRows(1)
|
||||
sql = "select _wstart, count(*) from meters where ts > '2018-08-20 00:00:00.000' and ts < '2018-09-30 00:00:00.000' interval(9d) fill(NULL) order by _wstart desc;"
|
||||
tdSql.query(sql)
|
||||
tdSql.checkRows(6)
|
||||
sql = "select _wstart, count(*) from meters where ts > '2018-08-20 00:00:00.000' and ts < '2018-09-30 00:00:00.000' interval(9d) fill(NULL) order by _wstart;"
|
||||
tdSql.query(sql)
|
||||
tdSql.checkRows(6)
|
||||
|
||||
def run(self):
|
||||
self.prepareTestEnv()
|
||||
|
|
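The added assertions exercise FILL(NULL) over interval windows in both sort orders: with a FILL clause, one row is produced for every window inside the WHERE time range even when the window holds no data, so the ascending and descending variants must report the same row count. An illustrative form of the first check:
```sql
-- one 1-day window lies inside the range, so exactly one row comes back even
-- though no rows of meters fall in it; the aggregates are filled with NULL
SELECT _wstart, _wend, COUNT(ts), SUM(c1)
FROM meters
WHERE ts > '2018-11-25 00:00:00.000' AND ts < '2018-11-26 00:00:00.000'
INTERVAL(1d) FILL(NULL)
ORDER BY _wstart DESC;
```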
|
@@ -251,10 +251,19 @@ class TDTestCase:
|
|||
tdSql.checkData(2, 4, 9)
|
||||
tdSql.checkData(3, 4, 9)
|
||||
|
||||
def test_partition_by_limit_no_agg(self):
|
||||
sql_template = 'select t1 from meters partition by t1 limit %d'
|
||||
|
||||
for i in range(1, 5000, 1000):
|
||||
tdSql.query(sql_template % i)
|
||||
tdSql.checkRows(5 * i)
|
||||
|
||||
|
||||
def run(self):
|
||||
self.prepareTestEnv()
|
||||
self.test_interval_limit_offset()
|
||||
self.test_interval_partition_by_slimit_limit()
|
||||
self.test_partition_by_limit_no_agg()
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
|
|
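test_partition_by_limit_no_agg checks that LIMIT on a plain projection is applied per partition: with the test data assumed to hold five distinct t1 values, limit i yields 5 * i rows in total. Illustrative query under that assumption:
```sql
-- assuming five distinct values of tag t1 in the test data set,
-- LIMIT applies inside each partition: 5 * 1000 = 5000 rows are returned
SELECT t1 FROM meters PARTITION BY t1 LIMIT 1000;
```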
|
@@ -220,7 +220,7 @@ class TDTestCase:
|
|||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
seed = time.clock_gettime(time.CLOCK_REALTIME)
|
||||
seed = time.time() % 10000
|
||||
random.seed(seed)
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
|
|
|
@@ -269,7 +269,7 @@ class TDTestCase:
|
|||
|
||||
# init
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
seed = time.clock_gettime(time.CLOCK_REALTIME)
|
||||
seed = time.time() % 10000
|
||||
random.seed(seed)
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
|
|
|
@@ -233,7 +233,7 @@ class TMQCom:
|
|||
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
|
||||
for i in range(ctbNum):
|
||||
rowsBatched = 0
|
||||
sql += " %s%d values "%(stbName,i)
|
||||
sql += " %s.%s%d values "%(dbName, stbName, i)
|
||||
for j in range(rowsPerTbl):
|
||||
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
|
||||
rowsBatched += 1
|
||||
|
@@ -241,7 +241,7 @@ class TMQCom:
|
|||
tsql.execute(sql)
|
||||
rowsBatched = 0
|
||||
if j < rowsPerTbl - 1:
|
||||
sql = "insert into %s%d values " %(stbName,i)
|
||||
sql = "insert into %s.%s%d values " %(dbName, stbName,i)
|
||||
else:
|
||||
sql = "insert into "
|
||||
#end sql
|
||||
|
@@ -263,7 +263,7 @@ class TMQCom:
|
|||
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
|
||||
for i in range(ctbNum):
|
||||
rowsBatched = 0
|
||||
sql += " %s%d values "%(ctbPrefix,i)
|
||||
sql += " %s.%s%d values "%(dbName, ctbPrefix,i)
|
||||
for j in range(rowsPerTbl):
|
||||
if (j % 2 == 0):
|
||||
sql += "(%d, %d, %d, 'tmqrow_%d') "%(startTs + j, j, j, j)
|
||||
|
@@ -274,7 +274,7 @@ class TMQCom:
|
|||
tsql.execute(sql)
|
||||
rowsBatched = 0
|
||||
if j < rowsPerTbl - 1:
|
||||
sql = "insert into %s%d values " %(ctbPrefix,i)
|
||||
sql = "insert into %s.%s%d values " %(dbName, ctbPrefix, i)
|
||||
else:
|
||||
sql = "insert into "
|
||||
#end sql
|
||||
|
@@ -296,7 +296,7 @@ class TMQCom:
|
|||
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
|
||||
for i in range(ctbNum):
|
||||
rowsBatched = 0
|
||||
sql += " %s%d values "%(ctbPrefix,i+ctbStartIdx)
|
||||
sql += " %s.%s%d values "%(dbName, ctbPrefix, i+ctbStartIdx)
|
||||
for j in range(rowsPerTbl):
|
||||
if (j % 2 == 0):
|
||||
sql += "(%d, %d, %d, 'tmqrow_%d', now) "%(startTs + j, j, j, j)
|
||||
|
@@ -307,7 +307,7 @@ class TMQCom:
|
|||
tsql.execute(sql)
|
||||
rowsBatched = 0
|
||||
if j < rowsPerTbl - 1:
|
||||
sql = "insert into %s%d values " %(ctbPrefix,i+ctbStartIdx)
|
||||
sql = "insert into %s.%s%d values " %(dbName, ctbPrefix, i+ctbStartIdx)
|
||||
else:
|
||||
sql = "insert into "
|
||||
#end sql
|
||||
|
|
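Each of these TMQCom hunks switches the generated INSERT targets from bare child-table names to database-qualified names (%s.%s%d), so the batches no longer depend on which database the shared connection currently has selected. The resulting statement shape, with an illustrative table name, timestamp and values:
```sql
-- before: relies on a prior "USE dbt" on the connection
INSERT INTO stb0 VALUES (1672531200000, 0, 'tmqrow_0');

-- after: unambiguous regardless of the current database
INSERT INTO dbt.stb0 VALUES (1672531200000, 0, 'tmqrow_0');
```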
|
@@ -0,0 +1,220 @@
|
|||
import sys
|
||||
import threading
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.tdCom = tdCom
|
||||
|
||||
def at_once_interval(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, case_when=None):
|
||||
tdLog.info(f"*** testing stream at_once+interval: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, fill: {fill_value}, delete: {delete}, case_when: {case_when} ***")
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
if partition == "tbname":
|
||||
if case_when:
|
||||
stream_case_when_partition = case_when
|
||||
else:
|
||||
stream_case_when_partition = self.tdCom.partition_tbname_alias
|
||||
|
||||
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||
elif partition == "c1":
|
||||
if case_when:
|
||||
stream_case_when_partition = case_when
|
||||
else:
|
||||
stream_case_when_partition = self.tdCom.partition_col_alias
|
||||
partition_elm_alias = self.tdCom.partition_col_alias
|
||||
elif partition == "abs(c1)":
|
||||
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||
elif partition is None:
|
||||
partition_elm_alias = '"no_partition"'
|
||||
else:
|
||||
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||
if partition == "tbname" or partition is None:
|
||||
if case_when:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
if partition:
|
||||
partition_elm = f'partition by {partition} {partition_elm_alias}'
|
||||
else:
|
||||
partition_elm = ""
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11'
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_value=fill_value, fill_history_value=fill_history_value)
|
||||
start_time = self.tdCom.date_time
|
||||
for i in range(self.tdCom.range_count):
|
||||
ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
|
||||
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value)
|
||||
if i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.tdCom.ctb_name, ts_value=ts_value)
|
||||
if self.delete and i%2 != 0:
|
||||
self.tdCom.sdelete_rows(tbname=self.tdCom.ctb_name, start_ts=ts_cast_delete_value)
|
||||
self.tdCom.date_time += 1
|
||||
self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value)
|
||||
if i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.tdCom.tb_name, ts_value=ts_value)
|
||||
if self.delete and i%2 != 0:
|
||||
self.tdCom.sdelete_rows(tbname=self.tdCom.tb_name, start_ts=ts_cast_delete_value)
|
||||
self.tdCom.date_time += 1
|
||||
if partition:
|
||||
partition_elm = f'partition by {partition}'
|
||||
else:
|
||||
partition_elm = ""
|
||||
|
||||
if not fill_value:
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
|
||||
|
||||
if self.tdCom.subtable:
|
||||
for tname in [self.stb_name, self.ctb_name]:
|
||||
tdSql.query(f'select * from {self.ctb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition is None:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
tdSql.query(f'select * from {self.tb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition is None:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
if fill_value:
|
||||
end_date_time = self.tdCom.date_time
|
||||
final_range_count = self.tdCom.range_count
|
||||
history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
|
||||
start_ts = self.tdCom.time_cast(history_ts, "-")
|
||||
future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
|
||||
end_ts = self.tdCom.time_cast(future_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
|
||||
self.tdCom.date_time = start_time
|
||||
# update
|
||||
history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
|
||||
start_ts = self.tdCom.time_cast(history_ts, "-")
|
||||
future_ts = str(end_date_time)+f'+{self.tdCom.dataDict["interval"]*(final_range_count+2)}s'
|
||||
end_ts = self.tdCom.time_cast(future_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
|
||||
self.tdCom.date_time = start_time
|
||||
for i in range(self.tdCom.range_count):
|
||||
ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
|
||||
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
self.tdCom.date_time += 1
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
self.tdCom.date_time += 1
|
||||
if self.delete:
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value)
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=ts_cast_delete_value)
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
if partition == "tbname":
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
|
||||
else:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,6,7,8,9,10,11'
|
||||
if partition == "tbname":
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} where `min(c1)` is not Null order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
|
||||
|
||||
if self.delete:
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=start_ts, end_ts=ts_cast_delete_value)
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=start_ts, end_ts=ts_cast_delete_value)
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
if partition == "tbname":
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
|
||||
|
||||
else:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,6,7,8,9,10,11'
|
||||
if partition == "tbname":
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts.replace("-", "+")} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart,`min(c1)`', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null order by wstart,`min(c1)`', fill_value=fill_value)
|
||||
|
||||
|
||||
def run(self):
|
||||
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", delete=True)
|
||||
self.at_once_interval(interval=random.randint(10, 15), partition="c1", delete=True)
|
||||
self.at_once_interval(interval=random.randint(10, 15), partition="abs(c1)", delete=True)
|
||||
self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True)
|
||||
self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
|
||||
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL")
|
||||
# for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
|
||||
for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
|
||||
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value)
|
||||
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
event = threading.Event()
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
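This new at-once interval case drives tdCom.create_stream with trigger_mode="at_once", optional FILL and FILL_HISTORY settings and a computed SUBTABLE expression, then compares the stream's destination table against the equivalent batch query. The exact statement is assembled inside the helper; a hedged sketch of the shape it presumably builds (all names and the 10s interval are illustrative):
```sql
-- a sketch only; the real statement comes from tdCom.create_stream
CREATE STREAM stb_stream
  TRIGGER AT_ONCE FILL_HISTORY 1
  INTO stb_output_stb
  SUBTABLE(CONCAT('stb_prefix_', tname))
  AS SELECT _wstart AS wstart, COUNT(*), MIN(c1), MAX(c2)
     FROM test.stb
     PARTITION BY tbname tname
     INTERVAL(10s);
```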
@@ -0,0 +1,209 @@
|
|||
import sys
|
||||
import threading
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.tdCom = tdCom
|
||||
|
||||
def at_once_interval_ext(self, interval, partition="tbname", delete=False, fill_value=None, fill_history_value=None, subtable=None, case_when=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, use_except=False):
|
||||
tdLog.info(f"*** testing stream at_once+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, fill_history: {fill_history_value}, delete: {delete}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***")
|
||||
if use_except:
|
||||
if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm or len(stb_field_name_value.split(",")) == len(self.tdCom.partitial_stb_filter_des_select_elm.split(",")):
|
||||
partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
|
||||
else:
|
||||
partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
|
||||
else:
|
||||
if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
|
||||
partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
|
||||
else:
|
||||
partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
|
||||
|
||||
if stb_field_name_value is not None:
|
||||
if len(stb_field_name_value) == 0:
|
||||
stb_field_name_value = ",".join(self.tdCom.tb_filter_des_select_elm.split(",")[:5])
|
||||
# else:
|
||||
# stb_field_name_value = self.tdCom.tb_filter_des_select_elm
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
defined_tag_count = len(tag_value.split()) if tag_value is not None else 0
|
||||
self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value, ext_stb=use_exist_stb)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
|
||||
if partition == "tbname":
|
||||
if case_when:
|
||||
stream_case_when_partition = case_when
|
||||
else:
|
||||
stream_case_when_partition = self.tdCom.partition_tbname_alias
|
||||
|
||||
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||
elif partition == "c1":
|
||||
if case_when:
|
||||
stream_case_when_partition = case_when
|
||||
else:
|
||||
stream_case_when_partition = self.tdCom.partition_col_alias
|
||||
partition_elm_alias = self.tdCom.partition_col_alias
|
||||
elif partition == "abs(c1)":
|
||||
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||
elif partition == "tbname,t1,c1":
|
||||
partition_elm_alias = f'{self.tdCom.partition_tbname_alias},t1,c1'
|
||||
else:
|
||||
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||
if subtable:
|
||||
if partition == "tbname":
|
||||
if case_when:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
if subtable == "constant":
|
||||
# stb_subtable_value = f'"{self.tdCom.ext_ctb_stream_des_table}"'
|
||||
stb_subtable_value = f'"constant_{self.tdCom.ext_ctb_stream_des_table}"'
|
||||
else:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(cast({subtable} as int unsigned) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
stb_subtable_value = None
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
# self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.ext_tb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
|
||||
if partition:
|
||||
stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except)
|
||||
else:
|
||||
stream_sql = self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb, use_except=use_except)
|
||||
if stream_sql:
|
||||
tdSql.error(stream_sql)
|
||||
return
|
||||
start_time = self.tdCom.date_time
|
||||
if subtable == "constant":
|
||||
range_count = 1
|
||||
else:
|
||||
range_count = self.tdCom.range_count
|
||||
|
||||
for i in range(range_count):
|
||||
latency = 0
|
||||
tag_value_list = list()
|
||||
ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
|
||||
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
if self.delete and i%2 != 0:
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value)
|
||||
self.tdCom.date_time += 1
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
if self.delete and i%2 != 0:
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value)
|
||||
self.tdCom.date_time += 1
|
||||
if tag_value:
|
||||
if subtable == "constant":
|
||||
tdSql.query(f'select {tag_value} from constant_{self.tdCom.ext_ctb_stream_des_table}')
|
||||
else:
|
||||
tdSql.query(f'select {tag_value} from {self.stb_name}')
|
||||
tag_value_list = tdSql.queryResult
|
||||
if not fill_value:
|
||||
if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm } from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
|
||||
elif stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.partitial_stb_filter_des_select_elm } from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, cast(max(c2) as tinyint), cast(min(c1) as smallint) from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True)
|
||||
else:
|
||||
if partition:
|
||||
if tag_value == self.tdCom.exchange_tag_filter_des_select_elm:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.partitial_tag_stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list)
|
||||
elif tag_value == self.tdCom.cast_tag_filter_des_select_elm:
|
||||
tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart')
|
||||
limit_row = tdSql.queryRows
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.cast_tag_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select cast(t1 as TINYINT UNSIGNED),cast(t2 as varchar(256)),cast(t3 as bool) from {self.stb_name} order by ts limit {limit_row}')
|
||||
tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};')
|
||||
while list(set(tdSql.queryResult)) != [(None, None, None, None, None, None, None, None, None, None)]:
|
||||
tdSql.query(f'select t1,t2,t3,t4,t6,t7,t8,t9,t10,t12 from ext_{self.stb_name}{self.tdCom.des_table_suffix};')
|
||||
if latency < self.tdCom.default_interval:
|
||||
latency += 1
|
||||
time.sleep(1)
|
||||
else:
|
||||
return False
|
||||
tdSql.checkEqual(list(set(tdSql.queryResult)), [(None, None, None, None, None, None, None, None, None, None)])
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list)
|
||||
else:
|
||||
if use_exist_stb and not tag_value:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, use_exist_stb=use_exist_stb)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s) order by wstart', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition, subtable=subtable)
|
||||
|
||||
if subtable:
|
||||
for tname in [self.stb_name]:
|
||||
tdSql.query(f'select * from {self.ctb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
else:
|
||||
tdSql.query(f'select cast(cast(cast({c1_value[1]} as int unsigned) as bigint) as varchar(100))')
|
||||
subtable_value = tdSql.queryResult[0][0]
|
||||
if subtable == "constant":
|
||||
return
|
||||
else:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}`;')
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
def run(self):
|
||||
# self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition=None, subtable="constant", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
|
||||
for delete in [True, False]:
|
||||
for fill_history_value in [0, 1]:
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.partitial_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value=self.tdCom.exchange_stb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)
|
||||
# self-defined tags
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.partitial_tag_filter_des_select_elm, use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=f'{self.tdCom.partitial_tag_filter_des_select_elm}', subtable=None, stb_field_name_value=None, tag_value=self.tdCom.exchange_tag_filter_des_select_elm, use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition="t1 as t5,t2 as t11,t3 as t13", subtable=None, stb_field_name_value=None, tag_value="t5,t11,t13", use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t1", use_exist_stb=True)
|
||||
# error cases
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value="", tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', stb_field_name_value=self.tdCom.tb_filter_des_select_elm.replace("c1","c19"), tag_value=self.tdCom.tag_filter_des_select_elm, use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="ttt", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=None, use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t15", use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm},c1', subtable="c1", stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="c5", use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="ts,c1", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), partition=f'tbname,{self.tdCom.tag_filter_des_select_elm.split(",")[0]},c1', subtable="c1", stb_field_name_value="c1,c2,c3", tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as t14", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,t13", use_exist_stb=True, use_except=True)
|
||||
self.at_once_interval_ext(interval=random.randint(10, 15), delete=False, fill_history_value=1, partition="t1 as t5,t2 as t11,t3 as c13", subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value="t5,t11,c13", use_exist_stb=True, use_except=True)
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
event = threading.Event()
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
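The third new case below exercises the same at-once trigger with SESSION windows instead of INTERVAL. For reference, a session window groups rows whose timestamps lie within the given tolerance of one another, with _wstart/_wend bounding each detected session; an illustrative query:
```sql
-- rows less than 10s apart fall into one session window
SELECT _wstart, _wend, COUNT(*), MIN(c1)
FROM test.ctb
SESSION(ts, 10s);
```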
@@ -0,0 +1,223 @@
|
|||
import sys
|
||||
import threading
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.tdCom = tdCom
|
||||
|
||||
def at_once_session(self, session, ignore_expired=None, ignore_update=None, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True):
|
||||
tdLog.info(f"*** testing stream at_once+interval: session: {session}, ignore_expired: {ignore_expired}, ignore_update: {ignore_update}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, case_when: {case_when}, subtable: {subtable} ***")
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(session=session, fill_history_value=fill_history_value)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
if partition == "tbname":
|
||||
if case_when:
|
||||
stream_case_when_partition = case_when
|
||||
else:
|
||||
stream_case_when_partition = self.tdCom.partition_tbname_alias
|
||||
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||
elif partition == "c1":
|
||||
partition_elm_alias = self.tdCom.partition_col_alias
|
||||
elif partition == "abs(c1)":
|
||||
if subtable:
|
||||
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||
else:
|
||||
partition_elm_alias = "constant"
|
||||
else:
|
||||
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||
if partition == "tbname" or subtable is None:
|
||||
if case_when:
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {stream_case_when_partition}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
if subtable:
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
if 'abs' in partition:
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
|
||||
else:
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
|
||||
|
||||
# create stb/ctb/tb stream
|
||||
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="at_once", ignore_expired=ignore_expired, ignore_update=ignore_update, subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
|
||||
for i in range(self.tdCom.range_count):
|
||||
ctb_name = self.tdCom.get_long_name()
|
||||
self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name)
|
||||
|
||||
if i == 0:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session)
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + 1
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, session=session)
|
||||
if i == 0:
|
||||
record_window_close_ts = window_close_ts
|
||||
for ts_value in [self.tdCom.date_time, window_close_ts]:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value, need_null=True)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value, need_null=True)
|
||||
if self.delete and i%2 != 0:
|
||||
dt = f'cast({self.tdCom.date_time-1} as timestamp)'
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt)
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt)
|
||||
ts_value += 1
|
||||
|
||||
# check result
|
||||
if partition != "tbname":
|
||||
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||
if "first" not in colname and "last" not in colname:
|
||||
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True)
|
||||
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;')
|
||||
else:
|
||||
for tbname in [self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
|
||||
|
||||
if self.tdCom.disorder:
|
||||
if ignore_expired:
|
||||
for tbname in [self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||
res2 = tdSql.queryResult
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s')
|
||||
tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||
res1 = tdSql.queryResult
|
||||
tdSql.checkNotEqual(res1, res2)
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||
res1 = tdSql.queryResult
|
||||
tdSql.checkEqual(res1, res2)
|
||||
else:
|
||||
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||
res2 = tdSql.queryResult
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=str(self.tdCom.date_time)+f'-{self.tdCom.default_interval*(self.tdCom.range_count+session)}s')
|
||||
tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||
res1 = tdSql.queryResult
|
||||
tdSql.checkNotEqual(res1, res2)
|
||||
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||
res1 = tdSql.queryResult
|
||||
tdSql.checkEqual(res1, res2)
|
||||
else:
|
||||
if ignore_update:
|
||||
for tbname in [self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||
res2 = tdSql.queryResult
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
|
||||
tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||
res1 = tdSql.queryResult
|
||||
tdSql.checkNotEqual(res1, res2)
|
||||
else:
|
||||
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||
res2 = tdSql.queryResult
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts)
|
||||
tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||
res1 = tdSql.queryResult
|
||||
tdSql.checkNotEqual(res1, res2)
|
||||
else:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts)
|
||||
if partition != "tbname":
|
||||
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||
if "first" not in colname and "last" not in colname:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.ctb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.ctb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', sorted=True)
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {self.tb_stream_des_table} order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart, `min(c1)`,`max(c2)`,`sum(c3)`;')
|
||||
else:
|
||||
for tbname in [self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} session(ts, {self.tdCom.dataDict["session"]}s)', sorted=True)
|
||||
|
||||
if fill_history_value:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.record_history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.record_history_ts)
|
||||
if self.delete:
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-"))
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(self.tdCom.record_history_ts, "-"))
|
||||
|
||||
if self.tdCom.subtable:
|
||||
tdSql.query(f'select * from {self.ctb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if c1_value[1] is not None:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
if subtable:
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
else:
|
||||
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
tdSql.query(f'select * from {self.tb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if c1_value[1] is not None:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
if subtable:
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
else:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)


    def run(self):
        self.at_once_session(session=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, delete=True, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
        for subtable in [None, True]:
            self.at_once_session(session=random.randint(10, 15), subtable=subtable, partition="abs(c1)")
        for ignore_expired in [None, 0, 1]:
            for fill_history_value in [None, 1]:
                self.at_once_session(session=random.randint(10, 15), ignore_expired=ignore_expired, fill_history_value=fill_history_value)
        for fill_history_value in [None, 1]:
            self.at_once_session(session=random.randint(10, 15), partition="tbname", delete=True, fill_history_value=fill_history_value)
            self.at_once_session(session=random.randint(10, 15), partition="c1", delete=True, fill_history_value=fill_history_value)
            self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, fill_history_value=fill_history_value)
            self.at_once_session(session=random.randint(10, 15), partition="abs(c1)", delete=True, subtable=None, fill_history_value=fill_history_value)
            self.at_once_session(session=random.randint(10, 15), ignore_update=1, fill_history_value=fill_history_value)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,144 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

def at_once_state_window(self, state_window, partition="tbname", delete=False, fill_history_value=None, case_when=None, subtable=True):
|
||||
tdLog.info(f"*** testing stream at_once+interval: state_window: {state_window}, partition: {partition}, fill_history: {fill_history_value}, case_when: {case_when}***, delete: {delete}")
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(state_window=state_window, fill_history_value=fill_history_value)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
|
||||
if partition == "tbname":
|
||||
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||
elif partition == "c1" and subtable is not None:
|
||||
partition_elm_alias = self.tdCom.partition_col_alias
|
||||
elif partition == "c1" and subtable is None:
|
||||
partition_elm_alias = 'constant'
|
||||
elif partition == "abs(c1)":
|
||||
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||
else:
|
||||
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||
if partition == "tbname" or subtable is None:
|
||||
if partition == "tbname":
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", "{partition_elm_alias}"), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
if 'abs' in partition:
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
|
||||
else:
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast({partition_elm_alias} as bigint) as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
|
||||
state_window_col_name = self.tdCom.dataDict["state_window"]
|
||||
if case_when:
|
||||
stream_state_window = case_when
|
||||
else:
|
||||
stream_state_window = state_window_col_name
|
||||
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} state_window({stream_state_window})', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
|
||||
range_times = self.tdCom.range_count
|
||||
state_window_max = self.tdCom.dataDict['state_window_max']
|
||||
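# Data generation: each outer pass picks one random value from the i-th slice of [0, state_window_max],
# then writes several consecutive timestamps carrying that same value so they land in a single state window;
# when update/delete modes are enabled, rows are re-inserted or the previous timestamp is deleted in between.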
for i in range(range_times):
|
||||
state_window_value = random.randint(int((i)*state_window_max/range_times), int((i+1)*state_window_max/range_times))
|
||||
for i in range(2, range_times+3):
|
||||
tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
|
||||
if self.delete and i%2 != 0:
|
||||
dt = f'cast({self.tdCom.date_time-1} as timestamp)'
|
||||
tdSql.execute(f'delete from {self.ctb_name} where ts = {dt}')
|
||||
tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
|
||||
if self.delete and i%2 != 0:
|
||||
tdSql.execute(f'delete from {self.tb_name} where ts = {dt}')
|
||||
self.tdCom.date_time += 1
|
||||
|
||||
# for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
for tbname in [self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} state_window({state_window_col_name}) order by wstart,{state_window}', sorted=True)
|
||||
|
||||
if fill_history_value:
|
||||
self.tdCom.update_delete_history_data(self.delete)
|
||||
|
||||
if self.tdCom.subtable:
|
||||
tdSql.query(f'select * from {self.ctb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
if subtable:
|
||||
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||
else:
|
||||
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
|
||||
return
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
tdSql.query(f'select * from {self.tb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
if subtable:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||
else:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
|
||||
return
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||


    def run(self):
        self.at_once_state_window(state_window="c2", partition="tbname", case_when="case when c1 < 0 then c1 else c2 end")
        self.at_once_state_window(state_window="c1", partition="tbname", case_when="case when c1 >= 0 then c1 else c2 end")
        for fill_history_value in [None, 1]:
            self.at_once_state_window(state_window="c1", partition="tbname", fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="c1", fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="abs(c1)", fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="tbname", delete=True, fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="c1", delete=True, fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="abs(c1)", delete=True, fill_history_value=fill_history_value)
            self.at_once_state_window(state_window="c1", partition="c1", subtable=None, fill_history_value=fill_history_value)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,161 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

def watermark_max_delay_interval(self, interval, max_delay, watermark=None, fill_value=None, delete=False):
|
||||
tdLog.info(f"*** testing stream max_delay+interval: interval: {interval}, watermark: {watermark}, fill_value: {fill_value}, delete: {delete} ***")
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
if watermark is not None:
|
||||
self.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(interval=interval, watermark=watermark)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tdCom.date_time = 1658921623245
|
||||
if watermark is not None:
|
||||
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||
fill_watermark_value = watermark_value
|
||||
else:
|
||||
watermark_value = None
|
||||
fill_watermark_value = "0s"
|
||||
|
||||
max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s'
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
# create stb/ctb/tb stream
|
||||
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value)
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11'
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value)
|
||||
init_num = 0
|
||||
start_time = self.tdCom.date_time
|
||||
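# Window-close driver: the first pass computes the expected close timestamp of the current interval
# (shifted by the watermark when one is set); later passes move date_time just past the previous close,
# then rows are written up to window_close_ts-1 and finally at window_close_ts to close the window.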
for i in range(self.tdCom.range_count):
|
||||
if i == 0:
|
||||
if watermark is not None:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||
else:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + self.tdCom.offset
|
||||
window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
|
||||
for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||
# if not fill_value:
|
||||
# for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
|
||||
# if tbname != self.tb_stream_des_table:
|
||||
# tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
|
||||
# else:
|
||||
# tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
|
||||
# tdSql.checkEqual(tdSql.queryRows, init_num)
|
||||
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
|
||||
if i == 0:
|
||||
init_num = 2 + i
|
||||
if watermark is not None:
|
||||
init_num += 1
|
||||
else:
|
||||
init_num += 1
|
||||
time.sleep(int(max_delay.replace("s", "")))
|
||||
if not fill_value:
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s)')
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s)')
|
||||
if fill_value:
|
||||
history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
|
||||
start_ts = self.tdCom.time_cast(history_ts, "-")
|
||||
future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
|
||||
end_ts = self.tdCom.time_cast(future_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
|
||||
future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts)
|
||||
if watermark is not None:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||
else:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'])
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
|
||||
if self.tdCom.update:
|
||||
for i in range(self.tdCom.range_count):
|
||||
if i == 0:
|
||||
if watermark is not None:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||
else:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + self.tdCom.offset
|
||||
window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
|
||||
for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
if self.delete:
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts))
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts))
|
||||
time.sleep(int(max_delay.replace("s", "")))
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts}+{self.tdCom.dataDict["interval"]}s+{fill_watermark_value} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value})', fill_value=fill_value)
|
||||
else:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,6,7,8,9,10,11'
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts}+{self.tdCom.dataDict["interval"]}s+{fill_watermark_value} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value})', fill_value=fill_value)
|
||||


    def run(self):
        for watermark in [None, random.randint(20, 25)]:
            self.watermark_max_delay_interval(interval=random.choice([15]), watermark=watermark, max_delay=f"{random.randint(5, 6)}s")
        for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
            self.watermark_max_delay_interval(interval=random.randint(10, 15), watermark=None, max_delay=f"{random.randint(5, 6)}s", fill_value=fill_value)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,101 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

def watermark_max_delay_interval_ext(self, interval, max_delay, watermark=None, fill_value=None, partition="tbname", delete=False, fill_history_value=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False):
|
||||
tdLog.info(f"*** testing stream max_delay+interval+exist_stb+custom_tag: interval: {interval}, partition: {partition}, max_delay: {max_delay}, fill_history: {fill_history_value}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***")
|
||||
if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
|
||||
partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
|
||||
else:
|
||||
partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
|
||||
if not stb_field_name_value:
|
||||
stb_field_name_value = self.tdCom.tb_filter_des_select_elm
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
defined_tag_count = len(tag_value.split())
|
||||
if watermark is not None:
|
||||
self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(interval=interval, watermark=watermark, ext_stb=use_exist_stb)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
        if subtable:
            stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({subtable} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
        else:
            stb_subtable_value = None
self.tdCom.date_time = 1658921623245
|
||||
if watermark is not None:
|
||||
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||
else:
|
||||
watermark_value = None
|
||||
|
||||
max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s'
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
# create stb/ctb/tb stream
|
||||
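# Only the super-table stream is created in this case; it writes into a pre-existing destination
# super table (use_exist_stb=True) with user-chosen output columns (stb_field_name_value) and custom
# tag values (tag_value).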
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, subtable_value=stb_subtable_value, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_value=fill_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
|
||||
|
||||
init_num = 0
|
||||
for i in range(self.tdCom.range_count):
|
||||
if i == 0:
|
||||
if watermark is not None:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||
else:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + self.tdCom.offset
|
||||
window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
|
||||
for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=self.tdCom.date_time+num*self.tdCom.offset)
|
||||
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
|
||||
if i == 0:
|
||||
init_num = 2 + i
|
||||
if watermark is not None:
|
||||
init_num += 1
|
||||
else:
|
||||
init_num += 1
|
||||
time.sleep(int(max_delay.replace("s", "")))
|
||||
if tag_value:
|
||||
tdSql.query(f'select {tag_value} from {self.stb_name}')
|
||||
tag_value_list = tdSql.queryResult
|
||||
if not fill_value:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts;', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} interval({self.tdCom.dataDict["interval"]}s)', defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition)
|
||||

    def run(self):
        for delete in [True, False]:
            for fill_history_value in [0, 1]:
                self.watermark_max_delay_interval_ext(interval=random.choice([15]), watermark=random.randint(20, 25), max_delay=f"{random.randint(5, 6)}s", delete=delete, fill_history_value=fill_history_value, partition=None, subtable=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,100 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

def watermark_max_delay_session(self, session, watermark, max_delay, fill_history_value=None):
|
||||
tdLog.info(f"*** testing stream max_delay+session: session: {session}, watermark: {watermark}, max_delay: {max_delay}, fill_history_value: {fill_history_value} ***")
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
if watermark is not None:
|
||||
self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tdCom.date_time = self.tdCom.dataDict["start_ts"]
|
||||
|
||||
if watermark is not None:
|
||||
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||
else:
|
||||
watermark_value = None
|
||||
max_delay_value = f'{self.tdCom.trans_time_to_s(max_delay)}s'
|
||||
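# The helper below presumably builds a CREATE STREAM statement roughly of this shape (a sketch, not
# the exact SQL emitted by tdCom.create_stream):
#   CREATE STREAM <tb>_stream TRIGGER MAX_DELAY 2s WATERMARK 20s INTO <tb>_des AS
#     SELECT _wstart AS wstart, _wend AS wend, <aggregations> FROM <tb> SESSION(ts, 10s);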
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="max_delay", watermark=watermark_value, max_delay=max_delay_value, fill_history_value=fill_history_value)
|
||||
init_num = 0
|
||||
for i in range(self.tdCom.range_count):
|
||||
if i == 0:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + 1
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
|
||||
|
||||
if watermark_value is not None:
|
||||
for ts_value in [self.tdCom.date_time, window_close_ts-1]:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
for tbname in [self.ctb_stream_des_table, self.tb_stream_des_table]:
|
||||
if tbname != self.tb_stream_des_table:
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
|
||||
else:
|
||||
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
|
||||
if not fill_history_value:
|
||||
tdSql.checkEqual(tdSql.queryRows, init_num)
|
||||
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
if i == 0:
|
||||
init_num = 2 + i
|
||||
else:
|
||||
init_num += 1
|
||||
if watermark_value is not None:
|
||||
expected_value = init_num
|
||||
else:
|
||||
expected_value = i + 1
|
||||
|
||||
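# Result check: without fill_history the destination table is polled (check_stream) until the expected
# number of closed windows appears within max_delay; with fill_history the history rows are first
# updated/deleted and the stream output is then compared against the equivalent batch session query.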
if not fill_history_value:
|
||||
for tbname in [self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)', expected_value, max_delay)
|
||||
else:
|
||||
self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)', expected_value, max_delay)
|
||||
else:
|
||||
self.tdCom.update_delete_history_data(delete=True)
|
||||
for tbname in [self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s)')
|
||||

    def run(self):
        for fill_history_value in [None, 1]:
            for watermark in [None, random.randint(20, 30)]:
                self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,105 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

def partitionby_interval(self, interval=None, partition_by_elm="tbname", ignore_expired=None):
|
||||
tdLog.info(f"*** testing stream partition+interval: interval: {interval}, partition_by: {partition_by_elm}, ignore_expired: {ignore_expired} ***")
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(interval=interval)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
ctb_name_list = list()
|
||||
for i in range(1, self.tdCom.range_count):
|
||||
ctb_name = self.tdCom.get_long_name()
|
||||
ctb_name_list.append(ctb_name)
|
||||
self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name)
|
||||
if interval is not None:
|
||||
source_sql = f'select _wstart AS wstart, {self.tdCom.partition_by_stb_source_select_str} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s)'
|
||||
else:
|
||||
source_sql = f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm}'
|
||||
|
||||
# create stb/ctb/tb stream
|
||||
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=source_sql, ignore_expired=ignore_expired)
|
||||
# insert data
|
||||
count = 1
|
||||
step_count = 1
|
||||
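# Data generation: the first pass records a timestamp 15 offsets in the past (reused later for the
# disorder/ignore_expired check); each pass also creates a fresh child table, and on even passes a
# growing batch of timestamps is written to every child table, otherwise two rows at a single timestamp.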
for i in range(1, self.tdCom.range_count):
|
||||
if i == 1:
|
||||
record_window_close_ts = self.tdCom.date_time - 15 * self.tdCom.offset
|
||||
ctb_name = self.tdCom.get_long_name()
|
||||
self.tdCom.screate_ctable(stbname=self.stb_name, ctbname=ctb_name)
|
||||
if i % 2 == 0:
|
||||
step_count += i
|
||||
for j in range(count, step_count):
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=f'{self.tdCom.date_time}+{j}s')
|
||||
for ctb_name in ctb_name_list:
|
||||
self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=f'{self.tdCom.date_time}+{j}s')
|
||||
count += i
|
||||
else:
|
||||
step_count += 1
|
||||
for i in range(2):
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=f'{self.tdCom.date_time}+{count}s')
|
||||
for ctb_name in ctb_name_list:
|
||||
self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=f'{self.tdCom.date_time}+{count}s')
|
||||
count += 1
|
||||
# check result
|
||||
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||
if "first" not in colname and "last" not in colname:
|
||||
if interval is not None:
|
||||
self.tdCom.check_query_data(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;', f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;')
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;')
|
||||
|
||||
if self.tdCom.disorder:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
|
||||
for ctb_name in ctb_name_list:
|
||||
self.tdCom.sinsert_rows(tbname=ctb_name, ts_value=record_window_close_ts)
|
||||
            if ignore_expired:
                for colname in self.tdCom.partition_by_downsampling_function_list:
                    if "first" not in colname and "last" not in colname:
                        if interval is not None:
                            tdSql.query(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;')
                            res1 = tdSql.queryResult
                            tdSql.query(f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;')
                            res2 = tdSql.queryResult
                            tdSql.checkNotEqual(res1, res2)
                        else:
                            self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;')

else:
|
||||
for colname in self.tdCom.partition_by_downsampling_function_list:
|
||||
if "first" not in colname and "last" not in colname:
|
||||
if interval is not None:
|
||||
self.tdCom.check_query_data(f'select `{colname}` from {self.stb_name}{self.tdCom.des_table_suffix} order by `{colname}`;', f'select {colname} from {self.stb_name} partition by {partition_by_elm} interval({self.tdCom.dataDict["interval"]}s) order by `{colname}`;')
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name}{self.tdCom.des_table_suffix} order by c1,c2,c3;', f'select {self.tdCom.stb_filter_des_select_elm} from {self.stb_name} partition by {partition_by_elm} order by c1,c2,c3;')
|
||||

    def run(self):
        for interval in [None, 10]:
            for ignore_expired in [0, 1]:
                self.partitionby_interval(interval=interval, partition_by_elm="tbname", ignore_expired=ignore_expired)
        self.partitionby_interval(interval=10, partition_by_elm="t1")

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()


tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@ -0,0 +1,154 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

def pause_resume_test(self, interval, partition="tbname", delete=False, fill_history_value=None, pause=True, resume=True, ignore_untreated=False):
|
||||
tdLog.info(f"*** testing stream pause+resume: interval: {interval}, partition: {partition}, delete: {delete}, fill_history: {fill_history_value}, ignore_untreated: {ignore_untreated} ***")
|
||||
if_exist_value_list = [None, True]
|
||||
if_exist = random.choice(if_exist_value_list)
|
||||
reverse_check = True if ignore_untreated else False
|
||||
range_count = (self.tdCom.range_count + 3) * 3
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(interval=interval, fill_history_value=fill_history_value)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
|
||||
if partition == "tbname":
|
||||
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||
elif partition == "c1":
|
||||
partition_elm_alias = self.tdCom.partition_col_alias
|
||||
elif partition == "abs(c1)":
|
||||
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||
elif partition is None:
|
||||
partition_elm_alias = '"no_partition"'
|
||||
else:
|
||||
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||
if partition == "tbname" or partition is None:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast(cast(abs(cast({partition_elm_alias} as int)) as bigint) as varchar(100))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
if partition:
|
||||
partition_elm = f'partition by {partition} {partition_elm_alias}'
|
||||
else:
|
||||
partition_elm = ""
|
||||
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=stb_subtable_value, fill_history_value=fill_history_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=ctb_subtable_value, fill_history_value=fill_history_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} {partition_elm} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="at_once", subtable_value=tb_subtable_value, fill_history_value=fill_history_value)
|
||||
for i in range(range_count):
|
||||
ts_value = str(self.tdCom.date_time+self.tdCom.dataDict["interval"])+f'+{i*10}s'
|
||||
ts_cast_delete_value = self.tdCom.time_cast(ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
if self.delete and i%2 != 0:
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=ts_cast_delete_value)
|
||||
self.tdCom.date_time += 1
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
if self.delete and i%2 != 0:
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=ts_cast_delete_value)
|
||||
self.tdCom.date_time += 1
|
||||
if partition:
|
||||
partition_elm = f'partition by {partition}'
|
||||
else:
|
||||
partition_elm = ""
|
||||
# if i == int(range_count/2):
|
||||
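# Pause/resume driver: from the fourth iteration on, every third pass pauses all three streams
# (also exercising PAUSE STREAM IF EXISTS on a missing name plus the expected error cases), and the
# resume branch below restarts them on the remaining passes, optionally with IGNORE UNTREATED.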
if i > 2 and i % 3 == 0:
|
||||
for stream_name in [f'{self.stb_name}{self.tdCom.stream_suffix}', f'{self.ctb_name}{self.tdCom.stream_suffix}', f'{self.tb_name}{self.tdCom.stream_suffix}']:
|
||||
if if_exist is not None:
|
||||
tdSql.execute(f'pause stream if exists {stream_name}_no_exist')
|
||||
tdSql.error(f'pause stream if not exists {stream_name}')
|
||||
tdSql.error(f'pause stream {stream_name}_no_exist')
|
||||
self.tdCom.pause_stream(stream_name, if_exist)
|
||||
if pause and not resume and range_count-i <= 3:
|
||||
time.sleep(self.tdCom.default_interval)
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {self.stb_name}{self.tdCom.des_table_suffix} order by wstart')
|
||||
res_after_pause = tdSql.queryResult
|
||||
if resume:
|
||||
if i > 2 and i % 3 != 0:
|
||||
for stream_name in [f'{self.stb_name}{self.tdCom.stream_suffix}', f'{self.ctb_name}{self.tdCom.stream_suffix}', f'{self.tb_name}{self.tdCom.stream_suffix}']:
|
||||
if if_exist is not None:
|
||||
tdSql.execute(f'resume stream if exists {stream_name}_no_exist')
|
||||
tdSql.error(f'resume stream if not exists {stream_name}')
|
||||
self.tdCom.resume_stream(stream_name, if_exist, None, ignore_untreated)
|
||||
if pause and not resume:
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {self.stb_name}{self.tdCom.des_table_suffix} order by wstart')
|
||||
res_without_resume = tdSql.queryResult
|
||||
tdSql.checkEqual(res_after_pause, res_without_resume)
|
||||
else:
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True, reverse_check=reverse_check)
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} {partition_elm} interval({self.tdCom.dataDict["interval"]}s) order by wstart', sorted=True, reverse_check=reverse_check)
|
||||
|
||||
if self.tdCom.subtable:
|
||||
for tname in [self.stb_name, self.ctb_name]:
|
||||
tdSql.query(f'select * from {self.ctb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition is None:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
tdSql.query(f'select * from {self.tb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition is None:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
|
||||
def run(self):
|
||||
for delete in [True, False]:
|
||||
for fill_history_value in [0, 1]:
|
||||
# pause/resume
|
||||
self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", ignore_untreated=False, fill_history_value=fill_history_value, delete=delete)
|
||||
self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", ignore_untreated=True, fill_history_value=fill_history_value, delete=delete)
|
||||
# self.pause_resume_test(interval=random.randint(10, 15), partition="tbname", resume=False, fill_history_value=fill_history_value, delete=delete)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
event = threading.Event()
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -0,0 +1,177 @@
|
|||
import sys
|
||||
import threading
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.tdCom = tdCom
|
||||
|
||||
def scalar_function(self, partition="tbname", fill_history_value=None):
|
||||
tdLog.info(f"*** testing stream scalar funtion partition: {partition}, fill_history_value: {fill_history_value} ***")
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
tdLog.info("preparing data ...")
|
||||
self.tdCom.prepare_data(fill_history_value=fill_history_value)
|
||||
# return
|
||||
tdSql.execute('create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int);')
|
||||
tdSql.execute('create table scalar_ct1 using scalar_stb tags(10);')
|
||||
tdSql.execute('create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20));')
|
||||
if fill_history_value is None:
|
||||
fill_history = ""
|
||||
else:
|
||||
tdLog.info("inserting fill_history data ...")
|
||||
fill_history = f'fill_history {fill_history_value}'
|
||||
for i in range(self.tdCom.range_count):
|
||||
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{i}s, 100, -100.1, "hebei", Null, "Bigdata");')
|
||||
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{i}s, 100, -100.1, "heBei", Null, "Bigdata");')
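# The rows inserted above are written before the streams are created; with fill_history 1 the
# streams below are expected to also process this pre-existing (historical) data.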
|
||||
|
||||
# self.tdCom.write_latency(self.case_name)
|
||||
math_function_list = ["abs", "acos", "asin", "atan", "ceil", "cos", "floor", "log", "pow", "round", "sin", "sqrt", "tan"]
|
||||
string_function_list = ["char_length", "concat", "concat_ws", "length", "lower", "ltrim", "rtrim", "substr", "upper"]
|
||||
for math_function in math_function_list:
|
||||
tdLog.info(f"testing function {math_function} ...")
|
||||
tdLog.info(f"creating stream for function {math_function} ...")
|
||||
if math_function in ["log", "pow"]:
|
||||
tdSql.execute(f'create stream stb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_stb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_stb partition by {partition};')
|
||||
tdSql.execute(f'create stream ctb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_ctb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_ct1;')
|
||||
tdSql.execute(f'create stream tb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_tb as select ts, {math_function}(c1, 2), {math_function}(c2, 2), c3 from scalar_tb;')
|
||||
else:
|
||||
tdSql.execute(f'create stream stb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_stb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_stb partition by {partition};')
|
||||
tdSql.execute(f'create stream ctb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_ctb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_ct1;')
|
||||
tdSql.execute(f'create stream tb_{math_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{math_function}_tb as select ts, {math_function}(c1), {math_function}(c2), c3 from scalar_tb;')
|
||||
self.tdCom.check_stream_field_type(f"describe output_{math_function}_stb", math_function)
|
||||
self.tdCom.check_stream_field_type(f"describe output_{math_function}_ctb", math_function)
|
||||
self.tdCom.check_stream_field_type(f"describe output_{math_function}_tb", math_function)
|
||||
for tbname in ["scalar_ct1", "scalar_tb"]:
|
||||
tdLog.info(f"function {math_function}: inserting data for tb --- {tbname} ...")
|
||||
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}, 100, 100.1, "beijing", "taos", "Taos");')
|
||||
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");')
|
||||
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+2s, 0, Null, "hebei", "TDengine", Null);')
|
||||
for i in range(self.tdCom.range_count):
|
||||
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 100, -100.1, "hebei", Null, "Bigdata");')
|
||||
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 100, -100.1, "heBei", Null, "Bigdata");')
|
||||
if i%2 == 0:
|
||||
tdLog.info(f"function {math_function}: update testing ...")
|
||||
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||
else:
|
||||
tdLog.info(f"function {math_function}: delete testing ...")
|
||||
dt = f'cast({self.tdCom.date_time-1} as timestamp)'
|
||||
tdSql.execute(f'delete from scalar_ct1 where ts = {dt};')
|
||||
tdSql.execute(f'delete from scalar_tb where ts = {dt};')
|
||||
|
||||
if fill_history_value:
|
||||
tdLog.info(f"function {math_function}: disorder testing ...")
|
||||
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||
dt = f'cast({self.tdCom.date_time-(self.tdCom.range_count-1)} as timestamp)'
|
||||
tdSql.execute(f'delete from scalar_ct1 where ts = {dt};')
|
||||
tdSql.execute(f'delete from scalar_tb where ts = {dt};')
|
||||
if math_function == "log" or math_function == "pow":
|
||||
tdLog.info(f"function {math_function}: confirming query result ...")
|
||||
self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_stb order by ts;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_stb partition by {partition} order by ts;')
|
||||
self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_ctb;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_ct1;')
|
||||
self.tdCom.check_query_data(f'select `{math_function}(c1, 2)`, `{math_function}(c2, 2)` from output_{math_function}_tb;', f'select {math_function}(c1, 2), {math_function}(c2, 2) from scalar_tb;')
|
||||
else:
|
||||
tdLog.info(f"function {math_function}: confirming query result ...")
|
||||
self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_stb order by ts;', f'select {math_function}(c1), {math_function}(c2) from scalar_stb partition by {partition} order by ts;')
|
||||
self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_ctb;', f'select {math_function}(c1), {math_function}(c2) from scalar_ct1;')
|
||||
self.tdCom.check_query_data(f'select `{math_function}(c1)`, `{math_function}(c2)` from output_{math_function}_tb;', f'select {math_function}(c1), {math_function}(c2) from scalar_tb;')
|
||||
tdSql.execute(f'drop stream if exists stb_{math_function}_stream')
|
||||
tdSql.execute(f'drop stream if exists ctb_{math_function}_stream')
|
||||
tdSql.execute(f'drop stream if exists tb_{math_function}_stream')
|
||||
|
||||
for string_function in string_function_list:
|
||||
tdLog.info(f"testing function {string_function} ...")
|
||||
tdLog.info(f"creating stream for function {string_function} ...")
|
||||
if string_function == "concat":
|
||||
tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_stb partition by {partition};')
|
||||
tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_ct1;')
|
||||
tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_tb;')
|
||||
elif string_function == "concat_ws":
|
||||
tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_stb partition by {partition};')
|
||||
tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_ct1;')
|
||||
tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_tb;')
|
||||
elif string_function == "substr":
|
||||
tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_stb partition by {partition};')
|
||||
tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_ct1;')
|
||||
tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_tb;')
|
||||
else:
|
||||
tdSql.execute(f'create stream stb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_stb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_stb partition by {partition};')
|
||||
tdSql.execute(f'create stream ctb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_ctb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_ct1;')
|
||||
tdSql.execute(f'create stream tb_{string_function}_stream trigger at_once ignore expired 0 ignore update 0 {fill_history} into output_{string_function}_tb as select ts, {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_tb;')
|
||||
self.tdCom.check_stream_field_type(f"describe output_{string_function}_stb", string_function)
|
||||
self.tdCom.check_stream_field_type(f"describe output_{string_function}_ctb", string_function)
|
||||
self.tdCom.check_stream_field_type(f"describe output_{string_function}_tb", string_function)
|
||||
for tbname in ["scalar_ct1", "scalar_tb"]:
|
||||
tdLog.info(f"function {string_function}: inserting data for tb --- {tbname} ...")
|
||||
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}, 100, 100.1, "beijing", "taos", "Taos");')
|
||||
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");')
|
||||
tdSql.execute(f'insert into {tbname} values ({self.tdCom.date_time}+2s, 0, Null, "hebei", "TDengine", Null);')
|
||||
|
||||
|
||||
for i in range(self.tdCom.range_count):
|
||||
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 100, -100.1, "hebei", Null, "Bigdata");')
|
||||
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 100, -100.1, "heBei", Null, "Bigdata");')
|
||||
if i%2 == 0:
|
||||
tdLog.info(f"function {string_function}: update testing...")
|
||||
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}+{i}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||
else:
|
||||
tdLog.info(f"function {string_function}: delete testing ...")
|
||||
dt = f'cast({self.tdCom.date_time-1} as timestamp)'
|
||||
tdSql.execute(f'delete from scalar_ct1 where ts = {dt};')
|
||||
tdSql.execute(f'delete from scalar_tb where ts = {dt};')
|
||||
|
||||
if fill_history_value:
|
||||
tdLog.info(f"function {string_function}: disorder testing ...")
|
||||
tdSql.execute(f'insert into scalar_ct1 values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||
tdSql.execute(f'insert into scalar_tb values ({self.tdCom.date_time}-{self.tdCom.range_count-1}s, 50, -50.1, Null, "heBei", "Bigdata1");')
|
||||
dt = f'cast({self.tdCom.date_time-(self.tdCom.range_count-1)} as timestamp)'
|
||||
tdSql.execute(f'delete from scalar_ct1 where ts = {dt};')
|
||||
tdSql.execute(f'delete from scalar_tb where ts = {dt};')
|
||||
|
||||
|
||||
if string_function == "concat":
|
||||
tdLog.info(f"function {string_function}: confirming query result ...")
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_stb order by ts;')
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_ctb;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_ct1;')
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3, c4)`, `{string_function}(c3, c5)`, `{string_function}(c4, c5)`, `{string_function}(c3, c4, c5)` from output_{string_function}_tb;', f'select {string_function}(c3, c4), {string_function}(c3, c5), {string_function}(c4, c5), {string_function}(c3, c4, c5) from scalar_tb;')
|
||||
elif string_function == "concat_ws":
|
||||
tdLog.info(f"function {string_function}: confirming query result ...")
|
||||
self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_stb order by ts;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_stb order by ts;')
|
||||
self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_ctb;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_ct1;')
|
||||
self.tdCom.check_query_data(f'select `{string_function}("aND", c3, c4)`, `{string_function}("and", c3, c5)`, `{string_function}("And", c4, c5)`, `{string_function}("AND", c3, c4, c5)` from output_{string_function}_tb;', f'select {string_function}("aND", c3, c4), {string_function}("and", c3, c5), {string_function}("And", c4, c5), {string_function}("AND", c3, c4, c5) from scalar_tb;')
|
||||
elif string_function == "substr":
|
||||
tdLog.info(f"function {string_function}: confirming query result ...")
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_stb order by ts;')
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_ctb;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_ct1;')
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3, 2)`, `{string_function}(c3, 2, 2)`, `{string_function}(c4, 5, 1)`, `{string_function}(c5, 3, 4)` from output_{string_function}_tb;', f'select {string_function}(c3, 2), {string_function}(c3, 2, 2), {string_function}(c4, 5, 1), {string_function}(c5, 3, 4) from scalar_tb;')
|
||||
else:
|
||||
tdLog.info(f"function {string_function}: confirming query result ...")
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_stb order by ts;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_stb order by ts;')
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_ctb;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_ct1;')
|
||||
self.tdCom.check_query_data(f'select `{string_function}(c3)`, `{string_function}(c4)`, `{string_function}(c5)` from output_{string_function}_tb;', f'select {string_function}(c3), {string_function}(c4), {string_function}(c5) from scalar_tb;')
|
||||
|
||||
tdSql.execute(f'drop stream if exists stb_{string_function}_stream')
|
||||
tdSql.execute(f'drop stream if exists ctb_{string_function}_stream')
|
||||
tdSql.execute(f'drop stream if exists tb_{string_function}_stream')
|
||||
|
||||
def run(self):
|
||||
self.scalar_function(partition="tbname", fill_history_value=1)
|
||||
self.scalar_function(partition="tbname,c1,t1", fill_history_value=1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
event = threading.Event()
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,256 @@
import sys
|
||||
import threading
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.tdCom = tdCom
|
||||
|
||||
def window_close_interval(self, interval, watermark=None, ignore_expired=None, partition="tbname", fill_value=None, delete=False):
|
||||
tdLog.info(f"*** testing stream window_close+interval: interval: {interval}, watermark: {watermark}, ignore_expired: {ignore_expired}, partition: {partition}, fill: {fill_value}, delete: {delete} ***")
|
||||
self.delete = delete
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
if watermark is not None:
|
||||
self.tdCom.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(interval=interval, watermark=watermark)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
|
||||
if partition == "tbname":
|
||||
partition_elm_alias = self.tdCom.partition_tbname_alias
|
||||
elif partition == "c1":
|
||||
partition_elm_alias = self.tdCom.partition_col_alias
|
||||
elif partition == "abs(c1)":
|
||||
partition_elm_alias = self.tdCom.partition_expression_alias
|
||||
else:
|
||||
partition_elm_alias = self.tdCom.partition_tag_alias
|
||||
if partition == "tbname":
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", {partition_elm_alias}), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
else:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
ctb_subtable_value = f'concat(concat("{self.ctb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
tb_subtable_value = f'concat(concat("{self.tb_name}_{self.tdCom.subtable_prefix}", cast({partition_elm_alias} as varchar(20))), "{self.tdCom.subtable_suffix}")' if self.tdCom.subtable else None
|
||||
|
||||
if watermark is not None:
|
||||
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||
else:
|
||||
watermark_value = None
|
||||
# create stb/ctb/tb stream
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
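# Assumption noted for clarity: FILL(VALUE, ...) supplies one constant per filled column, so the
# wider super-table/child-table streams get 22 constants here while the narrower normal-table
# stream below is created with 11.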
|
||||
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=stb_subtable_value, fill_value=fill_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=ctb_subtable_value, fill_value=fill_value)
|
||||
if fill_value:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,4,5,6,7,8,9,10,11'
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} partition by {partition} {partition_elm_alias} interval({self.tdCom.dataDict["interval"]}s)', trigger_mode="window_close", watermark=watermark_value, ignore_expired=ignore_expired, subtable_value=tb_subtable_value, fill_value=fill_value)
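# All three streams use trigger_mode="window_close": output for a window is only expected in the
# destination table once the source table has received data at or beyond the window close
# timestamp (plus the watermark, when one is configured).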
|
||||
|
||||
start_time = self.tdCom.date_time
|
||||
for i in range(self.tdCom.range_count):
|
||||
if i == 0:
|
||||
if watermark is not None:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||
else:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + self.tdCom.offset
|
||||
window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
|
||||
if i == 0:
|
||||
record_window_close_ts = window_close_ts
|
||||
for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
|
||||
ts_value=self.tdCom.date_time+num*self.tdCom.offset
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
|
||||
if self.delete and i%2 != 0:
|
||||
dt = f'cast({ts_value-num*self.tdCom.offset} as timestamp)'
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt)
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt)
|
||||
if not fill_value:
|
||||
for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
|
||||
if tbname != self.tb_stream_des_table:
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
|
||||
else:
|
||||
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
|
||||
tdSql.checkEqual(tdSql.queryRows, i)
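# Everything written so far stays before the current window-close timestamp, so the destination
# table should still contain only the i windows that have already closed.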
|
||||
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||
if not fill_value:
|
||||
for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
|
||||
if tbname != self.tb_stream_des_table:
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
|
||||
else:
|
||||
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
|
||||
|
||||
tdSql.checkEqual(tdSql.queryRows, i)
|
||||
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
|
||||
if not fill_value:
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart limit {i+1}', i+1)
|
||||
else:
|
||||
self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) order by wstart limit {i+1}', i+1)
|
||||
if self.tdCom.disorder and not fill_value:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=record_window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=record_window_close_ts)
|
||||
if ignore_expired:
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||
res1 = tdSql.queryResult
|
||||
tdSql.query(f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}')
|
||||
res2 = tdSql.queryResult
|
||||
tdSql.checkNotEqual(res1, res2)
|
||||
else:
|
||||
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}')
|
||||
res1 = tdSql.queryResult
|
||||
tdSql.query(f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}')
|
||||
res2 = tdSql.queryResult
|
||||
tdSql.checkNotEqual(res1, res2)
|
||||
else:
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}', i+1)
|
||||
else:
|
||||
self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} interval({self.tdCom.dataDict["interval"]}s) limit {i+1}', i+1)
|
||||
if self.tdCom.subtable:
|
||||
tdSql.query(f'select * from {self.ctb_name}')
|
||||
for tname in [self.stb_name, self.ctb_name]:
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
|
||||
ptn_counter += 1
|
||||
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] , self.tdCom.range_count)
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
tdSql.query(f'select * from {self.tb_name}')
|
||||
ptn_counter = 0
|
||||
for c1_value in tdSql.queryResult:
|
||||
if partition == "c1":
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "abs(c1)":
|
||||
abs_c1_value = abs(c1_value[1])
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
|
||||
elif partition == "tbname" and ptn_counter == 0:
|
||||
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
|
||||
ptn_counter += 1
|
||||
|
||||
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
|
||||
|
||||
if fill_value:
|
||||
history_ts = str(start_time)+f'-{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
|
||||
start_ts = self.tdCom.time_cast(history_ts, "-")
|
||||
future_ts = str(self.tdCom.date_time)+f'+{self.tdCom.dataDict["interval"]*(self.tdCom.range_count+2)}s'
|
||||
end_ts = self.tdCom.time_cast(future_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=history_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=future_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=future_ts)
|
||||
future_ts_bigint = self.tdCom.str_ts_trans_bigint(future_ts)
|
||||
if watermark is not None:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||
else:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(future_ts_bigint, self.tdCom.dataDict['interval'])
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
|
||||
|
||||
if self.tdCom.update:
|
||||
for i in range(self.tdCom.range_count):
|
||||
if i == 0:
|
||||
if watermark is not None:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'], self.tdCom.dataDict['watermark'])
|
||||
else:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_interval_endts(self.tdCom.date_time, self.tdCom.dataDict['interval'])
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + self.tdCom.offset
|
||||
window_close_ts += self.tdCom.dataDict['interval']*self.tdCom.offset
|
||||
if i == 0:
|
||||
record_window_close_ts = window_close_ts
|
||||
for num in range(int(window_close_ts/self.tdCom.offset-self.tdCom.date_time/self.tdCom.offset)):
|
||||
ts_value=self.tdCom.date_time+num*self.tdCom.offset
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts-1)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts-1)
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
if self.delete:
|
||||
self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts))
|
||||
self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=self.tdCom.time_cast(start_time), end_ts=self.tdCom.time_cast(window_close_ts))
|
||||
self.tdCom.date_time = start_time
|
||||
for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11'
|
||||
if (fill_value == "NULL" or fill_value == "NEXT" or fill_value == "LINEAR") and self.delete:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select * from (select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null', fill_value=fill_value)
|
||||
else:
|
||||
if self.delete and (fill_value == "PREV" or "value" in fill_value.lower()):
|
||||
additional_options = f"where ts >= {start_ts}-1s and ts <= {start_ts}"
|
||||
else:
|
||||
additional_options = f"where ts >= {start_ts} and ts <= {end_ts}"
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_stb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||
else:
|
||||
if "value" in fill_value.lower():
|
||||
fill_value='VALUE,1,2,3,6,7,8,9,10,11'
|
||||
if (fill_value == "NULL" or fill_value == "NEXT" or fill_value == "LINEAR") and self.delete:
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select * from (select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} where ts >= {start_ts} and ts <= {end_ts} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart) where `min(c1)` is not Null', fill_value=fill_value)
|
||||
else:
|
||||
if self.delete and (fill_value == "PREV" or "value" in fill_value.lower()):
|
||||
additional_options = f"where ts >= {start_ts}-1s and ts <= {start_ts}"
|
||||
else:
|
||||
additional_options = f"where ts >= {start_ts} and ts <= {end_ts}"
|
||||
self.tdCom.check_query_data(f'select wstart, {self.tdCom.fill_tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix} order by wstart', f'select _wstart AS wstart, {self.tdCom.fill_tb_source_select_str} from {tbname} {additional_options} partition by {partition} interval({self.tdCom.dataDict["interval"]}s) fill ({fill_value}) order by wstart', fill_value=fill_value)
|
||||
|
||||
|
||||
def run(self):
|
||||
for watermark in [None, random.randint(15, 20)]:
|
||||
for ignore_expired in [0, 1]:
|
||||
self.window_close_interval(interval=random.randint(10, 15), watermark=watermark, ignore_expired=ignore_expired)
|
||||
for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
|
||||
for watermark in [None, random.randint(15, 20)]:
|
||||
self.window_close_interval(interval=random.randint(10, 12), watermark=watermark, fill_value=fill_value)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
event = threading.Event()
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,98 @@
import sys
|
||||
import threading
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.tdCom = tdCom
|
||||
|
||||
def watermark_window_close_session(self, session, watermark, fill_history_value=None, delete=True):
|
||||
tdLog.info(f"*** testing stream window_close+session: session: {session}, watermark: {watermark}, fill_history: {fill_history_value}, delete: {delete} ***")
|
||||
self.case_name = sys._getframe().f_code.co_name
|
||||
if watermark is not None:
|
||||
self.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tdCom.date_time = self.tdCom.dataDict["start_ts"]
|
||||
if watermark is not None:
|
||||
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||
else:
|
||||
watermark_value = None
|
||||
# create stb/ctb/tb stream
|
||||
# self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.stb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {self.ctb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, fill_history_value=fill_history_value)
|
||||
self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {self.tb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, fill_history_value=fill_history_value)
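# Both streams combine session windows with trigger_mode="window_close": a session is only
# finalized after a row arrives later than the session gap (plus the watermark), so rows
# inserted inside the open session are not yet expected in the destination tables.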
|
||||
for i in range(self.tdCom.range_count):
|
||||
if i == 0:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + 1
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
|
||||
if watermark_value is not None:
|
||||
expected_value = i + 1
|
||||
for ts_value in [self.tdCom.date_time, window_close_ts-1]:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=ts_value)
|
||||
# for tbname in [self.stb_stream_des_table, self.ctb_stream_des_table, self.tb_stream_des_table]:
|
||||
for tbname in [self.ctb_stream_des_table, self.tb_stream_des_table]:
|
||||
if tbname != self.tb_stream_des_table:
|
||||
tdSql.query(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}')
|
||||
else:
|
||||
tdSql.query(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}')
|
||||
if not fill_history_value:
|
||||
tdSql.checkEqual(tdSql.queryRows, i)
|
||||
else:
|
||||
expected_value = i
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
self.tdCom.sinsert_rows(tbname=self.tb_name, ts_value=window_close_ts)
|
||||
|
||||
if fill_history_value:
|
||||
self.tdCom.update_delete_history_data(delete=delete)
|
||||
|
||||
# for tbname in [self.stb_name, self.ctb_name, self.tb_name]:
|
||||
if not fill_history_value:
|
||||
for tbname in [self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value}', expected_value)
|
||||
else:
|
||||
self.tdCom.check_stream(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value}', expected_value)
|
||||
else:
|
||||
for tbname in [self.ctb_name, self.tb_name]:
|
||||
if tbname != self.tb_name:
|
||||
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.stb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value+1}')
|
||||
else:
|
||||
self.tdCom.check_query_data(f'select wstart, wend-{self.tdCom.dataDict["session"]}s, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, _wend AS wend, {self.tdCom.tb_source_select_str} from {tbname} session(ts, {self.tdCom.dataDict["session"]}s) limit {expected_value+1}')
|
||||
|
||||
|
||||
|
||||
def run(self):
|
||||
for fill_history_value in [None, 1]:
|
||||
for watermark in [None, random.randint(20, 25)]:
|
||||
self.watermark_window_close_session(session=random.randint(10, 15), watermark=watermark, fill_history_value=fill_history_value)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
event = threading.Event()
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,83 @@
import sys
|
||||
import threading
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.common import *
|
||||
|
||||
class TDTestCase:
|
||||
updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
|
||||
def init(self, conn, logSql, replicaVar=1):
|
||||
self.replicaVar = int(replicaVar)
|
||||
tdLog.debug("start to execute %s" % __file__)
|
||||
tdSql.init(conn.cursor(), logSql)
|
||||
self.tdCom = tdCom
|
||||
|
||||
def watermark_window_close_session_ext(self, session, watermark, fill_history_value=None, partition=None, subtable=None, stb_field_name_value=None, tag_value=None, use_exist_stb=False, delete=False):
|
||||
tdLog.info(f"*** testing stream window_close+session+exist_stb+custom_tag: session: {session}, partition: {partition}, fill_history: {fill_history_value}, subtable: {subtable}, stb_field_name_value: {stb_field_name_value}, tag_value: {tag_value} ***")
|
||||
if stb_field_name_value == self.tdCom.partitial_stb_filter_des_select_elm or stb_field_name_value == self.tdCom.exchange_stb_filter_des_select_elm:
|
||||
partitial_tb_source_str = self.tdCom.partitial_ext_tb_source_select_str
|
||||
else:
|
||||
partitial_tb_source_str = self.tdCom.ext_tb_source_select_str
|
||||
if not stb_field_name_value:
|
||||
stb_field_name_value = self.tdCom.tb_filter_des_select_elm
|
||||
self.tdCom.case_name = sys._getframe().f_code.co_name
|
||||
defined_tag_count = len(tag_value.split())
|
||||
if watermark is not None:
|
||||
self.case_name = "watermark" + sys._getframe().f_code.co_name
|
||||
self.tdCom.prepare_data(session=session, watermark=watermark, fill_history_value=fill_history_value, ext_stb=use_exist_stb)
|
||||
self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
|
||||
self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
|
||||
self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
|
||||
self.tdCom.date_time = self.tdCom.dataDict["start_ts"]
|
||||
if subtable:
|
||||
stb_subtable_value = f'concat(concat("{self.stb_name}_{self.subtable_prefix}", cast(cast(abs(cast({subtable} as int)) as bigint) as varchar(100))), "{self.subtable_suffix}")' if self.subtable else None
|
||||
else:
|
||||
stb_subtable_value = None
|
||||
if watermark is not None:
|
||||
watermark_value = f'{self.tdCom.dataDict["watermark"]}s'
|
||||
else:
|
||||
watermark_value = None
|
||||
# create stb/ctb/tb stream
|
||||
self.tdCom.create_stream(stream_name=f'{self.stb_name}{self.tdCom.stream_suffix}', des_table=self.tdCom.ext_stb_stream_des_table, source_sql=f'select _wstart AS wstart, {partitial_tb_source_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s)', trigger_mode="window_close", watermark=watermark_value, subtable_value=stb_subtable_value, fill_history_value=fill_history_value, stb_field_name_value=stb_field_name_value, tag_value=tag_value, use_exist_stb=use_exist_stb)
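# This stream writes into a pre-created super table (use_exist_stb=True), mapping the selected
# columns onto stb_field_name_value and filling the destination tags from tag_value; the tag
# values are verified against the source super table once the windows close.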
|
||||
for i in range(self.tdCom.range_count):
|
||||
if i == 0:
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
|
||||
else:
|
||||
self.tdCom.date_time = window_close_ts + 1
|
||||
window_close_ts = self.tdCom.cal_watermark_window_close_session_endts(self.tdCom.date_time, self.tdCom.dataDict['watermark'], self.tdCom.dataDict['session'])
|
||||
if watermark_value is not None:
|
||||
expected_value = i + 1
|
||||
for ts_value in [self.tdCom.date_time, window_close_ts-1]:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=ts_value)
|
||||
else:
|
||||
expected_value = i
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
if self.tdCom.update and i%2 == 0:
|
||||
self.tdCom.sinsert_rows(tbname=self.ctb_name, ts_value=window_close_ts)
|
||||
|
||||
if fill_history_value:
|
||||
self.tdCom.update_delete_history_data(delete=delete)
|
||||
if tag_value:
|
||||
tdSql.query(f'select {tag_value} from {self.stb_name}')
|
||||
tag_value_list = tdSql.queryResult
|
||||
self.tdCom.check_query_data(f'select {self.tdCom.stb_filter_des_select_elm} from ext_{self.stb_name}{self.tdCom.des_table_suffix} order by ts', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.stb_name} session(ts, {self.tdCom.dataDict["session"]}s) order by wstart limit {expected_value};', sorted=True, defined_tag_count=defined_tag_count, tag_value_list=tag_value_list, partition=partition)
|
||||
|
||||
def run(self):
|
||||
#! TD-25893
|
||||
# self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, delete=False, fill_history_value=1)
|
||||
self.watermark_window_close_session_ext(session=random.randint(10, 12), watermark=random.randint(20, 25), subtable=None, partition=None, stb_field_name_value=self.tdCom.tb_filter_des_select_elm, tag_value=self.tdCom.tag_filter_des_select_elm.split(",")[0], use_exist_stb=True, delete=True)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
event = threading.Event()
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
@@ -0,0 +1,73 @@
import sys
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.common import *

class TDTestCase:
    updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug("start to execute %s" % __file__)
        tdSql.init(conn.cursor(), logSql)
        self.tdCom = tdCom

    def window_close_state_window(self, state_window, delete=True):
        tdLog.info(f"*** testing stream window_close+state_window: state_window: {state_window}, delete: {delete} ***")
        self.case_name = sys._getframe().f_code.co_name
        self.delete = delete
        self.tdCom.prepare_data(state_window=state_window)
        self.stb_name = self.tdCom.stb_name.replace(f"{self.tdCom.dbname}.", "")
        self.ctb_name = self.tdCom.ctb_name.replace(f"{self.tdCom.dbname}.", "")
        self.tb_name = self.tdCom.tb_name.replace(f"{self.tdCom.dbname}.", "")
        self.stb_stream_des_table = f'{self.stb_name}{self.tdCom.des_table_suffix}'
        self.ctb_stream_des_table = f'{self.ctb_name}{self.tdCom.des_table_suffix}'
        self.tb_stream_des_table = f'{self.tb_name}{self.tdCom.des_table_suffix}'
        state_window_col_name = self.tdCom.dataDict["state_window"]
        self.tdCom.create_stream(stream_name=f'{self.ctb_name}{self.tdCom.stream_suffix}', des_table=self.ctb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {self.ctb_name} state_window({state_window_col_name})', trigger_mode="window_close")
        self.tdCom.create_stream(stream_name=f'{self.tb_name}{self.tdCom.stream_suffix}', des_table=self.tb_stream_des_table, source_sql=f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {self.tb_name} state_window({state_window_col_name})', trigger_mode="window_close")
        state_window_max = self.tdCom.dataDict['state_window_max']
        state_window_value_inmem = 0
        sleep_step = 0
        for i in range(self.tdCom.range_count):
            state_window_value = random.randint(int((i)*state_window_max/self.tdCom.range_count), int((i+1)*state_window_max/self.tdCom.range_count))
            while state_window_value == state_window_value_inmem:
                state_window_value = random.randint(int((i)*state_window_max/self.tdCom.range_count), int((i+1)*state_window_max/self.tdCom.range_count))
                if sleep_step < self.tdCom.default_interval:
                    sleep_step += 1
                    time.sleep(1)
                else:
                    return
            for j in range(2, self.tdCom.range_count+3):
                tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                if self.tdCom.update and i%2 == 0:
                    tdSql.execute(f'insert into {self.ctb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                    tdSql.execute(f'insert into {self.tb_name} (ts, {state_window_col_name}) values ({self.tdCom.date_time}, {state_window_value})')
                if self.delete and i%2 != 0:
                    dt = f'cast({self.tdCom.date_time-1} as timestamp)'
                    self.tdCom.sdelete_rows(tbname=self.ctb_name, start_ts=dt)
                    self.tdCom.sdelete_rows(tbname=self.tb_name, start_ts=dt)
                self.tdCom.date_time += 1
            for tbname in [self.ctb_name, self.tb_name]:
                if tbname != self.tb_name:
                    self.tdCom.check_stream(f'select wstart, {self.tdCom.stb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.stb_source_select_str} from {tbname} state_window({state_window_col_name}) limit {i}', i)
                else:
                    self.tdCom.check_stream(f'select wstart, {self.tdCom.tb_output_select_str} from {tbname}{self.tdCom.des_table_suffix}', f'select _wstart AS wstart, {self.tdCom.tb_source_select_str} from {tbname} state_window({state_window_col_name}) limit {i}', i)
            state_window_value_inmem = state_window_value


    def run(self):
        for delete in [True, False]:
            self.window_close_state_window(state_window="c1", delete=delete)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -35,6 +35,7 @@ from util.taosadapter import *

import taos
import taosrest
import taosws

def checkRunTimeError():
    import win32gui

@ -105,12 +106,13 @@ if __name__ == "__main__":
    queryPolicy = 1
    createDnodeNums = 1
    restful = False
    websocket = False
    replicaVar = 1
    asan = False
    independentMnode = True
    previousCluster = False
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:n:i:aP', [
        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','adaptercfgupdate','replicaVar','independentMnode','previous'])
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RWD:n:i:aP', [
        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd','dnodeNums','mnodeNums','queryPolicy','createDnodeNums','restful','websocket','adaptercfgupdate','replicaVar','independentMnode','previous'])
    for key, value in opts:
        if key in ['-h', '--help']:
            tdLog.printNoPrefix(
@ -131,6 +133,7 @@ if __name__ == "__main__":
            tdLog.printNoPrefix('-Q set queryPolicy in one dnode')
            tdLog.printNoPrefix('-C create Dnode Numbers in one cluster')
            tdLog.printNoPrefix('-R restful realization form')
            tdLog.printNoPrefix('-W websocket connection')
            tdLog.printNoPrefix('-D taosadapter update cfg dict ')
            tdLog.printNoPrefix('-n the number of replicas')
            tdLog.printNoPrefix('-i independentMnode Mnode')

@ -177,7 +180,7 @@ if __name__ == "__main__":
            sys.exit(0)

        if key in ['-k', '--killValgrind']:
            killValgrind = 0
            killValgrind = 1

        if key in ['-e', '--execCmd']:
            try:

@ -203,6 +206,9 @@ if __name__ == "__main__":

        if key in ['-R', '--restful']:
            restful = True

        if key in ['-W', '--websocket']:
            websocket = True

        if key in ['-a', '--asan']:
            asan = True
@ -224,7 +230,7 @@ if __name__ == "__main__":
    # do exeCmd command
    #
    if not execCmd == "":
        if restful:
        if restful or websocket:
            tAdapter.init(deployPath)
        else:
            tdDnodes.init(deployPath)

@ -263,7 +269,7 @@ if __name__ == "__main__":
        if valgrind:
            time.sleep(2)

        if restful:
        if restful or websocket:
            toBeKilled = "taosadapter"

            # killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled

@ -358,7 +364,7 @@ if __name__ == "__main__":
        tdDnodes.deploy(1,updateCfgDict)
        tdDnodes.start(1)
        tdCases.logSql(logSql)
        if restful:
        if restful or websocket:
            tAdapter.deploy(adapter_cfg_dict)
            tAdapter.start()

@ -366,6 +372,8 @@ if __name__ == "__main__":
            queryPolicy=int(queryPolicy)
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())

@ -395,14 +403,16 @@ if __name__ == "__main__":
            tdDnodes.starttaosd(dnode.index)
            tdCases.logSql(logSql)

            if restful:
            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()

            if not restful:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
            else:
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
            # tdLog.info(tdDnodes.getSimCfgPath(),host)
            if createDnodeNums == 1:
                createDnodeNums=dnodeNums
@ -419,6 +429,8 @@ if __name__ == "__main__":
            queryPolicy=int(queryPolicy)
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())

@ -438,10 +450,12 @@ if __name__ == "__main__":
        if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
            conn = None
        else:
            if not restful:
                conn = taos.connect(host="%s"%(host), config=tdDnodes.sim.getCfgDir())
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())

        if testCluster:
            tdLog.info("Procedures for testing cluster")
@ -451,10 +465,12 @@ if __name__ == "__main__":
                tdCases.runOneCluster(fileName)
        else:
            tdLog.info("Procedures for testing self-deployment")
            if not restful:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())

            if fileName == "all":
                tdCases.runAllWindows(conn)

@ -470,10 +486,12 @@ if __name__ == "__main__":
                tdDnodes.stopAll()
                tdDnodes.start(1)
                time.sleep(1)
                if not restful:
                    conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
                else:
                if restful:
                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                elif websocket:
                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
                else:
                    conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
                tdLog.info("Procedures for tdengine deployed in %s" % (host))
                tdLog.info("query test after taosd restart")
                tdCases.runOneWindows(conn, sp[0] + "_" + "restart.py", replicaVar)
@ -505,7 +523,7 @@ if __name__ == "__main__":
        except:
            pass

        if restful:
        if restful or websocket:
            tAdapter.init(deployPath, masterIp)
            tAdapter.stop(force_kill=True)

@ -515,16 +533,18 @@ if __name__ == "__main__":
        tdDnodes.start(1)
        tdCases.logSql(logSql)

        if restful:
        if restful or websocket:
            tAdapter.deploy(adapter_cfg_dict)
            tAdapter.start()

        if queryPolicy != 1:
            queryPolicy=int(queryPolicy)
            if not restful:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
            else:
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
            # tdSql.init(conn.cursor())
            # tdSql.execute("create qnode on dnode 1")
            # tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
@ -567,15 +587,17 @@ if __name__ == "__main__":
            tdDnodes.starttaosd(dnode.index)
            tdCases.logSql(logSql)

            if restful:
            if restful or websocket:
                tAdapter.deploy(adapter_cfg_dict)
                tAdapter.start()

            # create taos connect
            if not restful:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
            print(tdDnodes.getSimCfgPath(),host)
            if createDnodeNums == 1:
                createDnodeNums=dnodeNums

@ -595,8 +617,10 @@ if __name__ == "__main__":
            queryPolicy=int(queryPolicy)
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())

            cursor = conn.cursor()
            cursor.execute("create qnode on dnode 1")
@ -621,10 +645,12 @@ if __name__ == "__main__":
                tdCases.runOneCluster(fileName)
        else:
            tdLog.info("Procedures for testing self-deployment")
            if not restful:
                conn = taos.connect(host,config=tdDnodes.getSimCfgPath())
            if restful:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
            elif websocket:
                conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
            else:
                conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())

            if fileName == "all":
                tdCases.runAllLinux(conn)

@ -641,10 +667,12 @@ if __name__ == "__main__":
                tdDnodes.stopAll()
                tdDnodes.start(1)
                time.sleep(1)
                if not restful:
                    conn = taos.connect( host, config=tdDnodes.getSimCfgPath())
                else:
                if restful:
                    conn = taosrest.connect(url=f"http://{host}:6041",timezone="utc")
                elif websocket:
                    conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
                else:
                    conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
                tdLog.info("Procedures for tdengine deployed in %s" % (host))
                tdLog.info("query test after taosd restart")
                tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py", replicaVar)

@ -28,12 +28,12 @@ static void shellRecordCommandToHistory(char *command);
static int32_t shellRunCommand(char *command, bool recordHistory);
static void shellRunSingleCommandImp(char *command);
static char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision);
static int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres);
static int64_t shellDumpResultToFile(const char *fname, TAOS_RES *tres);
static void shellPrintNChar(const char *str, int32_t length, int32_t width);
static void shellPrintGeometry(const unsigned char *str, int32_t length, int32_t width);
static int32_t shellVerticalPrintResult(TAOS_RES *tres, const char *sql);
static int32_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql);
static int32_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool vertical, const char *sql);
static int64_t shellVerticalPrintResult(TAOS_RES *tres, const char *sql);
static int64_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql);
static int64_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool vertical, const char *sql);
static void shellReadHistory();
static void shellWriteHistory();
static void shellPrintError(TAOS_RES *tres, int64_t st);
@ -238,14 +238,14 @@ void shellRunSingleCommandImp(char *command) {
  if (pFields != NULL) {  // select and show kinds of commands
    int32_t error_no = 0;

    int32_t numOfRows = shellDumpResult(pSql, fname, &error_no, printMode, command);
    int64_t numOfRows = shellDumpResult(pSql, fname, &error_no, printMode, command);
    if (numOfRows < 0) return;

    et = taosGetTimestampUs();
    if (error_no == 0) {
      printf("Query OK, %d row(s) in set (%.6fs)\r\n", numOfRows, (et - st) / 1E6);
      printf("Query OK, %"PRId64 " row(s) in set (%.6fs)\r\n", numOfRows, (et - st) / 1E6);
    } else {
      printf("Query interrupted (%s), %d row(s) in set (%.6fs)\r\n", taos_errstr(pSql), numOfRows, (et - st) / 1E6);
      printf("Query interrupted (%s), %"PRId64 " row(s) in set (%.6fs)\r\n", taos_errstr(pSql), numOfRows, (et - st) / 1E6);
    }
    taos_free_result(pSql);
  } else {
@ -430,7 +430,7 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
  }
}

int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres) {
int64_t shellDumpResultToFile(const char *fname, TAOS_RES *tres) {
  char fullname[PATH_MAX] = {0};
  if (taosExpandDir(fname, fullname, PATH_MAX) != 0) {
    tstrncpy(fullname, fname, PATH_MAX);

@ -459,7 +459,7 @@ int32_t shellDumpResultToFile(const char *fname, TAOS_RES *tres) {
  }
  taosFprintfFile(pFile, "\r\n");

  int32_t numOfRows = 0;
  int64_t numOfRows = 0;
  do {
    int32_t *length = taos_fetch_lengths(tres);
    for (int32_t i = 0; i < num_fields; i++) {
@ -702,7 +702,7 @@ bool shellIsShowQuery(const char *sql) {
  return false;
}

int32_t shellVerticalPrintResult(TAOS_RES *tres, const char *sql) {
int64_t shellVerticalPrintResult(TAOS_RES *tres, const char *sql) {
  TAOS_ROW row = taos_fetch_row(tres);
  if (row == NULL) {
    return 0;

@ -726,11 +726,11 @@ int32_t shellVerticalPrintResult(TAOS_RES *tres, const char *sql) {
    resShowMaxNum = SHELL_DEFAULT_RES_SHOW_NUM;
  }

  int32_t numOfRows = 0;
  int64_t numOfRows = 0;
  int32_t showMore = 1;
  do {
    if (numOfRows < resShowMaxNum) {
      printf("*************************** %d.row ***************************\r\n", numOfRows + 1);
      printf("*************************** %"PRId64".row ***************************\r\n", numOfRows + 1);

      int32_t *length = taos_fetch_lengths(tres);

@ -856,7 +856,7 @@ void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields) {
  putchar('\n');
}

int32_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql) {
int64_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql) {
  TAOS_ROW row = taos_fetch_row(tres);
  if (row == NULL) {
    return 0;

@ -879,7 +879,7 @@ int32_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql) {
    resShowMaxNum = SHELL_DEFAULT_RES_SHOW_NUM;
  }

  int32_t numOfRows = 0;
  int64_t numOfRows = 0;
  int32_t showMore = 1;

  do {
@ -915,8 +915,8 @@ int32_t shellHorizontalPrintResult(TAOS_RES *tres, const char *sql) {
  return numOfRows;
}

int32_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool vertical, const char *sql) {
  int32_t numOfRows = 0;
int64_t shellDumpResult(TAOS_RES *tres, char *fname, int32_t *error_no, bool vertical, const char *sql) {
  int64_t numOfRows = 0;
  if (fname != NULL) {
    numOfRows = shellDumpResultToFile(fname, tres);
  } else if (vertical) {

@ -157,10 +157,6 @@ void varbinary_sql_test() {
  taos_free_result(pRes);

  // string function test, not support
  pRes = taos_query(taos, "select length(c2) from stb");
  ASSERT(taos_errno(pRes) != 0);
  taos_free_result(pRes);

  pRes = taos_query(taos, "select ltrim(c2) from stb");
  ASSERT(taos_errno(pRes) != 0);
  taos_free_result(pRes);

@ -190,7 +186,7 @@ void varbinary_sql_test() {
  ASSERT(taos_errno(pRes) != 0);
  taos_free_result(pRes);

  // support first/last/last_row/count/hyperloglog/sample/tail/mode
  // support first/last/last_row/count/hyperloglog/sample/tail/mode/length
  pRes = taos_query(taos, "select first(c2) from stb");
  ASSERT(taos_errno(pRes) == 0);
  taos_free_result(pRes);

@ -207,6 +203,10 @@ void varbinary_sql_test() {
  ASSERT(taos_errno(pRes) == 0);
  taos_free_result(pRes);

  pRes = taos_query(taos, "select length(c2) from stb where c2 = '\\x7F8290'");
  ASSERT(taos_errno(pRes) == 0);
  taos_free_result(pRes);

  pRes = taos_query(taos, "select cast(t2 as varbinary(16)) from stb order by ts");
  while ((row = taos_fetch_row(pRes)) != NULL) {
    int32_t* length = taos_fetch_lengths(pRes);