Merge branch '3.0' of https://github.com/taosdata/TDengine into TD-20251
commit 270c34928a

@@ -101,6 +101,7 @@ tests/examples/JDBC/JDBCDemo/.settings/
source/libs/parser/inc/sql.*
tests/script/tmqResult.txt
tests/tmqResult.txt
tests/script/jenkins/basic.txt

# Emacs
# -*- mode: gitignore; -*-

@@ -303,7 +303,7 @@ def pre_test_build_win() {
set CL=/MP8
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
time /t
cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true || exit 7
cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true -DBUILD_TOOLS=true || exit 7
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
time /t
jom -j 6 || exit 8
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG e00ebd9
GIT_TAG efa2a5f
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
@@ -20,7 +20,18 @@ import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
<DocCardList items={useCurrentSidebarCategory().items}/>
```

### Join TDengine Community
## Study TDengine Knowledge Map

The TDengine Knowledge Map covers the various knowledge points of TDengine, revealing the invocation relationships and data flow between various conceptual entities. Learning and understanding the TDengine Knowledge Map will help you quickly master the TDengine knowledge system.

<figure>
<center>
<a href="pathname:///img/tdengine-map.svg" target="_blank"><img src="/img/tdengine-map.svg" width="80%" /></a>
<figcaption>Diagram 1. TDengine Knowledge Map</figcaption>
</center>
</figure>

## Join TDengine Community

<table width="100%">
<tr align="center" style={{border:0}}>
@@ -301,7 +301,7 @@ SELECT TIMEZONE();
### Syntax

```txt
WHERE (column|tbname) **match/MATCH/nmatch/NMATCH** _regex_
WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_
```
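
As a rough illustration (not part of this change), a filter of the form documented above could look like the following, assuming a super table `meters` whose child tables are named `d0`, `d1`, and so on:

```sql
-- keep only rows from child tables whose names are "d" followed by digits
SELECT * FROM meters WHERE tbname MATCH '^d[0-9]+$';

-- NMATCH keeps the rows whose table names do NOT match the pattern
SELECT * FROM meters WHERE tbname NMATCH '^d[0-9]+$';
```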

### Specification
@@ -4,6 +4,7 @@ from taos.tmq import *
conn = taos.connect()

print("init")
conn.execute("drop topic if exists topic_ctb_column")
conn.execute("drop database if exists py_tmq")
conn.execute("create database if not exists py_tmq vgroups 2")
conn.select_db("py_tmq")

@@ -15,7 +16,6 @@ conn.execute("create table if not exists tb2 using stb1 tags(2)")
conn.execute("create table if not exists tb3 using stb1 tags(3)")

print("create topic")
conn.execute("drop topic if exists topic_ctb_column")
conn.execute(
"create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1"
)
@@ -73,7 +73,95 @@ To install the TDengine client driver, see the [Installation Guide](../#安装步骤)
```c
{{#include examples/c/demo.c}}
```

The helper function taos_print_row formats fields of different types for output:
```c
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
  int32_t len = 0;
  for (int i = 0; i < num_fields; ++i) {
    if (i > 0) {
      str[len++] = ' ';
    }

    if (row[i] == NULL) {
      len += sprintf(str + len, "%s", TSDB_DATA_NULL_STR);
      continue;
    }

    switch (fields[i].type) {
      case TSDB_DATA_TYPE_TINYINT:
        len += sprintf(str + len, "%d", *((int8_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_UTINYINT:
        len += sprintf(str + len, "%u", *((uint8_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_SMALLINT:
        len += sprintf(str + len, "%d", *((int16_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_USMALLINT:
        len += sprintf(str + len, "%u", *((uint16_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_INT:
        len += sprintf(str + len, "%d", *((int32_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_UINT:
        len += sprintf(str + len, "%u", *((uint32_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_BIGINT:
        len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_UBIGINT:
        len += sprintf(str + len, "%" PRIu64, *((uint64_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_FLOAT: {
        float fv = 0;
        fv = GET_FLOAT_VAL(row[i]);
        len += sprintf(str + len, "%f", fv);
      } break;

      case TSDB_DATA_TYPE_DOUBLE: {
        double dv = 0;
        dv = GET_DOUBLE_VAL(row[i]);
        len += sprintf(str + len, "%lf", dv);
      } break;

      case TSDB_DATA_TYPE_BINARY:
      case TSDB_DATA_TYPE_NCHAR: {
        int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);
        if (fields[i].type == TSDB_DATA_TYPE_BINARY) {
          assert(charLen <= fields[i].bytes && charLen >= 0);
        } else {
          assert(charLen <= fields[i].bytes * TSDB_NCHAR_SIZE && charLen >= 0);
        }

        memcpy(str + len, row[i], charLen);
        len += charLen;
      } break;

      case TSDB_DATA_TYPE_TIMESTAMP:
        len += sprintf(str + len, "%" PRId64, *((int64_t *)row[i]));
        break;

      case TSDB_DATA_TYPE_BOOL:
        len += sprintf(str + len, "%d", *((int8_t *)row[i]));
      default:
        break;
    }
  }
  str[len] = 0;

  return len;
}
```
</details>
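
As a minimal usage sketch (not taken from demo.c above), taos_print_row is typically called on each row returned by a synchronous query; the field metadata comes from taos_fetch_fields. The connection, SQL text, and buffer size here are illustrative assumptions:

```c
#include <stdio.h>
#include "taos.h"  // TDengine C client header

// Print every row of a query result using taos_print_row.
static void printQueryResult(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  if (taos_errno(res) != 0) {
    printf("query failed: %s\n", taos_errstr(res));
    taos_free_result(res);
    return;
  }

  int         numFields = taos_num_fields(res);
  TAOS_FIELD *fields = taos_fetch_fields(res);

  TAOS_ROW row;
  char     buf[4096];  // assumed large enough for one formatted row
  while ((row = taos_fetch_row(res)) != NULL) {
    taos_print_row(buf, row, fields, numFields);  // format one row into buf
    printf("%s\n", buf);
  }
  taos_free_result(res);
}
```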

### Asynchronous Query Example
@@ -302,7 +302,7 @@ SELECT TIMEZONE();
### Syntax

```txt
WHERE (column|tbname) **match/MATCH/nmatch/NMATCH** _regex_
WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_
```

### Regular Expression Specification
@@ -1629,7 +1629,6 @@ int32_t tSerializeSSubQueryMsg(void *buf, int32_t bufLen, SSubQueryMsg *pReq);
int32_t tDeserializeSSubQueryMsg(void *buf, int32_t bufLen, SSubQueryMsg *pReq);
void tFreeSSubQueryMsg(SSubQueryMsg *pReq);

typedef struct {
SMsgHead header;
uint64_t sId;

@@ -1667,6 +1666,10 @@ typedef struct {
int32_t execId;
} SResFetchReq;

int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq);
int32_t tDeserializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq);

typedef struct {
SMsgHead header;
uint64_t sId;

@@ -2948,6 +2951,10 @@ typedef struct {
STqOffsetVal reqOffset;
} SMqPollReq;

int32_t tSerializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq);
int32_t tDeserializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq);

typedef struct {
int32_t vgId;
int64_t offset;
@@ -221,12 +221,12 @@ if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" =
# community-version compile
cmake ../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
elif [ "$verMode" == "cloud" ]; then
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DBUILD_CLOUD=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
elif [ "$verMode" == "cluster" ]; then
if [[ "$dbName" != "taos" ]]; then
replace_enterprise_$dbName
fi
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
cmake ../../ -DCPUTYPE=${cpuType} -DWEBSOCKET=true -DBUILD_TAOSX=true -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro}
fi
else
echo "input cpuType=${cpuType} error!!!"
@@ -213,7 +213,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
[ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${xname} || :
[ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${xname} ${bin_link_dir}/${xname} || :
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
@@ -1106,6 +1106,8 @@ int taos_get_table_vgId(TAOS *taos, const char *db, const char *table, int *vgId
return terrno;
}

pRequest->syncQuery = true;

STscObj *pTscObj = pRequest->pTscObj;
SCatalog *pCtg = NULL;
code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCtg);
@@ -1461,12 +1461,7 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
return code;
}

SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
SMqPollReq* pReq = taosMemoryCalloc(1, sizeof(SMqPollReq));
if (pReq == NULL) {
return NULL;
}

void tmqBuildConsumeReqImpl(SMqPollReq *pReq, tmq_t* tmq, int64_t timeout, SMqClientTopic* pTopic, SMqClientVg* pVg) {
/*strcpy(pReq->topic, pTopic->topicName);*/
/*strcpy(pReq->cgroup, tmq->groupId);*/

@@ -1485,9 +1480,7 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic*

pReq->useSnapshot = tmq->useSnapshot;

pReq->head.vgId = htonl(pVg->vgId);
pReq->head.contLen = htonl(sizeof(SMqPollReq));
return pReq;
pReq->head.vgId = pVg->vgId;
}

SMqMetaRspObj* tmqBuildMetaRspFromWrapper(SMqPollRspWrapper* pWrapper) {

@@ -1559,15 +1552,32 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
#endif
}
atomic_store_32(&pVg->vgSkipCnt, 0);
SMqPollReq* pReq = tmqBuildConsumeReqImpl(tmq, timeout, pTopic, pVg);
if (pReq == NULL) {

SMqPollReq req = {0};
tmqBuildConsumeReqImpl(&req, tmq, timeout, pTopic, pVg);
int32_t msgSize = tSerializeSMqPollReq(NULL, 0, &req);
if (msgSize < 0) {
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
tsem_post(&tmq->rspSem);
return -1;
}
char *msg = taosMemoryCalloc(1, msgSize);
if (NULL == msg) {
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
tsem_post(&tmq->rspSem);
return -1;
}

if (tSerializeSMqPollReq(msg, msgSize, &req) < 0) {
taosMemoryFree(msg);
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
tsem_post(&tmq->rspSem);
return -1;
}

SMqPollCbParam* pParam = taosMemoryMalloc(sizeof(SMqPollCbParam));
if (pParam == NULL) {
taosMemoryFree(pReq);
taosMemoryFree(msg);
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
tsem_post(&tmq->rspSem);
return -1;

@@ -1581,7 +1591,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {

SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
if (sendInfo == NULL) {
taosMemoryFree(pReq);
taosMemoryFree(msg);
taosMemoryFree(pParam);
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
tsem_post(&tmq->rspSem);

@@ -1589,11 +1599,11 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
}

sendInfo->msgInfo = (SDataBuf){
.pData = pReq,
.len = sizeof(SMqPollReq),
.pData = msg,
.len = msgSize,
.handle = NULL,
};
sendInfo->requestId = pReq->reqId;
sendInfo->requestId = req.reqId;
sendInfo->requestObjRefId = 0;
sendInfo->param = pParam;
sendInfo->fp = tmqPollCb;

@@ -1605,7 +1615,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
char offsetFormatBuf[80];
tFormatOffset(offsetFormatBuf, 80, &pVg->currentOffset);
tscDebug("consumer:%" PRId64 ", send poll to %s vgId:%d, epoch %d, req offset:%s, reqId:%" PRIu64,
tmq->consumerId, pTopic->topicName, pVg->vgId, tmq->epoch, offsetFormatBuf, pReq->reqId);
tmq->consumerId, pTopic->topicName, pVg->vgId, tmq->epoch, offsetFormatBuf, req.reqId);
/*printf("send vgId:%d %" PRId64 "\n", pVg->vgId, pVg->currentOffset);*/
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo);
pVg->pollCnt++;
@@ -36,7 +36,7 @@ int32_t colDataGetFullLength(const SColumnInfoData* pColumnInfoData, int32_t num
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
return pColumnInfoData->varmeta.length + sizeof(int32_t) * numOfRows;
} else {
return pColumnInfoData->info.bytes * numOfRows + BitmapLen(numOfRows);
return ((pColumnInfoData->info.type == TSDB_DATA_TYPE_NULL) ? 0 : pColumnInfoData->info.bytes * numOfRows) + BitmapLen(numOfRows);
}
}

@@ -1084,8 +1084,6 @@ int32_t dataBlockCompar_rv(const void* p1, const void* p2, const void* param) {
return 0;
}

int32_t varColSort(SColumnInfoData* pColumnInfoData, SBlockOrderInfo* pOrder) { return 0; }

int32_t blockDataSort_rv(SSDataBlock* pDataBlock, SArray* pOrderInfo, bool nullFirst) {
// Allocate the additional buffer.
int64_t p0 = taosGetTimestampUs();

@@ -1962,6 +1960,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
memset(pBuf, 0, sizeof(pBuf));
char* pData = colDataGetVarData(pColInfoData, j);
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
dataSize = TMIN(dataSize, 50);
memcpy(pBuf, varDataVal(pData), dataSize);
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
if (len >= size - 1) return dumpBuf;
@@ -4723,6 +4723,139 @@ void tFreeSSubQueryMsg(SSubQueryMsg *pReq) {
taosMemoryFreeClear(pReq->msg);
}

int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq) {
int32_t headLen = sizeof(SMsgHead);
if (buf != NULL) {
buf = (char *)buf + headLen;
bufLen -= headLen;
}

SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;

if (tEncodeU64(&encoder, pReq->sId) < 0) return -1;
if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1;
if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1;
if (tEncodeI32(&encoder, pReq->execId) < 0) return -1;

tEndEncode(&encoder);

int32_t tlen = encoder.pos;
tEncoderClear(&encoder);

if (buf != NULL) {
SMsgHead *pHead = (SMsgHead *)((char *)buf - headLen);
pHead->vgId = htonl(pReq->header.vgId);
pHead->contLen = htonl(tlen + headLen);
}

return tlen + headLen;
}

int32_t tDeserializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq) {
int32_t headLen = sizeof(SMsgHead);

SMsgHead *pHead = buf;
pHead->vgId = pReq->header.vgId;
pHead->contLen = pReq->header.contLen;

SDecoder decoder = {0};
tDecoderInit(&decoder, (char *)buf + headLen, bufLen - headLen);

if (tStartDecode(&decoder) < 0) return -1;

if (tDecodeU64(&decoder, &pReq->sId) < 0) return -1;
if (tDecodeU64(&decoder, &pReq->queryId) < 0) return -1;
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->execId) < 0) return -1;

tEndDecode(&decoder);

tDecoderClear(&decoder);
return 0;
}

int32_t tSerializeSTqOffsetVal(SEncoder *pEncoder, STqOffsetVal *pOffset) {
if (tEncodeI8(pEncoder, pOffset->type) < 0) return -1;
if (tEncodeI64(pEncoder, pOffset->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pOffset->ts) < 0) return -1;

return 0;
}

int32_t tDerializeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffset) {
if (tDecodeI8(pDecoder, &pOffset->type) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffset->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffset->ts) < 0) return -1;

return 0;
}

int32_t tSerializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq) {
int32_t headLen = sizeof(SMsgHead);
if (buf != NULL) {
buf = (char *)buf + headLen;
bufLen -= headLen;
}

SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;

if (tEncodeCStr(&encoder, pReq->subKey) < 0) return -1;
if (tEncodeI8(&encoder, pReq->withTbName) < 0) return -1;
if (tEncodeI8(&encoder, pReq->useSnapshot) < 0) return -1;
if (tEncodeI32(&encoder, pReq->epoch) < 0) return -1;
if (tEncodeU64(&encoder, pReq->reqId) < 0) return -1;
if (tEncodeI64(&encoder, pReq->consumerId) < 0) return -1;
if (tEncodeI64(&encoder, pReq->timeout) < 0) return -1;
if (tSerializeSTqOffsetVal(&encoder, &pReq->reqOffset) < 0) return -1;

tEndEncode(&encoder);

int32_t tlen = encoder.pos;
tEncoderClear(&encoder);

if (buf != NULL) {
SMsgHead *pHead = (SMsgHead *)((char *)buf - headLen);
pHead->vgId = htonl(pReq->head.vgId);
pHead->contLen = htonl(tlen + headLen);
}

return tlen + headLen;
}

int32_t tDeserializeSMqPollReq(void *buf, int32_t bufLen, SMqPollReq *pReq) {
int32_t headLen = sizeof(SMsgHead);

SMsgHead *pHead = buf;
pHead->vgId = pReq->head.vgId;
pHead->contLen = pReq->head.contLen;

SDecoder decoder = {0};
tDecoderInit(&decoder, (char *)buf + headLen, bufLen - headLen);

if (tStartDecode(&decoder) < 0) return -1;

if (tDecodeCStrTo(&decoder, pReq->subKey) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->withTbName) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->useSnapshot) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->epoch) < 0) return -1;
if (tDecodeU64(&decoder, &pReq->reqId) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->consumerId) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->timeout) < 0) return -1;
if (tDerializeSTqOffsetVal(&decoder, &pReq->reqOffset) < 0) return -1;

tEndDecode(&decoder);

tDecoderClear(&decoder);
return 0;
}

int32_t tSerializeSTaskDropReq(void *buf, int32_t bufLen, STaskDropReq *pReq) {
int32_t headLen = sizeof(SMsgHead);
if (buf != NULL) {
@@ -148,8 +148,8 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp

SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
if (pVnode == NULL) {
dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d", pHead->vgId, pMsg, terrstr(),
TMSG_INFO(pMsg->msgType), qtype);
dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s qtype:%d contLen:%d", pHead->vgId, pMsg, terrstr(),
TMSG_INFO(pMsg->msgType), qtype, pHead->contLen);
return terrno != 0 ? terrno : -1;
}
@@ -203,10 +203,13 @@ static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char
goto _err;
}

SBatchDeleteReq deleteReq;
SBatchDeleteReq deleteReq = {0};
SSubmitReq *pSubmitReq =
tqBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, &pTsmaStat->pTSma->schemaTag, true,
pTsmaStat->pTSma->dstTbUid, pTsmaStat->pTSma->dstTbName, &deleteReq);
// TODO deleteReq
taosArrayDestroy(deleteReq.deleteReqs);

if (!pSubmitReq) {
smaError("vgId:%d, failed to gen submit blk while tsma insert for smaIndex %" PRIi64 " since %s", SMA_VID(pSma),
@@ -458,20 +458,26 @@ static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
}

int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
int32_t reqEpoch = pReq->epoch;
SMqPollReq req = {0};
int32_t code = 0;
STqOffsetVal reqOffset = pReq->reqOffset;
STqOffsetVal fetchOffsetNew;
SWalCkHead* pCkHead = NULL;

if (tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
tqError("tDeserializeSMqPollReq %d failed", pMsg->contLen);
return -1;
}

int64_t consumerId = req.consumerId;
int32_t reqEpoch = req.epoch;
STqOffsetVal reqOffset = req.reqOffset;

// 1.find handle
STqHandle* pHandle = taosHashGet(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
/*ASSERT(pHandle);*/
if (pHandle == NULL) {
tqError("tmq poll: no consumer handle for consumer:%" PRId64 ", in vgId:%d, subkey %s", consumerId,
TD_VID(pTq->pVnode), pReq->subKey);
TD_VID(pTq->pVnode), req.subKey);
return -1;
}

@@ -479,7 +485,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (pHandle->consumerId != consumerId) {
tqError("tmq poll: consumer handle mismatch for consumer:%" PRId64
", in vgId:%d, subkey %s, handle consumer id %" PRId64,
consumerId, TD_VID(pTq->pVnode), pReq->subKey, pHandle->consumerId);
consumerId, TD_VID(pTq->pVnode), req.subKey, pHandle->consumerId);
terrno = TSDB_CODE_TMQ_CONSUMER_MISMATCH;
return -1;
}

@@ -493,13 +499,13 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
char buf[80];
tFormatOffset(buf, 80, &reqOffset);
tqDebug("tmq poll: consumer %" PRId64 " (epoch %d), subkey %s, recv poll req in vg %d, req offset %s", consumerId,
pReq->epoch, pHandle->subKey, TD_VID(pTq->pVnode), buf);
req.epoch, pHandle->subKey, TD_VID(pTq->pVnode), buf);

// 2.reset offset if needed
if (reqOffset.type > 0) {
fetchOffsetNew = reqOffset;
} else {
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pReq->subKey);
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, req.subKey);
if (pOffset != NULL) {
fetchOffsetNew = pOffset->val;
char formatBuf[80];

@@ -508,7 +514,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
TD_VID(pTq->pVnode), formatBuf);
} else {
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
if (pReq->useSnapshot) {
if (req.useSnapshot) {
if (pHandle->fetchMeta) {
tqOffsetResetToMeta(&fetchOffsetNew, 0);
} else {

@@ -520,21 +526,21 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
tqInitDataRsp(&dataRsp, &req, pHandle->execHandle.subType);

tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, vg %d, offset reset to %" PRId64, consumerId,
pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.rspOffset.version);
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
if (tqSendDataRsp(pTq, pMsg, &req, &dataRsp) < 0) {
code = -1;
}
tDeleteSMqDataRsp(&dataRsp);
return code;
} else {
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pReq);
tqInitTaosxRsp(&taosxRsp, &req);
tqOffsetResetToLog(&taosxRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
if (tqSendTaosxRsp(pTq, pMsg, &req, &taosxRsp) < 0) {
code = -1;
}
tDeleteSTaosxRsp(&taosxRsp);

@@ -543,7 +549,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey %s, no offset committed for consumer %" PRId64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, TD_VID(pTq->pVnode), pReq->subKey);
pHandle->subKey, consumerId, TD_VID(pTq->pVnode), req.subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
return -1;
}

@@ -552,7 +558,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {

if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
tqInitDataRsp(&dataRsp, &req, pHandle->execHandle.subType);
// lock
taosWLockLatch(&pTq->pushLock);
tqScanData(pTq, pHandle, &dataRsp, &fetchOffsetNew);

@@ -580,7 +586,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
#endif
taosWUnLockLatch(&pTq->pushLock);

if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
if (tqSendDataRsp(pTq, pMsg, &req, &dataRsp) < 0) {
code = -1;
}

@@ -599,13 +605,13 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqMetaRsp metaRsp = {0};

STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pReq);
tqInitTaosxRsp(&taosxRsp, &req);

if (fetchOffsetNew.type != TMQ_OFFSET__LOG) {
tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, &fetchOffsetNew);

if (metaRsp.metaRspLen > 0) {
if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
if (tqSendMetaPollRsp(pTq, pMsg, &req, &metaRsp) < 0) {
code = -1;
}
tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, vg %d, send meta offset type:%d,uid:%" PRId64

@@ -618,7 +624,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
}

if (taosxRsp.blockNum > 0) {
if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
if (tqSendTaosxRsp(pTq, pMsg, &req, &taosxRsp) < 0) {
code = -1;
}
tDeleteSTaosxRsp(&taosxRsp);

@@ -648,13 +654,13 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (consumerEpoch > reqEpoch) {
tqWarn("tmq poll: consumer %" PRId64 " (epoch %d), subkey %s, vg %d offset %" PRId64
", found new consumer epoch %d, discard req epoch %d",
consumerId, pReq->epoch, pHandle->subKey, TD_VID(pTq->pVnode), fetchVer, consumerEpoch, reqEpoch);
consumerId, req.epoch, pHandle->subKey, TD_VID(pTq->pVnode), fetchVer, consumerEpoch, reqEpoch);
break;
}

if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead) < 0) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
if (tqSendTaosxRsp(pTq, pMsg, &req, &taosxRsp) < 0) {
code = -1;
}
tDeleteSTaosxRsp(&taosxRsp);

@@ -665,7 +671,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SWalCont* pHead = &pCkHead->head;

tqDebug("tmq poll: consumer:%" PRId64 ", (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d", consumerId,
pReq->epoch, TD_VID(pTq->pVnode), fetchVer, pHead->msgType);
req.epoch, TD_VID(pTq->pVnode), fetchVer, pHead->msgType);

if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;

@@ -674,7 +680,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
}
if (taosxRsp.blockNum > 0 /* threshold */) {
tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
if (tqSendTaosxRsp(pTq, pMsg, &req, &taosxRsp) < 0) {
code = -1;
}
tDeleteSTaosxRsp(&taosxRsp);

@@ -692,7 +698,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
metaRsp.resMsgType = pHead->msgType;
metaRsp.metaRspLen = pHead->bodyLen;
metaRsp.metaRsp = pHead->body;
if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
if (tqSendMetaPollRsp(pTq, pMsg, &req, &metaRsp) < 0) {
code = -1;
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
@@ -308,9 +308,8 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver)
}

if (vnodeIsRoleLeader(pTq->pVnode)) {
if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;
if (msgType == TDMT_VND_SUBMIT) {
if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;

void* data = taosMemoryMalloc(msgLen);
if (data == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -236,6 +236,7 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
pDeleteReq->suid = suid;
pDeleteReq->deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
tqBuildDeleteReq(pVnode, stbFullName, pDataBlock, pDeleteReq);
continue;
}
@@ -287,12 +287,17 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
hasRes = true;
p->ts = pColVal->ts;

uint8_t* px = p->colVal.value.pData;
p->colVal = pColVal->colVal;
if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
p->colVal = pColVal->colVal;
} else {
if (COL_VAL_IS_VALUE(&pColVal->colVal)) {
memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData);
}

if (COL_VAL_IS_VALUE(&pColVal->colVal) && IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
p->colVal.value.pData = px;
memcpy(px, pColVal->colVal.value.pData, pColVal->colVal.value.nData);
p->colVal.value.nData = pColVal->colVal.value.nData;
p->colVal.type = pColVal->colVal.type;
p->colVal.flag = pColVal->colVal.flag;
p->colVal.cid = pColVal->colVal.cid;
}
}
}
@@ -305,7 +305,7 @@ static void* getPosInBlockInfoBuf(SBlockInfoBuf* pBuf, int32_t index) {
}

// NOTE: speedup the whole processing by preparing the buffer for STableBlockScanInfo in batch model
static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableKeyInfo* idList, int32_t numOfTables) {
static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, SBlockInfoBuf* pBuf, const STableKeyInfo* idList, int32_t numOfTables) {
// allocate buffer in order to load data blocks from file
// todo use simple hash instead, optimize the memory consumption
SHashObj* pTableMap =

@@ -315,10 +315,10 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
}

int64_t st = taosGetTimestampUs();
initBlockScanInfoBuf(&pTsdbReader->blockInfoBuf, numOfTables);
initBlockScanInfoBuf(pBuf, numOfTables);

for (int32_t j = 0; j < numOfTables; ++j) {
STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(&pTsdbReader->blockInfoBuf, j);
STableBlockScanInfo* pScanInfo = getPosInBlockInfoBuf(pBuf, j);
pScanInfo->uid = idList[j].uid;
if (ASCENDING_TRAVERSE(pTsdbReader->order)) {
int64_t skey = pTsdbReader->window.skey;

@@ -3785,9 +3785,9 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
}

STsdbReader* p = (pReader->innerReader[0] != NULL)? pReader->innerReader[0]:pReader;
pReader->status.pTableMap = createDataBlockScanInfo(p, pTableList, numOfTables);
pReader->status.pTableMap = createDataBlockScanInfo(p, &pReader->blockInfoBuf, pTableList, numOfTables);
if (pReader->status.pTableMap == NULL) {
tsdbReaderClose(pReader);
tsdbReaderClose(p);
*ppReader = NULL;

code = TSDB_CODE_TDB_OUT_OF_MEMORY;
@@ -2,10 +2,6 @@ aux_source_directory(src EXECUTOR_SRC)
#add_library(executor ${EXECUTOR_SRC})

add_library(executor STATIC ${EXECUTOR_SRC})
#set_target_properties(executor PROPERTIES
# IMPORTED_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}/libexecutor.a"
# INTERFACE_INCLUDE_DIRECTORIES "${TD_SOURCE_DIR}/include/libs/executor"
# )

target_link_libraries(executor
PRIVATE os util common function parser planner qcom vnode scalar nodes index stream
@@ -235,16 +235,6 @@ typedef enum {
#define COL_MATCH_FROM_COL_ID 0x1
#define COL_MATCH_FROM_SLOT_ID 0x2

typedef struct SSourceDataInfo {
int32_t index;
SRetrieveTableRsp* pRsp;
uint64_t totalRows;
int64_t startTime;
int32_t code;
EX_SOURCE_STATUS status;
const char* taskId;
} SSourceDataInfo;

typedef struct SLoadRemoteDataInfo {
uint64_t totalSize; // total load bytes from remote
uint64_t totalRows; // total number of rows

@@ -371,23 +361,8 @@ typedef struct STagScanInfo {
SColMatchInfo matchInfo;
int32_t curPos;
SReadHandle readHandle;
STableListInfo* pTableList;
} STagScanInfo;

typedef struct SLastrowScanInfo {
SSDataBlock* pRes;
SReadHandle readHandle;
void* pLastrowReader;
SColMatchInfo matchInfo;
int32_t* pSlotIds;
SExprSupp pseudoExprSup;
int32_t retrieveType;
int32_t currentGroupIndex;
SSDataBlock* pBufferredRes;
SArray* pUidList;
int32_t indexOfBufferedRes;
} SLastrowScanInfo;

typedef enum EStreamScanMode {
STREAM_SCAN_FROM_READERHANDLE = 1,
STREAM_SCAN_FROM_RES,

@@ -504,40 +479,6 @@ typedef struct {
SSnapContext* sContext;
} SStreamRawScanInfo;

typedef struct SSysTableIndex {
int8_t init;
SArray* uids;
int32_t lastIdx;
} SSysTableIndex;

typedef struct SSysTableScanInfo {
SRetrieveMetaTableRsp* pRsp;
SRetrieveTableReq req;
SEpSet epSet;
tsem_t ready;
SReadHandle readHandle;
int32_t accountId;
const char* pUser;
bool sysInfo;
bool showRewrite;
SNode* pCondition; // db_name filter condition, to discard data that are not in current database
SMTbCursor* pCur; // cursor for iterate the local table meta store.
SSysTableIndex* pIdx; // idx for local table meta
SColMatchInfo matchInfo;
SName name;
SSDataBlock* pRes;
int64_t numOfBlocks; // extract basic running information.
SLoadRemoteDataInfo loadInfo;
} SSysTableScanInfo;

typedef struct SBlockDistInfo {
SSDataBlock* pResBlock;
STsdbReader* pHandle;
SReadHandle readHandle;
uint64_t uid; // table uid
} SBlockDistInfo;

// todo remove this
typedef struct SOptrBasicInfo {
SResultRowInfo resultRowInfo;
SSDataBlock* pRes;

@@ -603,24 +544,6 @@ typedef struct SAggOperatorInfo {
SExprSupp scalarExprSup;
} SAggOperatorInfo;

typedef struct SProjectOperatorInfo {
SOptrBasicInfo binfo;
SAggSupporter aggSup;
SArray* pPseudoColInfo;
SLimitInfo limitInfo;
bool mergeDataBlocks;
SSDataBlock* pFinalRes;
} SProjectOperatorInfo;

typedef struct SIndefOperatorInfo {
SOptrBasicInfo binfo;
SAggSupporter aggSup;
SArray* pPseudoColInfo;
SExprSupp scalarSup;
uint64_t groupId;
SSDataBlock* pNextGroupRes;
} SIndefOperatorInfo;

typedef struct SFillOperatorInfo {
struct SFillInfo* pFillInfo;
SSDataBlock* pRes;

@@ -638,42 +561,12 @@ typedef struct SFillOperatorInfo {
SExprSupp noFillExprSupp;
} SFillOperatorInfo;

typedef struct SGroupbyOperatorInfo {
SOptrBasicInfo binfo;
SAggSupporter aggSup;
SArray* pGroupCols; // group by columns, SArray<SColumn>
SArray* pGroupColVals; // current group column values, SArray<SGroupKeys>
bool isInit; // denote if current val is initialized or not
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
SGroupResInfo groupResInfo;
SExprSupp scalarSup;
} SGroupbyOperatorInfo;

typedef struct SDataGroupInfo {
uint64_t groupId;
int64_t numOfRows;
SArray* pPageList;
} SDataGroupInfo;

// The sort in partition may be needed later.
typedef struct SPartitionOperatorInfo {
SOptrBasicInfo binfo;
SArray* pGroupCols;
SArray* pGroupColVals; // current group column values, SArray<SGroupKeys>
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
SHashObj* pGroupSet; // quick locate the window object for each result

SDiskbasedBuf* pBuf; // query result buffer based on blocked-wised disk file
int32_t rowCapacity; // maximum number of rows for each buffer page
int32_t* columnOffset; // start position for each column data
SArray* sortedGroupArray; // SDataGroupInfo sorted by group id
int32_t groupIndex; // group index
int32_t pageIndex; // page index of current group
SExprSupp scalarSup;
} SPartitionOperatorInfo;

typedef struct SWindowRowsSup {
STimeWindow win;
TSKEY prevTs;

@@ -754,6 +647,23 @@ typedef struct SStreamPartitionOperatorInfo {
SSDataBlock* pDelRes;
} SStreamPartitionOperatorInfo;

typedef struct SStreamFillSupporter {
int32_t type; // fill type
SInterval interval;
SResultRowData prev;
SResultRowData cur;
SResultRowData next;
SResultRowData nextNext;
SFillColInfo* pAllColInfo; // fill exprs and not fill exprs
SExprSupp notFillExprSup;
int32_t numOfAllCols; // number of all exprs, including the tags columns
int32_t numOfFillCols;
int32_t numOfNotFillCols;
int32_t rowSize;
SSHashObj* pResMap;
bool hasDelete;
} SStreamFillSupporter;

typedef struct SStreamFillOperatorInfo {
SStreamFillSupporter* pFillSup;
SSDataBlock* pRes;

@@ -800,33 +710,6 @@ typedef struct SStateWindowOperatorInfo {
STimeWindowAggSupp twAggSup;
} SStateWindowOperatorInfo;

typedef struct SSortOperatorInfo {
SOptrBasicInfo binfo;
uint32_t sortBufSize; // max buffer size for in-memory sort
SArray* pSortInfo;
SSortHandle* pSortHandle;
SColMatchInfo matchInfo;
int32_t bufPageSize;
int64_t startTs; // sort start time
uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
SLimitInfo limitInfo;
} SSortOperatorInfo;

typedef struct SJoinOperatorInfo {
SSDataBlock* pRes;
int32_t joinType;
int32_t inputOrder;

SSDataBlock* pLeft;
int32_t leftPos;
SColumnInfo leftCol;

SSDataBlock* pRight;
int32_t rightPos;
SColumnInfo rightCol;
SNode* pCondAfterMerge;
} SJoinOperatorInfo;

#define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED)
#define OPTR_SET_OPENED(_optr) ((_optr)->status |= OP_OPENED)

@@ -850,7 +733,6 @@ void doBuildStreamResBlock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGr
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
SDiskbasedBuf* pBuf);

int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo);
void applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator);

@@ -880,9 +762,6 @@ void cleanupAggSup(SAggSupporter* pAggSup);
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId, const char* name);

int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts);
int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts);

SSDataBlock* loadNextDataBlock(void* param);

void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset);

@@ -965,9 +844,8 @@ void setInputDataBlock(SExprSupp* pExprSupp, SSDataBlock* pBlock, int32_t order,
bool isTaskKilled(SExecTaskInfo* pTaskInfo);
int32_t checkForQueryBuf(size_t numOfTables);

void setTaskKilled(SExecTaskInfo* pTaskInfo);
void queryCostStatis(SExecTaskInfo* pTaskInfo);

void setTaskKilled(SExecTaskInfo* pTaskInfo);
void queryCostStatis(SExecTaskInfo* pTaskInfo);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
void destroyOperatorInfo(SOperatorInfo* pOperator);
int32_t getMaximumIdleDurationSec();

@@ -995,9 +873,6 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);

int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result);
int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length);

STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval,
int32_t order);
int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey,
@@ -111,22 +111,6 @@ typedef struct SStreamFillInfo {
int32_t delIndex;
} SStreamFillInfo;

typedef struct SStreamFillSupporter {
int32_t type; // fill type
SInterval interval;
SResultRowData prev;
SResultRowData cur;
SResultRowData next;
SResultRowData nextNext;
SFillColInfo* pAllColInfo; // fill exprs and not fill exprs
int32_t numOfAllCols; // number of all exprs, including the tags columns
int32_t numOfFillCols;
int32_t numOfNotFillCols;
int32_t rowSize;
SSHashObj* pResMap;
bool hasDelete;
} SStreamFillSupporter;

int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);

void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
@@ -36,12 +36,13 @@ typedef struct SMultiMergeSource {

typedef struct SSortSource {
SMultiMergeSource src;
union {
struct {
SArray* pageIdList;
int32_t pageIndex;
};
struct {
SArray* pageIdList;
int32_t pageIndex;
};
struct {
void* param;
bool onlyRef;
};

} SSortSource;
@@ -25,15 +25,29 @@
#include "thash.h"
#include "ttypes.h"

typedef struct SCacheRowsScanInfo {
SSDataBlock* pRes;
SReadHandle readHandle;
void* pLastrowReader;
SColMatchInfo matchInfo;
int32_t* pSlotIds;
SExprSupp pseudoExprSup;
int32_t retrieveType;
int32_t currentGroupIndex;
SSDataBlock* pBufferredRes;
SArray* pUidList;
int32_t indexOfBufferedRes;
} SCacheRowsScanInfo;

static SSDataBlock* doScanCache(SOperatorInfo* pOperator);
static void destroyLastrowScanOperator(void* param);
static void destroyCacheScanOperator(void* param);
static int32_t extractCacheScanSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds);
static int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pColMatchInfo);

SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle,
SExecTaskInfo* pTaskInfo) {
int32_t code = TSDB_CODE_SUCCESS;
SLastrowScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SLastrowScanInfo));
SCacheRowsScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SCacheRowsScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;

@@ -97,14 +111,14 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);

pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doScanCache, NULL, destroyLastrowScanOperator, NULL);
createOperatorFpSet(operatorDummyOpenFn, doScanCache, NULL, destroyCacheScanOperator, NULL);

pOperator->cost.openCost = 0;
return pOperator;

_error:
pTaskInfo->code = code;
destroyLastrowScanOperator(pInfo);
destroyCacheScanOperator(pInfo);
taosMemoryFree(pOperator);
return NULL;
}

@@ -114,7 +128,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
return NULL;
}

SLastrowScanInfo* pInfo = pOperator->info;
SCacheRowsScanInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
STableListInfo* pTableList = pTaskInfo->pTableInfoList;

@@ -157,9 +171,12 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
SColumnInfoData* pSrc = taosArrayGet(pInfo->pBufferredRes->pDataBlock, slotId);
SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, slotId);

char* p = colDataGetData(pSrc, pInfo->indexOfBufferedRes);
bool isNull = colDataIsNull_s(pSrc, pInfo->indexOfBufferedRes);
colDataAppend(pDst, 0, p, isNull);
if (colDataIsNull_s(pSrc, pInfo->indexOfBufferedRes)) {
colDataAppendNULL(pDst, 0);
} else {
char* p = colDataGetData(pSrc, pInfo->indexOfBufferedRes);
colDataAppend(pDst, 0, p, false);
}
}

pRes->info.uid = *(tb_uid_t*)taosArrayGet(pInfo->pUidList, pInfo->indexOfBufferedRes);

@@ -226,6 +243,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {

pInfo->pLastrowReader = tsdbCacherowsReaderClose(pInfo->pLastrowReader);
return pInfo->pRes;
} else {
pInfo->pLastrowReader = tsdbCacherowsReaderClose(pInfo->pLastrowReader);
}
}

@@ -234,8 +253,8 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
}
}

void destroyLastrowScanOperator(void* param) {
SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param;
void destroyCacheScanOperator(void* param) {
SCacheRowsScanInfo* pInfo = (SCacheRowsScanInfo*)param;
blockDataDestroy(pInfo->pRes);
blockDataDestroy(pInfo->pBufferredRes);
taosMemoryFree(pInfo->pSlotIds);
@@ -250,6 +250,8 @@ static int32_t putDataBlock(SDataSinkHandle* pHandle, const SInputData* pInput,
return code;
}

taosArrayClear(pInserter->pDataBlocks);

code = sendSubmitRequest(pInserter, pMsg, pInserter->pParam->readHandle->pMsgCb->clientRpc, &pInserter->pNode->epSet);
if (code) {
return code;
@ -41,6 +41,16 @@ typedef struct SFetchRspHandleWrapper {
|
|||
int32_t sourceIndex;
|
||||
} SFetchRspHandleWrapper;
|
||||
|
||||
typedef struct SSourceDataInfo {
|
||||
int32_t index;
|
||||
SRetrieveTableRsp* pRsp;
|
||||
uint64_t totalRows;
|
||||
int64_t startTime;
|
||||
int32_t code;
|
||||
EX_SOURCE_STATUS status;
|
||||
const char* taskId;
|
||||
} SSourceDataInfo;
|
||||
|
||||
static void destroyExchangeOperatorInfo(void* param);
|
||||
static void freeBlock(void* pParam);
|
||||
static void freeSourceDataInfo(void* param);
|
||||
|
@ -52,6 +62,7 @@ static int32_t getCompletedSources(const SArray* pArray);
|
|||
static int32_t prepareConcurrentlyLoad(SOperatorInfo* pOperator);
|
||||
static int32_t seqLoadRemoteData(SOperatorInfo* pOperator);
|
||||
static int32_t prepareLoadRemoteData(SOperatorInfo* pOperator);
|
||||
static int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
|
||||
|
||||
static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeInfo* pExchangeInfo,
|
||||
SExecTaskInfo* pTaskInfo) {
|
||||
|
@ -405,27 +416,42 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas
|
|||
loadRemoteDataCallback(pWrapper, &pBuf, code);
|
||||
taosMemoryFree(pWrapper);
|
||||
} else {
|
||||
SResFetchReq* pMsg = taosMemoryCalloc(1, sizeof(SResFetchReq));
|
||||
if (NULL == pMsg) {
|
||||
SResFetchReq req = {0};
|
||||
req.header.vgId = pSource->addr.nodeId;
|
||||
req.sId = pSource->schedId;
|
||||
req.taskId = pSource->taskId;
|
||||
req.queryId = pTaskInfo->id.queryId;
|
||||
req.execId = pSource->execId;
|
||||
|
||||
int32_t msgSize = tSerializeSResFetchReq(NULL, 0, &req);
|
||||
if (msgSize < 0) {
|
||||
pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
taosMemoryFree(pWrapper);
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
void* msg = taosMemoryCalloc(1, msgSize);
|
||||
if (NULL == msg) {
|
||||
pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
taosMemoryFree(pWrapper);
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
if (tSerializeSResFetchReq(msg, msgSize, &req) < 0) {
|
||||
pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
taosMemoryFree(pWrapper);
|
||||
taosMemoryFree(msg);
|
||||
return pTaskInfo->code;
|
||||
}
|
||||
|
||||
qDebug("%s build fetch msg and send to vgId:%d, ep:%s, taskId:0x%" PRIx64 ", execId:%d, %d/%" PRIzu,
|
||||
GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->taskId,
|
||||
pSource->execId, sourceIndex, totalSources);
|
||||
|
||||
pMsg->header.vgId = htonl(pSource->addr.nodeId);
|
||||
pMsg->sId = htobe64(pSource->schedId);
|
||||
pMsg->taskId = htobe64(pSource->taskId);
|
||||
pMsg->queryId = htobe64(pTaskInfo->id.queryId);
|
||||
pMsg->execId = htonl(pSource->execId);
|
||||
|
||||
// send the fetch remote task result reques
|
||||
SMsgSendInfo* pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
|
||||
if (NULL == pMsgSendInfo) {
|
||||
taosMemoryFreeClear(pMsg);
|
||||
taosMemoryFreeClear(msg);
|
||||
taosMemoryFree(pWrapper);
|
||||
qError("%s prepare message %d failed", GET_TASKID(pTaskInfo), (int32_t)sizeof(SMsgSendInfo));
|
||||
pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
|
||||
|
@ -434,8 +460,8 @@ int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTas
|
|||
|
||||
pMsgSendInfo->param = pWrapper;
|
||||
pMsgSendInfo->paramFreeFp = taosMemoryFree;
|
||||
pMsgSendInfo->msgInfo.pData = pMsg;
|
||||
pMsgSendInfo->msgInfo.len = sizeof(SResFetchReq);
|
||||
pMsgSendInfo->msgInfo.pData = msg;
|
||||
pMsgSendInfo->msgInfo.len = msgSize;
|
||||
pMsgSendInfo->msgType = pSource->fetchMsgType;
|
||||
pMsgSendInfo->fp = loadRemoteDataCallback;
|
||||
|
||||
|
@@ -647,3 +673,80 @@ int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
  pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
  return TSDB_CODE_SUCCESS;
}

int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf) {
  if (pLimitInfo->remainGroupOffset > 0) {
    if (pLimitInfo->currentGroupId == 0) {  // it is the first group
      pLimitInfo->currentGroupId = pBlock->info.groupId;
      blockDataCleanup(pBlock);
      return PROJECT_RETRIEVE_CONTINUE;
    } else if (pLimitInfo->currentGroupId != pBlock->info.groupId) {
      // now it is the data from a new group
      pLimitInfo->remainGroupOffset -= 1;

      // ignore data block in current group
      if (pLimitInfo->remainGroupOffset > 0) {
        blockDataCleanup(pBlock);
        return PROJECT_RETRIEVE_CONTINUE;
      }
    }

    // set current group id of the project operator
    pLimitInfo->currentGroupId = pBlock->info.groupId;
  }

  // here check for a new group data, we need to handle the data of the previous group.
  if (pLimitInfo->currentGroupId != 0 && pLimitInfo->currentGroupId != pBlock->info.groupId) {
    pLimitInfo->numOfOutputGroups += 1;
    if ((pLimitInfo->slimit.limit > 0) && (pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
      pOperator->status = OP_EXEC_DONE;
      blockDataCleanup(pBlock);

      return PROJECT_RETRIEVE_DONE;
    }

    // reset the value for a new group data
    pLimitInfo->numOfOutputRows = 0;
    pLimitInfo->remainOffset = pLimitInfo->limit.offset;

    // existing rows that belongs to previous group.
    if (pBlock->info.rows > 0) {
      return PROJECT_RETRIEVE_DONE;
    }
  }

  // here we reach the start position, according to the limit/offset requirements.

  // set current group id
  pLimitInfo->currentGroupId = pBlock->info.groupId;

  if (pLimitInfo->remainOffset >= pBlock->info.rows) {
    pLimitInfo->remainOffset -= pBlock->info.rows;
    blockDataCleanup(pBlock);
    return PROJECT_RETRIEVE_CONTINUE;
  } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) {
    blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset);
    pLimitInfo->remainOffset = 0;
  }

  // check for the limitation in each group
  if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) {
    int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
    blockDataKeepFirstNRows(pBlock, keepRows);
    if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
      pOperator->status = OP_EXEC_DONE;
    }

    return PROJECT_RETRIEVE_DONE;
  }

  // todo optimize performance
  // If there are slimit/soffset value exists, multi-round result can not be packed into one group, since the
  // they may not belong to the same group the limit/offset value is not valid in this case.
  if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || pLimitInfo->slimit.offset != -1 ||
      pLimitInfo->slimit.limit != -1) {
    return PROJECT_RETRIEVE_DONE;
  } else {  // not full enough, continue to accumulate the output data in the buffer.
    return PROJECT_RETRIEVE_CONTINUE;
  }
}
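The handleLimitOffset helper added above decides, block by block, whether the operator should keep accumulating output, trim leading rows, or stop once the limit is reached. As a rough illustration of the same bookkeeping outside the executor, here is a self-contained sketch in which the RowBlock and LimitState types and the RETRIEVE_* codes are hypothetical stand-ins for SSDataBlock, SLimitInfo and the PROJECT_RETRIEVE_* constants; it is not the TDengine implementation, only the offset-then-limit trimming in isolation:

```c
#include <stdio.h>

// Simplified stand-ins for the executor types; names are illustrative only.
typedef struct {
  int rows;            // rows currently held in the block
} RowBlock;

typedef struct {
  long long offset;    // rows still to skip before emitting anything
  long long limit;     // max rows to emit, -1 means unlimited
  long long emitted;   // rows emitted so far
} LimitState;

enum { RETRIEVE_CONTINUE = 0, RETRIEVE_DONE = 1 };

// Skip whole blocks while the offset is larger than the block, trim the first
// rows once the offset falls inside a block, and cap the block at the limit.
static int applyLimitOffset(LimitState* st, RowBlock* blk) {
  if (st->offset >= blk->rows) {        // the whole block is still inside the offset
    st->offset -= blk->rows;
    blk->rows = 0;
    return RETRIEVE_CONTINUE;
  } else if (st->offset > 0) {          // offset ends inside this block: drop the first rows
    blk->rows -= (int)st->offset;
    st->offset = 0;
  }

  if (st->limit >= 0 && st->emitted + blk->rows >= st->limit) {
    blk->rows = (int)(st->limit - st->emitted);  // keep only the rows that fit the limit
    st->emitted = st->limit;
    return RETRIEVE_DONE;
  }

  st->emitted += blk->rows;
  return RETRIEVE_CONTINUE;
}

int main(void) {
  LimitState st = {.offset = 5, .limit = 7, .emitted = 0};
  RowBlock blocks[] = {{4}, {6}, {10}};
  for (int i = 0; i < 3; ++i) {
    int rc = applyLimitOffset(&st, &blocks[i]);
    printf("block %d keeps %d rows, rc=%d\n", i, blocks[i].rows, rc);
    if (rc == RETRIEVE_DONE) break;
  }
  return 0;
}
```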
@ -480,51 +480,6 @@ _error:
|
|||
return code;
|
||||
}
|
||||
|
||||
#ifdef TEST_IMPL
|
||||
// wait moment
|
||||
int waitMoment(SQInfo* pQInfo) {
|
||||
if (pQInfo->sql) {
|
||||
int ms = 0;
|
||||
char* pcnt = strstr(pQInfo->sql, " count(*)");
|
||||
if (pcnt) return 0;
|
||||
|
||||
char* pos = strstr(pQInfo->sql, " t_");
|
||||
if (pos) {
|
||||
pos += 3;
|
||||
ms = atoi(pos);
|
||||
while (*pos >= '0' && *pos <= '9') {
|
||||
pos++;
|
||||
}
|
||||
char unit_char = *pos;
|
||||
if (unit_char == 'h') {
|
||||
ms *= 3600 * 1000;
|
||||
} else if (unit_char == 'm') {
|
||||
ms *= 60 * 1000;
|
||||
} else if (unit_char == 's') {
|
||||
ms *= 1000;
|
||||
}
|
||||
}
|
||||
if (ms == 0) return 0;
|
||||
printf("test wait sleep %dms. sql=%s ...\n", ms, pQInfo->sql);
|
||||
|
||||
if (ms < 1000) {
|
||||
taosMsleep(ms);
|
||||
} else {
|
||||
int used_ms = 0;
|
||||
while (used_ms < ms) {
|
||||
taosMsleep(1000);
|
||||
used_ms += 1000;
|
||||
if (isTaskKilled(pQInfo)) {
|
||||
printf("test check query is canceled, sleep break.%s\n", pQInfo->sql);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void freeBlock(void* param) {
|
||||
SSDataBlock* pBlock = *(SSDataBlock**)param;
|
||||
blockDataDestroy(pBlock);
|
||||
|
|
|
@@ -24,7 +24,6 @@
#include "tdatablock.h"
#include "tglobal.h"
#include "tmsg.h"
#include "tsort.h"
#include "ttime.h"

#include "executorimpl.h"

@@ -297,8 +296,6 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow
  colDataAppendInt64(pColData, 4, &pQueryWindow->ekey);
}

void cleanupExecTimeWindowInfo(SColumnInfoData* pColData) { colDataDestroy(pColData); }

typedef struct {
  bool    hasAgg;
  int32_t numOfRows;
@ -1347,42 +1344,6 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
|
|||
}
|
||||
}
|
||||
|
||||
// static void updateOffsetVal(STaskRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) {
|
||||
// STaskAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
// STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current;
|
||||
//
|
||||
// int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
|
||||
//
|
||||
// if (pQueryAttr->limit.offset == pBlockInfo->rows) { // current block will ignore completed
|
||||
// pTableQueryInfo->lastKey = QUERY_IS_ASC_QUERY(pQueryAttr) ? pBlockInfo->window.ekey + step :
|
||||
// pBlockInfo->window.skey + step; pQueryAttr->limit.offset = 0; return;
|
||||
// }
|
||||
//
|
||||
// if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
|
||||
// pQueryAttr->pos = (int32_t)pQueryAttr->limit.offset;
|
||||
// } else {
|
||||
// pQueryAttr->pos = pBlockInfo->rows - (int32_t)pQueryAttr->limit.offset - 1;
|
||||
// }
|
||||
//
|
||||
// assert(pQueryAttr->pos >= 0 && pQueryAttr->pos <= pBlockInfo->rows - 1);
|
||||
//
|
||||
// SArray * pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pTsdbReadHandle, NULL);
|
||||
// SColumnInfoData *pColInfoData = taosArrayGet(pDataBlock, 0);
|
||||
//
|
||||
// // update the pQueryAttr->limit.offset value, and pQueryAttr->pos value
|
||||
// TSKEY *keys = (TSKEY *) pColInfoData->pData;
|
||||
//
|
||||
// // update the offset value
|
||||
// pTableQueryInfo->lastKey = keys[pQueryAttr->pos];
|
||||
// pQueryAttr->limit.offset = 0;
|
||||
//
|
||||
// int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock);
|
||||
//
|
||||
// //qDebug("QInfo:0x%"PRIx64" check data block, brange:%" PRId64 "-%" PRId64 ", numBlocksOfStep:%d, numOfRes:%d,
|
||||
// lastKey:%"PRId64, GET_TASKID(pRuntimeEnv),
|
||||
// pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, pQuery->current->lastKey);
|
||||
// }
|
||||
|
||||
// void skipBlocks(STaskRuntimeEnv *pRuntimeEnv) {
|
||||
// STaskAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
|
||||
//
|
||||
|
@ -1723,159 +1684,6 @@ static SSDataBlock* getAggregateResult(SOperatorInfo* pOperator) {
|
|||
return (rows == 0) ? NULL : pInfo->pRes;
|
||||
}
|
||||
|
||||
int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* length) {
|
||||
if (result == NULL || length == NULL) {
|
||||
return TSDB_CODE_TSC_INVALID_INPUT;
|
||||
}
|
||||
SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
|
||||
SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
|
||||
int32_t size = tSimpleHashGetSize(pSup->pResultRowHashTable);
|
||||
size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length
|
||||
int32_t totalSize =
|
||||
sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
|
||||
|
||||
// no result
|
||||
if (getTotalBufSize(pSup->pResultBuf) == 0) {
|
||||
*result = NULL;
|
||||
*length = 0;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
*result = (char*)taosMemoryCalloc(1, totalSize);
|
||||
if (*result == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
int32_t offset = sizeof(int32_t);
|
||||
*(int32_t*)(*result + offset) = size;
|
||||
offset += sizeof(int32_t);
|
||||
|
||||
// prepare memory
|
||||
SResultRowPosition* pos = &pInfo->resultRowInfo.cur;
|
||||
void* pPage = getBufPage(pSup->pResultBuf, pos->pageId);
|
||||
SResultRow* pRow = (SResultRow*)((char*)pPage + pos->offset);
|
||||
setBufPageDirty(pPage, true);
|
||||
releaseBufPage(pSup->pResultBuf, pPage);
|
||||
|
||||
int32_t iter = 0;
|
||||
void* pIter = NULL;
|
||||
while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) {
|
||||
void* key = tSimpleHashGetKey(pIter, &keyLen);
|
||||
SResultRowPosition* p1 = (SResultRowPosition*)pIter;
|
||||
|
||||
pPage = (SFilePage*)getBufPage(pSup->pResultBuf, p1->pageId);
|
||||
pRow = (SResultRow*)((char*)pPage + p1->offset);
|
||||
setBufPageDirty(pPage, true);
|
||||
releaseBufPage(pSup->pResultBuf, pPage);
|
||||
|
||||
// recalculate the result size
|
||||
int32_t realTotalSize = offset + sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize;
|
||||
if (realTotalSize > totalSize) {
|
||||
char* tmp = (char*)taosMemoryRealloc(*result, realTotalSize);
|
||||
if (tmp == NULL) {
|
||||
taosMemoryFree(*result);
|
||||
*result = NULL;
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
} else {
|
||||
*result = tmp;
|
||||
}
|
||||
}
|
||||
// save key
|
||||
*(int32_t*)(*result + offset) = keyLen;
|
||||
offset += sizeof(int32_t);
|
||||
memcpy(*result + offset, key, keyLen);
|
||||
offset += keyLen;
|
||||
|
||||
// save value
|
||||
*(int32_t*)(*result + offset) = pSup->resultRowSize;
|
||||
offset += sizeof(int32_t);
|
||||
memcpy(*result + offset, pRow, pSup->resultRowSize);
|
||||
offset += pSup->resultRowSize;
|
||||
}
|
||||
|
||||
*(int32_t*)(*result) = offset;
|
||||
*length = offset;
|
||||
|
||||
return TDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf) {
|
||||
if (pLimitInfo->remainGroupOffset > 0) {
|
||||
if (pLimitInfo->currentGroupId == 0) { // it is the first group
|
||||
pLimitInfo->currentGroupId = pBlock->info.groupId;
|
||||
blockDataCleanup(pBlock);
|
||||
return PROJECT_RETRIEVE_CONTINUE;
|
||||
} else if (pLimitInfo->currentGroupId != pBlock->info.groupId) {
|
||||
// now it is the data from a new group
|
||||
pLimitInfo->remainGroupOffset -= 1;
|
||||
|
||||
// ignore data block in current group
|
||||
if (pLimitInfo->remainGroupOffset > 0) {
|
||||
blockDataCleanup(pBlock);
|
||||
return PROJECT_RETRIEVE_CONTINUE;
|
||||
}
|
||||
}
|
||||
|
||||
// set current group id of the project operator
|
||||
pLimitInfo->currentGroupId = pBlock->info.groupId;
|
||||
}
|
||||
|
||||
// here check for a new group data, we need to handle the data of the previous group.
|
||||
if (pLimitInfo->currentGroupId != 0 && pLimitInfo->currentGroupId != pBlock->info.groupId) {
|
||||
pLimitInfo->numOfOutputGroups += 1;
|
||||
if ((pLimitInfo->slimit.limit > 0) && (pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
blockDataCleanup(pBlock);
|
||||
|
||||
return PROJECT_RETRIEVE_DONE;
|
||||
}
|
||||
|
||||
// reset the value for a new group data
|
||||
pLimitInfo->numOfOutputRows = 0;
|
||||
pLimitInfo->remainOffset = pLimitInfo->limit.offset;
|
||||
|
||||
// existing rows that belongs to previous group.
|
||||
if (pBlock->info.rows > 0) {
|
||||
return PROJECT_RETRIEVE_DONE;
|
||||
}
|
||||
}
|
||||
|
||||
// here we reach the start position, according to the limit/offset requirements.
|
||||
|
||||
// set current group id
|
||||
pLimitInfo->currentGroupId = pBlock->info.groupId;
|
||||
|
||||
if (pLimitInfo->remainOffset >= pBlock->info.rows) {
|
||||
pLimitInfo->remainOffset -= pBlock->info.rows;
|
||||
blockDataCleanup(pBlock);
|
||||
return PROJECT_RETRIEVE_CONTINUE;
|
||||
} else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) {
|
||||
blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset);
|
||||
pLimitInfo->remainOffset = 0;
|
||||
}
|
||||
|
||||
// check for the limitation in each group
|
||||
if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) {
|
||||
int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
|
||||
blockDataKeepFirstNRows(pBlock, keepRows);
|
||||
if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
}
|
||||
|
||||
return PROJECT_RETRIEVE_DONE;
|
||||
}
|
||||
|
||||
// todo optimize performance
|
||||
// If there are slimit/soffset value exists, multi-round result can not be packed into one group, since the
|
||||
// they may not belong to the same group the limit/offset value is not valid in this case.
|
||||
if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || pLimitInfo->slimit.offset != -1 ||
|
||||
pLimitInfo->slimit.limit != -1) {
|
||||
return PROJECT_RETRIEVE_DONE;
|
||||
} else { // not full enough, continue to accumulate the output data in the buffer.
|
||||
return PROJECT_RETRIEVE_CONTINUE;
|
||||
}
|
||||
}
|
||||
|
||||
static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag);
|
||||
static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo,
|
||||
SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) {
|
||||
|
@@ -2056,6 +1864,8 @@ void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs) {
    for (int32_t j = 0; j < pExprInfo->base.numOfParams; ++j) {
      if (pExprInfo->base.pParam[j].type == FUNC_PARAM_TYPE_COLUMN) {
        taosMemoryFreeClear(pExprInfo->base.pParam[j].pCol);
      } else if (pExprInfo->base.pParam[j].type == FUNC_PARAM_TYPE_VALUE) {
        taosVariantDestroy(&pExprInfo->base.pParam[j].param);
      }
    }

@ -27,6 +27,36 @@
|
|||
#include "thash.h"
|
||||
#include "ttypes.h"
|
||||
|
||||
typedef struct SGroupbyOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
SAggSupporter aggSup;
|
||||
SArray* pGroupCols; // group by columns, SArray<SColumn>
|
||||
SArray* pGroupColVals; // current group column values, SArray<SGroupKeys>
|
||||
bool isInit; // denote if current val is initialized or not
|
||||
char* keyBuf; // group by keys for hash
|
||||
int32_t groupKeyLen; // total group by column width
|
||||
SGroupResInfo groupResInfo;
|
||||
SExprSupp scalarSup;
|
||||
} SGroupbyOperatorInfo;
|
||||
|
||||
// The sort in partition may be needed later.
|
||||
typedef struct SPartitionOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
SArray* pGroupCols;
|
||||
SArray* pGroupColVals; // current group column values, SArray<SGroupKeys>
|
||||
char* keyBuf; // group by keys for hash
|
||||
int32_t groupKeyLen; // total group by column width
|
||||
SHashObj* pGroupSet; // quick locate the window object for each result
|
||||
|
||||
SDiskbasedBuf* pBuf; // query result buffer based on blocked-wised disk file
|
||||
int32_t rowCapacity; // maximum number of rows for each buffer page
|
||||
int32_t* columnOffset; // start position for each column data
|
||||
SArray* sortedGroupArray; // SDataGroupInfo sorted by group id
|
||||
int32_t groupIndex; // group index
|
||||
int32_t pageIndex; // page index of current group
|
||||
SExprSupp scalarSup;
|
||||
} SPartitionOperatorInfo;
|
||||
|
||||
static void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
|
||||
static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
|
||||
static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
|
||||
|
|
|
@ -24,6 +24,21 @@
|
|||
#include "tmsg.h"
|
||||
#include "ttypes.h"
|
||||
|
||||
typedef struct SJoinOperatorInfo {
|
||||
SSDataBlock* pRes;
|
||||
int32_t joinType;
|
||||
int32_t inputOrder;
|
||||
|
||||
SSDataBlock* pLeft;
|
||||
int32_t leftPos;
|
||||
SColumnInfo leftCol;
|
||||
|
||||
SSDataBlock* pRight;
|
||||
int32_t rightPos;
|
||||
SColumnInfo rightCol;
|
||||
SNode* pCondAfterMerge;
|
||||
} SJoinOperatorInfo;
|
||||
|
||||
static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode);
|
||||
static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator);
|
||||
static void destroyMergeJoinOperator(void* param);
|
||||
|
|
|
@ -17,6 +17,24 @@
|
|||
#include "executorimpl.h"
|
||||
#include "functionMgt.h"
|
||||
|
||||
typedef struct SProjectOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
SAggSupporter aggSup;
|
||||
SArray* pPseudoColInfo;
|
||||
SLimitInfo limitInfo;
|
||||
bool mergeDataBlocks;
|
||||
SSDataBlock* pFinalRes;
|
||||
} SProjectOperatorInfo;
|
||||
|
||||
typedef struct SIndefOperatorInfo {
|
||||
SOptrBasicInfo binfo;
|
||||
SAggSupporter aggSup;
|
||||
SArray* pPseudoColInfo;
|
||||
SExprSupp scalarSup;
|
||||
uint64_t groupId;
|
||||
SSDataBlock* pNextGroupRes;
|
||||
} SIndefOperatorInfo;
|
||||
|
||||
static SSDataBlock* doGenerateSourceData(SOperatorInfo* pOperator);
|
||||
static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator);
|
||||
static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator);
|
||||
|
|
File diff suppressed because it is too large
@@ -17,6 +17,18 @@
#include "executorimpl.h"
#include "tdatablock.h"

typedef struct SSortOperatorInfo {
  SOptrBasicInfo binfo;
  uint32_t       sortBufSize;  // max buffer size for in-memory sort
  SArray*        pSortInfo;
  SSortHandle*   pSortHandle;
  SColMatchInfo  matchInfo;
  int32_t        bufPageSize;
  int64_t        startTs;      // sort start time
  uint64_t       sortElapsed;  // sort elapsed time, time to flush to disk not included.
  SLimitInfo     limitInfo;
} SSortOperatorInfo;

static SSDataBlock* doSort(SOperatorInfo* pOperator);
static int32_t      doOpenSortOperator(SOperatorInfo* pOperator);
static int32_t      getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
@ -176,10 +188,10 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) {
|
|||
|
||||
SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
|
||||
ps->param = pOperator->pDownstream[0];
|
||||
ps->onlyRef = true;
|
||||
tsortAddSource(pInfo->pSortHandle, ps);
|
||||
|
||||
int32_t code = tsortOpen(pInfo->pSortHandle);
|
||||
taosMemoryFreeClear(ps);
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pTaskInfo->env, terrno);
|
||||
|
@ -377,10 +389,10 @@ int32_t beginSortGroup(SOperatorInfo* pOperator) {
|
|||
param->childOpInfo = pOperator->pDownstream[0];
|
||||
param->grpSortOpInfo = pInfo;
|
||||
ps->param = param;
|
||||
ps->onlyRef = false;
|
||||
tsortAddSource(pInfo->pCurrSortHandle, ps);
|
||||
|
||||
int32_t code = tsortOpen(pInfo->pCurrSortHandle);
|
||||
taosMemoryFreeClear(ps);
|
||||
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
T_LONG_JMP(pTaskInfo->env, terrno);
|
||||
|
@ -471,6 +483,9 @@ void destroyGroupSortOperatorInfo(void* param) {
|
|||
taosArrayDestroy(pInfo->pSortInfo);
|
||||
taosArrayDestroy(pInfo->matchInfo.pList);
|
||||
|
||||
tsortDestroySortHandle(pInfo->pCurrSortHandle);
|
||||
pInfo->pCurrSortHandle = NULL;
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
||||
|
@ -563,6 +578,7 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) {
|
|||
for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) {
|
||||
SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
|
||||
ps->param = pOperator->pDownstream[i];
|
||||
ps->onlyRef = true;
|
||||
tsortAddSource(pInfo->pSortHandle, ps);
|
||||
}
|
||||
|
||||
|
|
File diff suppressed because it is too large
@ -709,6 +709,7 @@ void* destroyStreamFillSupporter(SStreamFillSupporter* pFillSup) {
|
|||
pFillSup->pResMap = NULL;
|
||||
releaseOutputBuf(NULL, NULL, (SResultRow*)pFillSup->cur.pRowVal);
|
||||
pFillSup->cur.pRowVal = NULL;
|
||||
cleanupExprSupp(&pFillSup->notFillExprSup);
|
||||
|
||||
taosMemoryFree(pFillSup);
|
||||
return NULL;
|
||||
|
@ -1417,25 +1418,13 @@ static void doApplyStreamScalarCalculation(SOperatorInfo* pOperator, SSDataBlock
|
|||
blockDataEnsureCapacity(pDstBlock, pSrcBlock->info.rows);
|
||||
setInputDataBlock(pSup, pSrcBlock, TSDB_ORDER_ASC, MAIN_SCAN, false);
|
||||
projectApplyFunctions(pSup->pExprInfo, pDstBlock, pSrcBlock, pSup->pCtx, pSup->numOfExprs, NULL);
|
||||
|
||||
pDstBlock->info.rows = 0;
|
||||
pSup = &pInfo->pFillSup->notFillExprSup;
|
||||
setInputDataBlock(pSup, pSrcBlock, TSDB_ORDER_ASC, MAIN_SCAN, false);
|
||||
projectApplyFunctions(pSup->pExprInfo, pDstBlock, pSrcBlock, pSup->pCtx, pSup->numOfExprs, NULL);
|
||||
pDstBlock->info.groupId = pSrcBlock->info.groupId;
|
||||
|
||||
SColumnInfoData* pDst = taosArrayGet(pDstBlock->pDataBlock, pInfo->primaryTsCol);
|
||||
SColumnInfoData* pSrc = taosArrayGet(pSrcBlock->pDataBlock, pInfo->primarySrcSlotId);
|
||||
colDataAssign(pDst, pSrc, pDstBlock->info.rows, &pDstBlock->info);
|
||||
|
||||
int32_t numOfNotFill = pInfo->pFillSup->numOfAllCols - pInfo->pFillSup->numOfFillCols;
|
||||
for (int32_t i = 0; i < numOfNotFill; ++i) {
|
||||
SFillColInfo* pCol = &pInfo->pFillSup->pAllColInfo[i + pInfo->pFillSup->numOfFillCols];
|
||||
ASSERT(pCol->notFillCol);
|
||||
|
||||
SExprInfo* pExpr = pCol->pExpr;
|
||||
int32_t srcSlotId = pExpr->base.pParam[0].pCol->slotId;
|
||||
int32_t dstSlotId = pExpr->base.resSchema.slotId;
|
||||
|
||||
SColumnInfoData* pDst1 = taosArrayGet(pDstBlock->pDataBlock, dstSlotId);
|
||||
SColumnInfoData* pSrc1 = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
|
||||
colDataAssign(pDst1, pSrc1, pDstBlock->info.rows, &pDstBlock->info);
|
||||
}
|
||||
blockDataUpdateTsWindow(pDstBlock, pInfo->primaryTsCol);
|
||||
}
|
||||
|
||||
|
@ -1577,6 +1566,14 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod
|
|||
destroyStreamFillSupporter(pFillSup);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
SExprInfo* noFillExpr = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &numOfNotFillCols);
|
||||
code = initExprSupp(&pFillSup->notFillExprSup, noFillExpr, numOfNotFillCols);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
destroyStreamFillSupporter(pFillSup);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
|
||||
pFillSup->pResMap = tSimpleHashInit(16, hashFn);
|
||||
pFillSup->hasDelete = false;
|
||||
|
|
|
@ -1618,6 +1618,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
|
|||
nodesDestroyNode((SNode*)pInfo->pPhyNode);
|
||||
colDataDestroy(&pInfo->twAggSup.timeWindowData);
|
||||
cleanupGroupResInfo(&pInfo->groupResInfo);
|
||||
cleanupExprSupp(&pInfo->scalarSupp);
|
||||
|
||||
taosMemoryFreeClear(param);
|
||||
}
|
||||
|
@ -2703,6 +2704,7 @@ static void rebuildIntervalWindow(SOperatorInfo* pOperator, SArray* pWinArray, S
|
|||
pChildSup->rowEntryInfoOffset, &pChInfo->aggSup);
|
||||
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &parentWin, true);
|
||||
compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
|
||||
releaseOutputBuf(pChInfo->pState, pWinRes, pChResult);
|
||||
}
|
||||
if (num > 0 && pUpdatedMap) {
|
||||
saveWinResultInfo(pCurResult->win.skey, pWinRes->groupId, pUpdatedMap);
|
||||
|
@ -5362,15 +5364,6 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
|
|||
pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
|
||||
initResultSizeInfo(&pOperator->resultInfo, 4096);
|
||||
|
||||
if (pIntervalPhyNode->window.pExprs != NULL) {
|
||||
int32_t numOfScalar = 0;
|
||||
SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar);
|
||||
code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
goto _error;
|
||||
}
|
||||
}
|
||||
|
||||
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
|
||||
code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
|
|
|
@@ -110,6 +110,22 @@ static int32_t sortComparCleanup(SMsortComparParam* cmpParam) {
  return TSDB_CODE_SUCCESS;
}

void tsortClearOrderdSource(SArray *pOrderedSource) {
  for (size_t i = 0; i < taosArrayGetSize(pOrderedSource); i++) {
    SSortSource** pSource = taosArrayGet(pOrderedSource, i);
    if (NULL == *pSource) {
      continue;
    }

    if ((*pSource)->param && !(*pSource)->onlyRef) {
      taosMemoryFree((*pSource)->param);
    }
    taosMemoryFreeClear(*pSource);
  }

  taosArrayClear(pOrderedSource);
}

void tsortDestroySortHandle(SSortHandle* pSortHandle) {
  if (pSortHandle == NULL) {
    return;

@@ -123,10 +139,8 @@ void tsortDestroySortHandle(SSortHandle* pSortHandle) {
  destroyDiskbasedBuf(pSortHandle->pBuf);
  taosMemoryFreeClear(pSortHandle->idStr);
  blockDataDestroy(pSortHandle->pDataBlock);
  for (size_t i = 0; i < taosArrayGetSize(pSortHandle->pOrderedSource); i++) {
    SSortSource** pSource = taosArrayGet(pSortHandle->pOrderedSource, i);
    taosMemoryFreeClear(*pSource);
  }

  tsortClearOrderdSource(pSortHandle->pOrderedSource);
  taosArrayDestroy(pSortHandle->pOrderedSource);
  taosMemoryFreeClear(pSortHandle);
}
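tsortClearOrderdSource frees a source's param only when the source owns it, which is what the onlyRef flag introduced in the sort operator hunks records (true when the param is a borrowed downstream operator, false when it was allocated for the source). A minimal standalone sketch of that ownership convention, using an illustrative Source type rather than the real SSortSource API, might look like this:

```c
#include <stdio.h>
#include <stdlib.h>

// Illustrative stand-in for SSortSource: `param` is either borrowed from the
// caller (onlyRef == 1) or owned by the source and must be freed with it.
typedef struct {
  void *param;
  int   onlyRef;
} Source;

static void clearSources(Source **sources, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    if (sources[i] == NULL) continue;
    if (sources[i]->param != NULL && !sources[i]->onlyRef) {
      free(sources[i]->param);   // owned param: release it together with the source
    }
    free(sources[i]);            // borrowed param is left untouched
    sources[i] = NULL;
  }
}

int main(void) {
  static int downstream = 42;    // lives elsewhere; only referenced

  Source *a = calloc(1, sizeof(Source));
  a->param = &downstream;
  a->onlyRef = 1;

  Source *b = calloc(1, sizeof(Source));
  b->param = malloc(64);         // allocated for this source only
  b->onlyRef = 0;

  Source *list[2] = {a, b};
  clearSources(list, 2);
  printf("downstream still valid: %d\n", downstream);
  return 0;
}
```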
@@ -561,7 +575,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
    }
  }

  taosArrayClear(pHandle->pOrderedSource);
  tsortClearOrderdSource(pHandle->pOrderedSource);
  taosArrayAddAll(pHandle->pOrderedSource, pResList);
  taosArrayDestroy(pResList);
@ -598,8 +612,11 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
size_t sortBufSize = pHandle->numOfPages * pHandle->pageSize;
|
||||
|
||||
if (pHandle->type == SORT_SINGLESOURCE_SORT) {
|
||||
SSortSource* source = taosArrayGetP(pHandle->pOrderedSource, 0);
|
||||
taosArrayClear(pHandle->pOrderedSource);
|
||||
SSortSource** pSource = taosArrayGet(pHandle->pOrderedSource, 0);
|
||||
SSortSource* source = *pSource;
|
||||
*pSource = NULL;
|
||||
|
||||
tsortClearOrderdSource(pHandle->pOrderedSource);
|
||||
|
||||
while (1) {
|
||||
SSDataBlock* pBlock = pHandle->fetchfp(source->param);
|
||||
|
@ -623,6 +640,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
|
||||
int32_t code = blockDataMerge(pHandle->pDataBlock, pBlock);
|
||||
if (code != 0) {
|
||||
if (source->param && !source->onlyRef) {
|
||||
taosMemoryFree(source->param);
|
||||
}
|
||||
taosMemoryFree(source);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -632,6 +653,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
int64_t p = taosGetTimestampUs();
|
||||
code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
|
||||
if (code != 0) {
|
||||
if (source->param && !source->onlyRef) {
|
||||
taosMemoryFree(source->param);
|
||||
}
|
||||
taosMemoryFree(source);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -642,6 +667,11 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
}
|
||||
}
|
||||
|
||||
if (source->param && !source->onlyRef) {
|
||||
taosMemoryFree(source->param);
|
||||
}
|
||||
taosMemoryFree(source);
|
||||
|
||||
if (pHandle->pDataBlock != NULL && pHandle->pDataBlock->info.rows > 0) {
|
||||
size_t size = blockDataGetSize(pHandle->pDataBlock);
|
||||
|
||||
|
|
|
@@ -5672,12 +5672,12 @@ int32_t modeFunction(SqlFunctionCtx* pCtx) {
  int32_t numOfElems = 0;
  int32_t startOffset = pCtx->offset;
  for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
    char* data = colDataGetData(pInputCol, i);
    if (colDataIsNull_s(pInputCol, i)) {
      continue;
    }

    numOfElems++;

    char* data = colDataGetData(pInputCol, i);
    doModeAdd(pInfo, i, pCtx, data);

    if (sizeof(SModeInfo) + pInfo->numOfPoints * (sizeof(SModeItem) + pInfo->colBytes) >= MODE_MAX_RESULT_SIZE) {

@@ -6568,7 +6568,9 @@ int32_t cachedLastRowFunction(SqlFunctionCtx* pCtx) {
  for (int32_t i = pInput->numOfRows + pInput->startRowIndex - 1; i >= pInput->startRowIndex; --i) {
    numOfElems++;

    char* data = colDataGetData(pInputCol, i);
    bool isNull = colDataIsNull(pInputCol, pInput->numOfRows, i, NULL);
    char* data = isNull ? NULL : colDataGetData(pInputCol, i);

    TSKEY cts = getRowPTs(pInput->pPTS, i);
    if (pResInfo->numOfRes == 0 || pInfo->ts < cts) {
      doSaveLastrow(pCtx, data, i, cts, pInfo);
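Both fixes above reorder the per-row access so that column data is only fetched after the null check (mode) or replaced with NULL for null rows (cached last_row). The general check-then-read pattern, shown here on a toy column type rather than the real SColumnInfoData accessors, is simply:

```c
#include <stdio.h>
#include <stdbool.h>

// Illustrative column: a value array plus a null flag per row.
typedef struct {
  const int  *values;
  const bool *isNull;
  int         rows;
} Column;

// Read each cell only after the null check, mirroring the fixed loop order:
// rows that carry no value are skipped before their data is touched.
static int sumNonNull(const Column *col) {
  int sum = 0;
  for (int i = 0; i < col->rows; ++i) {
    if (col->isNull[i]) {
      continue;          // skip null rows before reading the value
    }
    sum += col->values[i];
  }
  return sum;
}

int main(void) {
  int  vals[]  = {1, 2, 3, 4};
  bool nulls[] = {false, true, false, false};
  Column col = {vals, nulls, 4};
  printf("sum of non-null values: %d\n", sumNonNull(&col));  // prints 8
  return 0;
}
```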
|
@ -172,8 +172,8 @@ static int32_t parseDuplicateUsingClause(SInsertParseContext* pCxt, SVnodeModifO
|
|||
}
|
||||
|
||||
// pStmt->pSql -> field1_name, ...)
|
||||
static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, SParsedDataColInfo* pColList,
|
||||
SSchema* pSchema) {
|
||||
static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, bool isTags,
|
||||
SParsedDataColInfo* pColList, SSchema* pSchema) {
|
||||
col_id_t nCols = pColList->numOfCols;
|
||||
|
||||
pColList->numOfBound = 0;
|
||||
|
@ -227,6 +227,10 @@ static int32_t parseBoundColumns(SInsertParseContext* pCxt, const char** pSql, S
|
|||
}
|
||||
}
|
||||
|
||||
if (!isTags && pColList->cols[0].valStat == VAL_STAT_NONE) {
|
||||
return buildInvalidOperationMsg(&pCxt->msg, "primary timestamp column can not be null");
|
||||
}
|
||||
|
||||
pColList->orderStatus = isOrdered ? ORDER_STATUS_ORDERED : ORDER_STATUS_DISORDERED;
|
||||
|
||||
if (!isOrdered) {
|
||||
|
@ -525,7 +529,7 @@ static int32_t parseBoundTagsClause(SInsertParseContext* pCxt, SVnodeModifOpStmt
|
|||
}
|
||||
|
||||
pStmt->pSql += index;
|
||||
return parseBoundColumns(pCxt, &pStmt->pSql, &pCxt->tags, pTagsSchema);
|
||||
return parseBoundColumns(pCxt, &pStmt->pSql, true, &pCxt->tags, pTagsSchema);
|
||||
}
|
||||
|
||||
static int32_t parseTagValue(SInsertParseContext* pCxt, SVnodeModifOpStmt* pStmt, SSchema* pTagSchema, SToken* pToken,
|
||||
|
@ -792,6 +796,8 @@ static int32_t getTableMeta(SInsertParseContext* pCxt, SName* pTbName, bool isSt
|
|||
*pMissCache = true;
|
||||
} else if (isStb && TSDB_SUPER_TABLE != (*pTableMeta)->tableType) {
|
||||
code = buildInvalidOperationMsg(&pCxt->msg, "create table only from super table is allowed");
|
||||
} else if (!isStb && TSDB_SUPER_TABLE == (*pTableMeta)->tableType) {
|
||||
code = buildInvalidOperationMsg(&pCxt->msg, "insert data into super table is not supported");
|
||||
}
|
||||
}
|
||||
return code;
|
||||
|
@ -935,11 +941,12 @@ static int32_t parseBoundColumnsClause(SInsertParseContext* pCxt, SVnodeModifOpS
|
|||
return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is expected", token.z);
|
||||
}
|
||||
// pStmt->pSql -> field1_name, ...)
|
||||
return parseBoundColumns(pCxt, &pStmt->pSql, &pDataBuf->boundColumnInfo, getTableColumnSchema(pStmt->pTableMeta));
|
||||
return parseBoundColumns(pCxt, &pStmt->pSql, false, &pDataBuf->boundColumnInfo,
|
||||
getTableColumnSchema(pStmt->pTableMeta));
|
||||
}
|
||||
|
||||
if (NULL != pStmt->pBoundCols) {
|
||||
return parseBoundColumns(pCxt, &pStmt->pBoundCols, &pDataBuf->boundColumnInfo,
|
||||
return parseBoundColumns(pCxt, &pStmt->pBoundCols, false, &pDataBuf->boundColumnInfo,
|
||||
getTableColumnSchema(pStmt->pTableMeta));
|
||||
}
|
||||
|
||||
|
@ -1571,16 +1578,16 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt, SVnodeModifOpStmt* pSt
|
|||
|
||||
static void destroySubTableHashElem(void* p) { taosMemoryFree(*(STableMeta**)p); }
|
||||
|
||||
static int32_t createVnodeModifOpStmt(SParseContext* pCxt, bool reentry, SNode** pOutput) {
|
||||
static int32_t createVnodeModifOpStmt(SInsertParseContext* pCxt, bool reentry, SNode** pOutput) {
|
||||
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
|
||||
if (NULL == pStmt) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
if (pCxt->pStmtCb) {
|
||||
if (pCxt->pComCxt->pStmtCb) {
|
||||
TSDB_QUERY_SET_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT);
|
||||
}
|
||||
pStmt->pSql = pCxt->pSql;
|
||||
pStmt->pSql = pCxt->pComCxt->pSql;
|
||||
pStmt->freeHashFunc = insDestroyBlockHashmap;
|
||||
pStmt->freeArrayFunc = insDestroyBlockArrayList;
|
||||
|
||||
|
@ -1604,7 +1611,7 @@ static int32_t createVnodeModifOpStmt(SParseContext* pCxt, bool reentry, SNode**
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static int32_t createInsertQuery(SParseContext* pCxt, SQuery** pOutput) {
|
||||
static int32_t createInsertQuery(SInsertParseContext* pCxt, SQuery** pOutput) {
|
||||
SQuery* pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
|
||||
if (NULL == pQuery) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -1667,11 +1674,15 @@ static int32_t getTableVgroupFromMetaData(const SArray* pTables, SVnodeModifOpSt
|
|||
sizeof(SVgroupInfo));
|
||||
}
|
||||
|
||||
static int32_t getTableSchemaFromMetaData(const SMetaData* pMetaData, SVnodeModifOpStmt* pStmt, bool isStb) {
|
||||
static int32_t getTableSchemaFromMetaData(SInsertParseContext* pCxt, const SMetaData* pMetaData,
|
||||
SVnodeModifOpStmt* pStmt, bool isStb) {
|
||||
int32_t code = checkAuthFromMetaData(pMetaData->pUser);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = getTableMetaFromMetaData(pMetaData->pTableMeta, &pStmt->pTableMeta);
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code && !isStb && TSDB_SUPER_TABLE == pStmt->pTableMeta->tableType) {
|
||||
code = buildInvalidOperationMsg(&pCxt->msg, "insert data into super table is not supported");
|
||||
}
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = getTableVgroupFromMetaData(pMetaData->pTableHash, pStmt, isStb);
|
||||
}
|
||||
|
@ -1696,24 +1707,25 @@ static void clearCatalogReq(SCatalogReq* pCatalogReq) {
|
|||
pCatalogReq->pUser = NULL;
|
||||
}
|
||||
|
||||
static int32_t setVnodeModifOpStmt(SParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
|
||||
static int32_t setVnodeModifOpStmt(SInsertParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
|
||||
SVnodeModifOpStmt* pStmt) {
|
||||
clearCatalogReq(pCatalogReq);
|
||||
|
||||
if (pStmt->usingTableProcessing) {
|
||||
return getTableSchemaFromMetaData(pMetaData, pStmt, true);
|
||||
return getTableSchemaFromMetaData(pCxt, pMetaData, pStmt, true);
|
||||
}
|
||||
return getTableSchemaFromMetaData(pMetaData, pStmt, false);
|
||||
return getTableSchemaFromMetaData(pCxt, pMetaData, pStmt, false);
|
||||
}
|
||||
|
||||
static int32_t resetVnodeModifOpStmt(SParseContext* pCxt, SQuery* pQuery) {
|
||||
static int32_t resetVnodeModifOpStmt(SInsertParseContext* pCxt, SQuery* pQuery) {
|
||||
nodesDestroyNode(pQuery->pRoot);
|
||||
|
||||
int32_t code = createVnodeModifOpStmt(pCxt, true, &pQuery->pRoot);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;
|
||||
|
||||
(*pCxt->pStmtCb->getExecInfoFn)(pCxt->pStmtCb->pStmt, &pStmt->pVgroupsHashObj, &pStmt->pTableBlockHashObj);
|
||||
(*pCxt->pComCxt->pStmtCb->getExecInfoFn)(pCxt->pComCxt->pStmtCb->pStmt, &pStmt->pVgroupsHashObj,
|
||||
&pStmt->pTableBlockHashObj);
|
||||
if (NULL == pStmt->pVgroupsHashObj) {
|
||||
pStmt->pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
|
||||
}
|
||||
|
@ -1729,13 +1741,13 @@ static int32_t resetVnodeModifOpStmt(SParseContext* pCxt, SQuery* pQuery) {
|
|||
return code;
|
||||
}
|
||||
|
||||
static int32_t initInsertQuery(SParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
|
||||
static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogReq, const SMetaData* pMetaData,
|
||||
SQuery** pQuery) {
|
||||
if (NULL == *pQuery) {
|
||||
return createInsertQuery(pCxt, pQuery);
|
||||
}
|
||||
|
||||
if (NULL != pCxt->pStmtCb) {
|
||||
if (NULL != pCxt->pComCxt->pStmtCb) {
|
||||
return resetVnodeModifOpStmt(pCxt, *pQuery);
|
||||
}
|
||||
|
||||
|
@ -1896,7 +1908,7 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal
|
|||
.usingDuplicateTable = false,
|
||||
};
|
||||
|
||||
int32_t code = initInsertQuery(pCxt, pCatalogReq, pMetaData, pQuery);
|
||||
int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = parseInsertSqlImpl(&context, (SVnodeModifOpStmt*)(*pQuery)->pRoot);
|
||||
}
|
||||
|
|
|
@@ -499,27 +499,22 @@ int32_t qWorkerProcessFetchMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int
    return TSDB_CODE_QRY_INVALID_INPUT;
  }

  SResFetchReq *msg = pMsg->pCont;
  SResFetchReq req = {0};
  SQWorker *mgmt = (SQWorker *)qWorkerMgmt;

  qwUpdateTimeInQueue(mgmt, ts, FETCH_QUEUE);
  QW_STAT_INC(mgmt->stat.msgStat.fetchProcessed, 1);

  if (NULL == msg || pMsg->contLen < sizeof(*msg)) {
    QW_ELOG("invalid fetch msg, msg:%p, msgLen:%d", msg, pMsg->contLen);
  if (tDeserializeSResFetchReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
    QW_ELOG("tDeserializeSResFetchReq %d failed", pMsg->contLen);
    QW_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
  }

  msg->sId = be64toh(msg->sId);
  msg->queryId = be64toh(msg->queryId);
  msg->taskId = be64toh(msg->taskId);
  msg->execId = ntohl(msg->execId);

  uint64_t sId = msg->sId;
  uint64_t qId = msg->queryId;
  uint64_t tId = msg->taskId;
  uint64_t sId = req.sId;
  uint64_t qId = req.queryId;
  uint64_t tId = req.taskId;
  int64_t rId = 0;
  int32_t eId = msg->execId;
  int32_t eId = req.execId;

  SQWMsg qwMsg = {.node = node, .msg = NULL, .msgLen = 0, .connInfo = pMsg->info, .msgType = pMsg->msgType};
|
@@ -1083,22 +1083,29 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
    }
    case TDMT_SCH_FETCH:
    case TDMT_SCH_MERGE_FETCH: {
      msgSize = sizeof(SResFetchReq);
      SResFetchReq req = {0};
      req.header.vgId = addr->nodeId;
      req.sId = schMgmt.sId;
      req.queryId = pJob->queryId;
      req.taskId = pTask->taskId;
      req.execId = pTask->execId;

      msgSize = tSerializeSResFetchReq(NULL, 0, &req);
      if (msgSize < 0) {
        SCH_TASK_ELOG("tSerializeSResFetchReq get size, msgSize:%d", msgSize);
        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
      }

      msg = taosMemoryCalloc(1, msgSize);
      if (NULL == msg) {
        SCH_TASK_ELOG("calloc %d failed", msgSize);
        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
      }

      SResFetchReq *pMsg = msg;

      pMsg->header.vgId = htonl(addr->nodeId);

      pMsg->sId = htobe64(schMgmt.sId);
      pMsg->queryId = htobe64(pJob->queryId);
      pMsg->taskId = htobe64(pTask->taskId);
      pMsg->execId = htonl(pTask->execId);

      if (tSerializeSResFetchReq(msg, msgSize, &req) < 0) {
        SCH_TASK_ELOG("tSerializeSResFetchReq %d failed", msgSize);
        SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
      }
      break;
    }
    case TDMT_SCH_DROP_TASK: {
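Together with the qworker change above, the fetch request now travels through an explicit serializer instead of a fixed-layout struct with hand-rolled byte swapping: the sender first calls the serializer with a NULL buffer to learn the encoded size, allocates exactly that much, and calls it again to fill the buffer; the receiver deserializes and validates in one step. A standalone sketch of that two-pass pattern, using a tiny hypothetical encodeFetchReq rather than the real tSerializeSResFetchReq, could look like:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  uint64_t queryId;
  uint64_t taskId;
  int32_t  execId;
} FetchReq;  // simplified stand-in for the real request struct

// Encode into buf if it is large enough; always report the number of bytes
// needed, or -1 if buf is non-NULL but too small. Calling with buf == NULL
// is the "size probe" pass.
static int32_t encodeFetchReq(void *buf, int32_t bufLen, const FetchReq *req) {
  int32_t need = (int32_t)(sizeof(req->queryId) + sizeof(req->taskId) + sizeof(req->execId));
  if (buf == NULL) return need;
  if (bufLen < need) return -1;

  char *p = buf;
  memcpy(p, &req->queryId, sizeof(req->queryId)); p += sizeof(req->queryId);
  memcpy(p, &req->taskId,  sizeof(req->taskId));  p += sizeof(req->taskId);
  memcpy(p, &req->execId,  sizeof(req->execId));
  return need;
}

int main(void) {
  FetchReq req = {.queryId = 7, .taskId = 9, .execId = 1};

  int32_t msgSize = encodeFetchReq(NULL, 0, &req);   // pass 1: size only
  if (msgSize < 0) return 1;

  void *msg = calloc(1, (size_t)msgSize);
  if (msg == NULL) return 1;

  if (encodeFetchReq(msg, msgSize, &req) < 0) {      // pass 2: fill the buffer
    free(msg);
    return 1;
  }
  printf("encoded %d bytes\n", msgSize);
  free(msg);
  return 0;
}
```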
@@ -171,7 +171,7 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
  pSyncNode->pLogStore->syncLogUpdateCommitIndex(pSyncNode->pLogStore, pSyncNode->commitIndex);

  // execute fsm
  if (pSyncNode->pFsm != NULL) {
  if (pSyncNode != NULL && pSyncNode->pFsm != NULL) {
    int32_t code = syncNodeDoCommit(pSyncNode, beginIndex, endIndex, pSyncNode->state);
    if (code != 0) {
      sNError(pSyncNode, "advance commit index error, do commit begin:%" PRId64 ", end:%" PRId64, beginIndex,

@ -64,7 +64,7 @@ SSyncRaftEntry* syncEntryBuildFromRpcMsg(const SRpcMsg* pMsg, SyncTerm term, Syn
|
|||
}
|
||||
|
||||
SSyncRaftEntry* syncEntryBuildFromAppendEntries(const SyncAppendEntries* pMsg) {
|
||||
SSyncRaftEntry* pEntry = syncEntryBuild(pMsg->dataLen);
|
||||
SSyncRaftEntry* pEntry = syncEntryBuild((int32_t)(pMsg->dataLen));
|
||||
if (pEntry == NULL) return NULL;
|
||||
|
||||
memcpy(pEntry, pMsg->data, pMsg->dataLen);
|
||||
|
@ -91,15 +91,14 @@ SSyncRaftEntry* syncEntryBuildNoop(SyncTerm term, SyncIndex index, int32_t vgId)
|
|||
|
||||
void syncEntryDestory(SSyncRaftEntry* pEntry) {
|
||||
if (pEntry != NULL) {
|
||||
taosMemoryFree(pEntry);
|
||||
|
||||
sTrace("free entry: %p", pEntry);
|
||||
taosMemoryFree(pEntry);
|
||||
}
|
||||
}
|
||||
|
||||
void syncEntry2OriginalRpc(const SSyncRaftEntry* pEntry, SRpcMsg* pRpcMsg) {
|
||||
pRpcMsg->msgType = pEntry->originalRpcType;
|
||||
pRpcMsg->contLen = pEntry->dataLen;
|
||||
pRpcMsg->contLen = (int32_t)(pEntry->dataLen);
|
||||
pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen);
|
||||
memcpy(pRpcMsg->pCont, pEntry->data, pRpcMsg->contLen);
|
||||
}
|
||||
|
@ -339,7 +338,8 @@ int32_t raftEntryCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index,
|
|||
SSyncRaftEntry* pEntry = NULL;
|
||||
int32_t code = raftEntryCacheGetEntryP(pCache, index, &pEntry);
|
||||
if (code == 1) {
|
||||
*ppEntry = taosMemoryMalloc((int64_t)(pEntry->bytes));
|
||||
int32_t bytes = (int32_t)pEntry->bytes;
|
||||
*ppEntry = taosMemoryMalloc((int64_t)bytes);
|
||||
memcpy(*ppEntry, pEntry, pEntry->bytes);
|
||||
(*ppEntry)->rid = -1;
|
||||
} else {
|
||||
|
|
|
@ -91,7 +91,7 @@ int32_t syncNodeReplicateOne(SSyncNode* pSyncNode, SRaftId* pDestId, bool snapsh
|
|||
if (code == 0) {
|
||||
ASSERT(pEntry != NULL);
|
||||
|
||||
code = syncBuildAppendEntries(&rpcMsg, pEntry->bytes, pSyncNode->vgId);
|
||||
code = syncBuildAppendEntries(&rpcMsg, (int32_t)(pEntry->bytes), pSyncNode->vgId);
|
||||
ASSERT(code == 0);
|
||||
|
||||
pMsg = rpcMsg.pCont;
|
||||
|
|
|
@ -229,7 +229,10 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo
|
|||
int32_t writeLen = vsnprintf(eventLog, sizeof(eventLog), format, argpointer);
|
||||
va_end(argpointer);
|
||||
|
||||
int32_t aqItems = pNode->pFsm->FpApplyQueueItems(pNode->pFsm);
|
||||
int32_t aqItems = 0;
|
||||
if (pNode != NULL && pNode->pFsm != NULL && pNode->pFsm->FpApplyQueueItems != NULL) {
|
||||
aqItems = pNode->pFsm->FpApplyQueueItems(pNode->pFsm);
|
||||
}
|
||||
|
||||
// restore error code
|
||||
terrno = errCode;
|
||||
|
|
|
@ -445,9 +445,11 @@ void cliHandleExceptImpl(SCliConn* pConn, int32_t code) {
|
|||
|
||||
if (pCtx == NULL || pCtx->pSem == NULL) {
|
||||
if (transMsg.info.ahandle == NULL) {
|
||||
if (pMsg == NULL || REQUEST_NO_RESP(&pMsg->msg) || pMsg->type == Release) destroyCmsg(pMsg);
|
||||
once = true;
|
||||
continue;
|
||||
if (pMsg == NULL || REQUEST_NO_RESP(&pMsg->msg) || pMsg->type == Release) {
|
||||
destroyCmsg(pMsg);
|
||||
once = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -791,6 +793,7 @@ void cliSend(SCliConn* pConn) {
|
|||
|
||||
int msgLen = transMsgLenFromCont(pMsg->contLen);
|
||||
STransMsgHead* pHead = transHeadFromCont(pMsg->pCont);
|
||||
|
||||
pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0;
|
||||
pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0;
|
||||
pHead->persist = REQUEST_PERSIS_HANDLE(pMsg) ? 1 : 0;
|
||||
|
@ -820,10 +823,15 @@ void cliSend(SCliConn* pConn) {
|
|||
uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0);
|
||||
}
|
||||
|
||||
if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) {
|
||||
msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead);
|
||||
pHead->msgLen = (int32_t)htonl((uint32_t)msgLen);
|
||||
if (pHead->comp == 0) {
|
||||
if (pTransInst->compressSize != -1 && pTransInst->compressSize < pMsg->contLen) {
|
||||
msgLen = transCompressMsg(pMsg->pCont, pMsg->contLen) + sizeof(STransMsgHead);
|
||||
pHead->msgLen = (int32_t)htonl((uint32_t)msgLen);
|
||||
}
|
||||
} else {
|
||||
msgLen = (int32_t)ntohl((uint32_t)(pHead->msgLen));
|
||||
}
|
||||
|
||||
tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
|
||||
TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, msgLen);
|
||||
|
||||
|
@ -1211,6 +1219,7 @@ static FORCE_INLINE void destroyCmsg(void* arg) {
|
|||
if (pMsg == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
transDestroyConnCtx(pMsg->ctx);
|
||||
destroyUserdata(&pMsg->msg);
|
||||
taosMemoryFree(pMsg);
|
||||
|
|
|
@ -398,7 +398,7 @@ static int uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
|
|||
pHead->magicNum = htonl(TRANS_MAGIC_NUM);
|
||||
|
||||
// handle invalid drop_task resp, TD-20098
|
||||
if (pMsg->msgType == TDMT_SCH_DROP_TASK && pMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
|
||||
if (pConn->inType == TDMT_SCH_DROP_TASK && pMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
|
||||
transQueuePop(&pConn->srvMsgs);
|
||||
destroySmsg(smsg);
|
||||
return -1;
|
||||
|
|
|
@ -324,24 +324,36 @@ int32_t walEndSnapshot(SWal *pWal) {
|
|||
// find files safe to delete
|
||||
SWalFileInfo *pInfo = taosArraySearch(pWal->fileInfoSet, &tmp, compareWalFileInfo, TD_LE);
|
||||
if (pInfo) {
|
||||
SWalFileInfo *pLastFileInfo = taosArrayGetLast(pWal->fileInfoSet);
|
||||
wDebug("vgId:%d, wal search found file info: first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer,
|
||||
pInfo->lastVer);
|
||||
if (ver >= pInfo->lastVer) {
|
||||
//pInfo--;
|
||||
pInfo++;
|
||||
wDebug("vgId:%d, wal remove advance one file: first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer,
|
||||
pInfo->lastVer);
|
||||
}
|
||||
if (POINTER_DISTANCE(pInfo, pWal->fileInfoSet->pData) > 0) {
|
||||
wDebug("vgId:%d, wal end remove for %" PRId64, pWal->cfg.vgId, pInfo->firstVer);
|
||||
if (pInfo <= pLastFileInfo) {
|
||||
wDebug("vgId:%d, wal end remove for first:%" PRId64 " last:%" PRId64, pWal->cfg.vgId, pInfo->firstVer,
|
||||
pInfo->lastVer);
|
||||
} else {
|
||||
wDebug("vgId:%d, wal no remove", pWal->cfg.vgId);
|
||||
}
|
||||
// iterate files, until the searched result
|
||||
for (SWalFileInfo *iter = pWal->fileInfoSet->pData; iter < pInfo; iter++) {
|
||||
if ((pWal->cfg.retentionSize != -1 && newTotSize > pWal->cfg.retentionSize) ||
|
||||
(pWal->cfg.retentionPeriod != -1 && iter->closeTs + pWal->cfg.retentionPeriod > ts)) {
|
||||
wDebug("vgId:%d, wal check remove file %" PRId64 "(file size %" PRId64 " close ts %" PRId64
|
||||
"), new tot size %" PRId64,
|
||||
pWal->cfg.vgId, iter->firstVer, iter->fileSize, iter->closeTs, newTotSize);
|
||||
if (((pWal->cfg.retentionSize == 0) || (pWal->cfg.retentionSize != -1 && newTotSize > pWal->cfg.retentionSize)) ||
|
||||
((pWal->cfg.retentionPeriod == 0) ||
|
||||
(pWal->cfg.retentionPeriod != -1 && iter->closeTs + pWal->cfg.retentionPeriod > ts))) {
|
||||
// delete according to file size or close time
|
||||
wDebug("vgId:%d, check pass", pWal->cfg.vgId);
|
||||
deleteCnt++;
|
||||
newTotSize -= iter->fileSize;
|
||||
}
|
||||
wDebug("vgId:%d, check not pass", pWal->cfg.vgId);
|
||||
}
|
||||
wDebug("vgId:%d, wal should delete %d files", pWal->cfg.vgId, deleteCnt);
|
||||
int32_t actualDelete = 0;
|
||||
char fnameStr[WAL_FILE_LEN];
|
||||
// remove file
|
||||
|
|
|
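The walEndSnapshot hunk above widens the retention check so that a retention size or period of 0 also marks files for deletion, alongside the existing size and age conditions (-1 still disables a rule). A simplified sketch of one way to express such a check is shown below; the WalFile type is an illustrative stand-in for SWalFileInfo, and the age comparison here follows the conventional "older than the retention window" reading, which is an assumption rather than a copy of the exact expression in the hunk:

```c
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

// Illustrative WAL file record; the real SWalFileInfo carries more fields.
typedef struct {
  int64_t fileSize;
  int64_t closeTs;   // time the file was closed
} WalFile;

// Decide whether a closed WAL file may be deleted. retentionSize/retentionPeriod
// use the config convention assumed here: -1 disables the rule, 0 means "keep
// nothing", any other value is the actual threshold.
static bool canDelete(const WalFile *f, int64_t totSize, int64_t now,
                      int64_t retentionSize, int64_t retentionPeriod) {
  bool bySize   = (retentionSize == 0)   || (retentionSize != -1 && totSize > retentionSize);
  bool byPeriod = (retentionPeriod == 0) || (retentionPeriod != -1 && now - f->closeTs > retentionPeriod);
  return bySize || byPeriod;
}

int main(void) {
  WalFile f = {.fileSize = 4096, .closeTs = 1000};
  printf("retention 0/0       -> %d\n", canDelete(&f, 4096, 2000, 0, 0));     // 1: keep nothing
  printf("retention -1/-1     -> %d\n", canDelete(&f, 4096, 2000, -1, -1));   // 0: rules disabled
  printf("size cap 1024 bytes -> %d\n", canDelete(&f, 4096, 2000, 1024, -1)); // 1: over the cap
  return 0;
}
```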
@@ -739,7 +739,6 @@ void taosFprintfFile(TdFilePtr pFile, const char *format, ...) {
  va_start(ap, format);
  vfprintf(pFile->fp, format, ap);
  va_end(ap);
  fflush(pFile->fp);
}

bool taosValidFile(TdFilePtr pFile) { return pFile != NULL && pFile->fd > 0; }

@@ -107,7 +107,7 @@ static uint64_t allocatePositionInFile(SDiskbasedBuf* pBuf, size_t size) {
static void setPageNotInBuf(SPageInfo* pPageInfo) { pPageInfo->pData = NULL; }

static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize + POINTER_BYTES + 2; }
static FORCE_INLINE size_t getAllocPageSize(int32_t pageSize) { return pageSize + POINTER_BYTES + sizeof(SFilePage); }

/**
 * +--------------------------+-------------------+--------------+
@ -216,7 +216,7 @@
|
|||
,,y,script,./test.sh -f tsim/stream/drop_stream.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic2.sim
|
||||
,,n,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeSession0.sim
|
||||
|
@ -227,11 +227,11 @@
|
|||
,,y,script,./test.sh -f tsim/stream/triggerSession0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionby.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionby1.sim
|
||||
,,n,script,./test.sh -f tsim/stream/schedSnode.sim
|
||||
,,y,script,./test.sh -f tsim/stream/schedSnode.sim
|
||||
,,y,script,./test.sh -f tsim/stream/windowClose.sim
|
||||
,,y,script,./test.sh -f tsim/stream/ignoreExpiredData.sim
|
||||
,,y,script,./test.sh -f tsim/stream/sliding.sim
|
||||
,,n,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
|
||||
,,,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionbyColumnSession.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionbyColumnState.sim
|
||||
,,y,script,./test.sh -f tsim/stream/deleteInterval.sim
|
||||
|
@ -278,8 +278,8 @@
|
|||
,,y,script,./test.sh -f tsim/stable/values.sim
|
||||
,,y,script,./test.sh -f tsim/stable/vnode3.sim
|
||||
,,y,script,./test.sh -f tsim/stable/metrics_idx.sim
|
||||
,,n,script,./test.sh -f tsim/sma/drop_sma.sim
|
||||
,,n,script,./test.sh -f tsim/sma/sma_leak.sim
|
||||
,,,script,./test.sh -f tsim/sma/drop_sma.sim
|
||||
,,y,script,./test.sh -f tsim/sma/sma_leak.sim
|
||||
,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
|
||||
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
|
||||
,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
|
||||
|
@ -401,18 +401,18 @@
|
|||
|
||||
#system test
|
||||
|
||||
,,,system-test,python3 ./test.py -f 0-others/taosShell.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/taosShellError.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/taosShellNetChk.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/telemetry.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/taosdMonitor.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/udfTest.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/udf_create.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/udf_restart_taosd.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/cachemodel.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/udf_cfg1.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/udf_cfg2.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/taosdShell.py -N 5 -M 3 -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShell.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShellError.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShellNetChk.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/telemetry.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosdMonitor.py
|
||||
,,n,system-test,python3 ./test.py -f 0-others/udfTest.py
|
||||
,,n,system-test,python3 ./test.py -f 0-others/udf_create.py
|
||||
,,n,system-test,python3 ./test.py -f 0-others/udf_restart_taosd.py
|
||||
,,n,system-test,python3 ./test.py -f 0-others/cachemodel.py
|
||||
,,n,system-test,python3 ./test.py -f 0-others/udf_cfg1.py
|
||||
,,n,system-test,python3 ./test.py -f 0-others/udf_cfg2.py
|
||||
,,n,system-test,python3 ./test.py -f 0-others/taosdShell.py -N 5 -M 3 -Q 3
|
||||
,,,system-test,python3 ./test.py -f 0-others/sysinfo.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/user_control.py
|
||||
,,,system-test,python3 ./test.py -f 0-others/fsync.py
|
||||
|
@ -451,8 +451,8 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/avg.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/avg.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/avg.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/avg.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py
|
||||
|
@ -465,12 +465,12 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws2.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws2.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py
|
||||
|
@ -515,14 +515,14 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/histogram.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/interp.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/interp.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interp.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interp.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/last_row.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/last_row.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py
|
||||
|
@ -531,10 +531,10 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/log.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/log.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/lower.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/lower.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/ltrim.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/ltrim.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/lower.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/lower.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ltrim.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ltrim.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py
|
||||
|
@ -555,8 +555,8 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/round.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/round.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/rtrim.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/rtrim.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/rtrim.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/rtrim.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sin.py
|
||||
|
@ -573,12 +573,12 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/statecount.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/substr.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/substr.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/substr.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/substr.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sum.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sum.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/tail.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/tail.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tail.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tail.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py
|
||||
|
@ -603,8 +603,8 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union.py -R
|
||||
,,,system-test,python3 ./test.py -f 2-query/unique.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/unique.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py -R
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py
|
||||
|
@ -618,7 +618,7 @@
|
|||
,,,system-test,python3 ./test.py -f 1-insert/delete_normaltable.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/keep_expired.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join2.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/union1.py
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union1.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat2.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/json_tag.py
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery.py
|
||||
|
@ -704,6 +704,7 @@
|
|||
,,,system-test,python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
|
||||
,,,system-test,python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
|
||||
,,,system-test,python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
|
||||
,,,system-test,python3 ./test.py -f 7-tmq/tmqDnodeRestart1.py
|
||||
,,,system-test,python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
|
||||
,,,system-test,python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py
|
||||
,,,system-test,python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
|
||||
|
@ -724,11 +725,11 @@
|
|||
,,,system-test,python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
|
||||
,,,system-test,python3 ./test.py -f 99-TDcase/TD-19201.py
|
||||
,,,system-test,python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/ltrim.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/rtrim.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ltrim.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/rtrim.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py -Q 2
|
||||
|
@ -736,13 +737,13 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join2.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/substr.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/union.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/union1.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat2.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/substr.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union1.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat2.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws2.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/spread.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py -Q 2
|
||||
|
@ -781,230 +782,230 @@
|
|||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/interp.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interp.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_math.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/stablity.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/stablity_1.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/avg.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/elapsed.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/csum.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/sample.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_diff.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/unique.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/tail.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/avg.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/elapsed.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/csum.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/sample.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_diff.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tail.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/max_partition.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/last_row.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 2
|
||||
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/case_when.py -Q 2
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/ltrim.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/rtrim.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/lower.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join2.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/substr.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/union.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/union1.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat2.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/spread.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/explain.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timezone.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Today.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/mode.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ltrim.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/rtrim.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/lower.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join2.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/substr.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union1.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat2.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws2.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/spread.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/explain.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timezone.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Today.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/mode.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/first.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_iso8601.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timetruncate.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/json_tag.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/percentile.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/apercentile.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ceil.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/floor.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/round.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/log.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pow.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sqrt.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sin.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/arctan.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/first.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_iso8601.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timetruncate.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/json_tag.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/apercentile.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ceil.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/floor.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/round.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/log.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pow.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sqrt.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sin.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/stablity.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/stablity_1.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/avg.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/elapsed.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/csum.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/sample.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_diff.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/unique.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/tail.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/avg.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/elapsed.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/csum.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/sample.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_diff.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tail.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/last_row.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 3
|
||||
,,,system-test,python3 ./test.py -f 2-query/interp.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interp.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/case_when.py -Q 3
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/ltrim.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/rtrim.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/upper.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/lower.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/join2.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/substr.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/union.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/union1.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat2.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/concat_ws2.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/spread.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/explain.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timezone.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Today.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/mode.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/between.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distinct.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/varchar.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ltrim.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/rtrim.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/length.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/char_length.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/upper.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/lower.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/join2.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/substr.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/union1.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat2.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/concat_ws2.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/check_tsdb.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/spread.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/hyperloglog.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/explain.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/leastsquares.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timezone.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Today.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/mode.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/first.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_iso8601.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timetruncate.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/apercentile.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ceil.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/floor.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/round.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/log.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pow.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sqrt.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sin.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/first.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_iso8601.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timetruncate.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/apercentile.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ceil.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/floor.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/round.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/log.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/pow.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sqrt.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sin.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cos.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tan.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arcsin.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arccos.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/arctan.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/stablity.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/stablity_1.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/avg.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/elapsed.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/csum.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/sample.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/cast.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_diff.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/unique.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/tail.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/ttl_comment.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/avg.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/elapsed.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/csum.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mavg.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/sample.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/cast.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_diff.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/unique.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/stateduration.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/function_stateduration.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/statecount.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tail.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ttl_comment.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_count.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_max.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_min.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_sum.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_spread.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_apercentile.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_avg.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/distribute_agg_stddev.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/twa.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/irate.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_null.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_partition.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max_partition.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/last_row.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/last_row.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/tsbsQuery.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py -Q 4
|
||||
,,,system-test,python3 ./test.py -f 2-query/interp.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interp.py -Q 4
|
||||
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/case_when.py -Q 4
|
||||
|
||||
#develop test
|
||||
|
@ -1022,8 +1023,8 @@
|
|||
,,,develop-test,python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py -R
|
||||
|
||||
#docs-examples test
|
||||
,,,docs-examples-test,bash python.sh
|
||||
,,,docs-examples-test,bash node.sh
|
||||
,,,docs-examples-test,bash csharp.sh
|
||||
,,,docs-examples-test,bash jdbc.sh
|
||||
,,,docs-examples-test,bash go.sh
|
||||
,,n,docs-examples-test,bash python.sh
|
||||
,,n,docs-examples-test,bash node.sh
|
||||
,,n,docs-examples-test,bash csharp.sh
|
||||
,,n,docs-examples-test,bash jdbc.sh
|
||||
,,n,docs-examples-test,bash go.sh
|
||||
@ -0,0 +1,365 @@
|
|||
import os
|
||||
import socket
|
||||
import requests
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
import os ,sys
|
||||
import random
|
||||
import argparse
|
||||
import subprocess
|
||||
import time
|
||||
import platform
|
||||
|
||||
# valgrind mode ?
|
||||
valgrind_mode = False
|
||||
|
||||
msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" }
|
||||
|
||||
# formal
|
||||
hostname = socket.gethostname()
|
||||
|
||||
group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
|
||||
|
||||
def get_msg(text):
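    # Wrap the given text into a Feishu "post" message payload.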
|
||||
return {
|
||||
"msg_type": "post",
|
||||
"content": {
|
||||
"post": {
|
||||
"zh_cn": {
|
||||
"title": "Crash_gen Monitor",
|
||||
"content": [
|
||||
[{
|
||||
"tag": "text",
|
||||
"text": text
|
||||
}
|
||||
]]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def send_msg(json):
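    # POST the payload to the Feishu group webhook and print the response when it is not accepted.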
|
||||
headers = {
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
req = requests.post(url=group_url, headers=headers, json=json)
|
||||
inf = req.json()
|
||||
if "StatusCode" in inf and inf["StatusCode"] == 0:
|
||||
pass
|
||||
else:
|
||||
print(inf)
|
||||
|
||||
|
||||
# set up paths for this run instance
|
||||
|
||||
core_path = subprocess.Popen("cat /proc/sys/kernel/core_pattern", shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
core_path = "/".join(core_path.split("/")[:-1])
|
||||
print(" ======= core path is %s ======== " %core_path)
|
||||
if not os.path.exists(core_path):
|
||||
os.mkdir(core_path)
|
||||
|
||||
base_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
if base_dir.find("community")>0:
|
||||
repo = "community"
|
||||
elif base_dir.find("TDengine")>0:
|
||||
repo = "TDengine"
|
||||
else:
|
||||
repo ="TDengine"
|
||||
print("base_dir:",base_dir)
|
||||
home_dir = base_dir[:base_dir.find(repo)]
|
||||
print("home_dir:",home_dir)
|
||||
run_dir = os.path.join(home_dir,'run_dir')
|
||||
run_dir = os.path.abspath(run_dir)
|
||||
print("run dir is *** :",run_dir)
|
||||
if not os.path.exists(run_dir):
|
||||
os.mkdir(run_dir)
|
||||
run_log_file = run_dir+'/crash_gen_run.log'
|
||||
crash_gen_cmds_file = os.path.join(run_dir, 'crash_gen_cmds.sh')
|
||||
exit_status_logs = os.path.join(run_dir, 'crash_exit.log')
|
||||
|
||||
def get_path():
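    # Walk the project tree to find the build directory that contains the taosd binary.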
|
||||
buildPath=''
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
# generate crash_gen start script randomly
|
||||
|
||||
def random_args(args_list):
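    # Randomize the crash_gen command-line arguments; several switches are pinned to fixed values below.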
|
||||
nums_args_list = ["--max-dbs","--num-replicas","--num-dnodes","--max-steps","--num-threads",] # record int type arguments
|
||||
bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names",
|
||||
"--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception"
|
||||
] # record bool type arguments
|
||||
strs_args_list = ["--connector-type"] # record str type arguments
|
||||
|
||||
args_list["--auto-start-service"]= False
|
||||
args_list["--continue-on-exception"]=True
|
||||
    # connect_types=['native','rest','mixed'] # the RESTful interface has changed; db names must be bound to the connection or the SQL must be qualified, e.g. "db.test"
|
||||
connect_types=['native']
|
||||
# args_list["--connector-type"]=connect_types[random.randint(0,2)]
|
||||
args_list["--connector-type"]= connect_types[0]
|
||||
args_list["--max-dbs"]= random.randint(1,10)
|
||||
|
||||
# dnodes = [1,3] # set single dnodes;
|
||||
|
||||
# args_list["--num-dnodes"]= random.sample(dnodes,1)[0]
|
||||
# args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"])
|
||||
args_list["--debug"]=False
|
||||
args_list["--per-thread-db-connection"]=True
|
||||
args_list["--track-memory-leaks"]=False
|
||||
|
||||
args_list["--max-steps"]=random.randint(500,2000)
|
||||
|
||||
# args_list["--ignore-errors"]=[] ## can add error codes for detail
|
||||
|
||||
|
||||
args_list["--run-tdengine"]= False
|
||||
args_list["--use-shadow-db"]= False
|
||||
args_list["--dynamic-db-table-names"]= True
|
||||
args_list["--verify-data"]= False
|
||||
args_list["--record-ops"] = False
|
||||
|
||||
for key in bools_args_list:
|
||||
set_bool_value = [True,False]
|
||||
if key == "--auto-start-service" :
|
||||
continue
|
||||
elif key =="--run-tdengine":
|
||||
continue
|
||||
elif key == "--ignore-errors":
|
||||
continue
|
||||
elif key == "--debug":
|
||||
continue
|
||||
elif key == "--per-thread-db-connection":
|
||||
continue
|
||||
elif key == "--continue-on-exception":
|
||||
continue
|
||||
elif key == "--use-shadow-db":
|
||||
continue
|
||||
elif key =="--track-memory-leaks":
|
||||
continue
|
||||
elif key == "--dynamic-db-table-names":
|
||||
continue
|
||||
elif key == "--verify-data":
|
||||
continue
|
||||
elif key == "--record-ops":
|
||||
continue
|
||||
else:
|
||||
args_list[key]=set_bool_value[random.randint(0,1)]
|
||||
|
||||
if args_list["--larger-data"]:
|
||||
threads = [16,32]
|
||||
else:
|
||||
threads = [32,64,128,256]
|
||||
args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug
|
||||
|
||||
return args_list
|
||||
|
||||
def limits(args_list):
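    # Enforce constraints between arguments that crash_gen cannot run with together.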
|
||||
if args_list["--use-shadow-db"]==True:
|
||||
if args_list["--max-dbs"] > 1:
|
||||
print("Cannot combine use-shadow-db with max-dbs of more than 1 ,set max-dbs=1")
|
||||
args_list["--max-dbs"]=1
|
||||
else:
|
||||
pass
|
||||
|
||||
    # the environment is started by the test framework, not by this crash_gen instance
|
||||
|
||||
# elif args_list["--num-replicas"]==0:
|
||||
# print(" make sure num-replicas is at least 1 ")
|
||||
# args_list["--num-replicas"]=1
|
||||
# elif args_list["--num-replicas"]==1:
|
||||
# pass
|
||||
|
||||
# elif args_list["--num-replicas"]>1:
|
||||
# if not args_list["--auto-start-service"]:
|
||||
# print("it should be deployed by crash_gen auto-start-service for multi replicas")
|
||||
|
||||
# else:
|
||||
# pass
|
||||
|
||||
return args_list
|
||||
|
||||
def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode):
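    # Assemble the crash_gen.sh command line from the argument dict, optionally wrapped with --valgrind.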
|
||||
build_path = get_path()
|
||||
if repo == "community":
|
||||
crash_gen_path = build_path[:-5]+"community/tests/pytest/"
|
||||
elif repo == "TDengine":
|
||||
crash_gen_path = build_path[:-5]+"/tests/pytest/"
|
||||
else:
|
||||
pass
|
||||
|
||||
bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names",
|
||||
"--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception"]
|
||||
arguments = ""
|
||||
for k ,v in args_list.items():
|
||||
if k == "--ignore-errors":
|
||||
if v:
|
||||
arguments+=(k+"="+str(v)+" ")
|
||||
else:
|
||||
arguments+=""
|
||||
elif k in bools_args_list and v==True:
|
||||
arguments+=(k+" ")
|
||||
elif k in bools_args_list and v==False:
|
||||
arguments+=""
|
||||
else:
|
||||
arguments+=(k+"="+str(v)+" ")
|
||||
|
||||
if valgrind :
|
||||
|
||||
crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203 '%(crash_gen_path ,arguments)
|
||||
|
||||
else:
|
||||
|
||||
crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203'%(crash_gen_path ,arguments)
|
||||
|
||||
return crash_gen_cmd
|
||||
|
||||
def start_taosd():
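    # Bring up the taosd test environment through the system-test framework before crash_gen runs.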
|
||||
build_path = get_path()
|
||||
if repo == "community":
|
||||
start_path = build_path[:-5]+"community/tests/system-test/"
|
||||
elif repo == "TDengine":
|
||||
start_path = build_path[:-5]+"/tests/system-test/"
|
||||
else:
|
||||
pass
|
||||
|
||||
start_cmd = 'cd %s && python3 test.py >>/dev/null '%(start_path)
|
||||
os.system(start_cmd)
|
||||
|
||||
def get_cmds(args_list):
|
||||
# build_path = get_path()
|
||||
# if repo == "community":
|
||||
# crash_gen_path = build_path[:-5]+"community/tests/pytest/"
|
||||
# elif repo == "TDengine":
|
||||
# crash_gen_path = build_path[:-5]+"/tests/pytest/"
|
||||
# else:
|
||||
# pass
|
||||
|
||||
# crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -p -t 10 -s 1000 -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 '%(crash_gen_path)
|
||||
|
||||
crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode)
|
||||
return crash_gen_cmd
|
||||
|
||||
def run_crash_gen(crash_cmds):
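    # Prepare taosd, record the generated command, then run it with output appended to valgrind.out.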
|
||||
|
||||
# prepare env of taosd
|
||||
start_taosd()
|
||||
|
||||
build_path = get_path()
|
||||
if repo == "community":
|
||||
crash_gen_path = build_path[:-5]+"community/tests/pytest/"
|
||||
elif repo == "TDengine":
|
||||
crash_gen_path = build_path[:-5]+"/tests/pytest/"
|
||||
else:
|
||||
pass
|
||||
result_file = os.path.join(crash_gen_path, 'valgrind.out')
|
||||
|
||||
|
||||
    # run crash_gen and back up its logs
|
||||
os.system('echo "%s">>%s'%(crash_cmds,crash_gen_cmds_file))
|
||||
os.system("%s >>%s "%(crash_cmds,result_file))
|
||||
|
||||
|
||||
def check_status():
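    # Derive an exit code from the tail of valgrind.out and the number of core files found.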
|
||||
build_path = get_path()
|
||||
if repo == "community":
|
||||
crash_gen_path = build_path[:-5]+"community/tests/pytest/"
|
||||
elif repo == "TDengine":
|
||||
crash_gen_path = build_path[:-5]+"/tests/pytest/"
|
||||
else:
|
||||
pass
|
||||
result_file = os.path.join(crash_gen_path, 'valgrind.out')
|
||||
run_code = subprocess.Popen("tail -n 50 %s"%result_file, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
os.system("tail -n 50 %s>>%s"%(result_file,exit_status_logs))
|
||||
|
||||
core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l'%core_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
|
||||
if int(core_check.strip().rstrip()) > 0:
|
||||
        # core files were generated, which indicates a crash occurred
|
||||
return 3
|
||||
|
||||
if "Crash_Gen is now exiting with status code: 1" in run_code:
|
||||
return 1
|
||||
elif "Crash_Gen is now exiting with status code: 0" in run_code:
|
||||
return 0
|
||||
else:
|
||||
return 2
|
||||
|
||||
|
||||
def main():
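    # Build a random argument set, run one crash_gen round, and report non-zero exit status to the Feishu group.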
|
||||
|
||||
args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[],
|
||||
"--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False,
|
||||
"--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False ,
|
||||
"--continue-on-exception":False }
|
||||
|
||||
args = random_args(args_list)
|
||||
args = limits(args)
|
||||
|
||||
|
||||
build_path = get_path()
|
||||
os.system("pip install git+https://github.com/taosdata/taos-connector-python.git")
|
||||
if repo =="community":
|
||||
crash_gen_path = build_path[:-5]+"community/tests/pytest/"
|
||||
elif repo =="TDengine":
|
||||
crash_gen_path = build_path[:-5]+"/tests/pytest/"
|
||||
else:
|
||||
pass
|
||||
|
||||
if os.path.exists(crash_gen_path+"crash_gen.sh"):
|
||||
print(" make sure crash_gen.sh is ready")
|
||||
else:
|
||||
print( " crash_gen.sh is not exists ")
|
||||
sys.exit(1)
|
||||
|
||||
git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[8:16]
|
||||
|
||||
# crash_cmds = get_cmds()
|
||||
|
||||
crash_cmds = get_cmds(args)
|
||||
# clean run_dir
|
||||
os.system('rm -rf %s'%run_dir )
|
||||
if not os.path.exists(run_dir):
|
||||
os.mkdir(run_dir)
|
||||
print(crash_cmds)
|
||||
run_crash_gen(crash_cmds)
|
||||
status = check_status()
|
||||
|
||||
print("exit status : ", status)
|
||||
|
||||
if status ==4:
|
||||
        print('======== crash_gen found invalid read/write errors ========')
|
||||
if status ==5:
|
||||
        print('======== crash_gen found memory leaks ========')
|
||||
if status >0:
|
||||
        print('======== crash_gen run failed and did not exit as expected ========')
|
||||
else:
|
||||
        print('======== crash_gen ran successfully and exited as expected ========')
|
||||
|
||||
|
||||
if status!=0 :
|
||||
|
||||
try:
|
||||
text = f"crash_gen instance exit status of docker [ {hostname} ] is : {msg_dict[status]}\n " + f" and git commit : {git_commit}"
|
||||
send_msg(get_msg(text))
|
||||
except Exception as e:
|
||||
print("exception:", e)
|
||||
exit(status)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
@ -0,0 +1,399 @@
|
|||
#!/usr/bin/python3
|
||||
|
||||
|
||||
import os
|
||||
import socket
|
||||
import requests
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
import os ,sys
|
||||
import random
|
||||
import argparse
|
||||
import subprocess
|
||||
import time
|
||||
import platform
|
||||
|
||||
# valgrind mode ?
|
||||
valgrind_mode = True
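# this variant keeps valgrind enabled for every run (passed to get_auto_mix_cmds below)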
|
||||
|
||||
msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" }
|
||||
|
||||
# formal
|
||||
hostname = socket.gethostname()
|
||||
|
||||
group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'
|
||||
|
||||
def get_msg(text):
|
||||
return {
|
||||
"msg_type": "post",
|
||||
"content": {
|
||||
"post": {
|
||||
"zh_cn": {
|
||||
"title": "Crash_gen Monitor",
|
||||
"content": [
|
||||
[{
|
||||
"tag": "text",
|
||||
"text": text
|
||||
}
|
||||
]]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def send_msg(json):
|
||||
headers = {
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
req = requests.post(url=group_url, headers=headers, json=json)
|
||||
inf = req.json()
|
||||
if "StatusCode" in inf and inf["StatusCode"] == 0:
|
||||
pass
|
||||
else:
|
||||
print(inf)
|
||||
|
||||
|
||||
# set up paths for this run instance
|
||||
|
||||
core_path = subprocess.Popen("cat /proc/sys/kernel/core_pattern", shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
|
||||
core_path = "/".join(core_path.split("/")[:-1])
|
||||
print(" ======= core path is %s ======== " %core_path)
|
||||
if not os.path.exists(core_path):
|
||||
os.mkdir(core_path)
|
||||
|
||||
base_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
if base_dir.find("community")>0:
|
||||
repo = "community"
|
||||
elif base_dir.find("TDengine")>0:
|
||||
repo = "TDengine"
|
||||
else:
|
||||
repo ="TDengine"
|
||||
print("base_dir:",base_dir)
|
||||
home_dir = base_dir[:base_dir.find(repo)]
|
||||
print("home_dir:",home_dir)
|
||||
run_dir = os.path.join(home_dir,'run_dir')
|
||||
run_dir = os.path.abspath(run_dir)
|
||||
print("run dir is *** :",run_dir)
|
||||
if not os.path.exists(run_dir):
|
||||
os.mkdir(run_dir)
|
||||
run_log_file = run_dir+'/crash_gen_run.log'
|
||||
crash_gen_cmds_file = os.path.join(run_dir, 'crash_gen_cmds.sh')
|
||||
exit_status_logs = os.path.join(run_dir, 'crash_exit.log')
|
||||
|
||||
def get_path():
|
||||
buildPath=''
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
# generate crash_gen start script randomly
|
||||
|
||||
def random_args(args_list):
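    # Same randomization as the non-valgrind script, but with fewer steps and threads, presumably to keep valgrind runs manageable.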
|
||||
nums_args_list = ["--max-dbs","--num-replicas","--num-dnodes","--max-steps","--num-threads",] # record int type arguments
|
||||
bools_args_list = ["--auto-start-service" , "--debug","--run-tdengine","--ignore-errors","--track-memory-leaks","--larger-data","--mix-oos-data","--dynamic-db-table-names",
|
||||
"--per-thread-db-connection","--record-ops","--verify-data","--use-shadow-db","--continue-on-exception"
|
||||
] # record bool type arguments
|
||||
strs_args_list = ["--connector-type"] # record str type arguments
|
||||
|
||||
args_list["--auto-start-service"]= False
|
||||
args_list["--continue-on-exception"]=True
|
||||
    # connect_types=['native','rest','mixed'] # the RESTful interface has changed; db names must be bound to the connection or the SQL must be qualified, e.g. "db.test"
|
||||
connect_types=['native']
|
||||
# args_list["--connector-type"]=connect_types[random.randint(0,2)]
|
||||
args_list["--connector-type"]= connect_types[0]
|
||||
args_list["--max-dbs"]= random.randint(1,10)
|
||||
|
||||
# dnodes = [1,3] # set single dnodes;
|
||||
|
||||
# args_list["--num-dnodes"]= random.sample(dnodes,1)[0]
|
||||
# args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"])
|
||||
args_list["--debug"]=False
|
||||
args_list["--per-thread-db-connection"]=True
|
||||
args_list["--track-memory-leaks"]=False
|
||||
|
||||
args_list["--max-steps"]=random.randint(200,500)
|
||||
|
||||
threads = [16,32]
|
||||
|
||||
args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug
|
||||
# args_list["--ignore-errors"]=[] ## can add error codes for detail
|
||||
|
||||
|
||||
args_list["--run-tdengine"]= False
|
||||
args_list["--use-shadow-db"]= False
|
||||
args_list["--dynamic-db-table-names"]= True
|
||||
args_list["--verify-data"]= False
|
||||
args_list["--record-ops"] = False
|
||||
|
||||
for key in bools_args_list:
|
||||
set_bool_value = [True,False]
|
||||
if key == "--auto-start-service" :
|
||||
continue
|
||||
elif key =="--run-tdengine":
|
||||
continue
|
||||
elif key == "--ignore-errors":
|
||||
continue
|
||||
elif key == "--debug":
|
||||
continue
|
||||
elif key == "--per-thread-db-connection":
|
||||
continue
|
||||
elif key == "--continue-on-exception":
|
||||
continue
|
||||
elif key == "--use-shadow-db":
|
||||
continue
|
||||
elif key =="--track-memory-leaks":
|
||||
continue
|
||||
elif key == "--dynamic-db-table-names":
|
||||
continue
|
||||
elif key == "--verify-data":
|
||||
continue
|
||||
elif key == "--record-ops":
|
||||
continue
|
||||
elif key == "--larger-data":
|
||||
continue
|
||||
else:
|
||||
args_list[key]=set_bool_value[random.randint(0,1)]
|
||||
return args_list
|
||||
|
||||
def limits(args_list):
|
||||
if args_list["--use-shadow-db"]==True:
|
||||
if args_list["--max-dbs"] > 1:
|
||||
print("Cannot combine use-shadow-db with max-dbs of more than 1 ,set max-dbs=1")
|
||||
args_list["--max-dbs"]=1
|
||||
else:
|
||||
pass
|
||||
|
||||
    # the environment is started by the test framework, not by this crash_gen instance
|
||||
|
||||
# elif args_list["--num-replicas"]==0:
|
||||
# print(" make sure num-replicas is at least 1 ")
|
||||
# args_list["--num-replicas"]=1
|
||||
# elif args_list["--num-replicas"]==1:
|
||||
# pass
|
||||
|
||||
# elif args_list["--num-replicas"]>1:
|
||||
# if not args_list["--auto-start-service"]:
|
||||
# print("it should be deployed by crash_gen auto-start-service for multi replicas")
|
||||
|
||||
# else:
|
||||
# pass
|
||||
|
||||
return args_list
|
||||
|
||||
def get_auto_mix_cmds(args_list, valgrind=valgrind_mode):
    build_path = get_path()
    if repo == "community":
        crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    elif repo == "TDengine":
        crash_gen_path = build_path[:-5] + "/tests/pytest/"
    else:
        pass

    bools_args_list = ["--auto-start-service", "--debug", "--run-tdengine", "--ignore-errors",
                       "--track-memory-leaks", "--larger-data", "--mix-oos-data", "--dynamic-db-table-names",
                       "--per-thread-db-connection", "--record-ops", "--verify-data", "--use-shadow-db",
                       "--continue-on-exception"]
    arguments = ""
    for k, v in args_list.items():
        if k == "--ignore-errors":
            if v:
                arguments += (k + "=" + str(v) + " ")
            else:
                arguments += ""
        elif k in bools_args_list and v == True:
            arguments += (k + " ")
        elif k in bools_args_list and v == False:
            arguments += ""
        else:
            arguments += (k + "=" + str(v) + " ")

    if valgrind:
        crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203 ' % (crash_gen_path, arguments)
    else:
        crash_gen_cmd = 'cd %s && ./crash_gen.sh %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203' % (crash_gen_path, arguments)

    return crash_gen_cmd


def start_taosd():
    build_path = get_path()
    if repo == "community":
        start_path = build_path[:-5] + "community/tests/system-test/"
    elif repo == "TDengine":
        start_path = build_path[:-5] + "/tests/system-test/"
    else:
        pass

    start_cmd = 'cd %s && python3 test.py ' % (start_path)
    os.system(start_cmd + ">>/dev/null")


def get_cmds(args_list):
    # build_path = get_path()
    # if repo == "community":
    #     crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    # elif repo == "TDengine":
    #     crash_gen_path = build_path[:-5] + "/tests/pytest/"
    # else:
    #     pass

    # crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -p -t 10 -s 1000 -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 ' % (crash_gen_path)

    crash_gen_cmd = get_auto_mix_cmds(args_list, valgrind=valgrind_mode)
    return crash_gen_cmd


def run_crash_gen(crash_cmds):
    # prepare the taosd environment
    start_taosd()
    # run crash_gen and keep a copy of the command that was used
    os.system('echo "%s">>%s' % (crash_cmds, crash_gen_cmds_file))
    # os.system("cp %s %s" % (crash_gen_cmds_file, core_path))
    os.system("%s" % (crash_cmds))


def check_status():
    build_path = get_path()
    if repo == "community":
        crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    elif repo == "TDengine":
        crash_gen_path = build_path[:-5] + "/tests/pytest/"
    else:
        pass
    result_file = os.path.join(crash_gen_path, 'valgrind.out')
    run_code = subprocess.Popen("tail -n 50 %s" % result_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
    os.system("tail -n 50 %s>>%s" % (result_file, exit_status_logs))

    core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l' % core_path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")

    if int(core_check.strip().rstrip()) > 0:
        # core files have been generated
        return 3

    mem_status = check_memory()
    if mem_status > 0:
        return mem_status
    if "Crash_Gen is now exiting with status code: 1" in run_code:
        return 1
    elif "Crash_Gen is now exiting with status code: 0" in run_code:
        return 0
    else:
        return 2


def check_memory():
    build_path = get_path()
    if repo == "community":
        crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    elif repo == "TDengine":
        crash_gen_path = build_path[:-5] + "/tests/pytest/"
    else:
        pass
    '''
    check the valgrind report for invalid reads, invalid writes and memory leaks
    '''
    back_path = os.path.join(core_path, "valgrind_report")
    if not os.path.exists(back_path):
        os.mkdir(back_path)

    stderr_file = os.path.join(crash_gen_path, "valgrind.err")

    status = 0

    grep_res = subprocess.Popen("grep -i 'Invalid read' %s " % stderr_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
    if grep_res:
        # os.system("cp %s %s" % (stderr_file, back_path))
        status = 4

    grep_res = subprocess.Popen("grep -i 'Invalid write' %s " % stderr_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
    if grep_res:
        # os.system("cp %s %s" % (stderr_file, back_path))
        status = 4

    grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s " % stderr_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
    if grep_res:
        # os.system("cp %s %s" % (stderr_file, back_path))
        status = 5

    return status


def main():
    args_list = {"--auto-start-service": False, "--max-dbs": 0, "--connector-type": "native", "--debug": False,
                 "--run-tdengine": False, "--ignore-errors": [],
                 "--track-memory-leaks": False, "--larger-data": False, "--mix-oos-data": False,
                 "--dynamic-db-table-names": False,
                 "--per-thread-db-connection": False, "--record-ops": False, "--max-steps": 100,
                 "--num-threads": 10, "--verify-data": False, "--use-shadow-db": False,
                 "--continue-on-exception": False}

    args = random_args(args_list)
    args = limits(args)

    build_path = get_path()
    os.system("pip install git+https://github.com/taosdata/taos-connector-python.git >>/dev/null")
    if repo == "community":
        crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    elif repo == "TDengine":
        crash_gen_path = build_path[:-5] + "/tests/pytest/"
    else:
        pass

    if os.path.exists(crash_gen_path + "crash_gen.sh"):
        print("crash_gen.sh is ready")
    else:
        print("crash_gen.sh does not exist")
        sys.exit(1)

    git_commit = subprocess.Popen("cd %s && git log | head -n1" % crash_gen_path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[8:16]

    # crash_cmds = get_cmds()
    crash_cmds = get_cmds(args)

    # clean run_dir
    os.system('rm -rf %s' % run_dir)
    if not os.path.exists(run_dir):
        os.mkdir(run_dir)
    print(crash_cmds)
    run_crash_gen(crash_cmds)
    status = check_status()
    # back_path = os.path.join(core_path, "valgrind_report")

    print("exit status : ", status)

    if status == 4:
        print('======== crash_gen found memory bugs ========')
    if status == 5:
        print('======== crash_gen found memory errors ========')
    if status > 0:
        print('======== crash_gen run failed and did not exit as expected ========')
    else:
        print('======== crash_gen ran successfully and exited as expected ========')

    if status != 0:
        try:
            text = f"crash_gen instance exit status of docker [ {hostname} ] is : {msg_dict[status]}\n " + f" and git commit : {git_commit}"
            send_msg(get_msg(text))
        except Exception as e:
            print("exception:", e)
    exit(status)


if __name__ == '__main__':
    main()
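As a quick illustration of how `random_args`, `limits`, and `get_auto_mix_cmds` fit together, the sketch below mirrors only the flag-joining rule with made-up argument values (the real values are randomized at run time, and the crash_gen path and `-g` error list come from the script above):

```python
# Minimal, self-contained sketch of the flag-joining rule used above:
# True booleans become bare switches, False booleans are dropped,
# everything else becomes key=value. Values here are illustrative.
bools = {"--per-thread-db-connection", "--debug", "--larger-data"}
sample = {"--per-thread-db-connection": True, "--debug": False,
          "--max-dbs": 7, "--num-threads": 32, "--connector-type": "native"}

parts = []
for k, v in sample.items():
    if k in bools:
        if v:
            parts.append(k)            # bare switch when True, omitted when False
    else:
        parts.append(f"{k}={v}")       # int/str arguments become key=value

print("./crash_gen.sh " + " ".join(parts))
# ./crash_gen.sh --per-thread-db-connection --max-dbs=7 --num-threads=32 --connector-type=native
```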
@@ -0,0 +1,399 @@
#!/usr/bin/python3
# -*- coding: utf-8 -*-

import os
import sys
import socket
import random
import argparse
import subprocess
import time
import platform
import requests

# run crash_gen under valgrind?
valgrind_mode = True

msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occurred", 4: "Invalid read/write", 5: "memory leak"}

# formal
hostname = socket.gethostname()

group_url = 'https://open.feishu.cn/open-apis/bot/v2/hook/56c333b5-eae9-4c18-b0b6-7e4b7174f5c9'


def get_msg(text):
    return {
        "msg_type": "post",
        "content": {
            "post": {
                "zh_cn": {
                    "title": "Crash_gen Monitor",
                    "content": [
                        [{
                            "tag": "text",
                            "text": text
                        }]
                    ]
                }
            }
        }
    }


def send_msg(json):
    headers = {
        'Content-Type': 'application/json'
    }

    req = requests.post(url=group_url, headers=headers, json=json)
    inf = req.json()
    if "StatusCode" in inf and inf["StatusCode"] == 0:
        pass
    else:
        print(inf)


# set up paths for this run instance

core_path = subprocess.Popen("cat /proc/sys/kernel/core_pattern", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
core_path = "/".join(core_path.split("/")[:-1])
print(" ======= core path is %s ======== " % core_path)
if not os.path.exists(core_path):
    os.mkdir(core_path)

base_dir = os.path.dirname(os.path.realpath(__file__))
if base_dir.find("community") > 0:
    repo = "community"
elif base_dir.find("TDengine") > 0:
    repo = "TDengine"
else:
    repo = "TDengine"
print("base_dir:", base_dir)
home_dir = base_dir[:base_dir.find(repo)]
print("home_dir:", home_dir)
run_dir = os.path.join(home_dir, 'run_dir')
run_dir = os.path.abspath(run_dir)
print("run dir is *** :", run_dir)
if not os.path.exists(run_dir):
    os.mkdir(run_dir)
run_log_file = run_dir + '/crash_gen_run.log'
crash_gen_cmds_file = os.path.join(run_dir, 'crash_gen_cmds.sh')
exit_status_logs = os.path.join(run_dir, 'crash_exit.log')


def get_path():
    buildPath = ''
    selfPath = os.path.dirname(os.path.realpath(__file__))
    if ("community" in selfPath):
        projPath = selfPath[:selfPath.find("community")]
    else:
        projPath = selfPath[:selfPath.find("tests")]

    for root, dirs, files in os.walk(projPath):
        if ("taosd" in files):
            rootRealPath = os.path.dirname(os.path.realpath(root))
            if ("packaging" not in rootRealPath):
                buildPath = root[:len(root) - len("/build/bin")]
                break
    return buildPath


# generate a crash_gen start command with randomized arguments
def random_args(args_list):
    nums_args_list = ["--max-dbs", "--num-replicas", "--num-dnodes", "--max-steps", "--num-threads"]  # int type arguments
    bools_args_list = ["--auto-start-service", "--debug", "--run-tdengine", "--ignore-errors",
                       "--track-memory-leaks", "--larger-data", "--mix-oos-data", "--dynamic-db-table-names",
                       "--per-thread-db-connection", "--record-ops", "--verify-data", "--use-shadow-db",
                       "--continue-on-exception"]  # bool type arguments
    strs_args_list = ["--connector-type"]  # str type arguments

    args_list["--auto-start-service"] = False
    args_list["--continue-on-exception"] = True
    # connect_types = ['native', 'rest', 'mixed']  # the RESTful interface has changed; db names would need to be
    # bound to the connection or the SQL qualified as "db.test" before rest/mixed can be enabled
    connect_types = ['native']
    # args_list["--connector-type"] = connect_types[random.randint(0, 2)]
    args_list["--connector-type"] = connect_types[0]
    args_list["--max-dbs"] = random.randint(1, 10)

    # dnodes = [1, 3]  # set single dnodes
    # args_list["--num-dnodes"] = random.sample(dnodes, 1)[0]
    # args_list["--num-replicas"] = random.randint(1, args_list["--num-dnodes"])
    args_list["--debug"] = False
    args_list["--per-thread-db-connection"] = True
    args_list["--track-memory-leaks"] = False

    args_list["--max-steps"] = random.randint(200, 500)

    threads = [16, 32]
    args_list["--num-threads"] = random.sample(threads, 1)[0]
    # args_list["--ignore-errors"] = []  # error codes to ignore can be listed here

    args_list["--run-tdengine"] = False
    args_list["--use-shadow-db"] = False
    args_list["--dynamic-db-table-names"] = True
    args_list["--verify-data"] = False
    args_list["--record-ops"] = False

    # every bool argument fixed above keeps its value; the remaining ones are randomized
    fixed_keys = ["--auto-start-service", "--run-tdengine", "--ignore-errors", "--debug",
                  "--per-thread-db-connection", "--continue-on-exception", "--use-shadow-db",
                  "--track-memory-leaks", "--dynamic-db-table-names", "--verify-data",
                  "--record-ops", "--larger-data"]
    for key in bools_args_list:
        if key in fixed_keys:
            continue
        args_list[key] = random.choice([True, False])
    return args_list


def limits(args_list):
    if args_list["--use-shadow-db"] == True:
        if args_list["--max-dbs"] > 1:
            print("Cannot combine use-shadow-db with max-dbs greater than 1, setting max-dbs=1")
            args_list["--max-dbs"] = 1
        else:
            pass

    # the environment is started by the test frame, not by the crash_gen instance
    # elif args_list["--num-replicas"] == 0:
    #     print("make sure num-replicas is at least 1")
    #     args_list["--num-replicas"] = 1
    # elif args_list["--num-replicas"] == 1:
    #     pass
    # elif args_list["--num-replicas"] > 1:
    #     if not args_list["--auto-start-service"]:
    #         print("it should be deployed with crash_gen auto-start-service for multiple replicas")
    # else:
    #     pass

    return args_list


def get_auto_mix_cmds(args_list, valgrind=valgrind_mode):
    build_path = get_path()
    if repo == "community":
        crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    elif repo == "TDengine":
        crash_gen_path = build_path[:-5] + "/tests/pytest/"
    else:
        pass

    bools_args_list = ["--auto-start-service", "--debug", "--run-tdengine", "--ignore-errors",
                       "--track-memory-leaks", "--larger-data", "--mix-oos-data", "--dynamic-db-table-names",
                       "--per-thread-db-connection", "--record-ops", "--verify-data", "--use-shadow-db",
                       "--continue-on-exception"]
    arguments = ""
    for k, v in args_list.items():
        if k == "--ignore-errors":
            if v:
                arguments += (k + "=" + str(v) + " ")
            else:
                arguments += ""
        elif k in bools_args_list and v == True:
            arguments += (k + " ")
        elif k in bools_args_list and v == False:
            arguments += ""
        else:
            arguments += (k + "=" + str(v) + " ")

    if valgrind:
        crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203 ' % (crash_gen_path, arguments)
    else:
        crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203' % (crash_gen_path, arguments)

    return crash_gen_cmd


def start_taosd():
    build_path = get_path()
    if repo == "community":
        start_path = build_path[:-5] + "community/tests/system-test/"
    elif repo == "TDengine":
        start_path = build_path[:-5] + "/tests/system-test/"
    else:
        pass

    start_cmd = 'cd %s && python3 test.py -N 4 -M 1 ' % (start_path)
    os.system(start_cmd + ">>/dev/null")


def get_cmds(args_list):
    # build_path = get_path()
    # if repo == "community":
    #     crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    # elif repo == "TDengine":
    #     crash_gen_path = build_path[:-5] + "/tests/pytest/"
    # else:
    #     pass

    # crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -p -t 10 -s 1000 -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550 ' % (crash_gen_path)

    crash_gen_cmd = get_auto_mix_cmds(args_list, valgrind=valgrind_mode)
    return crash_gen_cmd


def run_crash_gen(crash_cmds):
    # prepare the taosd environment
    start_taosd()
    # run crash_gen and keep a copy of the command that was used
    os.system('echo "%s">>%s' % (crash_cmds, crash_gen_cmds_file))
    # os.system("cp %s %s" % (crash_gen_cmds_file, core_path))
    os.system("%s" % (crash_cmds))


def check_status():
    build_path = get_path()
    if repo == "community":
        crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    elif repo == "TDengine":
        crash_gen_path = build_path[:-5] + "/tests/pytest/"
    else:
        pass
    result_file = os.path.join(crash_gen_path, 'valgrind.out')
    run_code = subprocess.Popen("tail -n 50 %s" % result_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
    os.system("tail -n 50 %s>>%s" % (result_file, exit_status_logs))

    core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l' % core_path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")

    if int(core_check.strip().rstrip()) > 0:
        # core files have been generated
        return 3

    mem_status = check_memory()
    if mem_status > 0:
        return mem_status
    if "Crash_Gen is now exiting with status code: 1" in run_code:
        return 1
    elif "Crash_Gen is now exiting with status code: 0" in run_code:
        return 0
    else:
        return 2


def check_memory():
    build_path = get_path()
    if repo == "community":
        crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    elif repo == "TDengine":
        crash_gen_path = build_path[:-5] + "/tests/pytest/"
    else:
        pass
    '''
    check the valgrind report for invalid reads, invalid writes and memory leaks
    '''
    back_path = os.path.join(core_path, "valgrind_report")
    if not os.path.exists(back_path):
        os.mkdir(back_path)

    stderr_file = os.path.join(crash_gen_path, "valgrind.err")

    status = 0

    grep_res = subprocess.Popen("grep -i 'Invalid read' %s " % stderr_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
    if grep_res:
        # os.system("cp %s %s" % (stderr_file, back_path))
        status = 4

    grep_res = subprocess.Popen("grep -i 'Invalid write' %s " % stderr_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
    if grep_res:
        # os.system("cp %s %s" % (stderr_file, back_path))
        status = 4

    grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s " % stderr_file, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
    if grep_res:
        # os.system("cp %s %s" % (stderr_file, back_path))
        status = 5

    return status


def main():
    args_list = {"--auto-start-service": False, "--max-dbs": 0, "--connector-type": "native", "--debug": False,
                 "--run-tdengine": False, "--ignore-errors": [],
                 "--track-memory-leaks": False, "--larger-data": False, "--mix-oos-data": False,
                 "--dynamic-db-table-names": False,
                 "--per-thread-db-connection": False, "--record-ops": False, "--max-steps": 100,
                 "--num-threads": 10, "--verify-data": False, "--use-shadow-db": False,
                 "--continue-on-exception": False}

    args = random_args(args_list)
    args = limits(args)

    build_path = get_path()
    os.system("pip install git+https://github.com/taosdata/taos-connector-python.git >>/dev/null")
    if repo == "community":
        crash_gen_path = build_path[:-5] + "community/tests/pytest/"
    elif repo == "TDengine":
        crash_gen_path = build_path[:-5] + "/tests/pytest/"
    else:
        pass

    if os.path.exists(crash_gen_path + "crash_gen.sh"):
        print("crash_gen.sh is ready")
    else:
        print("crash_gen.sh does not exist")
        sys.exit(1)

    git_commit = subprocess.Popen("cd %s && git log | head -n1" % crash_gen_path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[8:16]

    # crash_cmds = get_cmds()
    crash_cmds = get_cmds(args)

    # clean run_dir
    os.system('rm -rf %s' % run_dir)
    if not os.path.exists(run_dir):
        os.mkdir(run_dir)
    print(crash_cmds)
    run_crash_gen(crash_cmds)
    status = check_status()
    # back_path = os.path.join(core_path, "valgrind_report")

    print("exit status : ", status)

    if status == 4:
        print('======== crash_gen found memory bugs ========')
    if status == 5:
        print('======== crash_gen found memory errors ========')
    if status > 0:
        print('======== crash_gen run failed and did not exit as expected ========')
    else:
        print('======== crash_gen ran successfully and exited as expected ========')

    if status != 0:
        try:
            text = f"crash_gen instance exit status of docker [ {hostname} ] is : {msg_dict[status]}\n " + f" and git commit : {git_commit}"
            send_msg(get_msg(text))
        except Exception as e:
            print("exception:", e)
    exit(status)


if __name__ == '__main__':
    main()
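For reference, the notification sent on a non-zero exit status is a Feishu "post" message built by `get_msg` and POSTed by `send_msg`. The sketch below only constructs and prints such a payload; the status value, hostname and commit shown are fabricated, and no request is actually sent:

```python
# Hypothetical example of the payload that send_msg() would POST to the
# Feishu webhook when crash_gen exits abnormally. Status 3 maps to
# "crash occurred" in msg_dict; hostname and commit here are made up.
status, git_commit, hostname = 3, "abcd1234", "vm_valgrind_20221001-1"
msg_dict = {0: "success", 1: "failed", 2: "other errors",
            3: "crash occurred", 4: "Invalid read/write", 5: "memory leak"}

text = (f"crash_gen instance exit status of docker [ {hostname} ] is : "
        f"{msg_dict[status]}\n  and git commit : {git_commit}")
payload = {
    "msg_type": "post",
    "content": {"post": {"zh_cn": {"title": "Crash_gen Monitor",
                                   "content": [[{"tag": "text", "text": text}]]}}},
}
print(payload)  # send_msg(payload) would POST this as JSON
```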
@@ -0,0 +1,11 @@
#!/bin/bash

# set PATH and LD_LIBRARY_PATH for the TDengine client library
export PATH=$PATH:/home/TDengine/debug/build/bin
export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null
ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null

# run the crash_gen auto script
python3 /home/TDengine/tests/pytest/auto_crash_gen.py
@@ -0,0 +1,11 @@
#!/bin/bash

# set PATH and LD_LIBRARY_PATH for the TDengine client library
export PATH=$PATH:/home/TDengine/debug/build/bin
export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null
ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null

# run the crash_gen auto script (valgrind variant)
python3 /home/TDengine/tests/pytest/auto_crash_gen_valgrind.py
@@ -0,0 +1,11 @@
#!/bin/bash

# set PATH and LD_LIBRARY_PATH for the TDengine client library
export PATH=$PATH:/home/TDengine/debug/build/bin
export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null
ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null
ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null

# run the crash_gen auto script (valgrind cluster variant)
python3 /home/TDengine/tests/pytest/auto_crash_gen_valgrind_cluster.py
@@ -0,0 +1,31 @@
#!/bin/bash

mkdir -p /data/wz/crash_gen_logs/
logdir='/data/wz/crash_gen_logs/'
date_tag=`date +%Y%m%d-%H%M%S`
hostname='vm_valgrind_'

for i in {1..50}
do
    echo $i
    # create a docker container and start crash_gen inside it
    log_dir=${logdir}${hostname}${date_tag}_${i}
    docker run -d --hostname=${hostname}${date_tag}_${i} --name ${hostname}${date_tag}_${i} --privileged -v ${log_dir}:/corefile/ -- crash_gen:v1.0 sleep 99999999999999
    echo create docker ${hostname}${date_tag}_${i}
    docker exec -d ${hostname}${date_tag}_${i} sh -c 'rm -rf /home/taos-connector-python'
    docker cp /data/wz/TDengine ${hostname}${date_tag}_${i}:/home/TDengine
    docker cp /data/wz/taos-connector-python ${hostname}${date_tag}_${i}:/home/taos-connector-python
    echo copy TDengine into container done!
    docker exec ${hostname}${date_tag}_${i} sh -c 'sh /home/TDengine/tests/pytest/auto_run_valgrind.sh '
    if [ $? -eq 0 ]
    then
        echo crash_gen exited as expected, run succeeded

        # remove containers whose run succeeded
        docker stop ${hostname}${date_tag}_${i}
        docker rm -f ${hostname}${date_tag}_${i}
    else
        # keep the failed container (stopped) for debugging
        docker stop ${hostname}${date_tag}_${i}
        echo crash_gen exited with an error, run failed
    fi
done
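Each iteration above derives the container name and the mounted log directory from the same prefix, date tag and loop index, so core files and valgrind reports can be traced back to the container that produced them. A small sketch of that naming, with a fabricated date tag:

```python
# Sketch of the naming scheme used by the loop above: one container and one
# host log directory per iteration. The date tag below is fabricated.
logdir = "/data/wz/crash_gen_logs/"
prefix = "vm_valgrind_"
date_tag = "20221001-120000"

for i in range(1, 4):                  # the real script loops 1..50
    name = f"{prefix}{date_tag}_{i}"   # used for docker --hostname and --name
    log_dir = f"{logdir}{name}"        # mounted into the container as /corefile/
    print(name, "->", log_dir)
```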
@@ -11,6 +11,9 @@ set -e
VALGRIND=0
LOG_BK_DIR=/data/valgrind_log_backup # 192.168.0.203
SIM_FILES=./jenkins/basic.txt
cases_task_file=../parallel_test/cases.task

cat $cases_task_file | grep "./test.sh " | awk -F, '{print $5}' > $SIM_FILES

while getopts "v:r:f:" arg
do
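The `cat | grep | awk` pipeline above regenerates `jenkins/basic.txt` from the parallel-test case list by keeping the fifth comma-separated field of every `./test.sh ` line. An equivalent sketch in Python (file names taken from the script, assumed to exist relative to the working directory):

```python
# Python equivalent of: cat cases.task | grep "./test.sh " | awk -F, '{print $5}'
# Writes the fifth comma-separated field of every "./test.sh " line into basic.txt.
with open("../parallel_test/cases.task") as src, open("./jenkins/basic.txt", "w") as dst:
    for line in src:
        if "./test.sh " in line:
            fields = line.rstrip("\n").split(",")
            if len(fields) >= 5:
                dst.write(fields[4] + "\n")
```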
@@ -21,9 +24,9 @@ do
        r)
            LOG_BK_DIR=$(echo $OPTARG)
            ;;
        f)
            SIM_FILES=$(echo $OPTARG)
            ;;
        #f)
        #    SIM_FILES=$(echo $OPTARG)
        #    ;;
        ?) # unknown option
            echo "unknown argument"
            exit 1
@@ -21,15 +21,24 @@ LOG_DIR=$TAOS_DIR/sim/asan
error_num=`cat ${LOG_DIR}/*.asan | grep "ERROR" | wc -l`
memory_leak=`cat ${LOG_DIR}/*.asan | grep "Direct leak" | wc -l`
indirect_leak=`cat ${LOG_DIR}/*.asan | grep "Indirect leak" | wc -l`
python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l`

# ignore

# TD-20368
# /root/TDengine/contrib/zlib/trees.c:873:5: runtime error: null pointer passed as argument 2, which is declared to never be null

# TD-20494 TD-20452
# /root/TDengine/source/libs/scalar/src/sclfunc.c:735:11: runtime error: 4.75783e+11 is outside the range of representable values of type 'signed char'
# /root/TDengine/source/libs/scalar/src/sclfunc.c:790:11: runtime error: 3.4e+38 is outside the range of representable values of type 'long int'
# /root/TDengine/source/libs/scalar/src/sclfunc.c:772:11: runtime error: 3.52344e+09 is outside the range of representable values of type 'int'
# /root/TDengine/source/libs/scalar/src/sclfunc.c:753:11: runtime error: 4.75783e+11 is outside the range of representable values of type 'short int'
runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type" | wc -l`

python_error=`cat ${LOG_DIR}/*.info | grep -w "stack" | wc -l`
# TD-20569
# /root/TDengine/source/libs/function/src/builtinsimpl.c:856:29: runtime error: signed integer overflow: 9223372036854775806 + 9223372036854775805 cannot be represented in type 'long int'
# /root/TDengine/source/libs/scalar/src/sclvector.c:1075:66: runtime error: signed integer overflow: 9223372034707292160 + 1668838476672 cannot be represented in type 'long int'

runtime_error=`cat ${LOG_DIR}/*.asan | grep "runtime error" | grep -v "trees.c:873" | grep -v "sclfunc.c.*outside the range of representable values of type"| grep -v "builtinsimpl.c.*signed integer overflow"| grep -v "sclvector.c.*signed integer overflow" | wc -l`

echo -e "\033[44;32;1m"asan error_num: $error_num"\033[0m"
echo -e "\033[44;32;1m"asan memory_leak: $memory_leak"\033[0m"
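The final `runtime_error` count above is simply the number of "runtime error" lines in the ASAN logs minus the lines matching the known, tracked issues. A minimal Python sketch of the same filtering, with the log location assumed and the skip patterns taken from the script:

```python
import glob
import re

# Sketch of the ASAN runtime-error filter above: count "runtime error" lines,
# skipping patterns tied to known issues (TD-20368, TD-20494/TD-20452, TD-20569).
LOG_DIR = "sim/asan"   # assumed relative location of the ASAN logs
ignored = [
    r"trees\.c:873",
    r"sclfunc\.c.*outside the range of representable values of type",
    r"builtinsimpl\.c.*signed integer overflow",
    r"sclvector\.c.*signed integer overflow",
]

count = 0
for path in glob.glob(f"{LOG_DIR}/*.asan"):
    with open(path, errors="ignore") as f:
        for line in f:
            if "runtime error" in line and not any(re.search(p, line) for p in ignored):
                count += 1
print("runtime_error:", count)
```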
@ -26,3 +26,17 @@ while [ -n "$PID" ]; do
    fi
    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done

PID=`ps -ef|grep -w taos | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]; do
    echo kill -9 $PID
    #pkill -9 taosd
    kill -9 $PID
    echo "Killing taosd processes"
    if [ "$OS_TYPE" != "Darwin" ]; then
        fuser -k -n tcp 6030
    else
        lsof -nti:6030 | xargs kill -9
    fi
    PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
@@ -230,9 +230,11 @@ class TDTestCase:
                tdLog.exit('taos -n client fail!')
            finally:
                if platform.system().lower() == 'windows':
                    os.system('ps -a | grep taos | awk \'{print $2}\' | xargs kill -9')
                    tdLog.info("ps -a | grep taos | awk \'{print $2}\' | xargs kill -9")
                    # os.system('ps -a | grep taos | awk \'{print $2}\' | xargs kill -9')
                else:
                    os.system('pkill -9 taos')
                    tdLog.info("pkill -9 taos")
                    # os.system('pkill -9 taos')

    def stop(self):
        tdSql.close()
@@ -149,6 +149,8 @@ class TDTestCase:
        tmqCom.waitSubscriptionExit(tdSql, topicFromStb)
        tdSql.query("drop topic %s"%topicFromStb)

        tmqCom.stopTmqSimProcess(processorName="tmq_sim")

        tdLog.printNoPrefix("======== test case 1 end ...... ")
@@ -178,6 +180,8 @@ class TDTestCase:
        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tmqCom.initConsumerTable()

        tdLog.info("create topics from stb")
        topicFromDb = 'topic_db'
@@ -203,10 +207,10 @@ class TDTestCase:
        tmqCom.getStartCommitNotifyFromTmqsim('cdb',1)

        tdLog.info("create some new child table and insert data for latest mode")
        paraDict["batchNum"] = 100
        paraDict["batchNum"] = 10
        paraDict["ctbPrefix"] = 'newCtb'
        paraDict["ctbNum"] = 10
        paraDict["rowsPerTbl"] = 10
        paraDict["ctbNum"] = 100
        paraDict["rowsPerTbl"] = 100
        tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])

        tdLog.info("================= restart dnode ===========================")