Merge branch '3.0' of https://github.com/taosdata/TDengine into enh/row_optimize

commit 93089916e4
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
  SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
  SET(TD_VER_NUMBER "3.0.1.6")
  SET(TD_VER_NUMBER "3.0.1.7")
ENDIF ()

IF (DEFINED VERCOMPATIBLE)
@@ -23,7 +23,7 @@ The following characters cannot occur in a password: single quotation marks ('),

## General Limits

- Maximum length of database name is 32 bytes
- Maximum length of database name is 64 bytes
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
- Maximum length of each data row is 48 KB. Note that this limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
- The maximum length of a column name is 64 bytes.

@@ -35,7 +35,7 @@ The following characters cannot occur in a password: single quotation marks ('),

- The maximum numbers of databases, STables, and tables depend only on system resources.
- The number of replicas can only be 1 or 3.
- The maximum length of a username is 23 bytes.
- The maximum length of a password is 15 bytes.
- The maximum length of a password is 128 bytes.
- The maximum number of rows depends on system resources.
- The maximum number of vnodes in a database is 1024.
@@ -8,6 +8,9 @@ will automatically add the required columns to ensure that the data written by t

The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL, and you can write data to them directly with SQL statements. Note that the names of tables created by schemaless writing are generated from tag values according to fixed mapping rules, so they are not self-explanatory and lack readability.

Tips:
Schemaless writing creates tables automatically; you do not need to create tables manually, and doing so may cause unknown errors.

## Schemaless Writing Line Protocol

TDengine's schemaless writing line protocol supports InfluxDB's Line Protocol, the OpenTSDB telnet line protocol, and the OpenTSDB JSON protocol. When using these three protocols, you must specify in the API which protocol standard should be used to parse the input.
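A minimal C sketch of writing one line-protocol record through the schemaless API described above. It assumes the TDengine 3.0 C client (`taos.h`) with `taos_schemaless_insert()`, `TSDB_SML_LINE_PROTOCOL`, and `TSDB_SML_TIMESTAMP_MILLI_SECONDS`; verify the names against your installed header before relying on them.

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
  // The target database must already exist; schemaless writing creates tables, not databases.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }

  // One InfluxDB line-protocol record: measurement,tags fields timestamp (ms).
  // The super table "meters" and its subtable are created automatically.
  char *lines[] = {
      "meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219,phase=0.31 1666600000000"};

  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```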
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w

import Release from "/components/ReleaseV3";

## 3.0.1.7

<Release type="tdengine" version="3.0.1.7" />

## 3.0.1.6

<Release type="tdengine" version="3.0.1.6" />
@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat

import Release from "/components/ReleaseV3";

## 2.2.9

<Release type="tools" version="2.2.9" />

## 2.2.7

<Release type="tools" version="2.2.7" />
@@ -24,19 +24,19 @@ description: Legal character sets and naming restrictions

## General Limits

- Maximum length of a database name is 32
- Maximum length of a table name is 192, excluding the database name prefix and the separator
- Maximum length of a database name is 64 bytes
- Maximum length of a table name is 192 bytes, excluding the database name prefix and the separator
- Maximum length of each data row is 48 KB (note: each BINARY/NCHAR column in a row consumes an extra 2 bytes of storage)
- Maximum length of a column name is 64
- Maximum length of a column name is 64 bytes
- At most 4096 columns are allowed; at least 2 columns are required, and the first column must be the timestamp
- Maximum length of a tag name is 64
- Maximum length of a tag name is 64 bytes
- At most 128 tags are allowed; at least 1 tag is required, and the total length of tag values in a table cannot exceed 16 KB
- Maximum length of an SQL statement is 1,048,576 characters
- A SELECT query may return at most 4096 columns (function calls in the statement may also occupy column slots); when the limit is exceeded, explicitly specify fewer output columns to avoid execution errors
- The numbers of databases, supertables, and tables are not limited by the system and depend only on system resources
- The number of database replicas can only be 1 or 3
- Maximum length of a username is 23 bytes
- Maximum length of a user password is 15 bytes
- Maximum length of a username is 23 bytes
- Maximum length of a user password is 128 bytes
- The total number of data rows depends on available resources
- Maximum number of vnodes in a single database is 1024
@@ -8,6 +8,8 @@ description: 'Schemaless writing, which avoids creating supertables/subtables in advance

Super tables and their subtables created through schemaless writing are exactly the same as those created directly via SQL, and you can also write data into them directly with SQL statements. Note that the names of tables created through schemaless writing are generated from tag values according to fixed mapping rules, so they are not self-explanatory and lack readability.

Note: schemaless writing creates tables automatically; there is no need to create tables manually, and doing so may cause unknown errors.

## Schemaless Writing Line Protocol

TDengine's schemaless writing line protocol is compatible with InfluxDB's Line Protocol, the OpenTSDB telnet line protocol, and the OpenTSDB JSON protocol. When using these three protocols, you must specify in the API which protocol standard should be used to parse the input.
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://www.taosdata.com/all-do

import Release from "/components/ReleaseV3";

## 3.0.1.7

<Release type="tdengine" version="3.0.1.7" />

## 3.0.1.6

<Release type="tdengine" version="3.0.1.6" />
@@ -10,6 +10,10 @@ Download links for taosTools installation packages by version:

import Release from "/components/ReleaseV3";

## 2.2.9

<Release type="tools" version="2.2.9" />

## 2.2.7

<Release type="tools" version="2.2.7" />
@ -3169,8 +3169,7 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
SMsgHead header;
|
||||
int32_t msgNum;
|
||||
SBatchMsg msg[];
|
||||
SArray* pMsgs; //SArray<SBatchMsg>
|
||||
} SBatchReq;
|
||||
|
||||
typedef struct {
|
||||
|
@ -3179,17 +3178,40 @@ typedef struct {
|
|||
int32_t msgLen;
|
||||
int32_t rspCode;
|
||||
void* msg;
|
||||
} SBatchRspMsg;
|
||||
|
||||
typedef struct {
|
||||
SArray* pRsps; //SArray<SBatchRspMsg>
|
||||
} SBatchRsp;
|
||||
|
||||
static FORCE_INLINE void tFreeSBatchRsp(void* p) {
|
||||
int32_t tSerializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq);
|
||||
int32_t tDeserializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq);
|
||||
static FORCE_INLINE void tFreeSBatchReqMsg(void* msg) {
|
||||
if (NULL == msg) {
|
||||
return;
|
||||
}
|
||||
SBatchMsg* pMsg = (SBatchMsg*)msg;
|
||||
taosMemoryFree(pMsg->msg);
|
||||
}
|
||||
|
||||
int32_t tSerializeSBatchRsp(void *buf, int32_t bufLen, SBatchRsp *pRsp);
|
||||
int32_t tDeserializeSBatchRsp(void *buf, int32_t bufLen, SBatchRsp *pRsp);
|
||||
|
||||
static FORCE_INLINE void tFreeSBatchRspMsg(void* p) {
|
||||
if (NULL == p) {
|
||||
return;
|
||||
}
|
||||
|
||||
SBatchRsp* pRsp = (SBatchRsp*)p;
|
||||
SBatchRspMsg* pRsp = (SBatchRspMsg*)p;
|
||||
taosMemoryFree(pRsp->msg);
|
||||
}
|
||||
|
||||
int32_t tSerializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq);
|
||||
int32_t tDeserializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq);
|
||||
int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq);
|
||||
int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq);
|
||||
|
||||
|
||||
#pragma pack(pop)
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
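The hunk above replaces the flexible-array `SBatchMsg msg[]` layout with an `SArray`-based `SBatchReq`/`SBatchRsp` plus explicit (de)serializers. A minimal sketch of the intended two-pass usage (size with a NULL buffer, then allocate and encode), mirroring `ctgBuildBatchReqMsg` later in this commit; the internal headers and `taosMemory*` helpers are assumed from this repository, and the function name is hypothetical.

```c
#include "tmsg.h"  // SBatchReq, SBatchMsg, tSerializeSBatchReq (this repo)

// Build the wire buffer for a batch request targeting one vgroup.
static int32_t buildBatchReq(int32_t vgId, SArray *pMsgs, void **ppBuf, int32_t *pLen) {
  SBatchReq req = {0};
  req.header.vgId = vgId;  // host order; tSerializeSBatchReq writes the network-order msg head
  req.pMsgs = pMsgs;       // SArray<SBatchMsg> assembled by the caller

  int32_t len = tSerializeSBatchReq(NULL, 0, &req);  // pass 1: compute the encoded size
  if (len < 0) return -1;

  void *buf = taosMemoryCalloc(1, len);
  if (buf == NULL) return -1;

  if (tSerializeSBatchReq(buf, len, &req) < 0) {     // pass 2: encode into the buffer
    taosMemoryFree(buf);
    return -1;
  }

  *ppBuf = buf;
  *pLen = len;
  return 0;
}
```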
@ -30,6 +30,7 @@ typedef struct SVariant {
|
|||
int64_t i;
|
||||
uint64_t u;
|
||||
double d;
|
||||
float f;
|
||||
char *pz;
|
||||
TdUcs4 *ucs4;
|
||||
SArray *arr; // only for 'in' query to hold value list, not value for a field
|
||||
|
@ -47,7 +48,7 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc);
|
|||
|
||||
int32_t taosVariantCompare(const SVariant *p1, const SVariant *p2);
|
||||
|
||||
char *taosVariantGet(SVariant *pVar, int32_t type);
|
||||
char *taosVariantGet(SVariant *pVar, int32_t type);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -351,7 +351,7 @@ typedef struct SVgDataBlocks {
|
|||
SVgroupInfo vg;
|
||||
int32_t numOfTables; // number of tables in current submit block
|
||||
uint32_t size;
|
||||
void* pData; // SMsgDesc + SSubmitReq + SSubmitBlk + ...
|
||||
void* pData; // SSubmitReq + SSubmitBlk + ...
|
||||
} SVgDataBlocks;
|
||||
|
||||
typedef void (*FFreeDataBlockHash)(SHashObj*);
|
||||
|
|
|
@ -47,7 +47,7 @@ typedef struct SUpdateInfo {
|
|||
|
||||
SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
|
||||
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
|
||||
void updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol);
|
||||
TSKEY updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol);
|
||||
bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
|
||||
bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid);
|
||||
void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow *pWin, uint64_t groupId, uint64_t version);
|
||||
|
|
|
@ -489,6 +489,9 @@ enum {
|
|||
#define MAX_META_MSG_IN_BATCH 1048576
|
||||
#define MAX_META_BATCH_RSP_SIZE (1 * 1048576 * 1024)
|
||||
|
||||
// sort page size by default
|
||||
#define DEFAULT_PAGESIZE 4096
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -73,10 +73,10 @@
# compressColData -1

# system time zone
# timezone Asia/Shanghai (CST, +0800)
# timezone UTC-8

# system time zone (for windows 10)
# timezone UTC-8
# timezone Asia/Shanghai (CST, +0800)

# system locale
# locale en_US.UTF-8
@@ -1,3 +1,6 @@
TDengine is a highly efficient, scalable, and highly available distributed time-series database. It is heavily optimized for inserting and querying data and is far more efficient than ordinary databases, so it can meet the demands of IoT and other fields that store and query large amounts of data.

TDengine will be installed under C:\TDengine. Users can modify the configuration file C:\TDengine\cfg\taos.cfg to set the log file path or other parameters.
To start/stop TDengine with administrator privileges: sc start/stop taosd
To start/stop taosAdapter with administrator privileges: sc start/stop taosadapter
Please manually remove C:\TDengine from your system PATH environment variable after you uninstall TDengine.
@ -728,12 +728,26 @@ void tmqSendHbReq(void* param, void* tmrId) {
|
|||
taosMemoryFree(param);
|
||||
return;
|
||||
}
|
||||
int64_t consumerId = tmq->consumerId;
|
||||
int32_t epoch = tmq->epoch;
|
||||
SMqHbReq* pReq = taosMemoryMalloc(sizeof(SMqHbReq));
|
||||
if (pReq == NULL) goto OVER;
|
||||
pReq->consumerId = htobe64(consumerId);
|
||||
pReq->epoch = epoch;
|
||||
|
||||
SMqHbReq req = {0};
|
||||
req.consumerId = tmq->consumerId;
|
||||
req.epoch = tmq->epoch;
|
||||
|
||||
int32_t tlen = tSerializeSMqHbReq(NULL, 0, &req);
|
||||
if (tlen < 0) {
|
||||
tscError("tSerializeSMqHbReq failed");
|
||||
return;
|
||||
}
|
||||
void *pReq = taosMemoryCalloc(1, tlen);
|
||||
if (pReq == NULL) {
|
||||
tscError("failed to malloc MqHbReq msg, size:%d", tlen);
|
||||
return;
|
||||
}
|
||||
if (tSerializeSMqHbReq(pReq, tlen, &req) < 0) {
|
||||
tscError("tSerializeSMqHbReq %d failed", tlen);
|
||||
taosMemoryFree(pReq);
|
||||
return;
|
||||
}
|
||||
|
||||
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
|
||||
if (sendInfo == NULL) {
|
||||
|
@ -742,7 +756,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
|
|||
}
|
||||
sendInfo->msgInfo = (SDataBuf){
|
||||
.pData = pReq,
|
||||
.len = sizeof(SMqHbReq),
|
||||
.len = tlen,
|
||||
.handle = NULL,
|
||||
};
|
||||
|
||||
|
@ -1378,21 +1392,31 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
|
|||
}
|
||||
atomic_store_32(&tmq->epSkipCnt, 0);
|
||||
#endif
|
||||
int32_t tlen = sizeof(SMqAskEpReq);
|
||||
SMqAskEpReq* req = taosMemoryCalloc(1, tlen);
|
||||
if (req == NULL) {
|
||||
tscError("failed to malloc get subscribe ep buf");
|
||||
/*atomic_store_8(&tmq->epStatus, 0);*/
|
||||
SMqAskEpReq req = {0};
|
||||
req.consumerId = tmq->consumerId;
|
||||
req.epoch = tmq->epoch;
|
||||
strcpy(req.cgroup, tmq->groupId);
|
||||
|
||||
int32_t tlen = tSerializeSMqAskEpReq(NULL, 0, &req);
|
||||
if (tlen < 0) {
|
||||
tscError("tSerializeSMqAskEpReq failed");
|
||||
return -1;
|
||||
}
|
||||
void *pReq = taosMemoryCalloc(1, tlen);
|
||||
if (pReq == NULL) {
|
||||
tscError("failed to malloc askEpReq msg, size:%d", tlen);
|
||||
return -1;
|
||||
}
|
||||
if (tSerializeSMqAskEpReq(pReq, tlen, &req) < 0) {
|
||||
tscError("tSerializeSMqAskEpReq %d failed", tlen);
|
||||
taosMemoryFree(pReq);
|
||||
return -1;
|
||||
}
|
||||
req->consumerId = htobe64(tmq->consumerId);
|
||||
req->epoch = htonl(tmq->epoch);
|
||||
strcpy(req->cgroup, tmq->groupId);
|
||||
|
||||
SMqAskEpCbParam* pParam = taosMemoryCalloc(1, sizeof(SMqAskEpCbParam));
|
||||
if (pParam == NULL) {
|
||||
tscError("failed to malloc subscribe param");
|
||||
taosMemoryFree(req);
|
||||
taosMemoryFree(pReq);
|
||||
/*atomic_store_8(&tmq->epStatus, 0);*/
|
||||
return -1;
|
||||
}
|
||||
|
@ -1405,13 +1429,13 @@ int32_t tmqAskEp(tmq_t* tmq, bool async) {
|
|||
if (sendInfo == NULL) {
|
||||
tsem_destroy(&pParam->rspSem);
|
||||
taosMemoryFree(pParam);
|
||||
taosMemoryFree(req);
|
||||
taosMemoryFree(pReq);
|
||||
/*atomic_store_8(&tmq->epStatus, 0);*/
|
||||
return -1;
|
||||
}
|
||||
|
||||
sendInfo->msgInfo = (SDataBuf){
|
||||
.pData = req,
|
||||
.pData = pReq,
|
||||
.len = tlen,
|
||||
.handle = NULL,
|
||||
};
|
||||
|
|
|
@ -1403,6 +1403,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
|
|||
pBlock->info = pDataBlock->info;
|
||||
pBlock->info.rows = 0;
|
||||
pBlock->info.capacity = 0;
|
||||
pBlock->info.rowSize = 0;
|
||||
|
||||
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
|
|
|
@ -4446,6 +4446,204 @@ void tFreeSExplainRsp(SExplainRsp *pRsp) {
|
|||
taosMemoryFreeClear(pRsp->subplanInfo);
|
||||
}
|
||||
|
||||
int32_t tSerializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq) {
|
||||
int32_t headLen = sizeof(SMsgHead);
|
||||
if (buf != NULL) {
|
||||
buf = (char *)buf + headLen;
|
||||
bufLen -= headLen;
|
||||
}
|
||||
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
|
||||
int32_t num = taosArrayGetSize(pReq->pMsgs);
|
||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SBatchMsg *pMsg = taosArrayGet(pReq->pMsgs, i);
|
||||
if (tEncodeI32(&encoder, pMsg->msgIdx) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pMsg->msgType) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pMsg->msgLen) < 0) return -1;
|
||||
if (tEncodeBinary(&encoder, pMsg->msg, pMsg->msgLen) < 0) return -1;
|
||||
}
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
if (buf != NULL) {
|
||||
SMsgHead *pHead = (SMsgHead *)((char *)buf - headLen);
|
||||
pHead->vgId = htonl(pReq->header.vgId);
|
||||
pHead->contLen = htonl(tlen + headLen);
|
||||
}
|
||||
|
||||
return tlen + headLen;
|
||||
}
|
||||
|
||||
int32_t tDeserializeSBatchReq(void *buf, int32_t bufLen, SBatchReq *pReq) {
|
||||
int32_t headLen = sizeof(SMsgHead);
|
||||
|
||||
SMsgHead *pHead = buf;
|
||||
pHead->vgId = pReq->header.vgId;
|
||||
pHead->contLen = pReq->header.contLen;
|
||||
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, (char *)buf + headLen, bufLen - headLen);
|
||||
|
||||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
|
||||
int32_t num = 0;
|
||||
if (tDecodeI32(&decoder, &num) < 0) return -1;
|
||||
if (num <= 0) {
|
||||
pReq->pMsgs = NULL;
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pReq->pMsgs = taosArrayInit(num, sizeof(SBatchMsg));
|
||||
if (NULL == pReq->pMsgs) return -1;
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SBatchMsg msg = {0};
|
||||
if (tDecodeI32(&decoder, &msg.msgIdx) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &msg.msgType) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &msg.msgLen) < 0) return -1;
|
||||
if (tDecodeBinaryAlloc(&decoder, &msg.msg, NULL) < 0) return -1;
|
||||
if (NULL == taosArrayPush(pReq->pMsgs, &msg)) return -1;
|
||||
}
|
||||
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tSerializeSBatchRsp(void *buf, int32_t bufLen, SBatchRsp *pRsp) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
|
||||
int32_t num = taosArrayGetSize(pRsp->pRsps);
|
||||
if (tEncodeI32(&encoder, num) < 0) return -1;
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SBatchRspMsg *pMsg = taosArrayGet(pRsp->pRsps, i);
|
||||
if (tEncodeI32(&encoder, pMsg->reqType) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pMsg->msgIdx) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pMsg->msgLen) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pMsg->rspCode) < 0) return -1;
|
||||
if (tEncodeBinary(&encoder, pMsg->msg, pMsg->msgLen) < 0) return -1;
|
||||
}
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
return tlen;
|
||||
}
|
||||
|
||||
int32_t tDeserializeSBatchRsp(void *buf, int32_t bufLen, SBatchRsp *pRsp) {
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, (char *)buf, bufLen);
|
||||
|
||||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
|
||||
int32_t num = 0;
|
||||
if (tDecodeI32(&decoder, &num) < 0) return -1;
|
||||
if (num <= 0) {
|
||||
pRsp->pRsps = NULL;
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pRsp->pRsps = taosArrayInit(num, sizeof(SBatchRspMsg));
|
||||
if (NULL == pRsp->pRsps) return -1;
|
||||
for (int32_t i = 0; i < num; ++i) {
|
||||
SBatchRspMsg msg = {0};
|
||||
if (tDecodeI32(&decoder, &msg.reqType) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &msg.msgIdx) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &msg.msgLen) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &msg.rspCode) < 0) return -1;
|
||||
if (tDecodeBinaryAlloc(&decoder, &msg.msg, NULL) < 0) return -1;
|
||||
if (NULL == taosArrayPush(pRsp->pRsps, &msg)) return -1;
|
||||
}
|
||||
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int32_t tSerializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
|
||||
if (tEncodeI64(&encoder, pReq->consumerId) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->epoch) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pReq->cgroup) < 0) return -1;
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
return tlen;
|
||||
}
|
||||
|
||||
int32_t tDeserializeSMqAskEpReq(void *buf, int32_t bufLen, SMqAskEpReq *pReq) {
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, (char *)buf, bufLen);
|
||||
|
||||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
|
||||
if (tDecodeI64(&decoder, &pReq->consumerId) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->epoch) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, pReq->cgroup) < 0) return -1;
|
||||
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t tSerializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
|
||||
SEncoder encoder = {0};
|
||||
tEncoderInit(&encoder, buf, bufLen);
|
||||
if (tStartEncode(&encoder) < 0) return -1;
|
||||
|
||||
if (tEncodeI64(&encoder, pReq->consumerId) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pReq->epoch) < 0) return -1;
|
||||
|
||||
tEndEncode(&encoder);
|
||||
|
||||
int32_t tlen = encoder.pos;
|
||||
tEncoderClear(&encoder);
|
||||
|
||||
return tlen;
|
||||
}
|
||||
|
||||
int32_t tDeserializeSMqHbReq(void *buf, int32_t bufLen, SMqHbReq *pReq) {
|
||||
SDecoder decoder = {0};
|
||||
tDecoderInit(&decoder, (char *)buf, bufLen);
|
||||
|
||||
if (tStartDecode(&decoder) < 0) return -1;
|
||||
|
||||
if (tDecodeI64(&decoder, &pReq->consumerId) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &pReq->epoch) < 0) return -1;
|
||||
|
||||
tEndDecode(&decoder);
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int32_t tSerializeSSchedulerHbReq(void *buf, int32_t bufLen, SSchedulerHbReq *pReq) {
|
||||
int32_t headLen = sizeof(SMsgHead);
|
||||
if (buf != NULL) {
|
||||
|
|
|
@ -436,21 +436,24 @@ int64_t convertTimePrecision(int64_t utime, int32_t fromPrecision, int32_t toPre
|
|||
ASSERT(toPrecision == TSDB_TIME_PRECISION_MILLI || toPrecision == TSDB_TIME_PRECISION_MICRO ||
|
||||
toPrecision == TSDB_TIME_PRECISION_NANO);
|
||||
|
||||
double tempResult = (double)utime;
|
||||
|
||||
switch (fromPrecision) {
|
||||
case TSDB_TIME_PRECISION_MILLI: {
|
||||
switch (toPrecision) {
|
||||
case TSDB_TIME_PRECISION_MILLI:
|
||||
return utime;
|
||||
case TSDB_TIME_PRECISION_MICRO:
|
||||
tempResult *= 1000;
|
||||
utime *= 1000;
|
||||
goto end_;
|
||||
if (utime > INT64_MAX / 1000) {
|
||||
return INT64_MAX;
|
||||
}
|
||||
return utime * 1000;
|
||||
case TSDB_TIME_PRECISION_NANO:
|
||||
tempResult *= 1000000;
|
||||
utime *= 1000000;
|
||||
goto end_;
|
||||
if (utime > INT64_MAX / 1000000) {
|
||||
return INT64_MAX;
|
||||
}
|
||||
return utime * 1000000;
|
||||
default:
|
||||
ASSERT(0);
|
||||
return utime;
|
||||
}
|
||||
} // end from milli
|
||||
case TSDB_TIME_PRECISION_MICRO: {
|
||||
|
@ -460,9 +463,13 @@ int64_t convertTimePrecision(int64_t utime, int32_t fromPrecision, int32_t toPre
|
|||
case TSDB_TIME_PRECISION_MICRO:
|
||||
return utime;
|
||||
case TSDB_TIME_PRECISION_NANO:
|
||||
tempResult *= 1000;
|
||||
utime *= 1000;
|
||||
goto end_;
|
||||
if (utime > INT64_MAX / 1000) {
|
||||
return INT64_MAX;
|
||||
}
|
||||
return utime * 1000;
|
||||
default:
|
||||
ASSERT(0);
|
||||
return utime;
|
||||
}
|
||||
} // end from micro
|
||||
case TSDB_TIME_PRECISION_NANO: {
|
||||
|
@ -473,17 +480,17 @@ int64_t convertTimePrecision(int64_t utime, int32_t fromPrecision, int32_t toPre
|
|||
return utime / 1000;
|
||||
case TSDB_TIME_PRECISION_NANO:
|
||||
return utime;
|
||||
default:
|
||||
ASSERT(0);
|
||||
return utime;
|
||||
}
|
||||
} // end from nano
|
||||
default: {
|
||||
assert(0);
|
||||
ASSERT(0);
|
||||
return utime; // only to pass windows compilation
|
||||
}
|
||||
} // end switch fromPrecision
|
||||
|
||||
end_:
|
||||
if (tempResult >= (double)INT64_MAX) return INT64_MAX;
|
||||
if (tempResult <= (double)INT64_MIN) return INT64_MIN; // INT64_MIN means NULL
|
||||
return utime;
|
||||
}
|
||||
|
||||
|
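The change above drops the double-based `end_:` overflow check in `convertTimePrecision` and guards each scale-up against `INT64_MAX` before multiplying. A minimal standalone sketch of that saturation pattern (hypothetical helper name; like the original code, it assumes a non-negative timestamp and a positive factor):

```c
#include <stdint.h>

// Scale a timestamp by a positive factor, clamping to INT64_MAX instead of overflowing.
static int64_t scaleSaturating(int64_t ts, int64_t factor) {
  if (ts > INT64_MAX / factor) {
    return INT64_MAX;  // would overflow: saturate
  }
  return ts * factor;
}

// e.g. milliseconds -> nanoseconds: scaleSaturating(tsMs, 1000000)
```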
@ -599,18 +606,33 @@ int32_t convertStringToTimestamp(int16_t type, char* inputData, int64_t timePrec
|
|||
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
|
||||
switch (unit) {
|
||||
case 's':
|
||||
if (val > INT64_MAX / MILLISECOND_PER_SECOND) {
|
||||
return -1;
|
||||
}
|
||||
(*result) = convertTimePrecision(val * MILLISECOND_PER_SECOND, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||
break;
|
||||
case 'm':
|
||||
if (val > INT64_MAX / MILLISECOND_PER_MINUTE) {
|
||||
return -1;
|
||||
}
|
||||
(*result) = convertTimePrecision(val * MILLISECOND_PER_MINUTE, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||
break;
|
||||
case 'h':
|
||||
if (val > INT64_MAX / MILLISECOND_PER_HOUR) {
|
||||
return -1;
|
||||
}
|
||||
(*result) = convertTimePrecision(val * MILLISECOND_PER_HOUR, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||
break;
|
||||
case 'd':
|
||||
if (val > INT64_MAX / MILLISECOND_PER_DAY) {
|
||||
return -1;
|
||||
}
|
||||
(*result) = convertTimePrecision(val * MILLISECOND_PER_DAY, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||
break;
|
||||
case 'w':
|
||||
if (val > INT64_MAX / MILLISECOND_PER_WEEK) {
|
||||
return -1;
|
||||
}
|
||||
(*result) = convertTimePrecision(val * MILLISECOND_PER_WEEK, TSDB_TIME_PRECISION_MILLI, timePrecision);
|
||||
break;
|
||||
case 'a':
|
||||
|
@ -650,7 +672,7 @@ int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* dura
|
|||
|
||||
/* get the basic numeric value */
|
||||
int64_t timestamp = taosStr2Int64(token, &endPtr, 10);
|
||||
if (errno != 0) {
|
||||
if (timestamp < 0 || errno != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -668,7 +690,7 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
|
|||
|
||||
/* get the basic numeric value */
|
||||
*duration = taosStr2Int64(token, NULL, 10);
|
||||
if (errno != 0) {
|
||||
if (*duration < 0 || errno != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -109,7 +109,7 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
|
|||
}
|
||||
case TSDB_DATA_TYPE_FLOAT: {
|
||||
pVar->nLen = tDataTypes[type].bytes;
|
||||
pVar->d = GET_FLOAT_VAL(pz);
|
||||
pVar->f = GET_FLOAT_VAL(pz);
|
||||
break;
|
||||
}
|
||||
case TSDB_DATA_TYPE_NCHAR: { // here we get the nchar length from raw binary bits length
|
||||
|
@ -223,12 +223,18 @@ int32_t taosVariantCompare(const SVariant *p1, const SVariant *p2) {
|
|||
} else {
|
||||
return p1->nLen > p2->nLen ? 1 : -1;
|
||||
}
|
||||
} else if (p1->nType == TSDB_DATA_TYPE_FLOAT || p1->nType == TSDB_DATA_TYPE_DOUBLE) {
|
||||
} else if (p1->nType == TSDB_DATA_TYPE_DOUBLE) {
|
||||
if (p1->d == p2->d) {
|
||||
return 0;
|
||||
} else {
|
||||
return p1->d > p2->d ? 1 : -1;
|
||||
}
|
||||
} else if (p1->nType == TSDB_DATA_TYPE_FLOAT) {
|
||||
if (p1->f == p2->f) {
|
||||
return 0;
|
||||
} else {
|
||||
return p1->f > p2->f ? 1 : -1;
|
||||
}
|
||||
} else if (IS_UNSIGNED_NUMERIC_TYPE(p1->nType)) {
|
||||
if (p1->u == p2->u) {
|
||||
return 0;
|
||||
|
@ -259,8 +265,9 @@ char *taosVariantGet(SVariant *pVar, int32_t type) {
|
|||
case TSDB_DATA_TYPE_UBIGINT:
|
||||
return (char *)&pVar->u;
|
||||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
return (char *)&pVar->d;
|
||||
case TSDB_DATA_TYPE_FLOAT:
|
||||
return (char *)&pVar->f;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_JSON:
|
||||
return (char *)pVar->pz;
|
||||
|
|
|
@ -287,7 +287,7 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
|
|||
vmReleaseVnode(pMgmt, pVnode);
|
||||
}
|
||||
if (size < 0) {
|
||||
dError("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
|
||||
dTrace("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
|
||||
size = 0;
|
||||
}
|
||||
return size;
|
||||
|
|
|
@ -325,9 +325,14 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
|
|||
|
||||
static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
SMqHbReq *pReq = (SMqHbReq *)pMsg->pCont;
|
||||
int64_t consumerId = be64toh(pReq->consumerId);
|
||||
SMqHbReq req = {0};
|
||||
|
||||
if (tDeserializeSMqHbReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int64_t consumerId = req.consumerId;
|
||||
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
|
||||
if (pConsumer == NULL) {
|
||||
mError("consumer %" PRId64 " not exist", consumerId);
|
||||
|
@ -359,10 +364,16 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
|
|||
|
||||
static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
SMqAskEpReq *pReq = (SMqAskEpReq *)pMsg->pCont;
|
||||
SMqAskEpReq req = {0};
|
||||
SMqAskEpRsp rsp = {0};
|
||||
int64_t consumerId = be64toh(pReq->consumerId);
|
||||
int32_t epoch = ntohl(pReq->epoch);
|
||||
|
||||
if (tDeserializeSMqAskEpReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
|
||||
int64_t consumerId = req.consumerId;
|
||||
int32_t epoch = req.epoch;
|
||||
|
||||
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
|
||||
if (pConsumer == NULL) {
|
||||
|
@ -370,7 +381,7 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
ASSERT(strcmp(pReq->cgroup, pConsumer->cgroup) == 0);
|
||||
ASSERT(strcmp(req.cgroup, pConsumer->cgroup) == 0);
|
||||
|
||||
atomic_store_32(&pConsumer->hbStatus, 0);
|
||||
|
||||
|
|
|
@ -63,81 +63,60 @@ int32_t mndProcessQueryMsg(SRpcMsg *pMsg) {
|
|||
return code;
|
||||
}
|
||||
|
||||
|
||||
static FORCE_INLINE void mnodeFreeSBatchRspMsg(void* p) {
|
||||
if (NULL == p) {
|
||||
return;
|
||||
}
|
||||
|
||||
SBatchRspMsg* pRsp = (SBatchRspMsg*)p;
|
||||
rpcFreeCont(pRsp->msg);
|
||||
}
|
||||
|
||||
int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) {
|
||||
int32_t code = 0;
|
||||
int32_t offset = 0;
|
||||
int32_t rspSize = 0;
|
||||
SBatchReq *batchReq = (SBatchReq *)pMsg->pCont;
|
||||
int32_t msgNum = ntohl(batchReq->msgNum);
|
||||
offset += sizeof(SBatchReq);
|
||||
SBatchReq batchReq = {0};
|
||||
SBatchMsg req = {0};
|
||||
SBatchRsp rsp = {0};
|
||||
SBatchRspMsg rsp = {0};
|
||||
SBatchRsp batchRsp = {0};
|
||||
SRpcMsg reqMsg = *pMsg;
|
||||
SRpcMsg rspMsg = {0};
|
||||
void *pRsp = NULL;
|
||||
SMnode *pMnode = pMsg->info.node;
|
||||
|
||||
if (tDeserializeSBatchReq(pMsg->pCont, pMsg->contLen, &batchReq)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
mError("tDeserializeSBatchReq failed");
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
int32_t msgNum = taosArrayGetSize(batchReq.pMsgs);
|
||||
if (msgNum >= MAX_META_MSG_IN_BATCH) {
|
||||
code = TSDB_CODE_INVALID_MSG;
|
||||
mError("too many msgs %d in mnode batch meta req", msgNum);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
SArray *batchRsp = taosArrayInit(msgNum, sizeof(SBatchRsp));
|
||||
if (NULL == batchRsp) {
|
||||
batchRsp.pRsps = taosArrayInit(msgNum, sizeof(SBatchRspMsg));
|
||||
if (NULL == batchRsp.pRsps) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < msgNum; ++i) {
|
||||
if (offset >= pMsg->contLen) {
|
||||
mError("offset %d is bigger than contLen %d", offset, pMsg->contLen);
|
||||
terrno = TSDB_CODE_INVALID_MSG_LEN;
|
||||
taosArrayDestroy(batchRsp);
|
||||
return -1;
|
||||
}
|
||||
SBatchMsg* req = taosArrayGet(batchReq.pMsgs, i);
|
||||
|
||||
req.msgIdx = ntohl(*(int32_t *)((char *)pMsg->pCont + offset));
|
||||
offset += sizeof(req.msgIdx);
|
||||
if (offset >= pMsg->contLen) {
|
||||
mError("offset %d is bigger than contLen %d", offset, pMsg->contLen);
|
||||
terrno = TSDB_CODE_INVALID_MSG_LEN;
|
||||
taosArrayDestroy(batchRsp);
|
||||
return -1;
|
||||
}
|
||||
|
||||
req.msgType = ntohl(*(int32_t *)((char *)pMsg->pCont + offset));
|
||||
offset += sizeof(req.msgType);
|
||||
if (offset >= pMsg->contLen) {
|
||||
mError("offset %d is bigger than contLen %d", offset, pMsg->contLen);
|
||||
terrno = TSDB_CODE_INVALID_MSG_LEN;
|
||||
taosArrayDestroy(batchRsp);
|
||||
return -1;
|
||||
}
|
||||
|
||||
req.msgLen = ntohl(*(int32_t *)((char *)pMsg->pCont + offset));
|
||||
offset += sizeof(req.msgLen);
|
||||
if (offset >= pMsg->contLen) {
|
||||
mError("offset %d is bigger than contLen %d", offset, pMsg->contLen);
|
||||
terrno = TSDB_CODE_INVALID_MSG_LEN;
|
||||
taosArrayDestroy(batchRsp);
|
||||
return -1;
|
||||
}
|
||||
|
||||
req.msg = (char *)pMsg->pCont + offset;
|
||||
offset += req.msgLen;
|
||||
|
||||
reqMsg.msgType = req.msgType;
|
||||
reqMsg.pCont = req.msg;
|
||||
reqMsg.contLen = req.msgLen;
|
||||
reqMsg.msgType = req->msgType;
|
||||
reqMsg.pCont = req->msg;
|
||||
reqMsg.contLen = req->msgLen;
|
||||
reqMsg.info.rsp = NULL;
|
||||
reqMsg.info.rspLen = 0;
|
||||
|
||||
MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(req.msgType)];
|
||||
MndMsgFp fp = pMnode->msgFp[TMSG_INDEX(req->msgType)];
|
||||
if (fp == NULL) {
|
||||
mError("msg:%p, failed to get msg handle, app:%p type:%s", pMsg, pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));
|
||||
terrno = TSDB_CODE_MSG_NOT_PROCESSED;
|
||||
taosArrayDestroy(batchRsp);
|
||||
taosArrayDestroy(batchRsp.pRsps);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -146,49 +125,29 @@ int32_t mndProcessBatchMetaMsg(SRpcMsg *pMsg) {
|
|||
} else {
|
||||
rsp.rspCode = 0;
|
||||
}
|
||||
rsp.msgIdx = req.msgIdx;
|
||||
rsp.msgIdx = req->msgIdx;
|
||||
rsp.reqType = reqMsg.msgType;
|
||||
rsp.msgLen = reqMsg.info.rspLen;
|
||||
rsp.msg = reqMsg.info.rsp;
|
||||
|
||||
taosArrayPush(batchRsp, &rsp);
|
||||
|
||||
rspSize += sizeof(rsp) + rsp.msgLen - POINTER_BYTES;
|
||||
taosArrayPush(batchRsp.pRsps, &rsp);
|
||||
}
|
||||
|
||||
rspSize += sizeof(int32_t);
|
||||
offset = 0;
|
||||
|
||||
rspSize = tSerializeSBatchRsp(NULL, 0, &batchRsp);
|
||||
if (rspSize < 0) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
pRsp = rpcMallocCont(rspSize);
|
||||
if (pRsp == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(msgNum);
|
||||
offset += sizeof(msgNum);
|
||||
for (int32_t i = 0; i < msgNum; ++i) {
|
||||
SBatchRsp *p = taosArrayGet(batchRsp, i);
|
||||
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(p->reqType);
|
||||
offset += sizeof(p->reqType);
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(p->msgIdx);
|
||||
offset += sizeof(p->msgIdx);
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(p->msgLen);
|
||||
offset += sizeof(p->msgLen);
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(p->rspCode);
|
||||
offset += sizeof(p->rspCode);
|
||||
if (p->msg != NULL) {
|
||||
memcpy((char *)pRsp + offset, p->msg, p->msgLen);
|
||||
offset += p->msgLen;
|
||||
}
|
||||
|
||||
rpcFreeCont(p->msg);
|
||||
if (tSerializeSBatchRsp(pRsp, rspSize, &batchRsp) < 0) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
taosArrayDestroy(batchRsp);
|
||||
batchRsp = NULL;
|
||||
|
||||
_exit:
|
||||
|
||||
pMsg->info.rsp = pRsp;
|
||||
|
@ -198,7 +157,8 @@ _exit:
|
|||
mError("mnd get batch meta failed cause of %s", tstrerror(code));
|
||||
}
|
||||
|
||||
taosArrayDestroyEx(batchRsp, tFreeSBatchRsp);
|
||||
taosArrayDestroyEx(batchReq.pMsgs, tFreeSBatchReqMsg);
|
||||
taosArrayDestroyEx(batchRsp.pRsps, mnodeFreeSBatchRspMsg);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -508,6 +508,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
|
|||
qDestroyQueryPlan(pPlan);
|
||||
return -1;
|
||||
}
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
}
|
||||
}
|
||||
qDestroyQueryPlan(pPlan);
|
||||
|
|
|
@ -295,6 +295,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
|
|||
return -1;
|
||||
}
|
||||
pObj->sourceDbUid = pSourceDb->uid;
|
||||
mndReleaseDb(pMnode, pSourceDb);
|
||||
|
||||
memcpy(pObj->targetSTbName, pCreate->targetStbFullName, TSDB_TABLE_FNAME_LEN);
|
||||
|
||||
|
@ -307,6 +308,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
|
|||
|
||||
pObj->targetStbUid = mndGenerateUid(pObj->targetSTbName, TSDB_TABLE_FNAME_LEN);
|
||||
pObj->targetDbUid = pTargetDb->uid;
|
||||
mndReleaseDb(pMnode, pTargetDb);
|
||||
|
||||
pObj->sql = pCreate->sql;
|
||||
pObj->ast = pCreate->ast;
|
||||
|
@ -523,6 +525,7 @@ static int32_t mndCreateStbForStream(SMnode *pMnode, STrans *pTrans, const SStre
|
|||
|
||||
tFreeSMCreateStbReq(&createReq);
|
||||
mndFreeStb(&stbObj);
|
||||
mndReleaseDb(pMnode, pDb);
|
||||
|
||||
return 0;
|
||||
_OVER:
|
||||
|
|
|
@ -996,7 +996,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
|||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pConsumerEp->consumerId, false);
|
||||
|
||||
mDebug("mnd show subscrptions: topic %s, consumer %" PRId64 "cgroup %s vgid %d", varDataVal(topic),
|
||||
mDebug("mnd show subscriptions: topic %s, consumer %" PRId64 " cgroup %s vgid %d", varDataVal(topic),
|
||||
pConsumerEp->consumerId, varDataVal(cgroup), pVgEp->vgId);
|
||||
|
||||
// offset
|
||||
|
@ -1044,7 +1044,7 @@ static int32_t mndRetrieveSubscribe(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock
|
|||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, NULL, true);
|
||||
|
||||
mDebug("mnd show subscrptions(unassigned): topic %s, cgroup %s vgid %d", varDataVal(topic), varDataVal(cgroup),
|
||||
mDebug("mnd show subscriptions(unassigned): topic %s, cgroup %s vgid %d", varDataVal(topic), varDataVal(cgroup),
|
||||
pVgEp->vgId);
|
||||
|
||||
// offset
|
||||
|
|
|
@ -1277,9 +1277,11 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
|
|||
pAction->id);
|
||||
code = mndTransSync(pMnode, pTrans);
|
||||
if (code != 0) {
|
||||
pTrans->redoActionPos--;
|
||||
pTrans->code = terrno;
|
||||
mError("trans:%d, %s:%d is executed and failed to sync to other mnodes since %s", pTrans->id,
|
||||
mndTransStr(pAction->stage), pAction->id, terrstr());
|
||||
break;
|
||||
}
|
||||
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
|
||||
mInfo("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
|
||||
|
|
|
@ -1739,6 +1739,7 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
|
|||
code = 0;
|
||||
|
||||
_OVER:
|
||||
taosArrayDestroy(pArray);
|
||||
mndTransDrop(pTrans);
|
||||
sdbFreeRaw(pRaw);
|
||||
return code;
|
||||
|
@ -1907,6 +1908,7 @@ static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) {
|
|||
}
|
||||
|
||||
_OVER:
|
||||
taosHashCleanup(pBalancedVgroups);
|
||||
mndTransDrop(pTrans);
|
||||
return code;
|
||||
}
|
||||
|
|
|
@ -88,6 +88,7 @@ typedef struct SBlockLoadSuppInfo {
|
|||
int16_t* colIds; // column ids for loading file block data
|
||||
int32_t numOfCols;
|
||||
char** buildBuf; // build string tmp buffer, todo remove it later after all string format being updated.
|
||||
bool smaValid; // the sma on all queried columns are activated
|
||||
} SBlockLoadSuppInfo;
|
||||
|
||||
typedef struct SLastBlockReader {
|
||||
|
@ -213,11 +214,10 @@ static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFil
|
|||
|
||||
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
|
||||
|
||||
static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
|
||||
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
|
||||
|
||||
static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SSDataBlock* pBlock) {
|
||||
size_t numOfCols = blockDataGetNumOfCols(pBlock);
|
||||
|
||||
pSupInfo->smaValid = true;
|
||||
pSupInfo->numOfCols = numOfCols;
|
||||
pSupInfo->colIds = taosMemoryMalloc(numOfCols * sizeof(int16_t));
|
||||
pSupInfo->buildBuf = taosMemoryCalloc(numOfCols, POINTER_BYTES);
|
||||
|
@ -239,6 +239,28 @@ static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void updateBlockSMAInfo(STSchema* pSchema, SBlockLoadSuppInfo* pSupInfo) {
|
||||
int32_t i = 0, j = 0;
|
||||
|
||||
while(i < pSchema->numOfCols && j < pSupInfo->numOfCols) {
|
||||
STColumn* pTCol = &pSchema->columns[i];
|
||||
if (pTCol->colId == pSupInfo->colIds[j]) {
|
||||
if (!IS_BSMA_ON(pTCol)) {
|
||||
pSupInfo->smaValid = false;
|
||||
return;
|
||||
}
|
||||
|
||||
i += 1;
|
||||
j += 1;
|
||||
} else if (pTCol->colId < pSupInfo->colIds[j]) {
|
||||
// do nothing
|
||||
i += 1;
|
||||
} else {
|
||||
ASSERT(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int32_t initBlockScanInfoBuf(SBlockInfoBuf* pBuf, int32_t numOfTables) {
|
||||
int32_t num = numOfTables / pBuf->numPerBucket;
|
||||
int32_t remainder = numOfTables % pBuf->numPerBucket;
|
||||
|
@ -580,7 +602,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
|
|||
|
||||
// allocate buffer in order to load data blocks from file
|
||||
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
|
||||
pSup->pColAgg = taosArrayInit(4, sizeof(SColumnDataAgg));
|
||||
pSup->pColAgg = taosArrayInit(pCond->numOfCols, sizeof(SColumnDataAgg));
|
||||
pSup->plist = taosMemoryCalloc(pCond->numOfCols, POINTER_BYTES);
|
||||
if (pSup->pColAgg == NULL || pSup->plist == NULL) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -601,7 +623,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
|
|||
goto _end;
|
||||
}
|
||||
|
||||
setColumnIdSlotList(pReader, pReader->pResBlock);
|
||||
setColumnIdSlotList(&pReader->suppInfo, pReader->pResBlock);
|
||||
|
||||
*ppReader = pReader;
|
||||
return code;
|
||||
|
@ -3763,6 +3785,10 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
|
|||
}
|
||||
}
|
||||
|
||||
if (pReader->pSchema != NULL) {
|
||||
updateBlockSMAInfo(pReader->pSchema, &pReader->suppInfo);
|
||||
}
|
||||
|
||||
STsdbReader* p = pReader->innerReader[0] != NULL ? pReader->innerReader[0] : pReader;
|
||||
|
||||
pReader->status.pTableMap = createDataBlockScanInfo(p, pTableList, numOfTables);
|
||||
|
@ -4020,7 +4046,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
|
|||
}
|
||||
|
||||
// there is no statistics data for composed block
|
||||
if (pReader->status.composedDataBlock) {
|
||||
if (pReader->status.composedDataBlock || (!pReader->suppInfo.smaValid)) {
|
||||
*pBlockStatis = NULL;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
@ -4060,7 +4086,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
|
|||
|
||||
int32_t i = 0, j = 0;
|
||||
size_t size = taosArrayGetSize(pSup->pColAgg);
|
||||
|
||||
#if 0
|
||||
while (j < numOfCols && i < size) {
|
||||
SColumnDataAgg* pAgg = taosArrayGet(pSup->pColAgg, i);
|
||||
if (pAgg->colId == pSup->colIds[j]) {
|
||||
|
@ -4068,6 +4094,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
|
|||
pSup->plist[j] = pAgg;
|
||||
} else {
|
||||
*allHave = false;
|
||||
break;
|
||||
}
|
||||
i += 1;
|
||||
j += 1;
|
||||
|
@ -4077,12 +4104,43 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
|
|||
j += 1;
|
||||
}
|
||||
}
|
||||
#else
|
||||
|
||||
// fill the all null data column
|
||||
SArray* pNewAggList = taosArrayInit(numOfCols, sizeof(SColumnDataAgg));
|
||||
|
||||
while (j < numOfCols && i < size) {
|
||||
SColumnDataAgg* pAgg = taosArrayGet(pSup->pColAgg, i);
|
||||
if (pAgg->colId == pSup->colIds[j]) {
|
||||
taosArrayPush(pNewAggList, pAgg);
|
||||
i += 1;
|
||||
j += 1;
|
||||
} else if (pAgg->colId < pSup->colIds[j]) {
|
||||
i += 1;
|
||||
} else if (pSup->colIds[j] < pAgg->colId) {
|
||||
// all data in this block are null
|
||||
SColumnDataAgg nullColAgg = {.colId = pSup->colIds[j], .numOfNull = pBlock->nRow};
|
||||
taosArrayPush(pNewAggList, &nullColAgg);
|
||||
j += 1;
|
||||
}
|
||||
}
|
||||
|
||||
taosArrayClear(pSup->pColAgg);
|
||||
taosArrayAddAll(pSup->pColAgg, pNewAggList);
|
||||
|
||||
size_t num = taosArrayGetSize(pSup->pColAgg);
|
||||
for(int32_t k = 0; k < num; ++k) {
|
||||
pSup->plist[k] = taosArrayGet(pSup->pColAgg, k);
|
||||
}
|
||||
|
||||
taosArrayDestroy(pNewAggList);
|
||||
|
||||
#endif
|
||||
|
||||
pReader->cost.smaDataLoad += 1;
|
||||
*pBlockStatis = pSup->plist;
|
||||
|
||||
tsdbDebug("vgId:%d, succeed to load block SMA for uid %" PRIu64 ", %s", 0, pFBlock->uid, pReader->idStr);
|
||||
|
||||
return code;
|
||||
}
|
||||
|
||||
|
|
|
@ -517,7 +517,7 @@ static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData,
|
|||
for (int32_t iColData = 0; iColData < pBlockData->nColData; iColData++) {
|
||||
SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
|
||||
|
||||
if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type)) continue;
|
||||
if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type) || ((pColData->flag & HAS_VALUE) == 0)) continue;
|
||||
|
||||
SColumnDataAgg sma = {.colId = pColData->cid};
|
||||
tColDataCalcSMA[pColData->type](pColData, &sma.sum, &sma.max, &sma.min, &sma.numOfNull);
|
||||
|
|
|
@ -264,77 +264,55 @@ _exit:
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static FORCE_INLINE void vnodeFreeSBatchRspMsg(void* p) {
|
||||
if (NULL == p) {
|
||||
return;
|
||||
}
|
||||
|
||||
SBatchRspMsg* pRsp = (SBatchRspMsg*)p;
|
||||
rpcFreeCont(pRsp->msg);
|
||||
}
|
||||
|
||||
|
||||
int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) {
|
||||
int32_t code = 0;
|
||||
int32_t offset = 0;
|
||||
int32_t rspSize = 0;
|
||||
SBatchReq *batchReq = (SBatchReq *)pMsg->pCont;
|
||||
int32_t msgNum = ntohl(batchReq->msgNum);
|
||||
offset += sizeof(SBatchReq);
|
||||
SBatchMsg req = {0};
|
||||
SBatchRsp rsp = {0};
|
||||
SBatchReq batchReq = {0};
|
||||
SBatchMsg *req = NULL;
|
||||
SBatchRspMsg rsp = {0};
|
||||
SBatchRsp batchRsp = {0};
|
||||
SRpcMsg reqMsg = *pMsg;
|
||||
SRpcMsg rspMsg = {0};
|
||||
void *pRsp = NULL;
|
||||
|
||||
if (tDeserializeSBatchReq(pMsg->pCont, pMsg->contLen, &batchReq)) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
qError("tDeserializeSBatchReq failed");
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
int32_t msgNum = taosArrayGetSize(batchReq.pMsgs);
|
||||
if (msgNum >= MAX_META_MSG_IN_BATCH) {
|
||||
code = TSDB_CODE_INVALID_MSG;
|
||||
qError("too many msgs %d in vnode batch meta req", msgNum);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
SArray *batchRsp = taosArrayInit(msgNum, sizeof(SBatchRsp));
|
||||
if (NULL == batchRsp) {
|
||||
batchRsp.pRsps = taosArrayInit(msgNum, sizeof(SBatchRspMsg));
|
||||
if (NULL == batchRsp.pRsps) {
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
qError("taosArrayInit %d SBatchRspMsg failed", msgNum);
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < msgNum; ++i) {
|
||||
if (offset >= pMsg->contLen) {
|
||||
qError("vnode offset %d is bigger than contLen %d", offset, pMsg->contLen);
|
||||
terrno = TSDB_CODE_INVALID_MSG_LEN;
|
||||
taosArrayDestroy(batchRsp);
|
||||
return -1;
|
||||
}
|
||||
req = taosArrayGet(batchReq.pMsgs, i);
|
||||
|
||||
req.msgIdx = ntohl(*(int32_t *)((char *)pMsg->pCont + offset));
|
||||
offset += sizeof(req.msgIdx);
|
||||
reqMsg.msgType = req->msgType;
|
||||
reqMsg.pCont = req->msg;
|
||||
reqMsg.contLen = req->msgLen;
|
||||
|
||||
if (offset >= pMsg->contLen) {
|
||||
qError("vnode offset %d is bigger than contLen %d", offset, pMsg->contLen);
|
||||
terrno = TSDB_CODE_INVALID_MSG_LEN;
|
||||
taosArrayDestroy(batchRsp);
|
||||
return -1;
|
||||
}
|
||||
|
||||
req.msgType = ntohl(*(int32_t *)((char *)pMsg->pCont + offset));
|
||||
offset += sizeof(req.msgType);
|
||||
|
||||
if (offset >= pMsg->contLen) {
|
||||
qError("vnode offset %d is bigger than contLen %d", offset, pMsg->contLen);
|
||||
terrno = TSDB_CODE_INVALID_MSG_LEN;
|
||||
taosArrayDestroy(batchRsp);
|
||||
return -1;
|
||||
}
|
||||
|
||||
req.msgLen = ntohl(*(int32_t *)((char *)pMsg->pCont + offset));
|
||||
offset += sizeof(req.msgLen);
|
||||
|
||||
if (offset >= pMsg->contLen) {
|
||||
qError("vnode offset %d is bigger than contLen %d", offset, pMsg->contLen);
|
||||
terrno = TSDB_CODE_INVALID_MSG_LEN;
|
||||
taosArrayDestroy(batchRsp);
|
||||
return -1;
|
||||
}
|
||||
|
||||
req.msg = (char *)pMsg->pCont + offset;
|
||||
offset += req.msgLen;
|
||||
|
||||
reqMsg.msgType = req.msgType;
|
||||
reqMsg.pCont = req.msg;
|
||||
reqMsg.contLen = req.msgLen;
|
||||
|
||||
switch (req.msgType) {
|
||||
switch (req->msgType) {
|
||||
case TDMT_VND_TABLE_META:
|
||||
vnodeGetTableMeta(pVnode, &reqMsg, false);
|
||||
break;
|
||||
|
@ -342,62 +320,39 @@ int32_t vnodeGetBatchMeta(SVnode *pVnode, SRpcMsg *pMsg) {
|
|||
vnodeGetTableCfg(pVnode, &reqMsg, false);
|
||||
break;
|
||||
default:
|
||||
qError("invalid req msgType %d", req.msgType);
|
||||
qError("invalid req msgType %d", req->msgType);
|
||||
reqMsg.code = TSDB_CODE_INVALID_MSG;
|
||||
reqMsg.pCont = NULL;
|
||||
reqMsg.contLen = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
rsp.msgIdx = req.msgIdx;
|
||||
rsp.msgIdx = req->msgIdx;
|
||||
rsp.reqType = reqMsg.msgType;
|
||||
rsp.msgLen = reqMsg.contLen;
|
||||
rsp.rspCode = reqMsg.code;
|
||||
rsp.msg = reqMsg.pCont;
|
||||
|
||||
taosArrayPush(batchRsp, &rsp);
|
||||
|
||||
rspSize += sizeof(rsp) + rsp.msgLen - POINTER_BYTES;
|
||||
taosArrayPush(batchRsp.pRsps, &rsp);
|
||||
}
|
||||
|
||||
rspSize += sizeof(int32_t);
|
||||
offset = 0;
|
||||
|
||||
if (rspSize > MAX_META_BATCH_RSP_SIZE) {
|
||||
qError("rspSize:%d overload", rspSize);
|
||||
code = TSDB_CODE_INVALID_MSG_LEN;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
pRsp = rpcMallocCont(rspSize);
|
||||
if (pRsp == NULL) {
|
||||
rspSize = tSerializeSBatchRsp(NULL, 0, &batchRsp);
|
||||
if (rspSize < 0) {
|
||||
qError("tSerializeSBatchRsp failed");
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(msgNum);
|
||||
offset += sizeof(msgNum);
|
||||
for (int32_t i = 0; i < msgNum; ++i) {
|
||||
SBatchRsp *p = taosArrayGet(batchRsp, i);
|
||||
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(p->reqType);
|
||||
offset += sizeof(p->reqType);
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(p->msgIdx);
|
||||
offset += sizeof(p->msgIdx);
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(p->msgLen);
|
||||
offset += sizeof(p->msgLen);
|
||||
*(int32_t *)((char *)pRsp + offset) = htonl(p->rspCode);
|
||||
offset += sizeof(p->rspCode);
|
||||
if (p->msg) {
|
||||
memcpy((char *)pRsp + offset, p->msg, p->msgLen);
|
||||
offset += p->msgLen;
|
||||
}
|
||||
|
||||
taosMemoryFreeClear(p->msg);
|
||||
pRsp = rpcMallocCont(rspSize);
|
||||
if (pRsp == NULL) {
|
||||
qError("rpcMallocCont %d failed", rspSize);
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
if (tSerializeSBatchRsp(pRsp, rspSize, &batchRsp) < 0) {
|
||||
qError("tSerializeSBatchRsp %d failed", rspSize);
|
||||
code = TSDB_CODE_OUT_OF_MEMORY;
|
||||
goto _exit;
|
||||
}
|
||||
|
||||
taosArrayDestroy(batchRsp);
|
||||
batchRsp = NULL;
|
||||
|
||||
_exit:
|
||||
|
||||
|
@ -411,7 +366,8 @@ _exit:
|
|||
qError("vnd get batch meta failed cause of %s", tstrerror(code));
|
||||
}
|
||||
|
||||
taosArrayDestroyEx(batchRsp, tFreeSBatchRsp);
|
||||
taosArrayDestroyEx(batchReq.pMsgs, tFreeSBatchReqMsg);
|
||||
taosArrayDestroyEx(batchRsp.pRsps, tFreeSBatchRspMsg);
|
||||
|
||||
tmsgSendRsp(&rspMsg);
|
||||
|
||||
|
|
|
@ -234,7 +234,6 @@ typedef struct SCatalog {
|
|||
typedef struct SCtgBatch {
|
||||
int32_t batchId;
|
||||
int32_t msgType;
|
||||
int32_t msgSize;
|
||||
SArray* pMsgs;
|
||||
SRequestConnInfo conn;
|
||||
char dbFName[TSDB_DB_FNAME_LEN];
|
||||
|
|
|
@ -26,19 +26,29 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu
|
|||
SCatalog* pCtg = pJob->pCtg;
|
||||
int32_t taskNum = taosArrayGetSize(cbParam->taskId);
|
||||
SDataBuf taskMsg = *pMsg;
|
||||
int32_t offset = 0;
|
||||
int32_t msgNum = (TSDB_CODE_SUCCESS == rspCode && pMsg->pData && (pMsg->len > 0)) ? ntohl(*(int32_t*)pMsg->pData) : 0;
|
||||
int32_t msgNum = 0;
|
||||
SBatchRsp batchRsp = {0};
|
||||
SBatchRspMsg rsp = {0};
|
||||
SBatchRspMsg *pRsp = NULL;
|
||||
|
||||
if (TSDB_CODE_SUCCESS == rspCode && pMsg->pData && (pMsg->len > 0)) {
|
||||
if (tDeserializeSBatchRsp(pMsg->pData, pMsg->len, &batchRsp) < 0) {
|
||||
ctgError("tDeserializeSBatchRsp failed, msgLen:%d", pMsg->len);
|
||||
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
msgNum = taosArrayGetSize(batchRsp.pRsps);
|
||||
}
|
||||
|
||||
ASSERT(taskNum == msgNum || 0 == msgNum);
|
||||
|
||||
ctgDebug("QID:0x%" PRIx64 " ctg got batch %d rsp %s", pJob->queryId, cbParam->batchId,
|
||||
TMSG_INFO(cbParam->reqType + 1));
|
||||
|
||||
offset += sizeof(msgNum);
|
||||
SBatchRsp rsp = {0};
|
||||
SHashObj* pBatchs = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
|
||||
if (NULL == pBatchs) {
|
||||
ctgError("taosHashInit %d batch failed", taskNum);
|
||||
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < taskNum; ++i) {
|
||||
|
@ -46,25 +56,18 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu
|
|||
int32_t* msgIdx = taosArrayGet(cbParam->msgIdx, i);
|
||||
SCtgTask* pTask = taosArrayGet(pJob->pTasks, *taskId);
|
||||
if (msgNum > 0) {
|
||||
rsp.reqType = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
|
||||
offset += sizeof(rsp.reqType);
|
||||
rsp.msgIdx = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
|
||||
offset += sizeof(rsp.msgIdx);
|
||||
rsp.msgLen = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
|
||||
offset += sizeof(rsp.msgLen);
|
||||
rsp.rspCode = ntohl(*(int32_t*)((char*)pMsg->pData + offset));
|
||||
offset += sizeof(rsp.rspCode);
|
||||
rsp.msg = ((char*)pMsg->pData) + offset;
|
||||
offset += rsp.msgLen;
|
||||
pRsp = taosArrayGet(batchRsp.pRsps, i);
|
||||
|
||||
taskMsg.msgType = rsp.reqType;
|
||||
taskMsg.pData = rsp.msg;
|
||||
taskMsg.len = rsp.msgLen;
|
||||
taskMsg.msgType = pRsp->reqType;
|
||||
taskMsg.pData = pRsp->msg;
|
||||
taskMsg.len = pRsp->msgLen;
|
||||
|
||||
ASSERT(rsp.msgIdx == *msgIdx);
|
||||
ASSERT(pRsp->msgIdx == *msgIdx);
|
||||
} else {
|
||||
rsp.msgIdx = *msgIdx;
|
||||
rsp.reqType = -1;
|
||||
pRsp = &rsp;
|
||||
pRsp->msgIdx = *msgIdx;
|
||||
pRsp->reqType = -1;
|
||||
pRsp->rspCode = 0;
|
||||
taskMsg.msgType = -1;
|
||||
taskMsg.pData = NULL;
|
taskMsg.len = 0;

@ -72,20 +75,22 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu
SCtgTaskReq tReq;
tReq.pTask = pTask;
tReq.msgIdx = rsp.msgIdx;
tReq.msgIdx = pRsp->msgIdx;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq.msgIdx);
pMsgCtx->pBatchs = pBatchs;

ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s, pBatchs: %p", pJob->queryId, pTask->taskId,
rsp.msgIdx, TMSG_INFO(taskMsg.msgType + 1), pBatchs);
pRsp->msgIdx, TMSG_INFO(taskMsg.msgType + 1), pBatchs);

(*gCtgAsyncFps[pTask->type].handleRspFp)(&tReq, rsp.reqType, &taskMsg, (rsp.rspCode ? rsp.rspCode : rspCode));
(*gCtgAsyncFps[pTask->type].handleRspFp)(&tReq, pRsp->reqType, &taskMsg, (pRsp->rspCode ? pRsp->rspCode : rspCode));
}

CTG_ERR_JRET(ctgLaunchBatchs(pJob->pCtg, pJob, pBatchs));

_return:

taosArrayDestroyEx(batchRsp.pRsps, tFreeSBatchRspMsg);

ctgFreeBatchs(pBatchs);
CTG_RET(code);
}

@ -481,7 +486,6 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
if (NULL == taosArrayPush(newBatch.pMsgIdxs, &req.msgIdx)) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
newBatch.msgSize = sizeof(SBatchReq) + sizeof(req) + msgSize - POINTER_BYTES;

if (vgId > 0) {
SName* pName = NULL;

@ -533,8 +537,6 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}

pBatch->msgSize += sizeof(req) + msgSize - POINTER_BYTES;

if (vgId > 0) {
SName* pName = NULL;
if (TDMT_VND_TABLE_CFG == msgType) {

@ -570,38 +572,35 @@ _return:
return code;
}

int32_t ctgBuildBatchReqMsg(SCtgBatch* pBatch, int32_t vgId, void** msg) {
*msg = taosMemoryCalloc(1, pBatch->msgSize);
if (NULL == (*msg)) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}

int32_t offset = 0;
int32_t ctgBuildBatchReqMsg(SCtgBatch* pBatch, int32_t vgId, void** msg, int32_t *pSize) {
int32_t num = taosArrayGetSize(pBatch->pMsgs);
SBatchReq* pBatchReq = (SBatchReq*)(*msg);

if (num >= CTG_MAX_REQ_IN_BATCH) {
qError("too many msgs %d in one batch request", num);
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}

pBatchReq->header.vgId = htonl(vgId);
pBatchReq->msgNum = htonl(num);
offset += sizeof(SBatchReq);
SBatchReq batchReq = {0};

for (int32_t i = 0; i < num; ++i) {
SBatchMsg* pReq = taosArrayGet(pBatch->pMsgs, i);
*(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgIdx);
offset += sizeof(pReq->msgIdx);
*(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgType);
offset += sizeof(pReq->msgType);
*(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgLen);
offset += sizeof(pReq->msgLen);
memcpy((char*)(*msg) + offset, pReq->msg, pReq->msgLen);
offset += pReq->msgLen;
batchReq.header.vgId = vgId;
batchReq.pMsgs = pBatch->pMsgs;

int32_t msgSize = tSerializeSBatchReq(NULL, 0, &batchReq);
if (msgSize < 0) {
qError("tSerializeSBatchReq failed");
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}

ASSERT(pBatch->msgSize == offset);
*msg = taosMemoryCalloc(1, msgSize);
if (NULL == (*msg)) {
qError("calloc batchReq msg failed, size:%d", msgSize);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
if (tSerializeSBatchReq(*msg, msgSize, &batchReq) < 0) {
qError("tSerializeSBatchReq failed");
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}

*pSize = msgSize;

qDebug("batch req %d to vg %d msg built with %d meta reqs", pBatch->batchId, vgId, num);

@ -616,12 +615,13 @@ int32_t ctgLaunchBatchs(SCatalog* pCtg, SCtgJob* pJob, SHashObj* pBatchs) {
size_t len = 0;
int32_t* vgId = taosHashGetKey(p, &len);
SCtgBatch* pBatch = (SCtgBatch*)p;
int32_t msgSize = 0;

ctgDebug("QID:0x%" PRIx64 " ctg start to launch batch %d", pJob->queryId, pBatch->batchId);

CTG_ERR_JRET(ctgBuildBatchReqMsg(pBatch, *vgId, &msg));
CTG_ERR_JRET(ctgBuildBatchReqMsg(pBatch, *vgId, &msg, &msgSize));
code = ctgAsyncSendMsg(pCtg, &pBatch->conn, pJob, pBatch->pTaskIds, pBatch->batchId, pBatch->pMsgIdxs,
pBatch->dbFName, *vgId, pBatch->msgType, msg, pBatch->msgSize);
pBatch->dbFName, *vgId, pBatch->msgType, msg, msgSize);
pBatch->pTaskIds = NULL;
CTG_ERR_JRET(code);
@ -154,8 +154,8 @@ typedef struct {
} SSchemaInfo;

typedef struct {
int32_t operatorType;
int64_t refId;
} SExchangeOpStopInfo;

typedef struct {

@ -261,10 +261,10 @@ typedef struct SLimitInfo {
} SLimitInfo;

typedef struct SExchangeInfo {
SArray* pSources;
SArray* pSourceDataInfo;
tsem_t ready;
void* pTransporter;
// SArray<SSDataBlock*>, result block list, used to keep the multi-block that
// passed by downstream operator
SArray* pResultBlockList;

@ -275,7 +275,7 @@ typedef struct SExchangeInfo {
SLoadRemoteDataInfo loadInfo;
uint64_t self;
SLimitInfo limitInfo;
int64_t openedTs; // start exec time stamp
} SExchangeInfo;

typedef struct SScanInfo {

@ -310,9 +310,9 @@ typedef struct {
} SAggOptrPushDownInfo;

typedef struct STableMetaCacheInfo {
SLRUCache* pTableMetaEntryCache; // 100 by default
uint64_t metaFetch;
uint64_t cacheHit;
} STableMetaCacheInfo;

typedef struct STableScanInfo {

@ -343,7 +343,7 @@ typedef struct STableMergeScanInfo {
int32_t tableEndIndex;
bool hasGroupId;
uint64_t groupId;
SArray* queryConds; // array of queryTableDataCond
STsdbReader* pReader;
SReadHandle readHandle;
int32_t bufPageSize;

@ -358,7 +358,7 @@ typedef struct STableMergeScanInfo {
int64_t numOfRows;
SScanInfo scanInfo;
int32_t scanTimes;
SqlFunctionCtx* pCtx; // which belongs to the direct upstream operator operator query context
SResultRowInfo* pResultRowInfo;
int32_t* rowEntryInfoOffset;
SExprInfo* pExpr;

@ -504,6 +504,7 @@ typedef struct SStreamScanInfo {
STimeWindow updateWin;
STimeWindowAggSupp twAggSup;
SSDataBlock* pUpdateDataRes;
SHashObj* pGroupIdTbNameMap;
// status for tmq
SNodeList* pGroupTags;
SNode* pTagCond;

@ -550,10 +551,10 @@ typedef struct SSysTableScanInfo {
} SSysTableScanInfo;

typedef struct SBlockDistInfo {
SSDataBlock* pResBlock;
STsdbReader* pHandle;
SReadHandle readHandle;
uint64_t uid; // table uid
} SBlockDistInfo;

// todo remove this

@ -663,9 +664,9 @@ typedef struct SGroupbyOperatorInfo {
SAggSupporter aggSup;
SArray* pGroupCols; // group by columns, SArray<SColumn>
SArray* pGroupColVals; // current group column values, SArray<SGroupKeys>
bool isInit; // denote if current val is initialized or not
char* keyBuf; // group by keys for hash
int32_t groupKeyLen; // total group by column width
SGroupResInfo groupResInfo;
SExprSupp scalarSup;
} SGroupbyOperatorInfo;

@ -890,14 +891,14 @@ STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInter
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag);
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);

void doDestroyExchangeOperatorInfo(void* param);

void setOperatorCompleted(SOperatorInfo* pOperator);
void setOperatorInfo(SOperatorInfo* pOperator, const char* name, int32_t type, bool blocking, int32_t status, void* pInfo,
SExecTaskInfo* pTaskInfo);
void setOperatorInfo(SOperatorInfo* pOperator, const char* name, int32_t type, bool blocking, int32_t status,
void* pInfo, SExecTaskInfo* pTaskInfo);
void doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* pColMatchInfo);
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr,
SSDataBlock* pBlock, int32_t rows, const char* idStr, STableMetaCacheInfo * pCache);
int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int32_t numOfExpr, SSDataBlock* pBlock,
int32_t rows, const char* idStr, STableMetaCacheInfo* pCache);

void cleanupAggSup(SAggSupporter* pAggSup);
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);

@ -992,7 +993,7 @@ void setTaskKilled(SExecTaskInfo* pTaskInfo);
void queryCostStatis(SExecTaskInfo* pTaskInfo);

void doDestroyTask(SExecTaskInfo* pTaskInfo);
void destroyOperatorInfo(SOperatorInfo* pOperator);
int32_t getMaximumIdleDurationSec();

/*

@ -1038,6 +1039,7 @@ void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKE
uint64_t* pGp, void* pTbName);
void printDataBlock(SSDataBlock* pBlock, const char* flag);
uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock);

int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup,
SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);

@ -1061,7 +1063,7 @@ int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResul
int32_t releaseOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult);
int32_t saveOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order);
int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo *pInfo);
int32_t qAppendTaskStopInfo(SExecTaskInfo* pTaskInfo, SExchangeOpStopInfo* pInfo);

#ifdef __cplusplus
}
@ -163,9 +163,10 @@ SSortExecInfo tsortGetSortExecInfo(SSortHandle* pHandle);
/**
* get proper sort buffer pages according to the row size
* @param rowSize
* @param numOfCols columns count that be put into page
* @return
*/
int32_t getProperSortPageSize(size_t rowSize);
int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);

#ifdef __cplusplus
}
@ -1994,8 +1994,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp
int32_t scanFlag = MAIN_SCAN;
getTableScanInfo(pOperator, &order, &scanFlag);

int64_t ekey =
Q_STATUS_EQUAL(pTaskInfo->status, TASK_COMPLETED) ? pInfo->win.ekey : pInfo->existNewGroupBlock->info.window.ekey;
int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey;
taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo));

blockDataCleanup(pInfo->pRes);
@ -337,9 +337,13 @@ static void doSetTagColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlo

int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock, rows,
GET_TASKID(pTaskInfo), &pTableScanInfo->metaCache);
if (code != TSDB_CODE_SUCCESS) {
// ignore the table not exists error, since this table may have been dropped during the scan procedure.
if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_PAR_TABLE_NOT_EXIST) {
T_LONG_JMP(pTaskInfo->env, code);
}

// reset the error code.
terrno = 0;
}
}

@ -513,6 +517,21 @@ static void freeTableCachedVal(void* param) {
taosMemoryFree(pVal);
}

static STableCachedVal* createTableCacheVal(const SMetaReader* pMetaReader) {
STableCachedVal* pVal = taosMemoryMalloc(sizeof(STableCachedVal));
pVal->pName = strdup(pMetaReader->me.name);
pVal->pTags = NULL;

// only child table has tag value
if (pMetaReader->me.type == TSDB_CHILD_TABLE) {
STag* pTag = (STag*) pMetaReader->me.ctbEntry.pTags;
pVal->pTags = taosMemoryMalloc(pTag->len);
memcpy(pVal->pTags, pTag, pTag->len);
}

return pVal;
}

// const void *key, size_t keyLen, void *value
static void freeCachedMetaItem(const void* key, size_t keyLen, void* value) { freeTableCachedVal(value); }

@ -540,7 +559,11 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
metaReaderInit(&mr, pHandle->meta, 0);
code = metaGetTableEntryByUid(&mr, pBlock->info.uid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
} else {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
}
metaReaderClear(&mr);
return terrno;
}

@ -559,23 +582,18 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
metaReaderInit(&mr, pHandle->meta, 0);
code = metaGetTableEntryByUid(&mr, pBlock->info.uid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
} else {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.uid, tstrerror(terrno), idStr);
}
metaReaderClear(&mr);
return terrno;
}

metaReaderReleaseLock(&mr);

STableCachedVal* pVal = taosMemoryMalloc(sizeof(STableCachedVal));
pVal->pName = strdup(mr.me.name);
pVal->pTags = NULL;

// only child table has tag value
if (mr.me.type == TSDB_CHILD_TABLE) {
STag* pTag = (STag*)mr.me.ctbEntry.pTags;
pVal->pTags = taosMemoryMalloc(pTag->len);
memcpy(pVal->pTags, mr.me.ctbEntry.pTags, pTag->len);
}
STableCachedVal* pVal = createTableCacheVal(&mr);

val = *pVal;
freeReader = true;

@ -590,6 +608,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
pCache->cacheHit += 1;
STableCachedVal* pVal = taosLRUCacheValue(pCache->pTableMetaEntryCache, h);
val = *pVal;

taosLRUCacheRelease(pCache->pTableMetaEntryCache, h, false);
}

@ -1515,10 +1534,17 @@ static int32_t generateDeleteResultBlock(SStreamScanInfo* pInfo, SSDataBlock* pS
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
uint64_t srcUid = srcUidData[i];
uint64_t groupId = srcGp[i];
char* tbname[VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN] = {0};
if (groupId == 0) {
groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
}
appendOneRowToStreamSpecialBlock(pDestBlock, srcStartTsCol + i, srcEndTsCol + i, srcUidData + i, &groupId, NULL);
if (pInfo->tbnameCalSup.pExprInfo) {
char* parTbname = taosHashGet(pInfo->pGroupIdTbNameMap, &groupId, sizeof(int64_t));
memcpy(varDataVal(tbname), parTbname, TSDB_TABLE_NAME_LEN);
varDataSetLen(tbname, strlen(varDataVal(tbname)));
}
appendOneRowToStreamSpecialBlock(pDestBlock, srcStartTsCol + i, srcEndTsCol + i, srcUidData + i, &groupId,
tbname[0] == 0 ? NULL : tbname);
}
return TSDB_CODE_SUCCESS;
}

@ -1562,9 +1588,16 @@ static void calBlockTag(SExprSupp* pTagCalSup, SSDataBlock* pBlock, SSDataBlock*
blockDataDestroy(pSrcBlock);
}

static void calBlockTbName(SExprSupp* pTbNameCalSup, SSDataBlock* pBlock) {
void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock) {
SExprSupp* pTbNameCalSup = &pInfo->tbnameCalSup;
if (pTbNameCalSup == NULL || pTbNameCalSup->numOfExprs == 0) return;
if (pBlock == NULL || pBlock->info.rows == 0) return;
if (pBlock->info.groupId) {
char* tbname = taosHashGet(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t));
if (tbname != NULL) {
memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
}

SSDataBlock* pSrcBlock = blockCopyOneRow(pBlock, 0);
ASSERT(pSrcBlock->info.rows == 1);

@ -1592,6 +1625,11 @@ static void calBlockTbName(SExprSupp* pTbNameCalSup, SSDataBlock* pBlock) {
pBlock->info.parTbName[0] = 0;
}

if (pBlock->info.groupId) {
taosHashPut(pInfo->pGroupIdTbNameMap, &pBlock->info.groupId, sizeof(int64_t), pBlock->info.parTbName,
TSDB_TABLE_NAME_LEN);
}

blockDataDestroy(pSrcBlock);
blockDataDestroy(pResBlock);
}

@ -1713,7 +1751,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex);
blockDataFreeRes((SSDataBlock*)pBlock);

calBlockTbName(&pInfo->tbnameCalSup, pInfo->pRes);
calBlockTbName(pInfo, pInfo->pRes);
return 0;
}

@ -1960,9 +1998,13 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__SCAN) {
SSDataBlock* pBlock = doTableScan(pInfo->pTableScanOp);
if (pBlock != NULL) {
calBlockTbName(&pInfo->tbnameCalSup, pBlock);
updateInfoFillBlockData(pInfo->pUpdateInfo, pBlock, pInfo->primaryTsIndex);
calBlockTbName(pInfo, pBlock);
if (pInfo->pUpdateInfo) {
TSKEY maxTs = updateInfoFillBlockData(pInfo->pUpdateInfo, pBlock, pInfo->primaryTsIndex);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
}
qDebug("stream recover scan get block, rows %d", pBlock->info.rows);
printDataBlock(pBlock, "scan recover");
return pBlock;
}
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;

@ -2081,7 +2123,7 @@ FETCH_NEXT_BLOCK:
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
// printDataBlock(pSDB, "stream scan update");
calBlockTbName(&pInfo->tbnameCalSup, pSDB);
calBlockTbName(pInfo, pSDB);
return pSDB;
}
blockDataCleanup(pInfo->pUpdateDataRes);

@ -2386,6 +2428,7 @@ static void destroyStreamScanOperatorInfo(void* param) {
}

cleanupExprSupp(&pStreamScan->tbnameCalSup);
taosHashCleanup(pStreamScan->pGroupIdTbNameMap);

updateInfoDestroy(pStreamScan->pUpdateInfo);
blockDataDestroy(pStreamScan->pRes);

@ -2443,6 +2486,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (initExprSupp(&pInfo->tbnameCalSup, pSubTableExpr, 1) != 0) {
goto _error;
}
pInfo->pGroupIdTbNameMap =
taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);
}

if (pTableScanNode->pTags != NULL) {

@ -4822,7 +4867,8 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
initLimitInfo(pTableScanNode->scan.node.pLimit, pTableScanNode->scan.node.pSlimit, &pInfo->limitInfo);

int32_t rowSize = pInfo->pResBlock->info.rowSize;
pInfo->bufPageSize = getProperSortPageSize(rowSize);
uint32_t nCols = taosArrayGetSize(pInfo->pResBlock->pDataBlock);
pInfo->bufPageSize = getProperSortPageSize(rowSize, nCols);

setOperatorInfo(pOperator, "TableMergeScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN, false, OP_NOT_OPENED,
pInfo, pTaskInfo);
@ -760,7 +760,8 @@ SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** downStreams, size
pInfo->groupSort = pMergePhyNode->groupSort;
pInfo->pSortInfo = createSortInfo(pMergePhyNode->pMergeKeys);
pInfo->pInputBlock = pInputBlock;
pInfo->bufPageSize = getProperSortPageSize(rowSize);
size_t numOfCols = taosArrayGetSize(pInfo->binfo.pRes->pDataBlock);
pInfo->bufPageSize = getProperSortPageSize(rowSize, numOfCols);
pInfo->sortBufSize = pInfo->bufPageSize * (numStreams + 1); // one additional is reserved for merged result.

setOperatorInfo(pOperator, "MultiwayMergeOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE, false, OP_NOT_OPENED, pInfo, pTaskInfo);
@ -1610,10 +1610,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i);
destroyStreamFinalIntervalOperatorInfo(pChildOp->info);
taosMemoryFree(pChildOp->pDownstream);
cleanupExprSupp(&pChildOp->exprSupp);
taosMemoryFreeClear(pChildOp);
destroyOperatorInfo(pChildOp);
}
taosArrayDestroy(pInfo->pChildren);
}

@ -3418,11 +3415,10 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
if (pInfo->pChildren != NULL) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
destroyStreamSessionAggOperatorInfo(pChInfo);
taosMemoryFreeClear(pChild);
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
destroyOperatorInfo(pChild);
}
taosArrayDestroy(pInfo->pChildren);
}
colDataDestroy(&pInfo->twAggSup.timeWindowData);
blockDataDestroy(pInfo->pDelRes);

@ -3470,7 +3466,9 @@ void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int
}
SStreamScanInfo* pScanInfo = downstream->info;
pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = pAggSup->gap, .parentType = type};
pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, waterMark);
if (!pScanInfo->pUpdateInfo) {
pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, waterMark);
}
}

int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, int64_t gap,

@ -4374,9 +4372,6 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream

setOperatorInfo(pOperator, name, pPhyNode->type, false, OP_NOT_OPENED, pInfo, pTaskInfo);

pInfo->pGroupIdTbNameMap =
taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false, HASH_NO_LOCK);

pOperator->operatorType = pPhyNode->type;
if (numOfChild > 0) {
pInfo->pChildren = taosArrayInit(numOfChild, sizeof(void*));

@ -4415,11 +4410,10 @@ void destroyStreamStateOperatorInfo(void* param) {
if (pInfo->pChildren != NULL) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
destroyStreamSessionAggOperatorInfo(pChInfo);
taosMemoryFreeClear(pChild);
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
destroyOperatorInfo(pChild);
}
taosArrayDestroy(pInfo->pChildren);
}
colDataDestroy(&pInfo->twAggSup.timeWindowData);
blockDataDestroy(pInfo->pDelRes);

@ -4898,6 +4892,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
if (pMiaInfo->groupId == 0) {
if (pMiaInfo->groupId != pBlock->info.groupId) {
pMiaInfo->groupId = pBlock->info.groupId;
pRes->info.groupId = pMiaInfo->groupId;
}
} else {
if (pMiaInfo->groupId != pBlock->info.groupId) {

@ -4911,6 +4906,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
break;
} else {
// continue
pRes->info.groupId = pMiaInfo->groupId;
}
}
@ -584,15 +584,11 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
return 0;
}

// TODO consider the page meta size
int32_t getProperSortPageSize(size_t rowSize) {
uint32_t defaultPageSize = 4096;

uint32_t pgSize = 0;
if (rowSize * 4 > defaultPageSize) {
pgSize = rowSize * 4;
} else {
pgSize = defaultPageSize;
// get sort page size
int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols) {
uint32_t pgSize = rowSize * 4 + blockDataGetSerialMetaSize(numOfCols);
if (pgSize < DEFAULT_PAGESIZE) {
return DEFAULT_PAGESIZE;
}

return pgSize;

@ -612,7 +608,8 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
}

if (pHandle->pDataBlock == NULL) {
pHandle->pageSize = getProperSortPageSize(blockDataGetRowSize(pBlock));
uint32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
pHandle->pageSize = getProperSortPageSize(blockDataGetRowSize(pBlock), numOfCols);

// todo, number of pages are set according to the total available sort buffer
pHandle->numOfPages = 1024;
@ -917,6 +917,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
int32_t startIndex;
if (numOfParams != 4) {
snprintf(errMsg, msgLen, "%s", msg1);
cJSON_Delete(binDesc);
return false;
}

@ -928,17 +929,20 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err

if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) {
snprintf(errMsg, msgLen, "%s", msg3);
cJSON_Delete(binDesc);
return false;
}

if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000
snprintf(errMsg, msgLen, "%s", msg4);
cJSON_Delete(binDesc);
return false;
}

if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) ||
(factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) {
snprintf(errMsg, msgLen, "%s", msg5);
cJSON_Delete(binDesc);
return false;
}

@ -957,6 +961,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (width->valuedouble == 0) {
snprintf(errMsg, msgLen, "%s", msg6);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
for (int i = 0; i < counter + 1; ++i) {

@ -964,6 +969,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (isinf(intervals[startIndex])) {
snprintf(errMsg, msgLen, "%s", msg5);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
startIndex++;

@ -973,11 +979,13 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (start->valuedouble == 0) {
snprintf(errMsg, msgLen, "%s", msg7);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
if (factor->valuedouble < 0 || factor->valuedouble == 0 || factor->valuedouble == 1) {
snprintf(errMsg, msgLen, "%s", msg8);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
for (int i = 0; i < counter + 1; ++i) {

@ -985,6 +993,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (isinf(intervals[startIndex])) {
snprintf(errMsg, msgLen, "%s", msg5);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
startIndex++;

@ -992,6 +1001,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
} else {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}

@ -1007,6 +1017,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
} else if (cJSON_IsArray(binDesc)) { /* user input bins */
if (binType != USER_INPUT_BIN) {
snprintf(errMsg, msgLen, "%s", msg3);
cJSON_Delete(binDesc);
return false;
}
numOfBins = cJSON_GetArraySize(binDesc);

@ -1015,6 +1026,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (bin == NULL) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
int i = 0;

@ -1023,11 +1035,13 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
if (!cJSON_IsNumber(bin)) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
if (i != 0 && intervals[i] <= intervals[i - 1]) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
cJSON_Delete(binDesc);
return false;
}
bin = bin->next;

@ -1035,6 +1049,7 @@ static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* err
}
} else {
snprintf(errMsg, msgLen, "%s", msg3);
cJSON_Delete(binDesc);
return false;
}

@ -1464,11 +1479,16 @@ static int32_t translateDerivative(SFunctionNode* pFunc, char* pErrBuf, int32_t
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;

// param1
SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 1);
SValueNode* pValue1 = (SValueNode*)pParamNode1;
if (QUERY_NODE_VALUE != nodeType(pParamNode1)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}

if (pValue1->datum.i <= 0) {
return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
}

SValueNode* pValue = (SValueNode*)pParamNode1;
pValue->notReserved = true;
@ -48,8 +48,8 @@ typedef struct SSumRes {
double dsum;
};
int16_t type;
int64_t prevTs; // used for csum only
bool isPrevTsSet; //used for csum only
int64_t prevTs; // used for csum only
bool isPrevTsSet; // used for csum only

} SSumRes;

@ -1083,7 +1083,10 @@ int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
int32_t type = pAvgRes->type;

if (IS_SIGNED_NUMERIC_TYPE(type)) {
if (pAvgRes->count == 0) {
// [ASAN] runtime error: division by zero
GET_RES_INFO(pCtx)->numOfRes = 0;
} else if (IS_SIGNED_NUMERIC_TYPE(type)) {
pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count);
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
pAvgRes->result = pAvgRes->sum.usum / ((double)pAvgRes->count);

@ -2217,8 +2220,8 @@ bool leastSQRFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInf

SLeastSQRInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);

pInfo->startVal = IS_FLOAT_TYPE(pCtx->param[1].param.nType) ? pCtx->param[1].param.d : (double)pCtx->param[1].param.i;
pInfo->stepVal = IS_FLOAT_TYPE(pCtx->param[2].param.nType) ? pCtx->param[2].param.d : (double)pCtx->param[2].param.i;
GET_TYPED_DATA(pInfo->startVal, double, pCtx->param[1].param.nType, &pCtx->param[1].param.i);
GET_TYPED_DATA(pInfo->stepVal, double, pCtx->param[2].param.nType, &pCtx->param[2].param.i);
return true;
}

@ -2562,8 +2565,8 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {

int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SVariant* pVal = &pCtx->param[1].param;
double v =
(IS_SIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->i : (IS_UNSIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->u : pVal->d));
double v = 0;
GET_TYPED_DATA(v, double, pVal->nType, &pVal->i);

SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo* ppInfo = (SPercentileInfo*)GET_ROWCELL_INTERBUF(pResInfo);

@ -2622,8 +2625,8 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult
SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResultInfo);

SVariant* pVal = &pCtx->param[1].param;
pInfo->percent =
(IS_SIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->i : (IS_UNSIGNED_NUMERIC_TYPE(pVal->nType) ? pVal->u : pVal->d));
pInfo->percent = 0;
GET_TYPED_DATA(pInfo->percent, double, pVal->nType, &pVal->i);

if (pCtx->numOfParams == 2) {
pInfo->algo = APERCT_ALGO_DEFAULT;

@ -2924,6 +2927,8 @@ static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowInde

if (!pInfo->hasResult) {
pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL);
ASSERT(pCtx->subsidiaries.buf != NULL);
ASSERT(pCtx->subsidiaries.rowLen > 0);
} else {
updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
}

@ -3717,6 +3722,12 @@ static int32_t topBotResComparFn(const void* p1, const void* p2, const void* par
}

return (val1->v.u > val2->v.u) ? 1 : -1;
} else if (TSDB_DATA_TYPE_FLOAT == type) {
if (val1->v.f == val2->v.f) {
return 0;
}

return (val1->v.f > val2->v.f) ? 1 : -1;
}

if (val1->v.d == val2->v.d) {

@ -3757,10 +3768,12 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
} else { // replace the minimum value in the result
if ((isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && val.i > pItems[0].v.i) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && val.u > pItems[0].v.u) ||
(IS_FLOAT_TYPE(type) && val.d > pItems[0].v.d))) ||
(TSDB_DATA_TYPE_FLOAT == type && val.f > pItems[0].v.f) ||
(TSDB_DATA_TYPE_DOUBLE == type && val.d > pItems[0].v.d))) ||
(!isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && val.i < pItems[0].v.i) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && val.u < pItems[0].v.u) ||
(IS_FLOAT_TYPE(type) && val.d < pItems[0].v.d)))) {
(TSDB_DATA_TYPE_FLOAT == type && val.f < pItems[0].v.f) ||
(TSDB_DATA_TYPE_DOUBLE == type && val.d < pItems[0].v.d)))) {
// replace the old data and the coresponding tuple data
STopBotResItem* pItem = &pItems[0];
pItem->v = val;

@ -3923,12 +3936,7 @@ int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
}
for (int32_t i = 0; i < pEntryInfo->numOfRes; ++i) {
STopBotResItem* pItem = &pRes->pItems[i];
if (type == TSDB_DATA_TYPE_FLOAT) {
float v = pItem->v.d;
colDataAppend(pCol, currentRow, (const char*)&v, false);
} else {
colDataAppend(pCol, currentRow, (const char*)&pItem->v.i, false);
}
colDataAppend(pCol, currentRow, (const char*)&pItem->v.i, false);
#ifdef BUF_PAGE_DEBUG
qDebug("page_finalize i:%d,item:%p,pageId:%d, offset:%d\n", i, pItem, pItem->tuplePos.pageId,
pItem->tuplePos.offset);

@ -3959,10 +3967,12 @@ void addResult(SqlFunctionCtx* pCtx, STopBotResItem* pSourceItem, int16_t type,
} else { // replace the minimum value in the result
if ((isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && pSourceItem->v.i > pItems[0].v.i) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && pSourceItem->v.u > pItems[0].v.u) ||
(IS_FLOAT_TYPE(type) && pSourceItem->v.d > pItems[0].v.d))) ||
(TSDB_DATA_TYPE_FLOAT == type && pSourceItem->v.f > pItems[0].v.f) ||
(TSDB_DATA_TYPE_DOUBLE == type && pSourceItem->v.d > pItems[0].v.d))) ||
(!isTopQuery && ((IS_SIGNED_NUMERIC_TYPE(type) && pSourceItem->v.i < pItems[0].v.i) ||
(IS_UNSIGNED_NUMERIC_TYPE(type) && pSourceItem->v.u < pItems[0].v.u) ||
(IS_FLOAT_TYPE(type) && pSourceItem->v.d < pItems[0].v.d)))) {
(TSDB_DATA_TYPE_FLOAT == type && pSourceItem->v.f < pItems[0].v.f) ||
(TSDB_DATA_TYPE_DOUBLE == type && pSourceItem->v.d < pItems[0].v.d)))) {
// replace the old data and the coresponding tuple data
STopBotResItem* pItem = &pItems[0];
pItem->v = pSourceItem->v;

@ -6033,7 +6043,7 @@ int32_t twaFinalize(struct SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
} else {
if (pInfo->win.ekey == pInfo->win.skey) {
pInfo->dOutput = pInfo->p.val;
} else if (pInfo->win.ekey == INT64_MAX || pInfo->win.skey == INT64_MIN) { //no data in timewindow
} else if (pInfo->win.ekey == INT64_MAX || pInfo->win.skey == INT64_MIN) { // no data in timewindow
pInfo->dOutput = 0;
} else {
pInfo->dOutput = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey);
@ -2036,6 +2036,8 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
pVal->u = pNode->datum.u;
break;
case TSDB_DATA_TYPE_FLOAT:
pVal->f = pNode->datum.d;
break;
case TSDB_DATA_TYPE_DOUBLE:
pVal->d = pNode->datum.d;
break;
|
|||
return hasFillFunc;
|
||||
}
|
||||
|
||||
static bool mismatchFillDataType(SDataType origDt, SDataType fillDt) {
|
||||
if (TSDB_DATA_TYPE_NULL == fillDt.type) {
|
||||
return false;
|
||||
static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList* pValues, int32_t index) {
|
||||
SListCell* pCell = nodesListGetCell(pValues, index);
|
||||
if (dataTypeEqual(&dt, &((SExprNode*)pCell->pNode)->resType)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
if (IS_NUMERIC_TYPE(origDt.type) && !IS_NUMERIC_TYPE(fillDt.type)) {
|
||||
return true;
|
||||
SNode* pCaseFunc = NULL;
|
||||
int32_t code = createCastFunc(pCxt, pCell->pNode, dt, &pCaseFunc);
|
||||
if (TSDB_CODE_SUCCESS == code) {
|
||||
code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
|
||||
}
|
||||
if (IS_VAR_DATA_TYPE(origDt.type) && !IS_VAR_DATA_TYPE(fillDt.type)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
return code;
|
||||
}
|
||||
|
||||
static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {
|
||||
|
@ -2788,8 +2788,8 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
|
|||
if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
|
||||
}
|
||||
if (mismatchFillDataType(((SExprNode*)pProject)->resType,
|
||||
((SExprNode*)nodesListGetNode(pFillValues->pNodeList, fillNo))->resType)) {
|
||||
if (TSDB_CODE_SUCCESS !=
|
||||
convertFillValue(pCxt, ((SExprNode*)pProject)->resType, pFillValues->pNodeList, fillNo)) {
|
||||
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
|
||||
}
|
||||
++fillNo;
|
||||
|
|
|
@ -348,7 +348,9 @@ static int32_t scanPathOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub
int32_t code = scanPathOptMatch(pCxt, pLogicSubplan->pNode, &info);
if (TSDB_CODE_SUCCESS == code && info.pScan) {
scanPathOptSetScanWin(info.pScan);
scanPathOptSetScanOrder(info.scanOrder, info.pScan);
if (!pCxt->pPlanCxt->streamQuery) {
scanPathOptSetScanOrder(info.scanOrder, info.pScan);
}
}
if (TSDB_CODE_SUCCESS == code && (NULL != info.pDsoFuncs || NULL != info.pSdrFuncs)) {
info.pScan->dataRequired = scanPathOptGetDataRequired(info.pSdrFuncs);
@ -1191,6 +1191,8 @@ void qWorkerStopAllTasks(void *qWorkerMgmt) {

if (QW_QUERY_RUNNING(ctx)) {
qwKillTaskHandle(ctx);
} else if (!QW_EVENT_PROCESSED(ctx, QW_EVENT_DROP)) {
QW_SET_EVENT_RECEIVED(ctx, QW_EVENT_DROP);
}

QW_UNLOCK(QW_WRITE, &ctx->lock);
@ -91,6 +91,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
void* exec = pTask->exec.executor;

qSetStreamOpOpen(exec);
bool finished = false;

while (1) {
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));

@ -106,7 +107,10 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
if (qExecTask(exec, &output, &ts) < 0) {
ASSERT(0);
}
if (output == NULL) break;
if (output == NULL) {
finished = true;
break;
}

SSDataBlock block = {0};
assignOneDataBlock(&block, output);

@ -133,6 +137,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
streamDispatch(pTask);
}
if (finished) break;
}
return 0;
}
@ -163,9 +163,9 @@ bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid) {
return false;
}

void updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol) {
if (pBlock == NULL || pBlock->info.rows == 0) return;
TSKEY maxTs = -1;
TSKEY updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol) {
if (pBlock == NULL || pBlock->info.rows == 0) return INT64_MIN;
TSKEY maxTs = INT64_MIN;
int64_t tbUid = pBlock->info.uid;

SColumnInfoData *pColDataInfo = taosArrayGet(pBlock->pDataBlock, primaryTsCol);

@ -186,6 +186,7 @@ void updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t pr
if (pMaxTs == NULL || *pMaxTs > maxTs) {
taosHashPut(pInfo->pMap, &tbUid, sizeof(int64_t), &maxTs, sizeof(TSKEY));
}
return maxTs;
}

bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
@ -57,6 +57,11 @@ void syncNodeRemove(int64_t rid);
SSyncNode* syncNodeAcquire(int64_t rid);
void syncNodeRelease(SSyncNode* pNode);

int64_t syncHbTimerDataAdd(SSyncHbTimerData* pData);
void syncHbTimerDataRemove(int64_t rid);
SSyncHbTimerData* syncHbTimerDataAcquire(int64_t rid);
void syncHbTimerDataRelease(SSyncHbTimerData* pData);

#ifdef __cplusplus
}
#endif
@ -57,10 +57,11 @@ typedef struct SRaftId {
} SRaftId;

typedef struct SSyncHbTimerData {
SSyncNode* pSyncNode;
int64_t syncNodeRid;
SSyncTimer* pTimer;
SRaftId destId;
uint64_t logicClock;
int64_t rid;
} SSyncHbTimerData;

typedef struct SSyncTimer {

@ -70,7 +71,7 @@ typedef struct SSyncTimer {
uint64_t counter;
int32_t timerMS;
SRaftId destId;
SSyncHbTimerData hbData;
int64_t hbDataRid;
} SSyncTimer;

typedef struct SElectTimerParam {

@ -189,6 +190,8 @@ typedef struct SSyncNode {
int64_t leaderTime;
int64_t lastReplicateTime;

bool isStart;

} SSyncNode;

// open/close --------------

@ -198,6 +201,7 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode);
void syncNodeClose(SSyncNode* pSyncNode);
void syncNodePreClose(SSyncNode* pSyncNode);
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak);
void syncHbTimerDataFree(SSyncHbTimerData* pData);

// on message ---------------------
int32_t syncNodeOnTimeout(SSyncNode* ths, const SRpcMsg* pMsg);
@ -20,6 +20,7 @@

static SSyncEnv gSyncEnv = {0};
static int32_t gNodeRefId = -1;
static int32_t gHbDataRefId = -1;
static void syncEnvTick(void *param, void *tmrId);

SSyncEnv *syncEnv() { return &gSyncEnv; }

@ -50,6 +51,13 @@ int32_t syncInit() {
return -1;
}

gHbDataRefId = taosOpenRef(200, (RefFp)syncHbTimerDataFree);
if (gHbDataRefId < 0) {
sError("failed to init hb-data ref");
syncCleanUp();
return -1;
}

sDebug("sync rsetId:%d is open", gNodeRefId);
return 0;
}

@ -64,6 +72,12 @@ void syncCleanUp() {
taosCloseRef(gNodeRefId);
gNodeRefId = -1;
}

if (gHbDataRefId != -1) {
sDebug("sync rsetId:%d is closed", gHbDataRefId);
taosCloseRef(gHbDataRefId);
gHbDataRefId = -1;
}
}

int64_t syncNodeAdd(SSyncNode *pNode) {

@ -88,6 +102,26 @@ SSyncNode *syncNodeAcquire(int64_t rid) {

void syncNodeRelease(SSyncNode *pNode) { taosReleaseRef(gNodeRefId, pNode->rid); }

int64_t syncHbTimerDataAdd(SSyncHbTimerData *pData) {
pData->rid = taosAddRef(gHbDataRefId, pData);
if (pData->rid < 0) return -1;
return pData->rid;
}

void syncHbTimerDataRemove(int64_t rid) { taosRemoveRef(gHbDataRefId, rid); }

SSyncHbTimerData *syncHbTimerDataAcquire(int64_t rid) {
SSyncHbTimerData *pData = taosAcquireRef(gHbDataRefId, rid);
if (pData == NULL) {
sError("failed to acquire hb-timer-data from refId:%" PRId64, rid);
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
}

return pData;
}

void syncHbTimerDataRelease(SSyncHbTimerData *pData) { taosReleaseRef(gHbDataRefId, pData->rid); }

#if 0
void syncEnvStartTimer() {
taosTmrReset(gSyncEnv.FpEnvTickTimer, gSyncEnv.envTickTimerMS, &gSyncEnv, gSyncEnv.pTimerManager,
@ -91,6 +91,7 @@ void syncStart(int64_t rid) {
void syncStop(int64_t rid) {
SSyncNode* pSyncNode = syncNodeAcquire(rid);
if (pSyncNode != NULL) {
pSyncNode->isStart = false;
syncNodeRelease(pSyncNode);
syncNodeRemove(rid);
}

@ -429,7 +430,7 @@ bool syncIsReadyForRead(int64_t rid) {

int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) {
if (pSyncNode->peersNum == 0) {
sDebug("only one replica, cannot leader transfer");
sDebug("vgId:%d, only one replica, cannot leader transfer", pSyncNode->vgId);
terrno = TSDB_CODE_SYN_ONE_REPLICA;
return -1;
}

@ -445,7 +446,7 @@ int32_t syncNodeLeaderTransfer(SSyncNode* pSyncNode) {

int32_t syncNodeLeaderTransferTo(SSyncNode* pSyncNode, SNodeInfo newLeader) {
if (pSyncNode->replicaNum == 1) {
sDebug("only one replica, cannot leader transfer");
sDebug("vgId:%d, only one replica, cannot leader transfer", pSyncNode->vgId);
terrno = TSDB_CODE_SYN_ONE_REPLICA;
return -1;
}

@ -665,13 +666,20 @@ static int32_t syncHbTimerInit(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer, SRa
static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
int32_t ret = 0;
if (syncIsInit()) {
SSyncHbTimerData* pData = &pSyncTimer->hbData;
pData->pSyncNode = pSyncNode;
SSyncHbTimerData* pData = syncHbTimerDataAcquire(pSyncTimer->hbDataRid);
if (pData == NULL) {
pData = taosMemoryMalloc(sizeof(SSyncHbTimerData));
pData->rid = syncHbTimerDataAdd(pData);
}
pSyncTimer->hbDataRid = pData->rid;

pData->syncNodeRid = pSyncNode->rid;
pData->pTimer = pSyncTimer;
pData->destId = pSyncTimer->destId;
pData->logicClock = pSyncTimer->logicClock;

taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, pData, syncEnv()->pTimerManager, &pSyncTimer->pTimer);
taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid), syncEnv()->pTimerManager,
&pSyncTimer->pTimer);
} else {
sError("vgId:%d, start ctrl hb timer error, sync env is stop", pSyncNode->vgId);
}

@ -683,6 +691,8 @@ static int32_t syncHbTimerStop(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
atomic_add_fetch_64(&pSyncTimer->logicClock, 1);
taosTmrStop(pSyncTimer->pTimer);
pSyncTimer->pTimer = NULL;
syncHbTimerDataRemove(pSyncTimer->hbDataRid);
pSyncTimer->hbDataRid = -1;
return ret;
}

@ -934,6 +944,7 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
SSyncSnapshotSender* pSender = snapshotSenderCreate(pSyncNode, i);
// ASSERT(pSender != NULL);
(pSyncNode->senders)[i] = pSender;
sSTrace(pSender, "snapshot sender create new while open, data:%p", pSender);
}

// snapshot receivers

@ -960,7 +971,8 @@ SSyncNode* syncNodeOpen(SSyncInfo* pSyncInfo) {
// snapshotting
atomic_store_64(&pSyncNode->snapshottingIndex, SYNC_INDEX_INVALID);

sNTrace(pSyncNode, "sync open");
pSyncNode->isStart = true;
sNTrace(pSyncNode, "sync open, node:%p", pSyncNode);

return pSyncNode;

@ -1027,15 +1039,13 @@ void syncNodePreClose(SSyncNode* pSyncNode) {
syncNodeStopHeartbeatTimer(pSyncNode);
}

void syncHbTimerDataFree(SSyncHbTimerData* pData) { taosMemoryFree(pData); }

void syncNodeClose(SSyncNode* pSyncNode) {
if (pSyncNode == NULL) {
return;
}
int32_t ret;
if (pSyncNode == NULL) return;
sNTrace(pSyncNode, "sync close, data:%p", pSyncNode);

sNTrace(pSyncNode, "sync close");

ret = raftStoreClose(pSyncNode->pRaftStore);
int32_t ret = raftStoreClose(pSyncNode->pRaftStore);
ASSERT(ret == 0);
pSyncNode->pRaftStore = NULL;

@ -1064,6 +1074,7 @@ void syncNodeClose(SSyncNode* pSyncNode) {

for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if ((pSyncNode->senders)[i] != NULL) {
sSTrace((pSyncNode->senders)[i], "snapshot sender destroy while close, data:%p", (pSyncNode->senders)[i]);
snapshotSenderDestroy((pSyncNode->senders)[i]);
(pSyncNode->senders)[i] = NULL;
}

@ -1410,15 +1421,17 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if ((pSyncNode->senders)[i] == NULL) {
(pSyncNode->senders)[i] = snapshotSenderCreate(pSyncNode, i);
sSTrace((pSyncNode->senders)[i], "snapshot sender create new");
sSTrace((pSyncNode->senders)[i], "snapshot sender create new while reconfig, data:%p", (pSyncNode->senders)[i]);
} else {
sSTrace((pSyncNode->senders)[i], "snapshot sender already exist, data:%p", (pSyncNode->senders)[i]);
}
}

// free old
for (int32_t i = 0; i < TSDB_MAX_REPLICA; ++i) {
if (oldSenders[i] != NULL) {
sNTrace(pSyncNode, "snapshot sender destroy old, data:%p replica-index:%d", oldSenders[i], i);
snapshotSenderDestroy(oldSenders[i]);
sNTrace(pSyncNode, "snapshot sender delete old %p replica-index:%d", oldSenders[i], i);
oldSenders[i] = NULL;
}
}

@ -1929,29 +1942,59 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
}

static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
SSyncHbTimerData* pData = (SSyncHbTimerData*)param;
SSyncNode* pSyncNode = pData->pSyncNode;
SSyncTimer* pSyncTimer = pData->pTimer;
int64_t hbDataRid = (int64_t)param;

SSyncHbTimerData* pData = syncHbTimerDataAcquire(hbDataRid);
if (pData == NULL) {
sError("hb timer get pData NULL, %" PRId64, hbDataRid);
return;
}

SSyncNode* pSyncNode = syncNodeAcquire(pData->syncNodeRid);
if (pSyncNode == NULL) {
syncHbTimerDataRelease(pData);
sError("hb timer get pSyncNode NULL");
return;
}

SSyncTimer* pSyncTimer = pData->pTimer;

if (!pSyncNode->isStart) {
syncNodeRelease(pSyncNode);
syncHbTimerDataRelease(pData);
sError("vgId:%d, hb timer sync node already stop", pSyncNode->vgId);
return;
}

if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
syncNodeRelease(pSyncNode);
syncHbTimerDataRelease(pData);
sError("vgId:%d, hb timer sync node not leader", pSyncNode->vgId);
return;
}

if (pSyncNode->pRaftStore == NULL) {
syncNodeRelease(pSyncNode);
syncHbTimerDataRelease(pData);
sError("vgId:%d, hb timer raft store already stop", pSyncNode->vgId);
return;
}

// sNTrace(pSyncNode, "eq peer hb timer");

int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock);
int64_t msgLogicClock = atomic_load_64(&pData->logicClock);
// sTrace("vgId:%d, eq peer hb timer", pSyncNode->vgId);

if (pSyncNode->replicaNum > 1) {
int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock);
int64_t msgLogicClock = atomic_load_64(&pData->logicClock);

if (timerLogicClock == msgLogicClock) {
if (syncIsInit()) {
// sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId);
taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid, syncEnv()->pTimerManager,
&pSyncTimer->pTimer);
} else {
sError("sync env is stop, reset peer hb timer error");
}

SRpcMsg rpcMsg = {0};
(void)syncBuildHeartbeat(&rpcMsg, pSyncNode->vgId);

@ -1966,18 +2009,14 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
// send msg
syncNodeSendHeartbeat(pSyncNode, &pSyncMsg->destId, &rpcMsg);

if (syncIsInit()) {
taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, pData, syncEnv()->pTimerManager,
&pSyncTimer->pTimer);
} else {
sError("sync env is stop, syncNodeEqHeartbeatTimer");
}

} else {
sTrace("==syncNodeEqPeerHeartbeatTimer== timerLogicClock:%" PRId64 ", msgLogicClock:%" PRId64 "", timerLogicClock,
msgLogicClock);
sTrace("vgId:%d, do not send hb, timerLogicClock:%" PRId64 ", msgLogicClock:%" PRId64 "", pSyncNode->vgId,
timerLogicClock, msgLogicClock);
}
}

syncHbTimerDataRelease(pData);
syncNodeRelease(pSyncNode);
}

static int32_t syncNodeEqNoop(SSyncNode* pNode) {

@ -2075,6 +2114,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
SyncLocalCmd* pSyncMsg = rpcMsgLocalCmd.pCont;
pSyncMsg->cmd = SYNC_LOCAL_CMD_FOLLOWER_CMT;
pSyncMsg->fcIndex = pMsg->commitIndex;
SyncIndex fcIndex = pSyncMsg->fcIndex;

if (ths->syncEqMsg != NULL && ths->msgcb != NULL) {
int32_t code = ths->syncEqMsg(ths->msgcb, &rpcMsgLocalCmd);

@ -2082,7 +2122,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, const SRpcMsg* pRpcMsg) {
sError("vgId:%d, sync enqueue fc-commit msg error, code:%d", ths->vgId, code);
rpcFreeCont(rpcMsgLocalCmd.pCont);
} else {
sTrace("vgId:%d, sync enqueue fc-commit msg, fc-index:%" PRId64, ths->vgId, pSyncMsg->fcIndex);
sTrace("vgId:%d, sync enqueue fc-commit msg, fc-index:%" PRId64, ths->vgId, fcIndex);
}
}
}
@ -38,7 +38,7 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
}

// pLogStore->pCache = taosLRUCacheInit(10 * 1024 * 1024, 1, .5);
pLogStore->pCache = taosLRUCacheInit(100 * 1024 * 1024, 1, .5);
pLogStore->pCache = taosLRUCacheInit(30 * 1024 * 1024, 1, .5);
if (pLogStore->pCache == NULL) {
taosMemoryFree(pLogStore);
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
@ -251,7 +251,7 @@ static void walFsyncAll() {
int32_t code = taosFsyncFile(pWal->pLogFile);
if (code != 0) {
wError("vgId:%d, file:%" PRId64 ".log, failed to fsync since %s", pWal->cfg.vgId, walGetLastFileFirstVer(pWal),
strerror(code));
strerror(errno));
}
}
pWal = taosIterateRef(tsWal.refSetId, pWal->refId);
@ -325,10 +325,10 @@ int32_t walEndSnapshot(SWal *pWal) {
SWalFileInfo *pInfo = taosArraySearch(pWal->fileInfoSet, &tmp, compareWalFileInfo, TD_LE);
if (pInfo) {
if (ver >= pInfo->lastVer) {
pInfo++;
pInfo--;
}
if (POINTER_DISTANCE(pInfo, pWal->fileInfoSet->pData) > 0) {
wDebug("vgId:%d, wal end remove from %" PRId64, pWal->cfg.vgId, pInfo->firstVer);
wDebug("vgId:%d, wal end remove for %" PRId64, pWal->cfg.vgId, pInfo->firstVer);
} else {
wDebug("vgId:%d, wal no remove", pWal->cfg.vgId);
}
@ -646,7 +646,12 @@ const char* tstrerror(int32_t err) {

// this is a system errno
if ((err & 0x00ff0000) == 0x00ff0000) {
return strerror(err & 0x0000ffff);
int32_t code = err & 0x0000ffff;
if (code >= 0 && code < 36) {
return strerror(code);
} else {
return "unknown err";
}
}

int32_t s = 0;
@ -147,7 +147,7 @@ static FORCE_INLINE SHashNode *doSearchInEntryList(SHashObj *pHashObj, SHashEntr
uint32_t hashVal) {
SHashNode *pNode = pe->next;
while (pNode) {
atomic_add_fetch_64(&pHashObj->compTimes, 1);
//atomic_add_fetch_64(&pHashObj->compTimes, 1);
if ((pNode->keyLen == keyLen) && ((*(pHashObj->equalFp))(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0) &&
pNode->removed == 0) {
assert(pNode->hashVal == hashVal);
|
|||
,,y,script,./test.sh -f tsim/user/privilege_db.sim
|
||||
,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim
|
||||
,,y,script,./test.sh -f tsim/db/alter_option.sim
|
||||
,,,script,./test.sh -f tsim/db/alter_replica_13.sim
|
||||
#,,y,script,./test.sh -f tsim/db/alter_replica_13.sim
|
||||
,,y,script,./test.sh -f tsim/db/alter_replica_31.sim
|
||||
,,y,script,./test.sh -f tsim/db/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/db/basic2.sim
|
||||
|
@ -35,30 +35,30 @@
|
|||
,,y,script,./test.sh -f tsim/db/show_create_table.sim
|
||||
,,y,script,./test.sh -f tsim/db/tables.sim
|
||||
,,y,script,./test.sh -f tsim/db/taosdlog.sim
|
||||
,,,script,./test.sh -f tsim/dnode/balance_replica1.sim
|
||||
,,,script,./test.sh -f tsim/dnode/balance_replica3.sim
|
||||
#,,,script,./test.sh -f tsim/dnode/balance1.sim
|
||||
#,,,script,./test.sh -f tsim/dnode/balance2.sim
|
||||
#,,,script,./test.sh -f tsim/dnode/balance3.sim
|
||||
#,,,script,./test.sh -f tsim/dnode/balancex.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance2.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balance3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/balancex.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/create_dnode.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim
|
||||
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim
|
||||
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
|
||||
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
|
||||
,,,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
|
||||
,,,script,./test.sh -f tsim/dnode/drop_dnode_force.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/offline_reason.sim
|
||||
,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
|
||||
,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
|
||||
,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
|
||||
,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
|
||||
,,,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
|
||||
#,,,script,./test.sh -f tsim/dnode/vnode_clean.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/vnode_clean.sim
|
||||
,,,script,./test.sh -f tsim/dnode/use_dropped_dnode.sim
|
||||
,,,script,./test.sh -f tsim/dnode/split_vgroup_replica1.sim
|
||||
,,,script,./test.sh -f tsim/dnode/split_vgroup_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/dnode/split_vgroup_replica3.sim
|
||||
,,y,script,./test.sh -f tsim/import/basic.sim
|
||||
,,y,script,./test.sh -f tsim/import/commit.sim
|
||||
,,y,script,./test.sh -f tsim/import/large.sim
|
||||
|
@ -117,7 +117,7 @@
|
|||
,,y,script,./test.sh -f tsim/parser/fourArithmetic-basic.sim
|
||||
,,y,script,./test.sh -f tsim/parser/function.sim
|
||||
,,y,script,./test.sh -f tsim/parser/groupby-basic.sim
|
||||
,,,script,./test.sh -f tsim/parser/groupby.sim
|
||||
,,y,script,./test.sh -f tsim/parser/groupby.sim
|
||||
,,y,script,./test.sh -f tsim/parser/having_child.sim
|
||||
,,y,script,./test.sh -f tsim/parser/having.sim
|
||||
,,y,script,./test.sh -f tsim/parser/import_commit1.sim
|
||||
|
@ -213,14 +213,14 @@
|
|||
,,n,script,./test.sh -f tsim/stream/basic0.sim -g
|
||||
,,y,script,./test.sh -f tsim/stream/basic1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/basic2.sim
|
||||
,,,script,./test.sh -f tsim/stream/drop_stream.sim
|
||||
,,y,script,./test.sh -f tsim/stream/drop_stream.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic2.sim
|
||||
,,y,script,./test.sh -f tsim/stream/fillHistoryBasic3.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeInterval0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/distributeSession0.sim
|
||||
,,,script,./test.sh -f tsim/stream/session0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/session0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/session1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/state0.sim
|
||||
,,y,script,./test.sh -f tsim/stream/triggerInterval0.sim
|
||||
|
@ -228,7 +228,7 @@
|
|||
,,y,script,./test.sh -f tsim/stream/partitionby.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionby1.sim
|
||||
,,y,script,./test.sh -f tsim/stream/schedSnode.sim
|
||||
,,,script,./test.sh -f tsim/stream/windowClose.sim
|
||||
,,y,script,./test.sh -f tsim/stream/windowClose.sim
|
||||
,,y,script,./test.sh -f tsim/stream/ignoreExpiredData.sim
|
||||
,,y,script,./test.sh -f tsim/stream/sliding.sim
|
||||
,,y,script,./test.sh -f tsim/stream/partitionbyColumnInterval.sim
|
||||
|
@ -279,7 +279,7 @@
|
|||
,,y,script,./test.sh -f tsim/stable/vnode3.sim
|
||||
,,y,script,./test.sh -f tsim/stable/metrics_idx.sim
|
||||
,,,script,./test.sh -f tsim/sma/drop_sma.sim
|
||||
,,,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
|
||||
,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
|
||||
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
|
||||
,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
|
||||
,,n,script,./test.sh -f tsim/valgrind/checkError1.sim
|
||||
|
@ -296,7 +296,7 @@
|
|||
,,y,script,./test.sh -f tsim/vnode/replica3_vgroup.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/replica3_many.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/replica3_import.sim
|
||||
#,,,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_balance_replica1.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_dnode2_stop.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_dnode2.sim
|
||||
,,y,script,./test.sh -f tsim/vnode/stable_dnode3.sim
|
||||
|
|
|
@@ -10,7 +10,8 @@ else
fi

today=`date +"%Y%m%d"`
TDENGINE_DIR=/root/pxiao/TDengine
TDENGINE_DIR=/root/TDengine
JDBC_DIR=/root/taos-connector-jdbc
TDENGINE_COVERAGE_REPORT=$TDENGINE_DIR/tests/coverage-report-$today.log

# Color setting

@@ -20,7 +21,7 @@ GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'

function buildTDengine {
function buildTDengine() {
echo "check if TDengine need build"
cd $TDENGINE_DIR
git remote prune origin > /dev/null

@@ -33,159 +34,145 @@ function buildTDengine {
# reset counter
lcov -d . --zerocounters

if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
echo "repo up-to-date"
else
echo "repo need to pull"
fi

git reset --hard
git checkout -- .
git checkout $branch
git reset --hard
git checkout -- .
git checkout $branch
git checkout -- .
git clean -dfx
git pull
git submodule update --init --recursive -f

[ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug
cd $TDENGINE_DIR/debug

echo "rebuild.."
echo "rebuild.."
LOCAL_COMMIT=`git rev-parse --short @`

rm -rf *
if [ "$branch" == "3.0" ]; then
echo "3.0 ============="
cmake -DCOVER=true -DBUILD_TEST=true ..
cmake -DCOVER=true -DBUILD_TEST=true -DBUILD_HTTP=false -DBUILD_TOOLS=true ..
else
cmake -DCOVER=true -DBUILD_TOOLS=true -DBUILD_HTTP=false .. > /dev/null
fi
make -j4
make -j
make install
}

function runGeneralCaseOneByOne {
function runCasesOneByOne () {
while read -r line; do
if [[ $line =~ ^./test.sh* ]]; then
case=`echo $line | grep sim$ | awk '{print $NF}'`

if [ -n "$case" ]; then
date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && ./test.sh -f $case > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT \
|| echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
fi
cmd=`echo $line | cut -d',' -f 5`
if [[ "$2" == "sim" ]] && [[ $cmd == *"test.sh"* ]]; then
case=`echo $cmd | cut -d' ' -f 3`
start_time=`date +%s`
date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT \
|| echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
elif [[ "$2" == "system-test" ]] && [[ $line == *"system-test"* ]]; then
case=`echo $cmd | cut -d' ' -f 4`
start_time=`date +%s`
date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
elif [[ "$2" == "develop-test" ]] && [[ $line == *"develop-test"* ]]; then
case=`echo $cmd | cut -d' ' -f 4`
start_time=`date +%s`
date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && $cmd > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_COVERAGE_REPORT
fi
done < $1
}

function runTestNGCaseOneByOne {
while read -r line; do
if [[ $line =~ ^taostest* ]]; then
case=`echo $line | cut -d' ' -f 3 | cut -d'=' -f 2`
yaml=`echo $line | cut -d' ' -f 2`

if [ -n "$case" ]; then
date +%F\ %T | tee -a $TDENGINE_COVERAGE_REPORT && taostest $yaml --case=$case --keep --disable_collection > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT \
|| echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
fi
fi
done < $1
function runUnitTest() {
echo "=== Run unit test case ==="
echo " $TDENGINE_DIR/debug"
cd $TDENGINE_DIR/debug
ctest -j12
echo "3.0 unit test done"
}

function runTest {
echo "run Test"

if [ "$branch" == "3.0" ]; then
echo "start run unit test case ................"
echo " $TDENGINE_DIR/debug "
cd $TDENGINE_DIR/debug
ctest -j12
echo "3.0 unit test done"
fi
function runSimCases() {
echo "=== Run sim cases ==="

cd $TDENGINE_DIR/tests/script
runCasesOneByOne ../parallel_test/cases.task sim

[ -d ../../sim ] && rm -rf ../../sim
[ -f $TDENGINE_COVERAGE_REPORT ] && rm $TDENGINE_COVERAGE_REPORT

runGeneralCaseOneByOne jenkins/basic.txt

sed -i "1i\SIM cases test result" $TDENGINE_COVERAGE_REPORT

totalSuccess=`grep 'success' $TDENGINE_COVERAGE_REPORT | wc -l`
totalSuccess=`grep 'sim success' $TDENGINE_COVERAGE_REPORT | wc -l`
if [ "$totalSuccess" -gt "0" ]; then
sed -i -e "2i\ ### Total $totalSuccess SIM test case(s) succeed! ###" $TDENGINE_COVERAGE_REPORT
echo "### Total $totalSuccess SIM test case(s) succeed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
fi

totalFailed=`grep 'failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
totalFailed=`grep 'sim failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
if [ "$totalFailed" -ne "0" ]; then
sed -i "3i\### Total $totalFailed SIM test case(s) failed! ###" $TDENGINE_COVERAGE_REPORT
echo "### Total $totalFailed SIM test case(s) failed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
fi
sed "3G" $TDENGINE_COVERAGE_REPORT
}

stopTaosd
echo "run TestNG cases"
rm -rf /var/lib/taos/*
rm -rf /var/log/taos/*
nohup $TDENGINE_DIR/debug/build/bin/taosd -c /etc/taos > /dev/null 2>&1 &
sleep 10
cd $TDENGINE_DIR/../TestNG/cases
runTestNGCaseOneByOne ../scripts/cases.txt
echo "TestNG cases done"
function runPythonCases() {
echo "=== Run python cases ==="

cd $TDENGINE_DIR/tests
rm -rf ../sim
/root/pxiao/test-all-coverage.sh full python $branch | tee -a $TDENGINE_COVERAGE_REPORT
cd $TDENGINE_DIR/tests/system-test
runCasesOneByOne ../parallel_test/cases.task system-test

cd $TDENGINE_DIR/tests/develop-test
runCasesOneByOne ../parallel_test/cases.task develop-test

sed -i "4i\Python cases test result" $TDENGINE_COVERAGE_REPORT
totalPySuccess=`grep 'python case(s) succeed!' $TDENGINE_COVERAGE_REPORT | awk '{print $4}'`
if [ "$totalPySuccess" -gt "0" ]; then
sed -i -e "5i\ ### Total $totalPySuccess Python test case(s) succeed! ###" $TDENGINE_COVERAGE_REPORT
fi

totalPyFailed=`grep 'python case(s) failed!' $TDENGINE_COVERAGE_REPORT | awk '{print $4}'`
if [ -z $totalPyFailed ]; then
sed -i "6i\\n" $TDENGINE_COVERAGE_REPORT
else
sed -i "6i\### Total $totalPyFailed Python test case(s) failed! ###" $TDENGINE_COVERAGE_REPORT
fi

echo "### run JDBC test cases ###" | tee -a $TDENGINE_COVERAGE_REPORT
# Test Connector
stopTaosd
nohup $TDENGINE_DIR/debug/build/bin/taosd -c /etc/taos > /dev/null 2>&1 &
sleep 10

cd $TDENGINE_DIR/src/connector/jdbc
mvn clean package > /dev/null 2>&1
mvn test > jdbc-out.log 2>&1
tail -n 20 jdbc-out.log 2>&1 | tee -a $TDENGINE_COVERAGE_REPORT

# Test C Demo
stopTaosd
$TDENGINE_DIR/debug/build/bin/taosd -c $TDENGINE_DIR/debug/test/cfg > /dev/null &
sleep 10
yes | $TDENGINE_DIR/debug/build/bin/demo 127.0.0.1 > /dev/null 2>&1 | tee -a $TDENGINE_COVERAGE_REPORT

# Test waltest
dataDir=`grep dataDir $TDENGINE_DIR/debug/test/cfg/taos.cfg|awk '{print $2}'`
walDir=`find $dataDir -name "wal"|head -n1`
echo "dataDir: $dataDir" | tee -a $TDENGINE_COVERAGE_REPORT
echo "walDir: $walDir" | tee -a $TDENGINE_COVERAGE_REPORT
if [ -n "$walDir" ]; then
yes | $TDENGINE_DIR/debug/build/bin/waltest -p $walDir > /dev/null 2>&1 | tee -a $TDENGINE_COVERAGE_REPORT
totalSuccess=`grep 'py success' $TDENGINE_COVERAGE_REPORT | wc -l`
if [ "$totalSuccess" -gt "0" ]; then
echo "### Total $totalSuccess python test case(s) succeed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
fi

# run Unit Test
echo "Run Unit Test: utilTest, queryTest and cliTest"
#$TDENGINE_DIR/debug/build/bin/utilTest > /dev/null 2>&1 && echo "utilTest pass!" || echo "utilTest failed!"
#$TDENGINE_DIR/debug/build/bin/queryTest > /dev/null 2>&1 && echo "queryTest pass!" || echo "queryTest failed!"
#$TDENGINE_DIR/debug/build/bin/cliTest > /dev/null 2>&1 && echo "cliTest pass!" || echo "cliTest failed!"
totalFailed=`grep 'py failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
if [ "$totalFailed" -ne "0" ]; then
echo "### Total $totalFailed python test case(s) failed! ###" | tee -a $TDENGINE_COVERAGE_REPORT
fi
}

function runJDBCCases() {
echo "=== Run JDBC cases ==="

cd $JDBC_DIR
git checkout -- .
git reset --hard HEAD
git checkout main
git pull

stopTaosd
stopTaosadapter

taosd -c /etc/taos >> /dev/null 2>&1 &
taosadapter >> /dev/null 2>&1 &

mvn clean test > result.txt 2>&1
summary=`grep "Tests run:" result.txt | tail -n 1`
echo -e "### JDBC test result: $summary ###" | tee -a $TDENGINE_COVERAGE_REPORT
}

function runTest() {
echo "run Test"

cd $TDENGINE_DIR
[ -d sim ] && rm -rf sim
[ -f $TDENGINE_COVERAGE_REPORT ] && rm $TDENGINE_COVERAGE_REPORT

runUnitTest
runSimCases
runPythonCases
runJDBCCases

stopTaosd
cd $TDENGINE_DIR/tests/script
find . -name '*.sql' | xargs rm -f

@@ -203,15 +190,18 @@ function lcovFunc {
# remove exclude paths
if [ "$branch" == "3.0" ]; then
lcov --remove coverage.info \
'*/contrib/*' '*/tests/*' '*/test/*'\
'*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c'\
'*/contrib/*' '*/tests/*' '*/test/*' '*/tools/*' '*/libs/sync/*'\
'*/AccessBridgeCalls.c' '*/ttszip.c' '*/dataInserter.c' '*/tlinearhash.c' '*/tsimplehash.c' '*/tsdbDiskData.c'\
'*/texpr.c' '*/runUdf.c' '*/schDbg.c' '*/syncIO.c' '*/tdbOs.c' '*/pushServer.c' '*/osLz4.c'\
'*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/tidpool.c' '*/tmempool.c'\
'*/tthread.c' '*/tversion.c'\
'*/tbase64.c' '*/tbuffer.c' '*/tdes.c' '*/texception.c' '*/tidpool.c' '*/tmempool.c'\
'*/clientJniConnector.c' '*/clientTmqConnector.c' '*/version.c' '*/shellAuto.c' '*/shellTire.c'\
'*/tthread.c' '*/tversion.c' '*/ctgDbg.c' '*/schDbg.c' '*/qwDbg.c' '*/tencode.h' '*/catalog.c'\
'*/tqSnapshot.c' '*/tsdbSnapshot.c''*/metaSnapshot.c' '*/smaSnapshot.c' '*/tqOffsetSnapshot.c'\
'*/vnodeSnapshot.c' '*/metaSnapshot.c' '*/tsdbSnapshot.c' '*/mndGrant.c' '*/mndSnode.c' '*/streamRecover.c'\
--rc lcov_branch_coverage=1 -o coverage.info
else
lcov --remove coverage.info \
'*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' '*/ttype.h' '*/tarithoperator.c' '*/TSDBJNIConnector.c' '*/taosdemo.c'\
'*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' '*/ttype.h' '*/tarithoperator.c' '*/TSDBJNIConnector.c' '*/taosdemo.c' '*/clientJniConnector.c'\
--rc lcov_branch_coverage=1 -o coverage.info
fi

@@ -257,34 +247,32 @@ function stopTaosd {
echo "Stop tasod end"
}

function runTestRandomFail {
exec_random_fail_sh=$1
default_exec_sh=$TDENGINE_DIR/tests/script/sh/exec.sh
[ -f $exec_random_fail_sh ] && cp $exec_random_fail_sh $default_exec_sh || exit 1
function stopTaosadapter {
echo "Stop taosadapter"
systemctl stop taosadapter.service
PID=`ps -ef|grep -w taosadapter | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosadapter
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
echo "Stop tasoadapter end"

dnodes_random_fail_py=$TDENGINE_DIR/tests/pytest/util/dnodes-no-random-fail.py
default_dnodes_py=$TDENGINE_DIR/tests/pytest/util/dnodes.py
[ -f $dnodes_random_fail_py ] && cp $dnodes_random_fail_py $default_dnodes_py || exit 1

runTest NoRandomFail
}

WORK_DIR=/root/pxiao
WORK_DIR=/root/

date >> $WORK_DIR/cron.log
echo "Run Coverage Test" | tee -a $WORK_DIR/cron.log

stopTaosd

buildTDengine

#runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-random-fail.sh
#runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-default.sh
#runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-no-random-fail.sh

runTest

lcovFunc
#sendReport

sendReport
stopTaosd

date >> $WORK_DIR/cron.log

@@ -192,7 +192,7 @@ if $data(5)[4] != ready then
goto step5
endi

print =============== step5: drop dnode 2
print =============== step5a: drop dnode 2
sql_error drop dnode 2
sql drop dnode 2 force

@@ -204,15 +204,23 @@ if $rows != 4 then
return -1
endi

$x = 0
step5a:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not online!
return -1
endi
print select * from information_schema.ins_mnodes;
sql select * from information_schema.ins_mnodes
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
print $data[1][0] $data[1][1] $data[1][2] $data[1][3] $data[1][4]
if $rows != 2 then
return -1
goto step5a
endi
if $data(1)[2] != leader then
return -1
goto step5a
endi

sql select * from information_schema.ins_qnodes

@@ -96,7 +96,7 @@ sql_error drop mnode on dnode 4
sql_error drop mnode on dnode 5
sql_error drop mnode on dnode 6

system sh/exec.sh -n dnode2 -s stop -x SIGKILL
system sh/exec.sh -n dnode2 -s stop
$x = 0
step5:
$x = $x + 1

@@ -151,7 +151,7 @@ if $data(4)[4] != ready then
endi

print =============== step6: stop mnode1
system sh/exec.sh -n dnode1 -s stop -x SIGKILL
system sh/exec.sh -n dnode1 -s stop
# sql_error drop mnode on dnode 1

$x = 0

@@ -205,8 +205,7 @@ if $data(4)[4] != ready then
endi

print =============== step8: stop mnode1 and drop it
system sh/exec.sh -n dnode1 -s stop -x SIGKILL
sql_error drop mnode on dnode 1
system sh/exec.sh -n dnode1 -s stop

$x = 0
step81:

@@ -234,42 +233,15 @@ if $leaderNum != 1 then
endi

print =============== step9: start mnode1 and wait it dropped
system sh/exec.sh -n dnode1 -s start
sql drop mnode on dnode 1 -x step90
step90:

print check mnode has leader step9a
$x = 0
step91:
step9a:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
goto step91
endi
if $data(2)[4] != ready then
goto step91
endi
if $data(3)[4] != ready then
goto step91
endi
if $data(4)[4] != ready then
goto step91
endi

$x = 0
step92:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
print check mnode leader
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15

@@ -285,10 +257,95 @@ if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
goto step92
goto step9a
endi

print start dnode1 step9b
system sh/exec.sh -n dnode1 -s start
$x = 0
step9b:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
print check dnode1 ready
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
print ===> $data30 $data31 $data32 $data33 $data34 $data35
if $data(1)[4] != ready then
goto step9b
endi
if $data(2)[4] != ready then
goto step9b
endi
if $data(3)[4] != ready then
goto step9b
endi
if $data(4)[4] != ready then
goto step9b
endi

sleep 4000
print check mnode has leader step9c
$x = 0
step9c:
$x = $x + 1
sleep 1000
if $x == 10 then
return -1
endi
print check mnode leader
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
$leaderNum = 1
endi
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
goto step9c
endi

print drop mnode step9d
sql drop mnode on dnode 1

$x = 0
step9d:
$x = $x + 1
sleep 1000
if $x == 20 then
return -1
endi
print check mnode leader
sql select * from information_schema.ins_mnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
$leaderNum = 0
if $data(1)[2] == leader then
$leaderNum = 1
endi
if $data(2)[2] == leader then
$leaderNum = 1
endi
if $data(3)[2] == leader then
$leaderNum = 1
endi
if $leaderNum != 1 then
goto step9d
endi
if $rows != 2 then
goto step92
goto step9d
endi

print =============== stepa: create mnode1 again

@@ -330,7 +330,7 @@ if $data11 != -1 then
endi

# fill_char_values_to_arithmetic_fields
sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')

# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)

@@ -355,25 +355,25 @@ endi

# fill_into_nonarithmetic_fieds
print select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)

sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')


# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');

print select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');

sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then

@@ -383,7 +383,7 @@ if $data01 != 1 then
return -1
endi

sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');

## linear fill
# feature currently switched off 2018/09/29

@@ -1049,4 +1049,103 @@ print =============== clear
# return -1
#endi


print ============== fill

sql drop database if exists test;
sql create database test vgroups 4;
sql use test;
sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
sql insert into t1 values(1648712211000,1,2,3);
sql insert into t1 values(1648712225000,2,2,3);
sql insert into t2 values(1648712212000,1,2,3);
sql insert into t2 values(1648712226000,2,2,3);

$loop_count = 0

loop0:
sleep 200

sql select count(*) from(select count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(value, -1));

$loop_count = $loop_count + 1
if $loop_count == 20 then
return -1
endi

if $data00 != 20026 then
print =====data00=$data00
goto loop0
endi

if $data10 != 20026 then
print =====data10=$data10
goto loop0
endi

sql select _wstart, count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(prev);

if $rows != 40052 then
print =====rows=$rows
return -1
endi

sql select _wstart, count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(next);

if $rows != 40052 then
print =====rows=$rows
return -1
endi

sql select _wstart, count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(linear);

if $rows != 40052 then
print =====rows=$rows
return -1
endi

sql select _wstart, count(a) from st where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(NULL);

if $rows != 40052 then
print =====rows=$rows
return -1
endi

sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(value, -1);

if $rows != 20026 then
print =====rows=$rows
return -1
endi

sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(NULL);

if $rows != 20026 then
print =====rows=$rows
return -1
endi

sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(prev);

if $rows != 20026 then
print =====rows=$rows
return -1
endi

sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(next);

if $rows != 20026 then
print =====rows=$rows
return -1
endi

sql select _wstart, count(a) from t1 where ts >= 1648712201000 and ts <= 1648732226000 partition by tbname interval(1s) fill(linear);

if $rows != 20026 then
print =====rows=$rows
return -1
endi

system sh/exec.sh -n dnode1 -s stop -x SIGINT

@@ -170,7 +170,7 @@ endi
sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)

# fill_char_values_to_arithmetic_fields
sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')

# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)

@@ -240,10 +240,10 @@ sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <=
sql select first(c7), first(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')

# fill nonarithmetic values into arithmetic fields
sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'abc');
sql_error select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'abc');
sql select count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');

sql_error select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');
sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '2e1');

sql select _wstart, count(*) from $stb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20);
if $rows != $val then

@@ -354,7 +354,7 @@ endi

## NULL fill
print fill(NULL)
print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL) limit 5
print select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(value, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) limit 5
sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), count(c5), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu and t1 > 4 partition by t1 interval(5m) fill(NULL) limit 5
if $rows != 25 then
return -1

@@ -332,7 +332,7 @@ if $data11 != -1 then
endi

# fill_char_values_to_arithmetic_fields
sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')

# fill_multiple_columns
sql_error select _wstart, sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)

@@ -358,24 +358,24 @@ endi


# fill_into_nonarithmetic_fieds
sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)

sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to null automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')


# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');

sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');

sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then

@@ -385,7 +385,7 @@ if $data01 != 1 then
return -1
endi

sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');

## linear fill
# feature currently switched off 2018/09/29

@@ -821,10 +821,10 @@ sql insert into tm0 values('2015-08-18T00:18:00Z', 2.126) ('2015-08-18T00:24:00Z

sql_error select derivative(ts) from tm0;
sql_error select derivative(k) from tm0;
sql select derivative(k, 0, 0) from tm0;
sql_error select derivative(k, 0, 0) from tm0;
sql_error select derivative(k, 1, 911) from tm0;
sql_error select derivative(kx, 1s, 1) from tm0;
sql select derivative(k, -20s, 1) from tm0;
sql_error select derivative(k, -20s, 1) from tm0;
sql select derivative(k, 20a, 0) from tm0;
sql select derivative(k, 200a, 0) from tm0;
sql select derivative(k, 999a, 0) from tm0;

@@ -932,10 +932,10 @@ sql insert into t0 values('2020-1-1 1:4:10', 10);

sql insert into t1 values('2020-1-1 1:1:2', 2);
print ===========================>td-4739
#sql select diff(val) from (select derivative(k, 1s, 0) val from t1);
#if $rows != 0 then
# return -1
#endi
sql select diff(val) from (select ts, derivative(k, 1s, 0) val from t1);
if $rows != 0 then
return -1
endi

sql insert into t1 values('2020-1-1 1:1:4', 20);
sql insert into t1 values('2020-1-1 1:1:6', 200);

@@ -710,6 +710,11 @@ sleep 200

sql select * from streamt4;

$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi

# row 0
if $rows != 0 then
print =====rows=$rows

@@ -0,0 +1,55 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c debugflag -v 131
system sh/exec.sh -n dnode1 -s start -v

sleep 5000

sql connect

print ========== interval\session\state window

sql CREATE DATABASE test1 BUFFER 96 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 14400m WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 KEEP 5256000m,5256000m,5256000m PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 STRICT 'off' WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0;
sql use test1;
sql CREATE STABLE st (time TIMESTAMP, ca DOUBLE, cb DOUBLE, cc int) TAGS (ta VARCHAR(10) );

print ========== create table before stream

sql CREATE TABLE t1 using st TAGS ('aaa');
sql CREATE TABLE t2 using st TAGS ('bbb');
sql CREATE TABLE t3 using st TAGS ('ccc');
sql CREATE TABLE t4 using st TAGS ('ddd');

sql create stream streamd1 into streamt1 as select ca, _wstart,_wend, count(*) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by ca interval(60m) fill(linear);
sql create stream streamd2 into streamt2 as select tbname, _wstart,_wend, count(*) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by tbname interval(60m) fill(linear);

sql create stream streamd3 into streamt3 as select ca, _wstart,_wend, count(*), max(ca), min(cb), APERCENTILE(cc, 20) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by ca session(time, 60m);
sql create stream streamd4 into streamt4 as select tbname, _wstart,_wend, count(*), max(ca), min(cb), APERCENTILE(cc, 20) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by tbname session(time, 60m);

sql create stream streamd5 into streamt5 as select tbname, _wstart,_wend, count(*), max(ca), min(cb) from st where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by tbname state_window(cc);
sql create stream streamd6 into streamt6 as select ca, _wstart,_wend, count(*), max(ca), min(cb) from t1 where time > "2022-01-01 00:00:00" and time < "2032-01-01 00:00:00" partition by ca state_window(cc);

sleep 3000

sql drop stream if exists streamd1;
sql drop stream if exists streamd2;
sql drop stream if exists streamd3;
sql drop stream if exists streamd4;
sql drop stream if exists streamd5;
sql drop stream if exists streamd6;


_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
print =============== check
$null=

system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content > 0 then
return -1
endi

if $system_content == $null then
return -1
endi

@@ -4,7 +4,7 @@ looptest:
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
#==system sh/exec.sh -n dnode1 -s start -v

sleep 200
sql connect

@@ -201,31 +201,39 @@ system sh/exec.sh -n dnode2 -s start
sleep 3000

print ======== step6
sql select count(*) from db1.tb1
$y = 0
step6:
$y = $y + 1
sleep 1000
if $y == 50 then
return -1
endi

sql select count(*) from db1.tb1 -x step6
print select count(*) from db1.tb1 ==> $data00 $lastRows1
if $data00 <= $lastRows1 then
return -1
goto step6
endi
$lastRows1 = $data00

sql select count(*) from db2.tb2
sql select count(*) from db2.tb2 -x step6
print select count(*) from db2.tb2 ==> $data00 $lastRows2
if $data00 <= $lastRows2 then
return -1
goto step6
endi
$lastRows2 = $data00

sql select count(*) from db3.tb3
sql select count(*) from db3.tb3 -x step6
print select count(*) from db3.tb3 ==> $data00 $lastRows3
if $data00 <= $lastRows3 then
return -1
goto step6
endi
$lastRows3 = $data00

sql select count(*) from db4.tb4
sql select count(*) from db4.tb4 -x step6
print select count(*) from db4.tb4 ==> $data00 $lastRows4
if $data00 <= $lastRows4 then
return -1
goto step6
endi
$lastRows4 = $data00

@@ -135,12 +135,21 @@ class TDTestCase:
if rows != len(topicNameList):
    tdLog.exit("show consumers rows error")

tdLog.info("check show subscriptions")
tdSql.query("show subscriptions")
tdLog.debug(tdSql.queryResult)
rows = tdSql.getRows()
expectSubscriptions = paraDict['vgroups'] * len(topicNameList)
tdLog.info("show subscriptions rows: %d, expect Subscriptions: %d"%(rows,expectSubscriptions))
for i in range(0, 10, 1):
    tdLog.info("check show subscriptions")
    tdSql.query("show subscriptions")
    tdLog.debug(tdSql.queryResult)
    rows = tdSql.getRows()
    expectSubscriptions = paraDict['vgroups'] * len(topicNameList)
    tdLog.info("show subscriptions rows: %d, expect Subscriptions: %d"%(rows,expectSubscriptions))
    if rows != expectSubscriptions:
        # tdLog.exit("show subscriptions rows error")
        tdLog.info("continue retry[%d] to show subscriptions"%(i))
        time.sleep(1)
        continue
    else:
        break

if rows != expectSubscriptions:
    tdLog.exit("show subscriptions rows error")
