Merge branch '3.0' into feat/TS-5215-2

Minglei Jin 2024-10-21 10:48:37 +08:00
commit 4401b5e568
67 changed files with 1417 additions and 648 deletions


@ -169,11 +169,48 @@ ELSE ()
SET(COMPILER_SUPPORT_AVX512VL false)
ELSE()
CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX)
CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
INCLUDE(CheckCSourceRuns)
SET(CMAKE_REQUIRED_FLAGS "-mavx")
check_c_source_runs("
#include <immintrin.h>
int main() {
__m256d a, b, c;
double buf[4] = {0};
a = _mm256_loadu_pd(buf);
b = _mm256_loadu_pd(buf);
c = _mm256_add_pd(a, b);
_mm256_storeu_pd(buf, c);
for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
if (buf[i] != 0) {
return 1;
}
}
return 0;
}
" COMPILER_SUPPORT_AVX)
SET(CMAKE_REQUIRED_FLAGS "-mavx2")
check_c_source_runs("
#include <immintrin.h>
int main() {
__m256i a, b, c;
int buf[8] = {0};
a = _mm256_loadu_si256((__m256i *)buf);
b = _mm256_loadu_si256((__m256i *)buf);
c = _mm256_and_si256(a, b);
_mm256_storeu_si256((__m256i *)buf, c);
for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
if (buf[i] != 0) {
return 1;
}
}
return 0;
}
" COMPILER_SUPPORT_AVX2)
ENDIF()
IF (COMPILER_SUPPORT_SSE42)


@ -153,7 +153,7 @@ SELECT * from information_schema.`ins_streams`;
Since window closing is determined by event time, the event time cannot advance if the event stream is interrupted or persistently delayed, which may make it impossible to obtain the latest computation results.
For this reason, stream processing provides the MAX_DELAY trigger mode, which combines event time with processing time.
For this reason, stream processing provides the MAX_DELAY trigger mode, which combines event time with processing time. The minimum MAX_DELAY is 5s; a value below 5s raises an error when the stream is created.
In MAX_DELAY mode, a computation is triggered as soon as a window closes. In addition, after data is written, a computation is also triggered as soon as the time since the last trigger exceeds the value specified by max delay.
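For illustration, a stream using this trigger mode could be declared as follows (hypothetical stream and table names, on the usual meters demo schema; values below 5s are rejected):

CREATE STREAM avg_current_stream
TRIGGER MAX_DELAY 5s
INTO avg_current AS
SELECT _wstart, AVG(current) FROM meters
PARTITION BY tbname INTERVAL(1m);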


@ -10,7 +10,7 @@ description: 3.3.3.0 Release Notes
4. TDengine now supports the macOS client, Enterprise Edition [Enterprise]
5. taosX logs are no longer written to syslog by default [Enterprise]
6. The server records all slow-query information in the log database
7. Add the server version number to the show cluster machines query results
7. Add the server version number to the show cluster machines query results [Enterprise]
8. Removed the reserved keywords LEVEL/ENCODE/COMPRESS; they can now be used as column, table, or database names, etc.
9. Dynamic modification of the temporary directory is no longer allowed
10. round function: supports specifying the rounding precision


@ -236,7 +236,7 @@ typedef struct {
void* vnode; // not available to encoder and decoder
FTbSink* tbSinkFunc;
STSchema* pTSchema;
SSHashObj* pTblInfo;
SSHashObj* pTbInfo;
} STaskSinkTb;
typedef struct {
@ -754,7 +754,7 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
void streamMetaAcquireOneTask(SStreamTask* pTask);
int32_t streamMetaAcquireOneTask(SStreamTask* pTask);
void streamMetaClear(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);


@ -50,6 +50,7 @@ typedef struct {
int32_t rollPeriod; // secs
int64_t retentionSize;
int64_t segSize;
int64_t committed;
EWalType level; // wal level
int32_t encryptAlgorithm;
char encryptKey[ENCRYPT_KEY_LEN + 1];


@ -84,7 +84,7 @@ void taos_cleanup(void) {
taosCloseRef(id);
nodesDestroyAllocatorSet();
cleanupAppInfo();
// cleanupAppInfo();
rpcCleanup();
tscDebug("rpc cleanup");


@ -2625,6 +2625,8 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf
uError("func %s failed to convert to ucs charset since %s", __func__, tstrerror(code));
lino = __LINE__;
goto _exit;
} else { // reset the length value
code = TSDB_CODE_SUCCESS;
}
len += tsnprintf(dumpBuf + len, size - len, " %15s |", pBuf);
if (len >= size - 1) goto _exit;


@ -183,6 +183,7 @@ static void dmSetSignalHandle() {
}
#endif
}
extern bool generateNewMeta;
static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
global.startTime = taosGetTimestampMs();
@ -222,6 +223,8 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
global.dumpSdb = true;
} else if (strcmp(argv[i], "-dTxn") == 0) {
global.deleteTrans = true;
} else if (strcmp(argv[i], "-r") == 0) {
generateNewMeta = true;
} else if (strcmp(argv[i], "-E") == 0) {
if (i < argc - 1) {
if (strlen(argv[++i]) >= PATH_MAX) {


@ -123,6 +123,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
int32_t code = 0;
SStatusReq req = {0};
dDebug("send status req to mnode, statusSeq:%d, begin to mgnt lock", pMgmt->statusSeq);
(void)taosThreadRwlockRdlock(&pMgmt->pData->lock);
req.sver = tsVersion;
req.dnodeVer = pMgmt->pData->dnodeVer;
@ -161,14 +162,17 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
memcpy(req.clusterCfg.charset, tsCharset, TD_LOCALE_LEN);
(void)taosThreadRwlockUnlock(&pMgmt->pData->lock);
dDebug("send status req to mnode, statusSeq:%d, begin to get vnode loads", pMgmt->statusSeq);
SMonVloadInfo vinfo = {0};
(*pMgmt->getVnodeLoadsFp)(&vinfo);
req.pVloads = vinfo.pVloads;
dDebug("send status req to mnode, statusSeq:%d, begin to get mnode loads", pMgmt->statusSeq);
SMonMloadInfo minfo = {0};
(*pMgmt->getMnodeLoadsFp)(&minfo);
req.mload = minfo.load;
dDebug("send status req to mnode, statusSeq:%d, begin to get qnode loads", pMgmt->statusSeq);
(*pMgmt->getQnodeLoadsFp)(&req.qload);
pMgmt->statusSeq++;
@ -206,6 +210,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
int8_t epUpdated = 0;
(void)dmGetMnodeEpSet(pMgmt->pData, &epSet);
dDebug("send status req to mnode, statusSeq:%d, begin to send rpc msg", pMgmt->statusSeq);
code =
rpcSendRecvWithTimeout(pMgmt->msgCb.statusRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, tsStatusInterval * 5 * 1000);
if (code != 0) {


@ -515,6 +515,7 @@ static int32_t mndInitWal(SMnode *pMnode) {
.fsyncPeriod = 0,
.rollPeriod = -1,
.segSize = -1,
.committed = -1,
.retentionPeriod = 0,
.retentionSize = 0,
.level = TAOS_WAL_FSYNC,


@ -1783,7 +1783,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SStreamObj *pStream = NULL;
int32_t code = 0;
int32_t code = 0;
if ((code = grantCheckExpire(TSDB_GRANT_STREAMS)) < 0) {
return code;
@ -1811,6 +1811,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
return 0;
}
mInfo("stream:%s,%" PRId64 " start to resume stream from pause", resumeReq.name, pStream->uid);
if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) {
sdbRelease(pMnode->pSdb, pStream);
return -1;


@ -61,7 +61,6 @@ static int32_t doSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa
static int32_t doSetDropAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) {
SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq));
if (pReq == NULL) {
// terrno = TSDB_CODE_OUT_OF_MEMORY;
return terrno;
}
@ -93,7 +92,6 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT
if (pReq == NULL) {
mError("failed to malloc in resume stream, size:%" PRIzu ", code:%s", sizeof(SVResumeStreamTaskReq),
tstrerror(TSDB_CODE_OUT_OF_MEMORY));
// terrno = TSDB_CODE_OUT_OF_MEMORY;
return terrno;
}
@ -106,19 +104,18 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT
bool hasEpset = false;
int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
if (code != TSDB_CODE_SUCCESS || (!hasEpset)) {
terrno = code;
taosMemoryFree(pReq);
return terrno;
return code;
}
code = setTransAction(pTrans, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0, TSDB_CODE_VND_INVALID_VGROUP_ID);
if (code != 0) {
taosMemoryFree(pReq);
return terrno;
return code;
}
mDebug("set the resume action for trans:%d", pTrans->id);
return 0;
return code;
}
static int32_t doSetDropActionFromId(SMnode *pMnode, STrans *pTrans, SOrphanTask* pTask) {


@ -81,6 +81,9 @@ typedef struct SCommitInfo SCommitInfo;
typedef struct SCompactInfo SCompactInfo;
typedef struct SQueryNode SQueryNode;
#define VNODE_META_TMP_DIR "meta.tmp"
#define VNODE_META_BACKUP_DIR "meta.backup"
#define VNODE_META_DIR "meta"
#define VNODE_TSDB_DIR "tsdb"
#define VNODE_TQ_DIR "tq"


@ -133,7 +133,7 @@ static void doScan(SMeta *pMeta) {
}
}
int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
static int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) {
SMeta *pMeta = NULL;
int32_t code = 0;
int32_t lino;
@ -144,7 +144,11 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
// create handle
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN);
offset = strlen(path);
snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, VNODE_META_DIR);
snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir);
if (strncmp(metaDir, VNODE_META_TMP_DIR, strlen(VNODE_META_TMP_DIR)) == 0) {
taosRemoveDir(path);
}
if ((pMeta = taosMemoryCalloc(1, sizeof(*pMeta) + strlen(path) + 1)) == NULL) {
TSDB_CHECK_CODE(code = terrno, lino, _exit);
@ -245,6 +249,188 @@ _exit:
return code;
}
bool generateNewMeta = false;
static int32_t metaGenerateNewMeta(SMeta **ppMeta) {
SMeta *pNewMeta = NULL;
SMeta *pMeta = *ppMeta;
SVnode *pVnode = pMeta->pVnode;
metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode));
// Open a new meta for organization
int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false);
if (code) {
return code;
}
code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL);
if (code) {
return code;
}
// i == 0, scan super table
// i == 1, scan normal table and child table
for (int i = 0; i < 2; i++) {
TBC *uidCursor = NULL;
int32_t counter = 0;
code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL);
if (code) {
metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
code = tdbTbcMoveToFirst(uidCursor);
if (code) {
metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code));
tdbTbcClose(uidCursor);
return code;
}
for (;;) {
const void *pKey;
int kLen;
const void *pVal;
int vLen;
if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) {
break;
}
tb_uid_t uid = *(tb_uid_t *)pKey;
SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal;
if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid)) // super table
|| (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid)) // normal table and child table
) {
counter++;
if (i == 0) {
metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid);
} else {
metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter,
pUidIdxVal->suid == 0 ? "normal" : "child", uid);
}
// fetch table entry
void *value = NULL;
int valueSize = 0;
if (tdbTbGet(pMeta->pTbDb,
&(STbDbKey){
.version = pUidIdxVal->version,
.uid = uid,
},
sizeof(uid), &value, &valueSize) == 0) {
SDecoder dc = {0};
SMetaEntry me = {0};
tDecoderInit(&dc, value, valueSize);
if (metaDecodeEntry(&dc, &me) == 0) {
if (metaHandleEntry(pNewMeta, &me) != 0) {
metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid);
}
}
tDecoderClear(&dc);
}
tdbFree(value);
}
code = tdbTbcMoveToNext(uidCursor);
if (code) {
metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
}
tdbTbcClose(uidCursor);
}
code = metaCommit(pNewMeta, pNewMeta->txn);
if (code) {
metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
code = metaFinishCommit(pNewMeta, pNewMeta->txn);
if (code) {
metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) {
metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
}
metaClose(&pNewMeta);
metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode));
return 0;
}
int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
if (generateNewMeta) {
char path[TSDB_FILENAME_LEN] = {0};
char oldMetaPath[TSDB_FILENAME_LEN] = {0};
char newMetaPath[TSDB_FILENAME_LEN] = {0};
char backupMetaPath[TSDB_FILENAME_LEN] = {0};
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN);
snprintf(oldMetaPath, sizeof(oldMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_DIR);
snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR);
snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR);
bool oldMetaExist = taosCheckExistFile(oldMetaPath);
bool newMetaExist = taosCheckExistFile(newMetaPath);
bool backupMetaExist = taosCheckExistFile(backupMetaPath);
if ((!backupMetaExist && !oldMetaExist && newMetaExist) // case 2
|| (backupMetaExist && !oldMetaExist && !newMetaExist) // case 4
|| (backupMetaExist && oldMetaExist && newMetaExist) // case 8
) {
metaError("vgId:%d invalid meta state, please check", TD_VID(pVnode));
return TSDB_CODE_FAILED;
} else if ((backupMetaExist && oldMetaExist && !newMetaExist) // case 7
|| (!backupMetaExist && !oldMetaExist && !newMetaExist) // case 1
) {
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
} else if (backupMetaExist && !oldMetaExist && newMetaExist) {
if (taosRenameFile(newMetaPath, oldMetaPath) != 0) {
metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
return terrno;
}
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
} else {
int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
if (code) {
return code;
}
code = metaGenerateNewMeta(ppMeta);
if (code) {
metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
}
metaClose(ppMeta);
if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) {
metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno));
return terrno;
}
// rename the new meta to old meta
if (taosRenameFile(newMetaPath, oldMetaPath) != 0) {
metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
return terrno;
}
code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false);
if (code) {
metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
}
} else {
return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
}
return TSDB_CODE_SUCCESS;
}
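The branch ladder above encodes a small recovery matrix over three directories; the following is a minimal standalone sketch of that classification, not part of the committed code (assumptions: the meta/meta.tmp/meta.backup names defined in vnode headers, and generateNewMeta being set by the new -r option):

#include <stdbool.h>
#include <stdio.h>

typedef enum { META_OPEN_NORMAL, META_ADOPT_TMP, META_REGENERATE, META_INVALID } EMetaStartup;

// Classify the (meta.backup, meta, meta.tmp) existence combinations checked in metaOpen().
static EMetaStartup metaStartupState(bool backupExists, bool oldExists, bool newExists) {
  if ((!backupExists && !oldExists && newExists)       // case 2
      || (backupExists && !oldExists && !newExists)    // case 4
      || (backupExists && oldExists && newExists)) {   // case 8
    return META_INVALID;       // inconsistent on-disk state, refuse to start
  }
  if ((backupExists && oldExists && !newExists)        // case 7: regeneration already done
      || (!backupExists && !oldExists && !newExists)) {  // case 1: fresh vnode
    return META_OPEN_NORMAL;
  }
  if (backupExists && !oldExists && newExists) {
    return META_ADOPT_TMP;     // crashed between the two renames: meta.tmp -> meta
  }
  return META_REGENERATE;      // meta present: rebuild into meta.tmp, then swap
}

int main(void) {
  // crash between the two renames: only meta.backup and meta.tmp remain
  printf("%d\n", metaStartupState(true, false, true));  // META_ADOPT_TMP (1)
  return 0;
}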
int32_t metaUpgrade(SVnode *pVnode, SMeta **ppMeta) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino;


@ -2985,9 +2985,6 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
}
}
end:
if (terrno != 0) {
ret = terrno;
}
tDecoderClear(&dc);
tdbFree(pData);
return ret;


@ -746,13 +746,13 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
return terrno;
}
pOutputInfo->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
if (pOutputInfo->tbSink.pTblInfo == NULL) {
pOutputInfo->tbSink.pTbInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
if (pOutputInfo->tbSink.pTbInfo == NULL) {
tqError("vgId:%d failed init sink tableInfo, code:%s", vgId, tstrerror(terrno));
return terrno;
}
tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTblInfo, freePtr);
tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTbInfo, freePtr);
}
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {


@ -18,6 +18,8 @@
#include "tmsg.h"
#include "tq.h"
#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1))
typedef struct STableSinkInfo {
uint64_t uid;
tstr name;
@ -35,16 +37,22 @@ static int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema
int64_t earlyTs, const char* id);
static int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo,
const char* dstTableName, int64_t* uid);
static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId,
const char* id);
static int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id);
static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid);
static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName,
int32_t numOfTags);
static int32_t createDefaultTagColName(SArray** pColNameList);
static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
int64_t gid, bool newSubTableRule);
static int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo);
static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock,
const char* stbFullName, int64_t gid, bool newSubTableRule);
static int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo);
static int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId,
const char* id);
static bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo);
static int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id);
static int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode);
static void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs);
static int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode,
int64_t earlyTs);
int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
const char* pIdStr, bool newSubTableRule) {
@ -81,7 +89,8 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p
memcpy(name, varDataVal(varTbName), varDataLen(varTbName));
name[varDataLen(varTbName)] = '\0';
if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 && stbFullName) {
if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 &&
stbFullName) {
int32_t code = buildCtbNameAddGroupId(stbFullName, name, groupId, cap);
if (code != TSDB_CODE_SUCCESS) {
return code;
@ -161,16 +170,6 @@ end:
return ret;
}
static bool tqGetTableInfo(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) {
void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
if (pVal) {
*pInfo = *(STableSinkInfo**)pVal;
return true;
}
return false;
}
static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
void* buf = NULL;
int32_t tlen = 0;
@ -201,7 +200,7 @@ int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const
int32_t code = tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
if (code == 0) {
pCreateTableReq->ctb.stbName = taosStrdup((char*)tNameGetTableName(&name));
if (pCreateTableReq->ctb.stbName == NULL) { // ignore this error code
if (pCreateTableReq->ctb.stbName == NULL) { // ignore this error code
tqError("failed to duplicate the stb name:%s, failed to init create-table msg and create req table", stbFullName);
code = terrno;
}
@ -231,7 +230,7 @@ int32_t createDefaultTagColName(SArray** pColNameList) {
}
int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
int64_t gid, bool newSubTableRule) {
int64_t gid, bool newSubTableRule) {
if (pDataBlock->info.parTbName[0]) {
if (newSubTableRule && !isAutoTableName(pDataBlock->info.parTbName) &&
!alreadyAddGroupId(pDataBlock->info.parTbName, gid) && gid != 0 && stbFullName) {
@ -245,18 +244,17 @@ int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock*
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// tqDebug("gen name from:%s", pDataBlock->info.parTbName);
// tqDebug("gen name from:%s", pDataBlock->info.parTbName);
} else {
pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName);
if (pCreateTableReq->name == NULL) {
return terrno;
}
// tqDebug("copy name:%s", pDataBlock->info.parTbName);
// tqDebug("copy name:%s", pDataBlock->info.parTbName);
}
} else {
int32_t code = buildCtbNameByGroupId(stbFullName, gid, &pCreateTableReq->name);
return code;
// tqDebug("gen name from stbFullName:%s gid:%"PRId64, stbFullName, gid);
}
return 0;
@ -264,16 +262,20 @@ int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock*
static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock,
SStreamTask* pTask, int64_t suid) {
STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema;
int32_t rows = pDataBlock->info.rows;
SArray* tagArray = taosArrayInit(4, sizeof(STagVal));
const char* id = pTask->id.idStr;
int32_t vgId = pTask->pMeta->vgId;
int32_t code = 0;
STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema;
int32_t rows = pDataBlock->info.rows;
SArray* tagArray = NULL;
const char* id = pTask->id.idStr;
int32_t vgId = pTask->pMeta->vgId;
int32_t code = 0;
STableSinkInfo* pInfo = NULL;
SVCreateTbBatchReq reqs = {0};
SArray* crTblArray = NULL;
tqDebug("s-task:%s build create %d table(s) msg", id, rows);
SVCreateTbBatchReq reqs = {0};
SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
tagArray = taosArrayInit(4, sizeof(STagVal));
crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
if ((NULL == reqs.pArray) || (tagArray == NULL)) {
tqError("s-task:%s failed to init create table msg, code:%s", id, tstrerror(terrno));
code = terrno;
@ -291,6 +293,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
tqError("s-task:%s vgId:%d failed to init create table msg", id, vgId);
continue;
}
taosArrayClear(tagArray);
if (size == 2) {
@ -356,8 +359,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
}
}
code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid,
pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);
code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, IS_NEW_SUBTB_RULE(pTask));
if (code) {
goto _end;
}
@ -368,16 +370,15 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
goto _end;
}
STableSinkInfo* pInfo = NULL;
bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, gid, &pInfo);
bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, gid, &pInfo);
if (!alreadyCached) {
code = doCreateSinkInfo(pCreateTbReq->name, &pInfo);
code = doCreateSinkTableInfo(pCreateTbReq->name, &pInfo);
if (code) {
tqError("vgId:%d failed to create sink tableInfo for table:%s, s-task:%s", vgId, pCreateTbReq->name, id);
continue;
}
code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pInfo, gid, id);
code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pInfo, gid, id);
if (code) {
tqError("vgId:%d failed to put sink tableInfo:%s into cache, s-task:%s", vgId, pCreateTbReq->name, id);
}
@ -465,45 +466,45 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c
k += 1;
} else {
// check for the existence of primary key
if (pNewRow->numOfPKs == 0) {
// check for the existence of primary key
if (pNewRow->numOfPKs == 0) {
void* p = taosArrayPush(pFinal, &pNewRow);
if (p == NULL) {
return terrno;
}
k += 1;
j += 1;
tRowDestroy(pOldRow);
} else {
numOfPk = pNewRow->numOfPKs;
SRowKey kNew, kOld;
tRowGetKey(pNewRow, &kNew);
tRowGetKey(pOldRow, &kOld);
int32_t ret = tRowKeyCompare(&kNew, &kOld);
if (ret <= 0) {
void* p = taosArrayPush(pFinal, &pNewRow);
if (p == NULL) {
return terrno;
}
k += 1;
j += 1;
tRowDestroy(pOldRow);
} else {
numOfPk = pNewRow->numOfPKs;
SRowKey kNew, kOld;
tRowGetKey(pNewRow, &kNew);
tRowGetKey(pOldRow, &kOld);
int32_t ret = tRowKeyCompare(&kNew, &kOld);
if (ret <= 0) {
void* p = taosArrayPush(pFinal, &pNewRow);
if (p == NULL) {
return terrno;
}
j += 1;
if (ret == 0) {
k += 1;
tRowDestroy(pOldRow);
}
} else {
void* p = taosArrayPush(pFinal, &pOldRow);
if (p == NULL) {
return terrno;
}
if (ret == 0) {
k += 1;
tRowDestroy(pOldRow);
}
} else {
void* p = taosArrayPush(pFinal, &pOldRow);
if (p == NULL) {
return terrno;
}
k += 1;
}
}
}
}
@ -527,8 +528,8 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c
taosArrayDestroy(pExisted->aRowP);
pExisted->aRowP = pFinal;
tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d",
id, (int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL),
tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id,
(int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL),
(pNew->pCreateTbReq != NULL));
tdDestroySVCreateTbReq(pNew->pCreateTbReq);
@ -727,7 +728,7 @@ int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema, SSDat
dataIndex++;
} else {
void* colData = colDataGetData(pColData, j);
if (IS_VAR_DATA_TYPE(pCol->type)) { // address copy, no value
if (IS_VAR_DATA_TYPE(pCol->type)) { // address copy, no value
SValue sv =
(SValue){.type = pCol->type, .nData = varDataLen(colData), .pData = (uint8_t*)varDataVal(colData)};
SColVal cv = COL_VAL_VALUE(pCol->colId, sv);
@ -806,7 +807,7 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
return TSDB_CODE_SUCCESS;
}
int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo) {
int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo) {
int32_t nameLen = strlen(pDstTableName);
(*pInfo) = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1);
if (*pInfo == NULL) {
@ -830,7 +831,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
STableSinkInfo* pTableSinkInfo = NULL;
int32_t code = 0;
bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, groupId, &pTableSinkInfo);
bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, groupId, &pTableSinkInfo);
if (alreadyCached) {
if (dstTableName[0] == 0) { // data block does not set the destination table name
@ -870,7 +871,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
}
}
code = doCreateSinkInfo(dstTableName, &pTableSinkInfo);
code = doCreateSinkTableInfo(dstTableName, &pTableSinkInfo);
if (code == 0) {
tqDebug("s-task:%s build new sinkTableInfo to add cache, dstTable:%s", id, dstTableName);
} else {
@ -906,14 +907,14 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
SArray* pTagArray = taosArrayInit(pTSchema->numOfCols + 1, sizeof(STagVal));
if (pTagArray == NULL) {
tqError("s-task:%s failed to build auto create submit msg in sink, vgId:%d, due to %s", id, vgId,
tstrerror(terrno));
return terrno;
}
pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
code =
buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray,
(pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1),
&pTableData->pCreateTbReq);
code = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray,
IS_NEW_SUBTB_RULE(pTask), &pTableData->pCreateTbReq);
taosArrayDestroy(pTagArray);
if (code) {
@ -923,12 +924,12 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
}
pTableSinkInfo->uid = 0;
code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id);
code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id);
} else {
metaReaderClear(&mr);
tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id,
vgId, dstTableName);
tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id, vgId,
dstTableName);
return TSDB_CODE_TDB_TABLE_NOT_EXIST;
}
} else {
@ -944,7 +945,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
pTableSinkInfo->uid = mr.me.uid;
metaReaderClear(&mr);
code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id);
code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id);
}
}
}
@ -952,8 +953,8 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
return code;
}
int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock,
SSubmitTbData* pTableData, int64_t earlyTs, const char* id) {
int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock,
SSubmitTbData* pTableData, int64_t earlyTs, const char* id) {
int32_t numOfRows = pDataBlock->info.rows;
char* dstTableName = pDataBlock->info.parTbName;
@ -975,6 +976,43 @@ int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_
return code;
}
int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode) {
int32_t code = TSDB_CODE_SUCCESS;
const char* id = pTask->id.idStr;
STaskOutputInfo* pOutputInfo = &pTask->outputInfo;
int32_t vgId = pTask->pMeta->vgId;
if (pTask->outputInfo.tbSink.pTagSchema == NULL) {
SMetaReader mer1 = {0};
metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK);
code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid);
if (code != TSDB_CODE_SUCCESS) {
tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId);
metaReaderClear(&mer1);
return code;
}
pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag);
metaReaderClear(&mer1);
if (pOutputInfo->tbSink.pTagSchema == NULL) {
tqError("s-task:%s failed to clone tag schema, code:%s, failed to sink results", id, tstrerror(terrno));
return terrno;
}
SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema;
SSchema* pCol1 = &pTagSchema->pSchema[0];
if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) {
pOutputInfo->tbSink.autoCreateCtb = true;
} else {
pOutputInfo->tbSink.autoCreateCtb = false;
}
}
return code;
}
void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
const SArray* pBlocks = (const SArray*)data;
SVnode* pVnode = (SVnode*)vnode;
@ -988,27 +1026,9 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
int64_t earlyTs = tsdbGetEarliestTs(pVnode->pTsdb);
STaskOutputInfo* pOutputInfo = &pTask->outputInfo;
if (pTask->outputInfo.tbSink.pTagSchema == NULL) {
SMetaReader mer1 = {0};
metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK);
code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid);
if (code != TSDB_CODE_SUCCESS) {
tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId);
metaReaderClear(&mer1);
return;
}
pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag);
metaReaderClear(&mer1);
SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema;
SSchema* pCol1 = &pTagSchema->pSchema[0];
if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) {
pOutputInfo->tbSink.autoCreateCtb = true;
} else {
pOutputInfo->tbSink.autoCreateCtb = false;
}
code = checkTagSchema(pTask, pVnode);
if (code != TSDB_CODE_SUCCESS) {
return;
}
bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks);
@ -1033,144 +1053,16 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
} else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
continue;
} else {
pTask->execInfo.sink.numOfBlocks += 1;
SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
if (submitReq.aSubmitTbData == NULL) {
code = terrno;
tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
return;
}
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP};
code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName);
continue;
}
code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id);
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
if (tbData.pCreateTbReq != NULL) {
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
(void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, pDataBlock->info.id.groupId, id);
tbData.pCreateTbReq = NULL;
}
continue;
}
void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData);
if (p == NULL) {
tqDebug("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id);
}
code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1);
if (code) { // failed and continue
tqDebug("vgId:%d, s-task:%s submit msg failed, data lost", vgId, id);
}
code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs);
}
}
} else {
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, merge submit msg", vgId, id, numOfBlocks);
SHashObj* pTableIndexMap =
taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
if (submitReq.aSubmitTbData == NULL) {
code = terrno;
tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
taosHashCleanup(pTableIndexMap);
if (streamTaskShouldStop(pTask)) {
return;
}
bool hasSubmit = false;
for (int32_t i = 0; i < numOfBlocks; i++) {
if (streamTaskShouldStop(pTask)) {
taosHashCleanup(pTableIndexMap);
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
return;
}
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock == NULL) {
continue;
}
if (pDataBlock->info.type == STREAM_CHECKPOINT) {
continue;
}
hasSubmit = true;
pTask->execInfo.sink.numOfBlocks += 1;
uint64_t groupId = pDataBlock->info.id.groupId;
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP};
int32_t* index = taosHashGet(pTableIndexMap, &groupId, sizeof(groupId));
if (index == NULL) { // no data yet, append it
code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d dst-table gid:%" PRId64 " not exist, discard stream results", vgId, groupId);
continue;
}
code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id);
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
if (tbData.pCreateTbReq != NULL) {
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
(void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, groupId, id);
tbData.pCreateTbReq = NULL;
}
continue;
}
void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData);
if (p == NULL) {
tqError("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id);
continue;
}
int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1;
code = taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size));
if (code) {
tqError("vgId:%d, s-task:%s failed to put group into index map, code:%s", vgId, id, tstrerror(code));
continue;
}
} else {
code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id);
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
if (tbData.pCreateTbReq != NULL) {
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
tbData.pCreateTbReq = NULL;
}
continue;
}
SSubmitTbData* pExisted = taosArrayGet(submitReq.aSubmitTbData, *index);
if (pExisted == NULL) {
continue;
}
code = doMergeExistedRows(pExisted, &tbData, id);
if (code != TSDB_CODE_SUCCESS) {
continue;
}
}
pTask->execInfo.sink.numOfRows += pDataBlock->info.rows;
}
taosHashCleanup(pTableIndexMap);
if (hasSubmit) {
code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, numOfBlocks);
if (code) { // failed and continue
tqError("vgId:%d failed to build and send submit msg", vgId);
}
} else {
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
tqDebug("vgId:%d, s-task:%s write results completed", vgId, id);
}
reubuildAndSendMultiResBlock(pTask, pBlocks, pVnode, earlyTs);
}
}
@ -1190,7 +1082,7 @@ bool hasOnlySubmitData(const SArray* pBlocks, int32_t numOfBlocks) {
return true;
}
int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) {
int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) {
int32_t code = tSimpleHashPut(pSinkTableMap, &groupId, sizeof(uint64_t), &pTableSinkInfo, POINTER_BYTES);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pTableSinkInfo);
@ -1202,7 +1094,17 @@ int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo,
return code;
}
int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) {
bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) {
void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
if (pVal) {
*pInfo = *(STableSinkInfo**)pVal;
return true;
}
return false;
}
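Taken together, the renamed helpers form a get-or-create-then-cache pattern; a hedged sketch of a caller (repo context assumed, mirroring doBuildAndSendCreateTableMsg above):

STableSinkInfo* pInfo = NULL;
if (!doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, gid, &pInfo)) {
  if (doCreateSinkTableInfo(pCreateTbReq->name, &pInfo) == 0) {
    // on failure the entry is freed inside doPutSinkTableInfoIntoCache
    (void)doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pInfo, gid, id);
  }
}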
int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) {
if (tSimpleHashGetSize(pSinkTableMap) == 0) {
return TSDB_CODE_SUCCESS;
}
@ -1223,8 +1125,8 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock*
return terrno;
}
int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr,
pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);
int32_t code =
tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, IS_NEW_SUBTB_RULE(pTask));
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -1262,3 +1164,155 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock*
return TSDB_CODE_SUCCESS;
}
void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs) {
int32_t code = 0;
const char* id = pTask->id.idStr;
int32_t vgId = pTask->pMeta->vgId;
int32_t numOfBlocks = taosArrayGetSize(pBlocks);
int64_t suid = pTask->outputInfo.tbSink.stbUid;
STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema;
char* stbFullName = pTask->outputInfo.tbSink.stbFullName;
SHashObj* pTableIndexMap =
taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
if (submitReq.aSubmitTbData == NULL) {
code = terrno;
tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
taosHashCleanup(pTableIndexMap);
return;
}
bool hasSubmit = false;
for (int32_t i = 0; i < numOfBlocks; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
if (pDataBlock == NULL) {
continue;
}
if (pDataBlock->info.type == STREAM_CHECKPOINT) {
continue;
}
hasSubmit = true;
pTask->execInfo.sink.numOfBlocks += 1;
uint64_t groupId = pDataBlock->info.id.groupId;
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP};
int32_t* index = taosHashGet(pTableIndexMap, &groupId, sizeof(groupId));
if (index == NULL) { // no data yet, append it
code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d dst-table gid:%" PRId64 " not exist, discard stream results", vgId, groupId);
continue;
}
code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id);
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
if (tbData.pCreateTbReq != NULL) {
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
(void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, groupId, id);
tbData.pCreateTbReq = NULL;
}
continue;
}
void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData);
if (p == NULL) {
tqError("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id);
continue;
}
int32_t size = (int32_t)taosArrayGetSize(submitReq.aSubmitTbData) - 1;
code = taosHashPut(pTableIndexMap, &groupId, sizeof(groupId), &size, sizeof(size));
if (code) {
tqError("vgId:%d, s-task:%s failed to put group into index map, code:%s", vgId, id, tstrerror(code));
continue;
}
} else {
code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id);
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
if (tbData.pCreateTbReq != NULL) {
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
tbData.pCreateTbReq = NULL;
}
continue;
}
SSubmitTbData* pExisted = taosArrayGet(submitReq.aSubmitTbData, *index);
if (pExisted == NULL) {
continue;
}
code = doMergeExistedRows(pExisted, &tbData, id);
if (code != TSDB_CODE_SUCCESS) {
continue;
}
}
pTask->execInfo.sink.numOfRows += pDataBlock->info.rows;
}
taosHashCleanup(pTableIndexMap);
if (hasSubmit) {
code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, numOfBlocks);
if (code) { // failed and continue
tqError("vgId:%d failed to build and send submit msg", vgId);
}
} else {
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
tqDebug("vgId:%d, s-task:%s write results completed", vgId, id);
}
}
int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode, int64_t earlyTs) {
int32_t code = 0;
STSchema* pTSchema = pTask->outputInfo.tbSink.pTSchema;
int64_t suid = pTask->outputInfo.tbSink.stbUid;
const char* id = pTask->id.idStr;
int32_t vgId = TD_VID(pVnode);
char* stbFullName = pTask->outputInfo.tbSink.stbFullName;
pTask->execInfo.sink.numOfBlocks += 1;
SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
if (submitReq.aSubmitTbData == NULL) {
tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(terrno));
return terrno;
}
SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP};
code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName);
return code;
}
code = tqSetDstTableDataPayload(suid, pTSchema, index, pDataBlock, &tbData, earlyTs, id);
if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
if (tbData.pCreateTbReq != NULL) {
tdDestroySVCreateTbReq(tbData.pCreateTbReq);
(void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, pDataBlock->info.id.groupId, id);
tbData.pCreateTbReq = NULL;
}
return code;
}
void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData);
if (p == NULL) {
tqDebug("vgId:%d, s-task:%s failed to build submit msg, code:%s, data lost", vgId, id, tstrerror(terrno));
return terrno;
}
code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1);
if (code) { // failed and continue
tqDebug("vgId:%d, s-task:%s submit msg failed, code:%s data lost", vgId, id, tstrerror(code));
}
return code;
}


@ -692,7 +692,7 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
if ((ppTask != NULL) && ((*ppTask) != NULL)) {
streamMetaAcquireOneTask(*ppTask);
int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask);
SStreamTask* pTask = *ppTask;
if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
@ -1119,10 +1119,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t
int32_t vgId = pMeta->vgId;
int32_t code = 0;
if (pTask == NULL) {
return -1;
}
streamTaskResume(pTask);
ETaskStatus status = streamTaskGetStatus(pTask).state;
@ -1150,7 +1146,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t
}
}
streamMetaReleaseTask(pMeta, pTask);
return code;
}
@ -1173,6 +1168,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
code = tqProcessTaskResumeImpl(handle, pTask, sversion, pReq->igUntreated, fromVnode);
if (code != 0) {
streamMetaReleaseTask(pMeta, pTask);
return code;
}
@ -1186,6 +1182,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
streamMutexUnlock(&pHTask->lock);
code = tqProcessTaskResumeImpl(handle, pHTask, sversion, pReq->igUntreated, fromVnode);
streamMetaReleaseTask(pMeta, pHTask);
}
return code;


@ -602,14 +602,14 @@ int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs
SSttLvl *lvl;
code = tsdbSttLvlInitRef(pTsdb, lvl1, &lvl);
if (code) {
taosMemoryFree(lvl);
tsdbSttLvlClear(&lvl);
tsdbTFileSetClear(fset);
return code;
}
code = TARRAY2_APPEND(fset[0]->lvlArr, lvl);
if (code) {
taosMemoryFree(lvl);
tsdbSttLvlClear(&lvl);
tsdbTFileSetClear(fset);
return code;
}


@ -855,6 +855,7 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S
STableBlockScanInfo** p = taosArrayGetLast(pTableScanInfoList);
if (p == NULL) {
clearBrinBlockIter(&iter);
tsdbError("invalid param, empty in tablescanInfoList, %s", pReader->idStr);
return TSDB_CODE_INVALID_PARA;
}
@ -5256,7 +5257,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
// NOTE: the following codes is used to perform test for suspend/resume for tsdbReader when it blocks the commit
// the data should be ingested in round-robin and all the child tables should be created before ingesting data
// the version range of query will be used to identify the correctness of suspend/resume functions.
// this function will blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files
// this function will be blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files
#if SUSPEND_RESUME_TEST
if (!pReader->status.suspendInvoked && !pReader->status.loadFromFile) {
tsem_wait(&pReader->resumeAfterSuspend);
@ -5909,6 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_
} else if (mr.me.type == TSDB_NORMAL_TABLE) { // do nothing
} else {
code = TSDB_CODE_INVALID_PARA;
tsdbError("invalid mr.me.type:%d, code:%s", mr.me.type, tstrerror(code));
metaReaderClear(&mr);
return code;
}


@ -45,6 +45,7 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
.retentionPeriod = -1,
.rollPeriod = 0,
.segSize = 0,
.committed = 0,
.retentionSize = -1,
.level = TAOS_WAL_WRITE,
.clearFiles = 0,


@ -257,6 +257,7 @@ int vnodeLoadInfo(const char *dir, SVnodeInfo *pInfo) {
code = vnodeDecodeInfo(pData, pInfo);
TSDB_CHECK_CODE(code, lino, _exit);
pInfo->config.walCfg.committed = pInfo->state.committed;
_exit:
if (code) {
if (pFile) {


@ -633,47 +633,44 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
}
break;
case TDMT_STREAM_TASK_DEPLOY: {
int32_t code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
if ((code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len)) != TSDB_CODE_SUCCESS) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_DROP: {
if (tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) {
if ((code = tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_UPDATE_CHKPT: {
if (tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) {
if ((code = tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_CONSEN_CHKPT: {
if (pVnode->restored) {
if (tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg) < 0) {
goto _err;
}
if (pVnode->restored && (code = tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg)) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_PAUSE: {
if (pVnode->restored && vnodeIsLeader(pVnode) &&
tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
(code = tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_RESUME: {
if (pVnode->restored && vnodeIsLeader(pVnode) &&
tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
(code = tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) {
goto _err;
}
} break;
case TDMT_VND_STREAM_TASK_RESET: {
if (pVnode->restored && vnodeIsLeader(pVnode)) {
if (tqProcessTaskResetReq(pVnode->pTq, pMsg) < 0) {
if (pVnode->restored && vnodeIsLeader(pVnode) &&
(code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) {
goto _err;
}
}
} break;
case TDMT_VND_ALTER_CONFIRM:
needCommit = pVnode->config.hashChange;
@ -693,10 +690,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
case TDMT_VND_DROP_INDEX:
vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp);
break;
case TDMT_VND_STREAM_CHECK_POINT_SOURCE:
case TDMT_VND_STREAM_CHECK_POINT_SOURCE: // always return true
tqProcessTaskCheckPointSourceReq(pVnode->pTq, pMsg, pRsp);
break;
case TDMT_VND_STREAM_TASK_UPDATE:
case TDMT_VND_STREAM_TASK_UPDATE: // always return true
tqProcessTaskUpdateReq(pVnode->pTq, pMsg);
break;
case TDMT_VND_COMPACT:
@ -752,7 +749,7 @@ _exit:
_err:
vError("vgId:%d, process %s request failed since %s, ver:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
tstrerror(terrno), ver);
tstrerror(code), ver);
return code;
}


@ -551,7 +551,7 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
(int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
}
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, sizeof(type) - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s",
*len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s",
((i > 0) ? ", " : ""), pSchema->name, type);
}
}


@ -278,7 +278,7 @@ static bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t in
}
static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock,
SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo, bool genAfterBlock) {
SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo) {
int32_t code = TSDB_CODE_SUCCESS;
int32_t lino = 0;
int32_t rows = pResBlock->info.rows;
@ -427,7 +427,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
break;
}
if (start.key == INT64_MIN || end.key == INT64_MIN || genAfterBlock) {
if (start.key == INT64_MIN || end.key == INT64_MIN) {
colDataSetNULL(pDst, rows);
break;
}
@ -463,13 +463,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
break;
}
if (genAfterBlock && rows == 0) {
hasInterp = false;
break;
}
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot);
if (pkey->isNull == false && !genAfterBlock) {
if (pkey->isNull == false) {
code = colDataSetVal(pDst, rows, pkey->pData, false);
QUERY_CHECK_CODE(code, lino, _end);
} else {
@ -841,7 +836,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
if (nextTs > pSliceInfo->current) {
while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo, false) &&
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo) &&
pSliceInfo->fillType == TSDB_FILL_LINEAR) {
break;
} else {
@ -869,7 +864,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
doKeepLinearInfo(pSliceInfo, pBlock, i);
while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo, false) &&
if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo) &&
pSliceInfo->fillType == TSDB_FILL_LINEAR) {
break;
} else {
@ -914,12 +909,13 @@ static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperato
SSDataBlock* pResBlock = pSliceInfo->pRes;
SInterval* pInterval = &pSliceInfo->interval;
if (pSliceInfo->pPrevGroupKey == NULL) {
if (pSliceInfo->fillType == TSDB_FILL_NEXT || pSliceInfo->fillType == TSDB_FILL_LINEAR ||
pSliceInfo->pPrevGroupKey == NULL) {
return;
}
while (pSliceInfo->current <= pSliceInfo->win.ekey) {
(void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo, true);
(void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo);
pSliceInfo->current =
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
}


@ -224,19 +224,18 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index
*index = -1;
if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) {
if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v)) {
return TSDB_CODE_SUCCESS;
}
// divide a range of [dMinVal, dMaxVal] into 1024 buckets
double span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
if (span < pBucket->numOfSlots) {
int32_t delta = (int32_t)(v - pBucket->range.dMinVal);
*index = (delta % pBucket->numOfSlots);
if (fabs(span) < DBL_EPSILON) {
*index = 0;
} else {
double slotSpan = span / pBucket->numOfSlots;
*index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan);
if (v == pBucket->range.dMaxVal) {
if (fabs(v - pBucket->range.dMaxVal) < DBL_EPSILON) {
*index -= 1;
}
}
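A self-contained sketch of the patched slot-index computation (assuming the default 1024 slots mentioned in the comment above); compiles with any C99 compiler and -lm:

#include <stdio.h>
#include <math.h>
#include <float.h>

static int bucketIndex(double v, double dMin, double dMax, int numOfSlots) {
  if (v > dMax || v < dMin || isnan(v)) return -1;   // skipped, as in the patched guard
  double span = dMax - dMin;
  if (fabs(span) < DBL_EPSILON) return 0;            // degenerate range: single slot
  int index = (int)((v - dMin) / (span / numOfSlots));
  if (fabs(v - dMax) < DBL_EPSILON) index -= 1;      // right edge folds into the last slot
  return index;
}

int main(void) {
  printf("%d\n", bucketIndex(25.0, 0.0, 100.0, 1024));   // 256
  printf("%d\n", bucketIndex(100.0, 0.0, 100.0, 1024));  // 1023
  printf("%d\n", bucketIndex(NAN, 0.0, 100.0, 1024));    // -1
  return 0;
}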
@ -583,48 +582,52 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction
*result = getIdenticalDataVal(pMemBucket, i);
return TSDB_CODE_SUCCESS;
}
// try next round
pMemBucket->times += 1;
// qDebug("MemBucket:%p, start next round data bucketing, time:%d", pMemBucket, pMemBucket->times);
pMemBucket->range = pSlot->range;
pMemBucket->total = 0;
resetSlotInfo(pMemBucket);
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1);
tMemBucket *tmpBucket = NULL;
int32_t code = tMemBucketCreate(pMemBucket->bytes, pMemBucket->type, pSlot->range.dMinVal, pSlot->range.dMaxVal,
false, &tmpBucket);
if (TSDB_CODE_SUCCESS != code) {
tMemBucketDestroy(&tmpBucket);
return code;
}
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times);
SArray* list;
void *p = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
if (p != NULL) {
list = *(SArray **)p;
if (list == NULL || list->size <= 0) {
tMemBucketDestroy(&tmpBucket);
return -1;
}
} else {
tMemBucketDestroy(&tmpBucket);
return -1;
}
for (int32_t f = 0; f < list->size; ++f) {
int32_t *pageId = taosArrayGet(list, f);
if (NULL == pageId) {
tMemBucketDestroy(&tmpBucket);
return TSDB_CODE_OUT_OF_RANGE;
}
SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId);
if (pg == NULL) {
tMemBucketDestroy(&tmpBucket);
return terrno;
}
int32_t code = tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num);
code = tMemBucketPut(tmpBucket, pg->data, (int32_t)pg->num);
if (code != TSDB_CODE_SUCCESS) {
tMemBucketDestroy(&tmpBucket);
return code;
}
setBufPageDirty(pg, true);
releaseBufPage(pMemBucket->pBuffer, pg);
}
return getPercentileImpl(pMemBucket, count - num, fraction, result);
code = getPercentileImpl(tmpBucket, count - num, fraction, result);
tMemBucketDestroy(&tmpBucket);
return code;
}
} else {
num += pSlot->info.size;


@ -887,7 +887,8 @@ _err:
}
static int32_t addParamToLogicConditionNode(SLogicConditionNode* pCond, SNode* pParam) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType &&
((SLogicConditionNode*)pParam)->condType != LOGIC_COND_TYPE_NOT) {
int32_t code = nodesListAppendList(pCond->pParameterList, ((SLogicConditionNode*)pParam)->pParameterList);
((SLogicConditionNode*)pParam)->pParameterList = NULL;
nodesDestroyNode(pParam);


@ -10609,6 +10609,19 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm
"Non window query only support scalar function, aggregate function is not allowed");
}
if (NULL != pStmt->pOptions->pDelay) {
SValueNode* pVal = (SValueNode*)pStmt->pOptions->pDelay;
int64_t minDelay = 0;
char* str = "5s";
if (DEAL_RES_ERROR != translateValue(pCxt, pVal) && TSDB_CODE_SUCCESS ==
parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) {
if (pVal->datum.i < minDelay) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
"stream max delay must be at least 5 seconds");
}
}
}
return TSDB_CODE_SUCCESS;
}

View File

@ -4679,6 +4679,9 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) {
cell = cell->pNext;
}
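// NOT conditions cannot use the column-filter fast path; force scalar evaluation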
if (node->condType == LOGIC_COND_TYPE_NOT) {
stat->scalarMode = true;
}
return DEAL_RES_CONTINUE;
}

View File

@ -299,7 +299,7 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) {
return;
}
/*SStreamTask* p = */ streamMetaAcquireOneTask(pTask); // add task ref here
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); // add task ref here
streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs());
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);

View File

@ -347,7 +347,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
if (old == 0) {
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref);
streamMetaAcquireOneTask(pTask);
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
"trigger-recv-monitor");
pTmrInfo->launchChkptId = pActiveInfo->activeId;

View File

@ -1162,7 +1162,7 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
if (old == 0) {
int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref);
streamMetaAcquireOneTask(pTask);
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
"chkpt-ready-monitor");

View File

@ -753,12 +753,17 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task
return code;
}
void streamMetaAcquireOneTask(SStreamTask* pTask) {
int32_t streamMetaAcquireOneTask(SStreamTask* pTask) {
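// bump the task ref count and return the new value so callers can log it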
int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref);
return ref;
}
void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) {
if (pTask == NULL) {
return;
}
int32_t taskId = pTask->id.taskId;
int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1);
@ -862,7 +867,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
if (ppTask) {
pTask = *ppTask;
// it is an fill-history task, remove the related stream task's id that points to it
// it is a fill-history task, remove the related stream task's id that points to it
if (pTask->info.fillHistory == 0) {
int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1);
}

View File

@ -22,7 +22,7 @@ static void streamTaskSchedHelper(void* param, void* tmrId);
void streamSetupScheduleTrigger(SStreamTask* pTask) {
int64_t delaySchema = pTask->info.delaySchedParam;
if (delaySchema != 0 && pTask->info.fillHistory == 0) {
int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
int32_t ref = streamMetaAcquireOneTask(pTask);
stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref,
pTask->info.delaySchedParam);
@ -63,7 +63,11 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3
pRunReq->reqType = execType;
SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
return tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg);
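// log queue-put failures instead of silently dropping the run request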
int32_t code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg);
if (code) {
stError("vgId:%d failed to put msg into stream queue, code:%s, %x", vgId, tstrerror(code), taskId);
}
return code;
}
void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleTime = 0; }
@ -76,7 +80,7 @@ void streamTaskResumeInFuture(SStreamTask* pTask) {
pTask->status.schedIdleTime, ref);
// add one ref count for task
streamMetaAcquireOneTask(pTask);
int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer,
pTask->pMeta->vgId, "resume-task-tmr");
}

View File

@ -477,6 +477,7 @@ int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal
if (!pStr) {
if (onlyCache && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) {
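// cache-only lookup: a miss is final, do not fall back to rocksdb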
(*pWinCode) = TSDB_CODE_FAILED;
goto _end;
}
(*pWinCode) = streamStateGetParName_rocksdb(pState, groupId, pVal);
if ((*pWinCode) == TSDB_CODE_SUCCESS && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) {

View File

@ -258,10 +258,12 @@ void tFreeStreamTask(SStreamTask* pTask) {
if (pTask->inputq.queue) {
streamQueueClose(pTask->inputq.queue, pTask->id.taskId);
pTask->inputq.queue = NULL;
}
if (pTask->outputq.queue) {
streamQueueClose(pTask->outputq.queue, pTask->id.taskId);
pTask->outputq.queue = NULL;
}
if (pTask->exec.qmsg) {
@ -275,6 +277,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
if (pTask->exec.pWalReader != NULL) {
walCloseReader(pTask->exec.pWalReader);
pTask->exec.pWalReader = NULL;
}
streamClearChkptReadyMsg(pTask->chkInfo.pActiveInfo);
@ -286,7 +289,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pSchemaWrapper);
taosMemoryFree(pTask->outputInfo.tbSink.pTSchema);
tSimpleHashCleanup(pTask->outputInfo.tbSink.pTblInfo);
tSimpleHashCleanup(pTask->outputInfo.tbSink.pTbInfo);
tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pTagSchema);
} else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
taosArrayDestroy(pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos);

View File

@ -501,9 +501,10 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
if (pTrans == NULL) {
ETaskStatus s = pSM->current.state;
if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP &&
s != TASK_STATUS__UNINIT && s != TASK_STATUS__READY) {
stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, GET_EVT_NAME(pSM->prev.evt));
if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && s != TASK_STATUS__UNINIT &&
s != TASK_STATUS__READY) {
stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name,
GET_EVT_NAME(pSM->prev.evt));
}
// the pSM->prev.evt may be 0, so printing its event name is not appropriate.
@ -521,11 +522,15 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
return TSDB_CODE_STREAM_INVALID_STATETRANS;
}
keepPrevInfo(pSM);
// repeat pause will not overwrite the previous pause state
if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) {
keepPrevInfo(pSM);
pSM->current = pTrans->next;
} else {
stDebug("s-task:%s repeat pause evt recv, not update prev status", id);
}
pSM->current = pTrans->next;
pSM->pActiveTrans = NULL;
// todo remove it
// todo: handle the error code
// on the success callback, take the lock if necessary, or maybe add an option for this?

View File

@ -56,7 +56,7 @@ void streamTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* pParam, void*
}
}
stDebug("vgId:%d start %s tmr succ", vgId, pMsg);
stTrace("vgId:%d start %s tmr succ", vgId, pMsg);
}
void streamTmrStop(tmr_h tmrId) {

View File

@ -24,7 +24,6 @@ extern "C" {
#define TIMER_MAX_MS 0x7FFFFFFF
#define PING_TIMER_MS 5000
#define HEARTBEAT_TICK_NUM 20
typedef struct SSyncEnv {
uint8_t isStart;

View File

@ -977,9 +977,10 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
pData->logicClock = pSyncTimer->logicClock;
pData->execTime = tsNow + pSyncTimer->timerMS;
sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, pData->rid, pData->destId.addr);
sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64 " at %d", pSyncNode->vgId, pData->rid,
pData->destId.addr, pSyncTimer->timerMS);
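// reset the timer with the full heartbeat interval rather than one timerMS / HEARTBEAT_TICK_NUM subdivision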
TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)(pData->rid),
TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid),
syncEnv()->pTimerManager, &pSyncTimer->pTimer));
} else {
code = TSDB_CODE_SYN_INTERNAL_ERROR;
@ -2711,7 +2712,8 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
return;
}
sTrace("vgId:%d, eq peer hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, pData->destId.addr);
sTrace("vgId:%d, peer hb timer execution, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid,
pData->destId.addr);
if (pSyncNode->totalReplicaNum > 1) {
int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock);
@ -2753,13 +2755,12 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
if (ret != 0) {
sError("vgId:%d, failed to send heartbeat since %s", pSyncNode->vgId, tstrerror(ret));
}
} else {
}
if (syncIsInit()) {
// sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId);
if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM,
(void*)hbDataRid, syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) {
sTrace("vgId:%d, reset peer hb timer at %d", pSyncNode->vgId, pSyncTimer->timerMS);
if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid,
syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) {
sError("vgId:%d, reset peer hb timer error, %s", pSyncNode->vgId, tstrerror(code));
syncNodeRelease(pSyncNode);
syncHbTimerDataRelease(pData);

View File

@ -1446,6 +1446,9 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
return ret;
}
ofpCell = tdbPageGetCell(ofp, 0);
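// a NULL cell indicates a malformed overflow page; fail instead of dereferencing it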
if (ofpCell == NULL) {
return TSDB_CODE_INVALID_DATA_FMT;
}
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;

View File

@ -282,6 +282,17 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList)
}
static void walAlignVersions(SWal* pWal) {
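// reconcile wal version info with the committed version recorded by the vnode/mnode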
if (pWal->cfg.committed > 0 && pWal->cfg.committed != pWal->vers.snapshotVer) {
wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is different from commited:%" PRId64
". in vnode/mnode. align with it.",
pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed);
pWal->vers.snapshotVer = pWal->cfg.committed;
}
if (pWal->vers.snapshotVer < 0 && pWal->vers.firstVer > 0) {
wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".",
pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer);
pWal->vers.snapshotVer = pWal->vers.firstVer;
}
if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) {
wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1. align with it.", pWal->cfg.vgId,
pWal->vers.firstVer, pWal->vers.snapshotVer);
@ -400,6 +411,17 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) {
TAOS_RETURN(TSDB_CODE_SUCCESS);
}
void printFileSet(SArray* fileSet) {
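// log each file's version range, size, synced offset and timestamps for diagnosis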
int32_t sz = taosArrayGetSize(fileSet);
for (int32_t i = 0; i < sz; i++) {
SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i);
wInfo("firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 ", createTs:%" PRId64
", closeTs:%" PRId64,
pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, pFileInfo->createTs,
pFileInfo->closeTs);
}
}
int32_t walCheckAndRepairMeta(SWal* pWal) {
// load log files, get first/snapshot/last version info
int32_t code = 0;
@ -460,6 +482,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
taosArraySort(actualLog, compareWalFileInfo);
wInfo("vgId:%d, wal path:%s, actual log file num:%d", pWal->cfg.vgId, pWal->path,
(int32_t)taosArrayGetSize(actualLog));
printFileSet(actualLog);
int metaFileNum = taosArrayGetSize(pWal->fileInfoSet);
int actualFileNum = taosArrayGetSize(actualLog);
int64_t firstVerPrev = pWal->vers.firstVer;
@ -474,6 +500,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
TAOS_RETURN(code);
}
wInfo("vgId:%d, wal path:%s, meta log file num:%d", pWal->cfg.vgId, pWal->path,
(int32_t)taosArrayGetSize(pWal->fileInfoSet));
printFileSet(pWal->fileInfoSet);
int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
// scan and determine the lastVer
@ -533,6 +563,7 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
// repair ts of files
TAOS_CHECK_RETURN(walRepairLogFileTs(pWal, &updateMeta));
printFileSet(pWal->fileInfoSet);
// update meta file
if (updateMeta) {
TAOS_CHECK_RETURN(walSaveMeta(pWal));
@ -1124,6 +1155,10 @@ int32_t walLoadMeta(SWal* pWal) {
(void)taosCloseFile(&pFile);
taosMemoryFree(buf);
wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%d", pWal->cfg.vgId, fnameStr,
(int32_t)taosArrayGetSize(pWal->fileInfoSet));
printFileSet(pWal->fileInfoSet);
TAOS_RETURN(code);
}

View File

@ -91,7 +91,8 @@ static int32_t walInitLock(SWal *pWal) {
}
SWal *walOpen(const char *path, SWalCfg *pCfg) {
SWal *pWal = taosMemoryCalloc(1, sizeof(SWal));
int32_t code = 0;
SWal *pWal = taosMemoryCalloc(1, sizeof(SWal));
if (pWal == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
@ -160,17 +161,20 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) {
pWal->writeHead.magic = WAL_MAGIC;
// load meta
if (walLoadMeta(pWal) < 0) {
wInfo("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(terrno));
code = walLoadMeta(pWal);
if (code < 0) {
wWarn("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(code));
}
if (walCheckAndRepairMeta(pWal) < 0) {
wError("vgId:%d, cannot open wal since repair meta file failed", pWal->cfg.vgId);
code = walCheckAndRepairMeta(pWal);
if (code < 0) {
wError("vgId:%d, cannot open wal since repair meta file failed since %s", pWal->cfg.vgId, tstrerror(code));
goto _err;
}
if (walCheckAndRepairIdx(pWal) < 0) {
wError("vgId:%d, cannot open wal since repair idx file failed", pWal->cfg.vgId);
code = walCheckAndRepairIdx(pWal);
if (code < 0) {
wError("vgId:%d, cannot open wal since repair idx file failed since %s", pWal->cfg.vgId, tstrerror(code));
goto _err;
}

View File

@ -127,6 +127,7 @@ class WalRetentionEnv : public ::testing::Test {
SWalCfg cfg;
cfg.rollPeriod = -1;
cfg.segSize = -1;
cfg.committed = -1;
cfg.retentionPeriod = -1;
cfg.retentionSize = 0;
cfg.rollPeriod = 0;

View File

@ -200,6 +200,7 @@ void* taosArrayPop(SArray* pArray) {
void* taosArrayGet(const SArray* pArray, size_t index) {
if (NULL == pArray) {
terrno = TSDB_CODE_INVALID_PARA;
uError("failed to return value from array of null ptr");
return NULL;
}

View File

@ -13,6 +13,7 @@ from frame.srvCtl import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
from frame import epath
# from frame.server.dnodes import *
# from frame.server.cluster import *
@ -20,7 +21,9 @@ from frame.autogen import *
class TDTestCase(TBase):
def init(self, conn, logSql, replicaVar=1):
updatecfgDict = {'dDebugFlag':131}
super(TDTestCase, self).init(conn, logSql, replicaVar=1, checkColName="c1")
self.valgrind = 0
self.db = "test"
self.stb = "meters"
@ -50,9 +53,36 @@ class TDTestCase(TBase):
tdSql.error("create encrypt_key '12345678abcdefghi'")
tdSql.error("create database test ENCRYPT_ALGORITHM 'sm4'")
def recreate_dnode_encrypt_key(self):
"""
Description: From the jira TS-5507, the encrypt key can be recreated.
create:
2024-09-23 created by Charles
update:
None
"""
# taosd path
taosd_path = epath.binPath()
tdLog.info(f"taosd_path: {taosd_path}")
# dnode2 path
dnode2_path = tdDnodes.getDnodeDir(2)
dnode2_data_path = os.sep.join([dnode2_path, "data"])
dnode2_cfg_path = os.sep.join([dnode2_path, "cfg"])
tdLog.info(f"dnode2_path: {dnode2_data_path}")
# stop dnode2
tdDnodes.stoptaosd(2)
tdLog.info("stop dndoe2")
# delete dndoe2 data
cmd = f"rm -rf {dnode2_data_path}"
os.system(cmd)
# recreate the encrypt key for dnode2
os.system(f"{os.sep.join([taosd_path, 'taosd'])} -y '1234567890' -c {dnode2_cfg_path}")
tdLog.info("test case: recreate the encrypt key for dnode2 passed")
def run(self):
self.create_encrypt_db_error()
self.create_encrypt_db()
self.recreate_dnode_encrypt_key()
def stop(self):
tdSql.close()

View File

@ -18,6 +18,7 @@ import time
import socket
import json
import toml
import subprocess
from frame.boundary import DataBoundary
import taos
from frame.log import *
@ -1830,6 +1831,51 @@ class TDCom:
if i == 1:
self.record_history_ts = ts_value
def generate_query_result(self, inputfile, test_case):
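# run the sql file through the taos shell and strip banner/status lines to produce a comparable result file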
if not os.path.exists(inputfile):
tdLog.exit(f"Input file '{inputfile}' does not exist.")
else:
self.query_result_file = f"./temp_{test_case}.result"
os.system(f"taos -f {inputfile} | grep -v 'Query OK'|grep -v 'Copyright'| grep -v 'Welcome to the TDengine Command' > {self.query_result_file} ")
return self.query_result_file
def compare_result_files(self, file1, file2):
try:
# use subprocess.run to execute diff/fc commands
# print(file1, file2)
if platform.system().lower() != 'windows':
cmd='diff'
result = subprocess.run([cmd, "-u", "--color", file1, file2], text=True, capture_output=True)
else:
cmd='fc'
result = subprocess.run([cmd, file1, file2], text=True, capture_output=True)
# if result is not empty, print the differences and files name. Otherwise, the files are identical.
if result.stdout:
tdLog.debug(f"Differences between {file1} and {file2}")
tdLog.notice(f"\r\n{result.stdout}")
return False
else:
return True
except FileNotFoundError:
tdLog.debug(f"The '{cmd}' command was not found. Please make sure it is installed and available in your PATH.")
return False
except Exception as e:
tdLog.debug(f"An error occurred: {e}")
return False
def compare_testcase_result(self, inputfile, expected_file, test_case):
test_result_file = self.generate_query_result(inputfile, test_case)
if self.compare_result_files(expected_file, test_result_file):
tdLog.info("Test passed: Result files are identical.")
os.system(f"rm -f {test_result_file}")
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}(line:{caller.lineno}) failed: sql file is {inputfile}, expected_file:{expected_file} != result_file:{test_result_file}")
def is_json(msg):
if isinstance(msg, str):
try:
@ -1864,4 +1910,6 @@ def dict2toml(in_dict: dict, file:str):
with open(file, 'w') as f:
toml.dump(in_dict, f)
tdCom = TDCom()

View File

@ -0,0 +1,114 @@
taos> select pi()
pi() |
============================
3.141592653589793 |
taos> select pi() + 1
pi() + 1 |
============================
4.141592653589793 |
taos> select pi() - 1
pi() - 1 |
============================
2.141592653589793 |
taos> select pi() * 2
pi() * 2 |
============================
6.283185307179586 |
taos> select pi() / 2
pi() / 2 |
============================
1.570796326794897 |
taos> select pi() from ts_4893.meters limit 5
pi() |
============================
3.141592653589793 |
3.141592653589793 |
3.141592653589793 |
3.141592653589793 |
3.141592653589793 |
taos> select pi() + 1 from ts_4893.meters limit 1
pi() + 1 |
============================
4.141592653589793 |
taos> select pi() - 1 from ts_4893.meters limit 1
pi() - 1 |
============================
2.141592653589793 |
taos> select pi() * 2 from ts_4893.meters limit 1
pi() * 2 |
============================
6.283185307179586 |
taos> select pi() / 2 from ts_4893.meters limit 1
pi() / 2 |
============================
1.570796326794897 |
taos> select pi() + pi() from ts_4893.meters limit 1
pi() + pi() |
============================
6.283185307179586 |
taos> select pi() - pi() from ts_4893.meters limit 1
pi() - pi() |
============================
0.000000000000000 |
taos> select pi() * pi() from ts_4893.meters limit 1
pi() * pi() |
============================
9.869604401089358 |
taos> select pi() / pi() from ts_4893.meters limit 1
pi() / pi() |
============================
1.000000000000000 |
taos> select pi() + id from ts_4893.meters order by ts limit 5
pi() + id |
============================
3.141592653589793 |
4.141592653589793 |
5.141592653589793 |
6.141592653589793 |
7.141592653589793 |
taos> select abs(pi())
abs(pi()) |
============================
3.141592653589793 |
taos> select pow(pi(), 2)
pow(pi(), 2) |
============================
9.869604401089358 |
taos> select sqrt(pi())
sqrt(pi()) |
============================
1.772453850905516 |
taos> select cast(pi() as int)
cast(pi() as int) |
====================
3 |
taos> select pi()
pi() |
============================
3.141592653589793 |
taos> select substring_index(null, '.', 2)
substring_index(null, '.', 2) |
================================
NULL |

View File

@ -1,20 +1,21 @@
select pi();
select pi() + 1;
select pi() - 1;
select pi() * 2;
select pi() / 2;
select pi() from ts_4893.meters limit 5;
select pi() + 1 from ts_4893.meters limit 1;
select pi() - 1 from ts_4893.meters limit 1;
select pi() * 2 from ts_4893.meters limit 1;
select pi() / 2 from ts_4893.meters limit 1;
select pi() + pi() from ts_4893.meters limit 1;
select pi() - pi() from ts_4893.meters limit 1;
select pi() * pi() from ts_4893.meters limit 1;
select pi() / pi() from ts_4893.meters limit 1;
select pi() + id from ts_4893.meters order by ts limit 5;
select abs(pi());
select pow(pi(), 2);
select sqrt(pi());
select cast(pi() as int);
select pi();
select pi()
select pi() + 1
select pi() - 1
select pi() * 2
select pi() / 2
select pi() from ts_4893.meters limit 5
select pi() + 1 from ts_4893.meters limit 1
select pi() - 1 from ts_4893.meters limit 1
select pi() * 2 from ts_4893.meters limit 1
select pi() / 2 from ts_4893.meters limit 1
select pi() + pi() from ts_4893.meters limit 1
select pi() - pi() from ts_4893.meters limit 1
select pi() * pi() from ts_4893.meters limit 1
select pi() / pi() from ts_4893.meters limit 1
select pi() + id from ts_4893.meters order by ts limit 5
select abs(pi())
select pow(pi(), 2)
select sqrt(pi())
select cast(pi() as int)
select pi()
select substring_index(null, '.', 2)

View File

@ -17,14 +17,15 @@ import random
import taos
import frame
import frame.etool
from frame.etool import *
from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
from frame import *
from frame import etool
from frame.common import *
class TDTestCase(TBase):
updatecfgDict = {
@ -84,8 +85,16 @@ class TDTestCase(TBase):
tdSql.error(err_statement)
err_statement = ''
def test_normal_query_new(self, testCase):
# read sql from .sql file and execute
tdLog.info(f"test normal query.")
self.sqlFile = etool.curFile(__file__, f"in/{testCase}.in")
self.ansFile = etool.curFile(__file__, f"ans/{testCase}_1.csv")
tdCom.compare_testcase_result(self.sqlFile, self.ansFile, testCase)
def test_pi(self):
self.test_normal_query("pi")
self.test_normal_query_new("pi")
def test_round(self):
self.test_normal_query("round")

View File

@ -10,7 +10,7 @@
# army-test
#
,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py
,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py -N 3 -M 3
,,n,army,python3 ./test.py -f storage/s3/s3Basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py

View File

@ -225,6 +225,7 @@ python3 test.py -f query/distinctOneColTb.py
python3 ./test.py -f query/filter.py
python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
python3 ./test.py -f query/not.py
python3 ./test.py -f query/queryError.py
python3 ./test.py -f query/filterAllIntTypes.py
python3 ./test.py -f query/filterFloatAndDouble.py

View File

@ -139,6 +139,7 @@ python3 ./test.py -f query/querySort.py
python3 ./test.py -f query/queryJoin.py
python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
python3 ./test.py -f query/not.py
python3 ./test.py -f query/select_last_crash.py
python3 ./test.py -f query/queryNullValueTest.py
python3 ./test.py -f query/queryInsertValue.py

View File

@ -133,4 +133,17 @@ if $data13 != -111 then
goto loop1
endi
print step 2====================
sql create database test vgroups 1 ;
sql use test;
sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
sql_error create stream streams1 trigger max_delay 4000a ignore update 0 ignore expired 0 into streamtST1 as select _wstart, count(*) from st interval(5s);
sql_error create stream streams2 trigger max_delay 4s ignore update 0 ignore expired 0 into streamtST2 as select _wstart, count(*) from st interval(5s);
sql create stream streams3 trigger max_delay 5000a ignore update 0 ignore expired 0 into streamtST3 as select _wstart, count(*) from st interval(5s);
sql create stream streams4 trigger max_delay 5s ignore update 0 ignore expired 0 into streamtST4 as select _wstart, count(*) from st interval(5s);
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -48,15 +48,15 @@ sql create table t1 using st tags(1);
sql create table t2 using st tags(2);
sql create stream stream2 trigger window_close into streamt2 as select _wstart, sum(a) from st interval(10s);
sql create stream stream3 trigger max_delay 1s into streamt3 as select _wstart, sum(a) from st interval(10s);
sql create stream stream3 trigger max_delay 5s into streamt3 as select _wstart, sum(a) from st interval(10s);
sql create stream stream4 trigger window_close into streamt4 as select _wstart, sum(a) from t1 interval(10s);
sql create stream stream5 trigger max_delay 1s into streamt5 as select _wstart, sum(a) from t1 interval(10s);
sql create stream stream5 trigger max_delay 5s into streamt5 as select _wstart, sum(a) from t1 interval(10s);
sql create stream stream6 trigger window_close into streamt6 as select _wstart, sum(a) from st session(ts, 10s);
sql create stream stream7 trigger max_delay 1s into streamt7 as select _wstart, sum(a) from st session(ts, 10s);
sql create stream stream7 trigger max_delay 5s into streamt7 as select _wstart, sum(a) from st session(ts, 10s);
sql create stream stream8 trigger window_close into streamt8 as select _wstart, sum(a) from t1 session(ts, 10s);
sql create stream stream9 trigger max_delay 1s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s);
sql create stream stream9 trigger max_delay 5s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s);
sql create stream stream10 trigger window_close into streamt10 as select _wstart, sum(a) from t1 state_window(b);
sql create stream stream11 trigger max_delay 1s into streamt11 as select _wstart, sum(a) from t1 state_window(b);
sql create stream stream11 trigger max_delay 5s into streamt11 as select _wstart, sum(a) from t1 state_window(b);
run tsim/stream/checkTaskStatus.sim
@ -138,12 +138,12 @@ if $rows != 2 then
goto loop1
endi
print step 1 max delay 2s
print step 1 max delay 5s
sql create database test3 vgroups 4;
sql use test3;
sql create table t1(ts timestamp, a int, b int , c int, d double);
sql create stream stream13 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 interval(10s);
sql create stream stream13 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 interval(10s);
run tsim/stream/checkTaskStatus.sim
@ -172,8 +172,8 @@ $now02 = $data02
$now12 = $data12
print step1 max delay 2s......... sleep 3s
sleep 3000
print step1 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt13;
@ -188,7 +188,7 @@ if $data12 != $now12 then
return -1
endi
print step 2 max delay 2s
print step 2 max delay 5s
sql create database test4 vgroups 4;
sql use test4;
@ -197,7 +197,7 @@ sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,t
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
sql create stream stream14 trigger max_delay 2s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s);
sql create stream stream14 trigger max_delay 5s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s);
run tsim/stream/checkTaskStatus.sim
@ -234,8 +234,8 @@ $now12 = $data12
$now22 = $data22
$now32 = $data32
print step2 max delay 2s......... sleep 3s
sleep 3000
print step2 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt14 order by 2;
print $data00 $data01 $data02
@ -264,8 +264,8 @@ if $data32 != $now32 then
return -1
endi
print step2 max delay 2s......... sleep 3s
sleep 3000
print step2 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt14 order by 2;
print $data00 $data01 $data02
@ -294,12 +294,12 @@ if $data32 != $now32 then
return -1
endi
print step 2 max delay 2s
print step 2 max delay 5s
sql create database test15 vgroups 4;
sql use test15;
sql create table t1(ts timestamp, a int, b int , c int, d double);
sql create stream stream15 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s);
sql create stream stream15 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s);
run tsim/stream/checkTaskStatus.sim
@ -328,8 +328,8 @@ $now02 = $data02
$now12 = $data12
print step1 max delay 2s......... sleep 3s
sleep 3000
print step1 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt13;
@ -344,8 +344,8 @@ if $data12 != $now12 then
return -1
endi
print step1 max delay 2s......... sleep 3s
sleep 3000
print step1 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt13;
@ -362,12 +362,12 @@ endi
print session max delay over
print step 3 max delay 2s
print step 3 max delay 5s
sql create database test16 vgroups 4;
sql use test16;
sql create table t1(ts timestamp, a int, b int , c int, d double);
sql create stream stream16 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 state_window(a);
sql create stream stream16 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 state_window(a);
run tsim/stream/checkTaskStatus.sim
@ -396,8 +396,8 @@ $now02 = $data02
$now12 = $data12
print step1 max delay 2s......... sleep 3s
sleep 3000
print step1 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt13;
@ -412,8 +412,8 @@ if $data12 != $now12 then
return -1
endi
print step1 max delay 2s......... sleep 3s
sleep 3000
print step1 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt13;
@ -430,12 +430,12 @@ endi
print state max delay over
print step 4 max delay 2s
print step 4 max delay 5s
sql create database test17 vgroups 4;
sql use test17;
sql create table t1(ts timestamp, a int, b int , c int, d double);
sql create stream stream17 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9;
sql create stream stream17 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9;
run tsim/stream/checkTaskStatus.sim
@ -467,8 +467,8 @@ $now02 = $data02
$now12 = $data12
print step1 max delay 2s......... sleep 3s
sleep 3000
print step1 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt13;
@ -483,8 +483,8 @@ if $data12 != $now12 then
return -1
endi
print step1 max delay 2s......... sleep 3s
sleep 3000
print step1 max delay 5s......... sleep 6s
sleep 6000
sql select * from streamt13;

View File

@ -907,7 +907,7 @@ class TDTestCase:
## {. . .}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
tdSql.checkRows(13)
tdSql.checkRows(12)
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 5)
tdSql.checkData(2, 0, 10)
@ -920,7 +920,6 @@ class TDTestCase:
tdSql.checkData(9, 0, 15)
tdSql.checkData(10, 0, 15)
tdSql.checkData(11, 0, 15)
tdSql.checkData(12, 0, None)
## {} ...
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(next)")
@ -958,12 +957,10 @@ class TDTestCase:
## ..{.}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)")
tdSql.checkRows(5)
tdSql.checkRows(3)
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, 15)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
## ... {}
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)")
@ -1275,7 +1272,7 @@ class TDTestCase:
tdSql.checkData(8, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
tdSql.checkRows(13)
tdSql.checkRows(12)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
@ -1290,7 +1287,6 @@ class TDTestCase:
tdSql.checkData(9, 0, '2020-02-01 00:00:13.000')
tdSql.checkData(10, 0, '2020-02-01 00:00:14.000')
tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
tdSql.checkData(12, 0, '2020-02-01 00:00:16.000')
tdSql.checkData(0, 1, True)
tdSql.checkData(1, 1, False)
@ -1304,7 +1300,6 @@ class TDTestCase:
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, True)
tdSql.checkData(11, 1, False)
tdSql.checkData(12, 1, True)
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)")
tdSql.checkRows(6)
@ -1682,13 +1677,9 @@ class TDTestCase:
## | . | { | .} |
tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(next)")
tdSql.checkRows(6)
tdSql.checkRows(2)
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
tdSql.checkData(2, 0, None)
tdSql.checkData(3, 0, None)
tdSql.checkData(4, 0, None)
tdSql.checkData(5, 0, None)
# test fill linear
@ -2741,7 +2732,7 @@ class TDTestCase:
tdSql.checkData(4, i, 15)
tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)")
tdSql.checkRows(5)
tdSql.checkRows(3)
tdSql.checkCols(4)
for i in range (tdSql.queryCols):
@ -2837,7 +2828,7 @@ class TDTestCase:
# test fill next
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(next)")
tdSql.checkRows(19)
tdSql.checkRows(18)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-02 00:00:00.000')
@ -2860,7 +2851,6 @@ class TDTestCase:
tdSql.checkData(15, 2, None)
tdSql.checkData(16, 2, None)
tdSql.checkData(17, 2, None)
tdSql.checkData(18, 2, None)
tdSql.checkData(17, 0, '2020-02-02 00:00:17.000')
@ -3091,7 +3081,7 @@ class TDTestCase:
# test fill linear
tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(linear)")
tdSql.checkRows(18)
tdSql.checkRows(17)
tdSql.checkCols(3)
tdSql.checkData(0, 0, '2020-02-02 00:00:01.000')
@ -3113,9 +3103,8 @@ class TDTestCase:
tdSql.checkData(14, 2, None)
tdSql.checkData(15, 2, None)
tdSql.checkData(16, 2, None)
tdSql.checkData(17, 2, None)
tdSql.checkData(17, 0, '2020-02-02 00:00:18.000')
tdSql.checkData(16, 0, '2020-02-02 00:00:17.000')
tdLog.printNoPrefix("==========step13:test error cases")
@ -3231,7 +3220,7 @@ class TDTestCase:
tdSql.checkData(17, 1, True)
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
tdSql.checkRows(19)
tdSql.checkRows(18)
tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
tdSql.checkData(0, 1, True)
@ -3254,12 +3243,9 @@ class TDTestCase:
tdSql.checkData(15, 2, 15)
tdSql.checkData(16, 2, 17)
tdSql.checkData(17, 2, 17)
tdSql.checkData(18, 2, None)
tdSql.checkData(17, 0, '2020-02-01 00:00:17.000')
tdSql.checkData(17, 1, False)
tdSql.checkData(18, 0, '2020-02-01 00:00:18.000')
tdSql.checkData(18, 1, True)
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
tdSql.checkRows(17)
@ -3376,24 +3362,24 @@ class TDTestCase:
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
tdSql.checkRows(57)
for i in range(0, 19):
tdSql.checkRows(48)
for i in range(0, 14):
tdSql.checkData(i, 0, 'ctb1')
for i in range(19, 38):
for i in range(14, 30):
tdSql.checkData(i, 0, 'ctb2')
for i in range(38, 57):
for i in range(30, 48):
tdSql.checkData(i, 0, 'ctb3')
tdSql.checkData(0, 1, '2020-02-01 00:00:00.000')
tdSql.checkData(18, 1, '2020-02-01 00:00:18.000')
tdSql.checkData(13, 1, '2020-02-01 00:00:13.000')
tdSql.checkData(19, 1, '2020-02-01 00:00:00.000')
tdSql.checkData(37, 1, '2020-02-01 00:00:18.000')
tdSql.checkData(14, 1, '2020-02-01 00:00:00.000')
tdSql.checkData(29, 1, '2020-02-01 00:00:15.000')
tdSql.checkData(38, 1, '2020-02-01 00:00:00.000')
tdSql.checkData(56, 1, '2020-02-01 00:00:18.000')
tdSql.checkData(30, 1, '2020-02-01 00:00:00.000')
tdSql.checkData(47, 1, '2020-02-01 00:00:17.000')
for i in range(0, 2):
tdSql.checkData(i, 3, 1)
@ -3404,33 +3390,24 @@ class TDTestCase:
for i in range(8, 14):
tdSql.checkData(i, 3, 13)
for i in range(14, 19):
tdSql.checkData(i, 3, None)
for i in range(19, 23):
for i in range(14, 18):
tdSql.checkData(i, 3, 3)
for i in range(23, 29):
for i in range(18, 24):
tdSql.checkData(i, 3, 9)
for i in range(29, 35):
for i in range(24, 30):
tdSql.checkData(i, 3, 15)
for i in range(35, 38):
tdSql.checkData(i, 3, None)
for i in range(38, 44):
for i in range(30, 36):
tdSql.checkData(i, 3, 5)
for i in range(44, 50):
for i in range(36, 42):
tdSql.checkData(i, 3, 11)
for i in range(50, 56):
for i in range(42, 48):
tdSql.checkData(i, 3, 17)
for i in range(56, 57):
tdSql.checkData(i, 3, None)
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
tdSql.checkRows(39)
@ -3473,7 +3450,7 @@ class TDTestCase:
tdSql.checkRows(90)
tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
tdSql.checkRows(171)
tdSql.checkRows(90)
tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
tdSql.checkRows(9)
@ -3490,7 +3467,7 @@ class TDTestCase:
tdSql.checkRows(48)
tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
tdSql.checkRows(57)
tdSql.checkRows(48)
tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
tdSql.checkRows(39)
@ -4386,7 +4363,7 @@ class TDTestCase:
tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)")
tdSql.checkRows(11)
tdSql.checkRows(9)
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, False)
@ -4396,8 +4373,6 @@ class TDTestCase:
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, False)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, True)
tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 2, 3)
@ -4408,13 +4383,11 @@ class TDTestCase:
tdSql.checkData(6, 2, 8)
tdSql.checkData(7, 2, 8)
tdSql.checkData(8, 2, 9)
tdSql.checkData(9, 2, None)
tdSql.checkData(10, 2, None)
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)")
tdSql.checkRows(11)
tdSql.checkRows(9)
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, False)
@ -4424,9 +4397,6 @@ class TDTestCase:
tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, False)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, True)
tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 2, 3)
@ -4437,8 +4407,6 @@ class TDTestCase:
tdSql.checkData(6, 2, 8)
tdSql.checkData(7, 2, 8)
tdSql.checkData(8, 2, 9)
tdSql.checkData(9, 2, None)
tdSql.checkData(10, 2, None)
# super table
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
@ -4475,7 +4443,7 @@ class TDTestCase:
tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
tdSql.checkRows(9)
tdSql.checkRows(8)
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
@ -4484,7 +4452,6 @@ class TDTestCase:
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, False)
tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, True)
tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 2, 9)
@ -4494,12 +4461,11 @@ class TDTestCase:
tdSql.checkData(5, 2, 13)
tdSql.checkData(6, 2, 13)
tdSql.checkData(7, 2, 15)
tdSql.checkData(8, 2, None)
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
tdSql.checkRows(9)
tdSql.checkRows(8)
tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True)
@ -4508,7 +4474,6 @@ class TDTestCase:
tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, False)
tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, True)
tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 2, 9)
@ -4518,37 +4483,36 @@ class TDTestCase:
tdSql.checkData(5, 2, 13)
tdSql.checkData(6, 2, 13)
tdSql.checkData(7, 2, 15)
tdSql.checkData(8, 2, None)
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
tdSql.checkRows(18)
for i in range(0, 9):
tdSql.checkRows(15)
for i in range(0, 7):
tdSql.checkData(i, 0, 'ctb1_null')
for i in range(9, 18):
for i in range(7, 15):
tdSql.checkData(i, 0, 'ctb2_null')
tdSql.checkData(0, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(8, 1, '2020-02-01 00:00:17.000')
tdSql.checkData(6, 1, '2020-02-01 00:00:13.000')
tdSql.checkData(9, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(17, 1, '2020-02-01 00:00:17.000')
tdSql.checkData(7, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(14, 1, '2020-02-01 00:00:15.000')
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
tdSql.checkRows(18)
for i in range(0, 9):
tdSql.checkRows(15)
for i in range(0, 7):
tdSql.checkData(i, 0, 'ctb1_null')
for i in range(9, 18):
for i in range(7, 15):
tdSql.checkData(i, 0, 'ctb2_null')
tdSql.checkData(0, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(8, 1, '2020-02-01 00:00:17.000')
tdSql.checkData(6, 1, '2020-02-01 00:00:13.000')
tdSql.checkData(9, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(17, 1, '2020-02-01 00:00:17.000')
tdSql.checkData(7, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(14, 1, '2020-02-01 00:00:15.000')
# fill linear
# normal table

View File

@ -0,0 +1,132 @@
from util.log import *
from util.cases import *
from util.sql import *
from util.common import tdCom
import numpy as np
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.dbname = "db"
self.rowNum = 10
self.ts = 1537146000000
def notConditionTest(self):
dbname = "nottest"
stbname = "st1"
tdsql = tdCom.newTdSql()
tdsql.execute(f"create database if not exists {dbname}")
stype = ["INT", "INT UNSIGNED", "BIGINT", "BIGINT UNSIGNED", "DOUBLE", "FLOAT", "SMALLINT", "SMALLINT UNSIGNED", "TINYINT", "TINYINT UNSIGNED"]
for type_name in stype:
tdsql.execute(f"drop table if exists {dbname}.{stbname}")
tdsql.execute(f"create table if not exists {dbname}.{stbname} (ts timestamp, v1 {type_name}) tags(t1 {type_name})")
tdsql.execute(f"insert into {dbname}.sub_1 using {dbname}.{stbname} tags(1) values({self.ts}, 10)")
tdsql.execute(f"insert into {dbname}.sub_2 using {dbname}.{stbname} tags(2) values({self.ts + 1000}, 20)")
tdsql.execute(f"insert into {dbname}.sub_3 using {dbname}.{stbname} tags(3) values({self.ts + 2000}, 30)")
# Test case 1: NOT IN
tdsql.query(f"select t1, * from {dbname}.{stbname} where t1 not in (1, 2) order by t1")
tdsql.checkRows(1)
tdsql.checkData(0, 0, 3)
# Test case 2: NOT BETWEEN
tdsql.query(f"select * from {dbname}.{stbname} where v1 not between 10 and 20 order by t1")
tdsql.checkRows(1)
tdsql.checkData(0, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not(v1 not between 10 and 20) order by t1")
tdsql.checkRows(2)
# Test case 4: NOT EQUAL
tdsql.query(f"select * from {dbname}.{stbname} where v1 != 20 order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 30)
# Test case 8: NOT (v1 < 20 OR v1 > 30)
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 > 30) order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 20)
tdsql.checkData(1, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 >= 30) order by t1")
tdsql.checkRows(1)
# Test case 9: NOT (t1 != 1)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) order by t1")
tdsql.checkRows(1)
tdsql.checkData(0, 1, 10)
tdsql.query(f"select * from {dbname}.{stbname} where (t1 != 1) or not (v1 == 20) order by t1")
tdsql.checkRows(3)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 20)
tdsql.checkData(2, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not((t1 != 1) or not (v1 == 20)) order by t1")
tdsql.checkRows(0)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 20) order by t1")
tdsql.checkRows(0)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1) and not (v1 != 20)) order by t1")
tdsql.checkRows(3)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 10) order by t1")
tdsql.checkRows(1)
tdsql.checkData(0, 1, 10)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 > 2) order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 20)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 == 2) order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 > 10 and v1 < 30) order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1")
tdsql.checkRows(1)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1")
tdsql.checkRows(2)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1")
tdsql.checkRows(2)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1")
tdsql.checkRows(1)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1")
tdsql.checkRows(1)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1")
tdsql.checkRows(1)
def run(self):
dbname = "db"
tdSql.prepare()
self.notConditionTest()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,66 @@
import random
import string
from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
from util.sqlset import *
import numpy as np
class TDTestCase:
updatecfgDict = {'slowLogThresholdTest': ''}
updatecfgDict["slowLogThresholdTest"] = 0
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def getPath(self, tool="taosBenchmark"):
if (platform.system().lower() == 'windows'):
tool = tool + ".exe"
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
paths = []
for root, dirs, files in os.walk(projPath):
if ((tool) in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
paths.append(os.path.join(root, tool))
break
if (len(paths) == 0):
tdLog.exit("taosBenchmark not found!")
return
else:
tdLog.info("taosBenchmark found in %s" % paths[0])
return paths[0]
def taosBenchmark(self, param):
binPath = self.getPath()
cmd = f"{binPath} {param}"
tdLog.info(cmd)
os.system(cmd)
def testSlowQuery(self):
self.taosBenchmark(" -d db -t 2 -v 2 -n 1000000 -y")
sql = "select count(*) from db.meters"
for i in range(10):
tdSql.query(sql)
tdSql.checkData(0, 0, 2 * 1000000)
def run(self):
self.testSlowQuery()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -24,45 +24,6 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
# def consume_TS_4674_Test(self):
#
# os.system("nohup taosBenchmark -y -B 1 -t 4 -S 1000 -n 1000000 -i 1000 -v 1 -a 3 > /dev/null 2>&1 &")
# time.sleep()
# tdSql.execute(f'create topic topic_all with meta as database test')
# consumer_dict = {
# "group.id": "g1",
# "td.connect.user": "root",
# "td.connect.pass": "taosdata",
# "auto.offset.reset": "earliest",
# }
# consumer = Consumer(consumer_dict)
#
# try:
# consumer.subscribe(["topic_all"])
# except TmqError:
# tdLog.exit(f"subscribe error")
#
# try:
# while True:
# res = consumer.poll(5)
# if not res:
# print(f"null")
# continue
# val = res.value()
# if val is None:
# print(f"null")
# continue
# cnt = 0;
# for block in val:
# cnt += len(block.fetchall())
#
# print(f"block {cnt} rows")
#
# finally:
# consumer.close()
def get_leader(self):
tdLog.debug("get leader")
tdSql.query("show vnodes")
@ -74,19 +35,20 @@ class TDTestCase:
def balance_vnode(self):
leader_before = self.get_leader()
tdSql.query("balance vgroup leader")
while True:
leader_after = -1
tdSql.query("balance vgroup leader")
tdLog.debug("balancing vgroup leader")
while True:
tdLog.debug("get new vgroup leader")
leader_after = self.get_leader()
if leader_after != -1 :
break;
break
else:
time.sleep(1)
if leader_after != leader_before:
tdLog.debug("leader changed")
break;
break
else :
time.sleep(1)
@ -115,7 +77,7 @@ class TDTestCase:
except TmqError:
tdLog.exit(f"subscribe error")
cnt = 0;
cnt = 0
balance = False
try:
while True:

View File

@ -92,7 +92,7 @@ class TDTestCase:
def run(self):
for fill_history_value in [None, 1]:
for watermark in [None, random.randint(20, 30)]:
self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value)
self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(5, 8)}s", fill_history_value=fill_history_value)
def stop(self):
tdSql.close()

View File

@ -245,6 +245,8 @@ python3 ./test.py -f 2-query/min.py -P
python3 ./test.py -f 2-query/min.py -P -R
python3 ./test.py -f 2-query/normal.py -P
python3 ./test.py -f 2-query/normal.py -P -R
python3 ./test.py -f 2-query/not.py -P
python3 ./test.py -f 2-query/not.py -P -R
python3 ./test.py -f 2-query/mode.py -P
python3 ./test.py -f 2-query/mode.py -P -R
python3 ./test.py -f 2-query/Now.py -P
@ -427,6 +429,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 2
python3 ./test.py -f 2-query/max.py -P -Q 2
python3 ./test.py -f 2-query/min.py -P -Q 2
python3 ./test.py -f 2-query/normal.py -P -Q 2
python3 ./test.py -f 2-query/not.py -P -Q 2
python3 ./test.py -f 2-query/mode.py -P -Q 2
python3 ./test.py -f 2-query/count.py -P -Q 2
python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 2
@ -526,6 +529,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 3
python3 ./test.py -f 2-query/max.py -P -Q 3
python3 ./test.py -f 2-query/min.py -P -Q 3
python3 ./test.py -f 2-query/normal.py -P -Q 3
python3 ./test.py -f 2-query/not.py -P -Q 3
python3 ./test.py -f 2-query/mode.py -P -Q 3
python3 ./test.py -f 2-query/count.py -P -Q 3
python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 3
@ -624,6 +628,7 @@ python3 ./test.py -f 2-query/Today.py -P -Q 4
python3 ./test.py -f 2-query/max.py -P -Q 4
python3 ./test.py -f 2-query/min.py -P -Q 4
python3 ./test.py -f 2-query/normal.py -P -Q 4
python3 ./test.py -f 2-query/not.py -P -Q 4
python3 ./test.py -f 2-query/mode.py -P -Q 4
python3 ./test.py -f 2-query/count.py -P -Q 4
python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 4

View File

@ -925,3 +925,4 @@ python3 ./test.py -f 99-TDcase/TD-20582.py
python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3
python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3
python3 ./test.py -f eco-system/meta/database/keep_time_offset.py
python3 ./test.py -f 2-query/slow_query_basic.py

View File

@ -8,15 +8,14 @@
"confirm_parameter_prompt": "no",
"continue_if_fail": "yes",
"databases": "dbrate",
"query_times": 20,
"query_times": 5,
"query_mode": "taosc",
"specified_table_query": {
"query_interval": 0,
"concurrent": 10,
"threads": 10,
"sqls": [
{
"sql": "select count(*) from meters",
"result": "./query_result.txt"
"sql": "select count(*) from meters"
}
]
}

View File

@ -16,18 +16,20 @@
{
"dbinfo": {
"name": "dbrate",
"vgroups": 1,
"drop": "yes",
"vgroups": 2
"wal_retention_size": 1,
"wal_retention_period": 1
},
"super_tables": [
{
"name": "meters",
"child_table_exists": "no",
"childtable_count": 10,
"childtable_count": 1,
"childtable_prefix": "d",
"insert_mode": "@STMT_MODE",
"interlace_rows": @INTERLACE_MODE,
"insert_rows": 100000,
"insert_rows": 10000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"auto_create_table": "no",

View File

@ -34,28 +34,6 @@ def exec(command, show=True):
print(f"exec {command}\n")
return os.system(command)
# run return output and error
def run(command, timeout = 60, show=True):
if(show):
print(f"run {command} timeout={timeout}s\n")
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait(timeout)
output = process.stdout.read().decode(encoding="gbk")
error = process.stderr.read().decode(encoding="gbk")
return output, error
# return list after run
def runRetList(command, timeout=10, first=True):
output,error = run(command, timeout)
if first:
return output.splitlines()
else:
return error.splitlines()
def readFileContext(filename):
file = open(filename)
context = file.read()
@ -78,6 +56,27 @@ def appendFileContext(filename, context):
except:
print(f"appand file error context={context} .")
# run return output and error
def run(command, show=True):
# out to file
out = "out.txt"
err = "err.txt"
ret = exec(command + f" 1>{out} 2>{err}", True)
# read from file
output = readFileContext(out)
error = readFileContext(err)
return output, error
# return list after run
def runRetList(command, first=True):
output,error = run(command)
if first:
return output.splitlines()
else:
return error.splitlines()
def getFolderSize(folder):
total_size = 0
for dirpath, dirnames, filenames in os.walk(folder):
@ -134,8 +133,6 @@ def getMatch(datatype, algo):
def generateJsonFile(stmt, interlace):
print(f"doTest stmt: {stmt} interlace_rows={interlace}\n")
# replace datatype
context = readFileContext(templateFile)
# replace compress
@ -198,16 +195,27 @@ def findContextValue(context, label):
def writeTemplateInfo(resultFile):
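# extract key benchmark parameters from the json template and append them to the result file header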
# create info
context = readFileContext(templateFile)
context = readFileContext(templateFile)
vgroups = findContextValue(context, "vgroups")
childCount = findContextValue(context, "childtable_count")
insertRows = findContextValue(context, "insert_rows")
line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n"
bindVGroup = findContextValue(context, "thread_bind_vgroup")
nThread = findContextValue(context, "thread_count")
batch = findContextValue(context, "num_of_records_per_req")
if bindVGroup.lower().find("yes") != -1:
nThread = vgroups
line = f"thread_bind_vgroup = {bindVGroup}\n"
line += f"vgroups = {vgroups}\n"
line += f"childtable_count = {childCount}\n"
line += f"insert_rows = {insertRows}\n"
line += f"insertThreads = {nThread}\n"
line += f"batchSize = {batch}\n\n"
print(line)
appendFileContext(resultFile, line)
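# With thread_bind_vgroup = yes the insert thread count is forced to the
# vgroup count, so the block appended to the result file looks roughly like
# this (sample values assumed, following the template above):
#
#   thread_bind_vgroup = yes
#   vgroups = 2
#   childtable_count = 1
#   insert_rows = 10000
#   insertThreads = 2
#   batchSize = 10000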
def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
def totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed):
global Number
# flush
command = 'taos -s "flush database dbrate;"'
@ -220,7 +228,7 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
# read compress rate
command = 'taos -s "show table distributed dbrate.meters\G;"'
rets = runRetList(command)
print(rets)
#print(rets)
str1 = rets[5]
arr = str1.split(" ")
@ -234,50 +242,86 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
str2 = arr[6]
pos = str2.find("=[")
rate = str2[pos+2:]
print("rate =" + rate)
# total data file size
#dataSize = getFolderSize(f"{dataDir}/vnode/")
#dataSizeMB = int(dataSize/1024/1024)
# append to file
# %("No", "stmtMode", "interlaceRows", "spent", "spent-real", "writeSpeed", "write-real", "query-QPS", "dataSize", "rate")
Number += 1
context = "%10s %10s %15s %10s %10s %30s %15s\n"%( Number, stmt, interlace, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed)
context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%(
Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " r/s", writeReal + " r/s",
min, avg, p90, p99, max + "ms",
querySpeed, str(totalSize) + " MB", rate + "%")
showLog(context)
appendFileContext(resultFile, context)
def cutEnd(line, start, endChar):
pos = line.find(endChar, start)
if pos == -1:
return line[start:]
return line[start : pos]
def findValue(context, pos, key, endChar, command):
pos = context.find(key, pos)
if pos == -1:
print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
exit(1)
pos += len(key)
value = cutEnd(context, pos, endChar)
return (value, pos)
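# cutEnd()/findValue() are small cursor-based scanners over raw benchmark
# output: findValue() jumps past a key, cuts at the next terminator, and
# returns the new cursor so calls chain left to right. Worked example on an
# assumed delay line:
#
#   line = "insert delay, min: 1.20ms, avg: 2.31ms, p90: 4.00ms, p99: 6.50ms, max: 9.99ms"
#   v, pos = findValue(line, 0, "min: ", ",", "demo")    # v == "1.20ms"
#   v, pos = findValue(line, pos, "avg: ", ",", "demo")  # v == "2.31ms"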
def testWrite(jsonFile):
command = f"taosBenchmark -f {jsonFile}"
output, context = run(command)
print(context)
# SUCC: Spent 0.960248 (real 0.947154) seconds to insert rows: 100000 with 1 thread(s) into dbrate 104139.76 (real 105579.45) records/second
# find second real
pos = context.find("(real ")
# spent
key = "Spent "
pos = -1
pos1 = 0
while pos1 != -1: # find last "Spent "
pos1 = context.find(key, pos1)
if pos1 != -1:
pos = pos1 # update last found
pos1 += len(key)
if pos == -1:
print(f"error, run command={command} output not found first \"(real\" keyword. error={context}")
print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
exit(1)
pos = context.find("(real ", pos + 5)
pos += len(key)
spent = cutEnd(context, pos, ".")
# spent-real
spentReal, pos = findValue(context, pos, "(real ", ".", command)
# writeSpeed
key = "into "
pos = context.find(key, pos)
if pos == -1:
print(f"error, run command={command} output not found second \"(real\" keyword. error={context}")
exit(1)
pos += 5
length = len(context)
while pos < length and context[pos] == ' ':
pos += 1
end = context.find(".", pos)
if end == -1:
print(f"error, run command={command} output not found second \".\" keyword. error={context}")
print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
exit(1)
pos += len(key)
writeSpeed, pos = findValue(context, pos, " ", ".", command)
# writeReal
writeReal, pos = findValue(context, pos, "(real ", ".", command)
speed = context[pos: end]
#print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n")
return speed
# delay
min, pos = findValue(context, pos, "min: ", ",", command)
avg, pos = findValue(context, pos, "avg: ", ",", command)
p90, pos = findValue(context, pos, "p90: ", ",", command)
p99, pos = findValue(context, pos, "p99: ", ",", command)
max, pos = findValue(context, pos, "max: ", "ms", command)
return (spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max)
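# Traced against the sample line in the comment above, the returned tuple
# starts ("0", "0", "104139", "105579", ...): cutEnd() stops at the first ".",
# so the spent times and throughput figures are truncated to their integer
# parts before totalCompressRate() formats them into the result table.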
def testQuery():
command = f"taosBenchmark -f json/query.json"
lines = runRetList(command, 60000)
lines = runRetList(command)
# INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485
speed = None
@ -308,13 +352,13 @@ def doTest(stmt, interlace, resultFile):
# run taosBenchmark
t1 = time.time()
writeSpeed = testWrite(jsonFile)
spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max = testWrite(jsonFile)
t2 = time.time()
# total write speed
querySpeed = testQuery()
# total compress rate
totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed)
totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed)
def main():
@ -333,7 +377,17 @@ def main():
# json info
writeTemplateInfo(resultFile)
# head
context = "\n%10s %10s %15s %10s %10s %30s %15s\n"%("No", "stmtMode", "interlaceRows", "dataSize", "rate", "writeSpeed", "query-QPS")
'''
context = "%3s %8s %10s %10s %10s %15s %15s %10s %10s %10s %10s %10s %8s %8s %8s\n"%(
"No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real",
"min", "avg", "p90", "p99", "max",
"query-QPS", "dataSize", "rate")
'''
context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%(
"No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real",
"min", "avg", "p90", "p99", "max",
"query-QPS", "dataSize", "rate")
appendFileContext(resultFile, context)

View File

@ -8,15 +8,14 @@
"confirm_parameter_prompt": "no",
"continue_if_fail": "yes",
"databases": "dbrate",
"query_times": 20,
"query_times": 5,
"query_mode": "taosc",
"specified_table_query": {
"query_interval": 0,
"concurrent": 10,
"threads": 10,
"sqls": [
{
"sql": "select * from meters",
"result": "./query_res0.txt"
"sql": "select * from meters"
}
]
}

View File

@ -34,28 +34,6 @@ def exec(command, show=True):
print(f"exec {command}\n")
return os.system(command)
# run command and return (output, error)
def run(command, timeout = 60, show=True):
if(show):
print(f"run {command} timeout={timeout}s\n")
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait(timeout)
output = process.stdout.read().decode(encoding="gbk")
error = process.stderr.read().decode(encoding="gbk")
return output, error
# return list after run
def runRetList(command, timeout=10, first=True):
output,error = run(command, timeout)
if first:
return output.splitlines()
else:
return error.splitlines()
def readFileContext(filename):
file = open(filename)
context = file.read()
@ -78,6 +56,27 @@ def appendFileContext(filename, context):
except:
print(f"appand file error context={context} .")
# run command and return (output, error)
def run(command, show=True):
# out to file
out = "out.txt"
err = "err.txt"
ret = exec(command + f" 1>{out} 2>{err}", True)
# read from file
output = readFileContext(out)
error = readFileContext(err)
return output, error
# return list after run
def runRetList(command, first=True):
output,error = run(command)
if first:
return output.splitlines()
else:
return error.splitlines()
def getFolderSize(folder):
total_size = 0
for dirpath, dirnames, filenames in os.walk(folder):
@ -134,8 +133,6 @@ def getMatch(datatype, algo):
def generateJsonFile(algo):
print(f"doTest algo: {algo} \n")
# replace datatype
context = readFileContext(templateFile)
# replace compress
@ -192,56 +189,61 @@ def findContextValue(context, label):
ends = [',','}',']', 0]
while context[end] not in ends:
end += 1
print(f"start = {start} end={end}\n")
return context[start:end]
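# Only the tail of findContextValue() is visible in this hunk; a plausible
# reconstruction of the whole helper (an assumption, not the committed code):
#
#   def findContextValue(context, label):
#       # locate  "label": value  in the raw JSON text, return value as a string
#       pos = context.find(f'"{label}"')
#       pos = context.find(":", pos) + 1
#       while context[pos] in ' "':
#           pos += 1
#       start = end = pos
#       while end < len(context) and context[end] not in (',', '}', ']', '"'):
#           end += 1
#       return context[start:end]
#
# (The committed ends list includes 0, which a str comparison never matches;
# an explicit length check like the one above is the usual end-of-string guard.)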
def writeTemplateInfo(resultFile):
# create info
context = readFileContext(templateFile)
dbname = findContextValue(context, "name")
vgroups = findContextValue(context, "vgroups")
childCount = findContextValue(context, "childtable_count")
insertRows = findContextValue(context, "insert_rows")
line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n"
print(line)
appendFileContext(resultFile, line)
return dbname
def totalCompressRate(algo, resultFile, writeSpeed, querySpeed):
global Number
# flush
command = 'taos -s "flush database dbrate;"'
rets = exec(command)
command = 'taos -s "compact database dbrate;"'
rets = exec(command)
waitCompactFinish(60)
loop = 30
# read compress rate
command = 'taos -s "show table distributed dbrate.meters\G;"'
rets = runRetList(command)
print(rets)
str1 = rets[5]
arr = str1.split(" ")
while loop > 0:
loop -= 1
# Total_Size KB
str2 = arr[2]
pos = str2.find("=[")
totalSize = int(float(str2[pos+2:])/1024)
# flush database
command = 'taos -s "flush database dbrate;"'
exec(command)
time.sleep(1)
# Compression_Ratio
str2 = arr[6]
pos = str2.find("=[")
rate = str2[pos+2:]
print("rate =" + rate)
# read compress rate
command = 'taos -s "show table distributed dbrate.meters\G;"'
rets = runRetList(command)
print(rets)
# total data file size
#dataSize = getFolderSize(f"{dataDir}/vnode/")
#dataSizeMB = int(dataSize/1024/1024)
str1 = rets[5]
arr = str1.split(" ")
# append to file
# Total_Size KB
str2 = arr[2]
pos = str2.find("=[")
totalSize = int(float(str2[pos+2:])/1024)
# Compression_Ratio
str2 = arr[6]
pos = str2.find("=[")
rate = str2[pos+2:]
print("rate =" + rate)
if rate != "0.00":
break
# total data file size
#dataSize = getFolderSize(f"{dataDir}/vnode/")
#dataSizeMB = int(dataSize/1024/1024)
# append to file
Number += 1
context = "%10s %10s %10s %10s %30s %15s\n"%( Number, algo, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed)
showLog(context)
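# The retry loop above exists because the compression ratio only materializes
# once the flush has reached disk: flush, sleep 1s, re-read "show table
# distributed", and stop when Compression_Ratio leaves "0.00" or 30 tries
# elapse. Factored out, the pattern is (a sketch, not the committed code):
#
#   def waitForRate(db="dbrate", tries=30):
#       rate = "0.00"
#       for _ in range(tries):
#           exec(f'taos -s "flush database {db};"')
#           time.sleep(1)
#           rets = runRetList(f'taos -s "show table distributed {db}.meters\\G;"')
#           str2 = rets[5].split(" ")[6]          # same parse as above
#           rate = str2[str2.find("=[") + 2:]
#           if rate != "0.00":
#               break
#       return rate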
@ -273,18 +275,22 @@ def testWrite(jsonFile):
speed = context[pos: end]
#print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n")
# flush database
command = 'taos -s "flush database dbrate;"'
exec(command)
return speed
def testQuery():
command = f"taosBenchmark -f json/query.json"
lines = runRetList(command, 60000)
lines = runRetList(command)
# INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485
speed = None
for i in range(20, len(lines)):
for i in range(0, len(lines)):
# find second real
context = lines[i]
pos = context.find("the QPS of all threads:")
context = lines[26]
if pos == -1 :
continue
pos += 24
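# Offset check: len("the QPS of all threads:") is 23, so pos += 24 also skips
# the space before the number. On the sample line above:
#
#   line = "INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485"
#   pos = line.find("the QPS of all threads:") + 24
#   line[pos:]   # -> "1.485"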
@ -300,8 +306,6 @@ def testQuery():
def doTest(algo, resultFile):
print(f"doTest algo: {algo} \n")
#cleanAndStartTaosd()
# json
jsonFile = generateJsonFile(algo)