Merge branch '3.0' into feat/TS-5215-2

Minglei Jin 2024-10-21 10:48:37 +08:00
commit 4401b5e568
67 changed files with 1417 additions and 648 deletions

View File

@@ -169,11 +169,48 @@ ELSE ()
         SET(COMPILER_SUPPORT_AVX512VL false)
     ELSE()
         CHECK_C_COMPILER_FLAG("-mfma" COMPILER_SUPPORT_FMA)
-        CHECK_C_COMPILER_FLAG("-mavx" COMPILER_SUPPORT_AVX)
-        CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
        CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
        CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
        CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
+        INCLUDE(CheckCSourceRuns)
+        SET(CMAKE_REQUIRED_FLAGS "-mavx")
+        check_c_source_runs("
+        #include <immintrin.h>
+        int main() {
+          __m256d a, b, c;
+          double buf[4] = {0};
+          a = _mm256_loadu_pd(buf);
+          b = _mm256_loadu_pd(buf);
+          c = _mm256_add_pd(a, b);
+          _mm256_storeu_pd(buf, c);
+          for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
+            if (buf[i] != 0) {
+              return 1;
+            }
+          }
+          return 0;
+        }
+        " COMPILER_SUPPORT_AVX)
+        SET(CMAKE_REQUIRED_FLAGS "-mavx2")
+        check_c_source_runs("
+        #include <immintrin.h>
+        int main() {
+          __m256i a, b, c;
+          int buf[8] = {0};
+          a = _mm256_loadu_si256((__m256i *)buf);
+          b = _mm256_loadu_si256((__m256i *)buf);
+          c = _mm256_and_si256(a, b);
+          _mm256_storeu_si256((__m256i *)buf, c);
+          for (int i = 0; i < sizeof(buf) / sizeof(buf[0]); ++i) {
+            if (buf[i] != 0) {
+              return 1;
+            }
+          }
+          return 0;
+        }
+        " COMPILER_SUPPORT_AVX2)
    ENDIF()
 IF (COMPILER_SUPPORT_SSE42)

View File

@@ -153,7 +153,7 @@ SELECT * from information_schema.`ins_streams`;
 Because window closing is determined by event time, if the event stream is interrupted or persistently delayed, the event time cannot advance, and the latest results may never be produced.
-For this reason, stream computing provides the MAX_DELAY trigger mode, which combines event time with processing time.
+For this reason, stream computing provides the MAX_DELAY trigger mode, which combines event time with processing time. The minimum MAX_DELAY is 5s; specifying a value below 5s causes an error when the stream is created.
 In MAX_DELAY mode, computation is triggered immediately when a window closes. In addition, after data is written, computation is also triggered as soon as the elapsed time exceeds the duration specified by max delay.
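For illustration, a stream using this trigger mode might look like the following hedged sketch. The stream name, target table, and `meters` source super table are hypothetical examples, not part of this commit; `TRIGGER MAX_DELAY` is TDengine's documented stream option.

```sql
-- Triggers when a window closes, or at most 5s after data is written.
-- A smaller value, e.g. TRIGGER MAX_DELAY 3s, would now be rejected at CREATE STREAM time.
CREATE STREAM power_stream TRIGGER MAX_DELAY 5s
INTO power_stream_output AS
SELECT _wstart, COUNT(*) FROM meters PARTITION BY tbname INTERVAL(10s);
```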

View File

@@ -10,7 +10,7 @@ description: 3.3.3.0 Release Notes
 4. TDengine supports the macOS client of the enterprise edition [Enterprise Edition]
 5. taosX logs are no longer written to syslog by default [Enterprise Edition]
 6. The server records all slow-query information in the log database
-7. The server version number is added to the results of the show cluster machines query
+7. The server version number is added to the results of the show cluster machines query [Enterprise Edition]
 8. The reserved keywords LEVEL/ENCODE/COMPRESS are removed and can now be used as column, table, or database names
 9. Dynamic modification of the temporary directory is prohibited
 10. round function: supports specifying the rounding precision
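Item 10 refers to round gaining an optional precision argument. A hedged sketch (table and column names are hypothetical; exact behavior should be checked against the 3.3.3.0 function docs):

```sql
-- Round the current column to 2 decimal places; e.g. 8.735 is expected to yield 8.74.
SELECT ROUND(current, 2) FROM meters;
```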

View File

@@ -236,7 +236,7 @@ typedef struct {
   void*      vnode;  // not available to encoder and decoder
   FTbSink*   tbSinkFunc;
   STSchema*  pTSchema;
-  SSHashObj* pTblInfo;
+  SSHashObj* pTbInfo;
 } STaskSinkTb;

 typedef struct {
@@ -754,7 +754,7 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
 int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
 int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask);
 void    streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
-void    streamMetaAcquireOneTask(SStreamTask* pTask);
+int32_t streamMetaAcquireOneTask(SStreamTask* pTask);
 void    streamMetaClear(SStreamMeta* pMeta);
 void    streamMetaInitBackend(SStreamMeta* pMeta);
 int32_t streamMetaCommit(SStreamMeta* pMeta);

View File

@@ -50,6 +50,7 @@ typedef struct {
   int32_t  rollPeriod;  // secs
   int64_t  retentionSize;
   int64_t  segSize;
+  int64_t  committed;
   EWalType level;  // wal level
   int32_t  encryptAlgorithm;
   char     encryptKey[ENCRYPT_KEY_LEN + 1];

View File

@@ -84,7 +84,7 @@ void taos_cleanup(void) {
   taosCloseRef(id);

   nodesDestroyAllocatorSet();
-  cleanupAppInfo();
+  // cleanupAppInfo();
   rpcCleanup();
   tscDebug("rpc cleanup");

View File

@@ -2625,6 +2625,8 @@ int32_t dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf
           uError("func %s failed to convert to ucs charset since %s", __func__, tstrerror(code));
           lino = __LINE__;
           goto _exit;
+        } else {  // reset the length value
+          code = TSDB_CODE_SUCCESS;
         }
         len += tsnprintf(dumpBuf + len, size - len, " %15s |", pBuf);
         if (len >= size - 1) goto _exit;

View File

@@ -183,6 +183,7 @@ static void dmSetSignalHandle() {
   }
 #endif
 }

+extern bool generateNewMeta;

 static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
   global.startTime = taosGetTimestampMs();
@@ -222,6 +223,8 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
       global.dumpSdb = true;
     } else if (strcmp(argv[i], "-dTxn") == 0) {
       global.deleteTrans = true;
+    } else if (strcmp(argv[i], "-r") == 0) {
+      generateNewMeta = true;
     } else if (strcmp(argv[i], "-E") == 0) {
       if (i < argc - 1) {
         if (strlen(argv[++i]) >= PATH_MAX) {

View File

@@ -123,6 +123,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
   int32_t    code = 0;
   SStatusReq req = {0};

+  dDebug("send status req to mnode, statusSeq:%d, begin to mgnt lock", pMgmt->statusSeq);
   (void)taosThreadRwlockRdlock(&pMgmt->pData->lock);
   req.sver = tsVersion;
   req.dnodeVer = pMgmt->pData->dnodeVer;
@@ -161,14 +162,17 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
   memcpy(req.clusterCfg.charset, tsCharset, TD_LOCALE_LEN);
   (void)taosThreadRwlockUnlock(&pMgmt->pData->lock);

+  dDebug("send status req to mnode, statusSeq:%d, begin to get vnode loads", pMgmt->statusSeq);
   SMonVloadInfo vinfo = {0};
   (*pMgmt->getVnodeLoadsFp)(&vinfo);
   req.pVloads = vinfo.pVloads;

+  dDebug("send status req to mnode, statusSeq:%d, begin to get mnode loads", pMgmt->statusSeq);
   SMonMloadInfo minfo = {0};
   (*pMgmt->getMnodeLoadsFp)(&minfo);
   req.mload = minfo.load;

+  dDebug("send status req to mnode, statusSeq:%d, begin to get qnode loads", pMgmt->statusSeq);
   (*pMgmt->getQnodeLoadsFp)(&req.qload);

   pMgmt->statusSeq++;
@@ -206,6 +210,7 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
   int8_t epUpdated = 0;
   (void)dmGetMnodeEpSet(pMgmt->pData, &epSet);

+  dDebug("send status req to mnode, statusSeq:%d, begin to send rpc msg", pMgmt->statusSeq);
   code =
       rpcSendRecvWithTimeout(pMgmt->msgCb.statusRpc, &epSet, &rpcMsg, &rpcRsp, &epUpdated, tsStatusInterval * 5 * 1000);
   if (code != 0) {

View File

@@ -515,6 +515,7 @@ static int32_t mndInitWal(SMnode *pMnode) {
       .fsyncPeriod = 0,
       .rollPeriod = -1,
       .segSize = -1,
+      .committed = -1,
      .retentionPeriod = 0,
      .retentionSize = 0,
      .level = TAOS_WAL_FSYNC,

View File

@@ -1811,6 +1811,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
     return 0;
   }

+  mInfo("stream:%s,%" PRId64 " start to resume stream from pause", resumeReq.name, pStream->uid);
   if (mndCheckDbPrivilegeByName(pMnode, pReq->info.conn.user, MND_OPER_WRITE_DB, pStream->targetDb) != 0) {
     sdbRelease(pMnode->pSdb, pStream);
     return -1;

View File

@@ -61,7 +61,6 @@ static int32_t doSetPauseAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTa
 static int32_t doSetDropAction(SMnode *pMnode, STrans *pTrans, SStreamTask *pTask) {
   SVDropStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVDropStreamTaskReq));
   if (pReq == NULL) {
-    // terrno = TSDB_CODE_OUT_OF_MEMORY;
     return terrno;
   }
@@ -93,7 +92,6 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT
   if (pReq == NULL) {
     mError("failed to malloc in resume stream, size:%" PRIzu ", code:%s", sizeof(SVResumeStreamTaskReq),
            tstrerror(TSDB_CODE_OUT_OF_MEMORY));
-    // terrno = TSDB_CODE_OUT_OF_MEMORY;
     return terrno;
   }
@@ -106,19 +104,18 @@ static int32_t doSetResumeAction(STrans *pTrans, SMnode *pMnode, SStreamTask *pT
   bool    hasEpset = false;
   int32_t code = extractNodeEpset(pMnode, &epset, &hasEpset, pTask->id.taskId, pTask->info.nodeId);
   if (code != TSDB_CODE_SUCCESS || (!hasEpset)) {
-    terrno = code;
     taosMemoryFree(pReq);
-    return terrno;
+    return code;
   }

   code = setTransAction(pTrans, pReq, sizeof(SVResumeStreamTaskReq), TDMT_STREAM_TASK_RESUME, &epset, 0, TSDB_CODE_VND_INVALID_VGROUP_ID);
   if (code != 0) {
     taosMemoryFree(pReq);
-    return terrno;
+    return code;
   }

   mDebug("set the resume action for trans:%d", pTrans->id);
-  return 0;
+  return code;
 }

 static int32_t doSetDropActionFromId(SMnode *pMnode, STrans *pTrans, SOrphanTask* pTask) {

View File

@@ -81,6 +81,9 @@ typedef struct SCommitInfo SCommitInfo;
 typedef struct SCompactInfo SCompactInfo;
 typedef struct SQueryNode   SQueryNode;

+#define VNODE_META_TMP_DIR    "meta.tmp"
+#define VNODE_META_BACKUP_DIR "meta.backup"
+
 #define VNODE_META_DIR "meta"
 #define VNODE_TSDB_DIR "tsdb"
 #define VNODE_TQ_DIR   "tq"

View File

@@ -133,7 +133,7 @@ static void doScan(SMeta *pMeta) {
   }
 }

-int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
+static int32_t metaOpenImpl(SVnode *pVnode, SMeta **ppMeta, const char *metaDir, int8_t rollback) {
   SMeta  *pMeta = NULL;
   int32_t code = 0;
   int32_t lino;
@@ -144,7 +144,11 @@ int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
   // create handle
   vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN);
   offset = strlen(path);
-  snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, VNODE_META_DIR);
+  snprintf(path + offset, TSDB_FILENAME_LEN - offset - 1, "%s%s", TD_DIRSEP, metaDir);
+
+  if (strncmp(metaDir, VNODE_META_TMP_DIR, strlen(VNODE_META_TMP_DIR)) == 0) {
+    taosRemoveDir(path);
+  }

   if ((pMeta = taosMemoryCalloc(1, sizeof(*pMeta) + strlen(path) + 1)) == NULL) {
     TSDB_CHECK_CODE(code = terrno, lino, _exit);
@@ -245,6 +249,188 @@ _exit:
   return code;
 }

+bool generateNewMeta = false;
+
+static int32_t metaGenerateNewMeta(SMeta **ppMeta) {
+  SMeta  *pNewMeta = NULL;
+  SMeta  *pMeta = *ppMeta;
+  SVnode *pVnode = pMeta->pVnode;
+
+  metaInfo("vgId:%d start to generate new meta", TD_VID(pMeta->pVnode));
+
+  // Open a new meta for orgainzation
+  int32_t code = metaOpenImpl(pMeta->pVnode, &pNewMeta, VNODE_META_TMP_DIR, false);
+  if (code) {
+    return code;
+  }
+
+  code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL);
+  if (code) {
+    return code;
+  }
+
+  // i == 0, scan super table
+  // i == 1, scan normal table and child table
+  for (int i = 0; i < 2; i++) {
+    TBC    *uidCursor = NULL;
+    int32_t counter = 0;
+
+    code = tdbTbcOpen(pMeta->pUidIdx, &uidCursor, NULL);
+    if (code) {
+      metaError("vgId:%d failed to open uid index cursor, reason:%s", TD_VID(pVnode), tstrerror(code));
+      return code;
+    }
+
+    code = tdbTbcMoveToFirst(uidCursor);
+    if (code) {
+      metaError("vgId:%d failed to move to first, reason:%s", TD_VID(pVnode), tstrerror(code));
+      tdbTbcClose(uidCursor);
+      return code;
+    }
+
+    for (;;) {
+      const void *pKey;
+      int         kLen;
+      const void *pVal;
+      int         vLen;
+
+      if (tdbTbcGet(uidCursor, &pKey, &kLen, &pVal, &vLen) < 0) {
+        break;
+      }
+
+      tb_uid_t    uid = *(tb_uid_t *)pKey;
+      SUidIdxVal *pUidIdxVal = (SUidIdxVal *)pVal;
+      if ((i == 0 && (pUidIdxVal->suid && pUidIdxVal->suid == uid))          // super table
+          || (i == 1 && (pUidIdxVal->suid == 0 || pUidIdxVal->suid != uid))  // normal table and child table
+      ) {
+        counter++;
+        if (i == 0) {
+          metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter, "super", uid);
+        } else {
+          metaInfo("vgId:%d counter:%d new meta handle %s table uid:%" PRId64, TD_VID(pVnode), counter,
+                   pUidIdxVal->suid == 0 ? "normal" : "child", uid);
+        }
+
+        // fetch table entry
+        void *value = NULL;
+        int   valueSize = 0;
+        if (tdbTbGet(pMeta->pTbDb,
+                     &(STbDbKey){
+                         .version = pUidIdxVal->version,
+                         .uid = uid,
+                     },
+                     sizeof(uid), &value, &valueSize) == 0) {
+          SDecoder   dc = {0};
+          SMetaEntry me = {0};
+          tDecoderInit(&dc, value, valueSize);
+          if (metaDecodeEntry(&dc, &me) == 0) {
+            if (metaHandleEntry(pNewMeta, &me) != 0) {
+              metaError("vgId:%d failed to handle entry, uid:%" PRId64, TD_VID(pVnode), uid);
+            }
+          }
+          tDecoderClear(&dc);
+        }
+        tdbFree(value);
+      }
+
+      code = tdbTbcMoveToNext(uidCursor);
+      if (code) {
+        metaError("vgId:%d failed to move to next, reason:%s", TD_VID(pVnode), tstrerror(code));
+        return code;
+      }
+    }
+
+    tdbTbcClose(uidCursor);
+  }
+
+  code = metaCommit(pNewMeta, pNewMeta->txn);
+  if (code) {
+    metaError("vgId:%d failed to commit, reason:%s", TD_VID(pVnode), tstrerror(code));
+    return code;
+  }
+
+  code = metaFinishCommit(pNewMeta, pNewMeta->txn);
+  if (code) {
+    metaError("vgId:%d failed to finish commit, reason:%s", TD_VID(pVnode), tstrerror(code));
+    return code;
+  }
+
+  if ((code = metaBegin(pNewMeta, META_BEGIN_HEAP_NIL)) != 0) {
+    metaError("vgId:%d failed to begin new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
+  }
+  metaClose(&pNewMeta);
+
+  metaInfo("vgId:%d finish to generate new meta", TD_VID(pVnode));
+  return 0;
+}
+
+int32_t metaOpen(SVnode *pVnode, SMeta **ppMeta, int8_t rollback) {
+  if (generateNewMeta) {
+    char path[TSDB_FILENAME_LEN] = {0};
+    char oldMetaPath[TSDB_FILENAME_LEN] = {0};
+    char newMetaPath[TSDB_FILENAME_LEN] = {0};
+    char backupMetaPath[TSDB_FILENAME_LEN] = {0};
+
+    vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, path, TSDB_FILENAME_LEN);
+    snprintf(oldMetaPath, sizeof(oldMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_DIR);
+    snprintf(newMetaPath, sizeof(newMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_TMP_DIR);
+    snprintf(backupMetaPath, sizeof(backupMetaPath) - 1, "%s%s%s", path, TD_DIRSEP, VNODE_META_BACKUP_DIR);
+
+    bool oldMetaExist = taosCheckExistFile(oldMetaPath);
+    bool newMetaExist = taosCheckExistFile(newMetaPath);
+    bool backupMetaExist = taosCheckExistFile(backupMetaPath);
+
+    if ((!backupMetaExist && !oldMetaExist && newMetaExist)     // case 2
+        || (backupMetaExist && !oldMetaExist && !newMetaExist)  // case 4
+        || (backupMetaExist && oldMetaExist && newMetaExist)    // case 8
+    ) {
+      metaError("vgId:%d invalid meta state, please check", TD_VID(pVnode));
+      return TSDB_CODE_FAILED;
+    } else if ((backupMetaExist && oldMetaExist && !newMetaExist)       // case 7
+               || (!backupMetaExist && !oldMetaExist && !newMetaExist)  // case 1
+    ) {
+      return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
+    } else if (backupMetaExist && !oldMetaExist && newMetaExist) {
+      if (taosRenameFile(newMetaPath, oldMetaPath) != 0) {
+        metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
+        return terrno;
+      }
+      return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
+    } else {
+      int32_t code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
+      if (code) {
+        return code;
+      }
+
+      code = metaGenerateNewMeta(ppMeta);
+      if (code) {
+        metaError("vgId:%d failed to generate new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
+      }
+
+      metaClose(ppMeta);
+      if (taosRenameFile(oldMetaPath, backupMetaPath) != 0) {
+        metaError("vgId:%d failed to rename old meta to backup, reason:%s", TD_VID(pVnode), tstrerror(terrno));
+        return terrno;
+      }
+
+      // rename the new meta to old meta
+      if (taosRenameFile(newMetaPath, oldMetaPath) != 0) {
+        metaError("vgId:%d failed to rename new meta to old meta, reason:%s", TD_VID(pVnode), tstrerror(terrno));
+        return terrno;
+      }
+
+      code = metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, false);
+      if (code) {
+        metaError("vgId:%d failed to open new meta, reason:%s", TD_VID(pVnode), tstrerror(code));
+        return code;
+      }
+    }
+  } else {
+    return metaOpenImpl(pVnode, ppMeta, VNODE_META_DIR, rollback);
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
 int32_t metaUpgrade(SVnode *pVnode, SMeta **ppMeta) {
   int32_t code = TSDB_CODE_SUCCESS;
   int32_t lino;

View File

@@ -2985,9 +2985,6 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
     }
   }
 end:
-  if (terrno != 0) {
-    ret = terrno;
-  }
   tDecoderClear(&dc);
   tdbFree(pData);
   return ret;

View File

@@ -746,13 +746,13 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
       return terrno;
     }

-    pOutputInfo->tbSink.pTblInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
-    if (pOutputInfo->tbSink.pTblInfo == NULL) {
+    pOutputInfo->tbSink.pTbInfo = tSimpleHashInit(10240, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
+    if (pOutputInfo->tbSink.pTbInfo == NULL) {
       tqError("vgId:%d failed init sink tableInfo, code:%s", vgId, tstrerror(terrno));
       return terrno;
     }

-    tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTblInfo, freePtr);
+    tSimpleHashSetFreeFp(pOutputInfo->tbSink.pTbInfo, freePtr);
   }

   if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {

View File

@@ -18,6 +18,8 @@
 #include "tmsg.h"
 #include "tq.h"

+#define IS_NEW_SUBTB_RULE(_t) (((_t)->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER) && ((_t)->subtableWithoutMd5 != 1))
+
 typedef struct STableSinkInfo {
   uint64_t uid;
   tstr     name;
@@ -35,16 +37,22 @@ static int32_t doConvertRows(SSubmitTbData* pTableData, const STSchema* pTSchema
                              int64_t earlyTs, const char* id);
 static int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkInfo* pTableSinkInfo,
                                         const char* dstTableName, int64_t* uid);
-static int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId,
-                              const char* id);
-static int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id);
 static bool    isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbName, int64_t suid);
 static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName,
                                   int32_t numOfTags);
 static int32_t createDefaultTagColName(SArray** pColNameList);
-static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
-                                          int64_t gid, bool newSubTableRule);
-static int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo);
+static int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock,
+                                          const char* stbFullName, int64_t gid, bool newSubTableRule);
+static int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo);
+static int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId,
+                                           const char* id);
+static bool    doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo);
+static int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id);
+static int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode);
+static void    reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs);
+static int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode,
+                                    int64_t earlyTs);

 int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
                          const char* pIdStr, bool newSubTableRule) {
@@ -81,7 +89,8 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p
       memcpy(name, varDataVal(varTbName), varDataLen(varTbName));
       name[varDataLen(varTbName)] = '\0';
-      if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 && stbFullName) {
+      if (newSubTableRule && !isAutoTableName(name) && !alreadyAddGroupId(name, groupId) && groupId != 0 &&
+          stbFullName) {
         int32_t code = buildCtbNameAddGroupId(stbFullName, name, groupId, cap);
         if (code != TSDB_CODE_SUCCESS) {
           return code;
@@ -161,16 +170,6 @@ end:
   return ret;
 }

-static bool tqGetTableInfo(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) {
-  void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
-  if (pVal) {
-    *pInfo = *(STableSinkInfo**)pVal;
-    return true;
-  }
-
-  return false;
-}
-
 static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
   void*   buf = NULL;
   int32_t tlen = 0;
@@ -256,7 +255,6 @@ int32_t setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock*
   } else {
     int32_t code = buildCtbNameByGroupId(stbFullName, gid, &pCreateTableReq->name);
     return code;
-    // tqDebug("gen name from stbFullName:%s gid:%"PRId64, stbFullName, gid);
   }

   return 0;
@@ -266,14 +264,18 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
                                             SStreamTask* pTask, int64_t suid) {
   STSchema*   pTSchema = pTask->outputInfo.tbSink.pTSchema;
   int32_t     rows = pDataBlock->info.rows;
-  SArray*     tagArray = taosArrayInit(4, sizeof(STagVal));
+  SArray*     tagArray = NULL;
   const char* id = pTask->id.idStr;
   int32_t     vgId = pTask->pMeta->vgId;
   int32_t     code = 0;
+  STableSinkInfo*    pInfo = NULL;
+  SVCreateTbBatchReq reqs = {0};
+  SArray*            crTblArray = NULL;

   tqDebug("s-task:%s build create %d table(s) msg", id, rows);

-  SVCreateTbBatchReq reqs = {0};
-  SArray* crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
+  tagArray = taosArrayInit(4, sizeof(STagVal));
+  crTblArray = reqs.pArray = taosArrayInit(1, sizeof(SVCreateTbReq));
   if ((NULL == reqs.pArray) || (tagArray == NULL)) {
     tqError("s-task:%s failed to init create table msg, code:%s", id, tstrerror(terrno));
     code = terrno;
@@ -291,6 +293,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
       tqError("s-task:%s vgId:%d failed to init create table msg", id, vgId);
       continue;
     }
+
     taosArrayClear(tagArray);

     if (size == 2) {
@@ -356,8 +359,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
       }
     }

-    code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid,
-                                      pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);
+    code = setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, IS_NEW_SUBTB_RULE(pTask));
     if (code) {
       goto _end;
     }
@@ -368,16 +370,15 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
       goto _end;
     }

-    STableSinkInfo* pInfo = NULL;
-    bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, gid, &pInfo);
+    bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, gid, &pInfo);
     if (!alreadyCached) {
-      code = doCreateSinkInfo(pCreateTbReq->name, &pInfo);
+      code = doCreateSinkTableInfo(pCreateTbReq->name, &pInfo);
       if (code) {
         tqError("vgId:%d failed to create sink tableInfo for table:%s, s-task:%s", vgId, pCreateTbReq->name, id);
         continue;
       }

-      code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pInfo, gid, id);
+      code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pInfo, gid, id);
       if (code) {
         tqError("vgId:%d failed to put sink tableInfo:%s into cache, s-task:%s", vgId, pCreateTbReq->name, id);
       }
@@ -527,8 +528,8 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c
   taosArrayDestroy(pExisted->aRowP);
   pExisted->aRowP = pFinal;

-  tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d",
-          id, (int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL),
+  tqTrace("s-task:%s rows merged, final rows:%d, pk:%d uid:%" PRId64 ", existed auto-create table:%d, new-block:%d", id,
+          (int32_t)taosArrayGetSize(pFinal), numOfPk, pExisted->uid, (pExisted->pCreateTbReq != NULL),
           (pNew->pCreateTbReq != NULL));

   tdDestroySVCreateTbReq(pNew->pCreateTbReq);
@@ -806,7 +807,7 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
   return TSDB_CODE_SUCCESS;
 }

-int32_t doCreateSinkInfo(const char* pDstTableName, STableSinkInfo** pInfo) {
+int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo) {
   int32_t nameLen = strlen(pDstTableName);
   (*pInfo) = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1);
   if (*pInfo == NULL) {
@@ -830,7 +831,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
   STableSinkInfo* pTableSinkInfo = NULL;
   int32_t         code = 0;

-  bool alreadyCached = tqGetTableInfo(pTask->outputInfo.tbSink.pTblInfo, groupId, &pTableSinkInfo);
+  bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, groupId, &pTableSinkInfo);

   if (alreadyCached) {
     if (dstTableName[0] == 0) {  // data block does not set the destination table name
@@ -870,7 +871,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
         }
       }

-      code = doCreateSinkInfo(dstTableName, &pTableSinkInfo);
+      code = doCreateSinkTableInfo(dstTableName, &pTableSinkInfo);
       if (code == 0) {
         tqDebug("s-task:%s build new sinkTableInfo to add cache, dstTable:%s", id, dstTableName);
       } else {
@@ -906,14 +907,14 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
         SArray* pTagArray = taosArrayInit(pTSchema->numOfCols + 1, sizeof(STagVal));
         if (pTagArray == NULL) {
+          tqError("s-task:%s failed to build auto create submit msg in sink, vgId:%d, due to %s", id, vgId,
+                  tstrerror(terrno));
           return terrno;
         }

         pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
-        code =
-            buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray,
-                                    (pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1),
-                                    &pTableData->pCreateTbReq);
+        code = buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray,
+                                       IS_NEW_SUBTB_RULE(pTask), &pTableData->pCreateTbReq);
         taosArrayDestroy(pTagArray);

         if (code) {
@@ -923,12 +924,12 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
         }

         pTableSinkInfo->uid = 0;
-        code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id);
+        code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id);
       } else {
         metaReaderClear(&mr);

-        tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id,
-                vgId, dstTableName);
+        tqError("s-task:%s vgId:%d dst-table:%s not auto-created, and not create in tsdb, discard data", id, vgId,
+                dstTableName);
         return TSDB_CODE_TDB_TABLE_NOT_EXIST;
       }
     } else {
@@ -944,7 +945,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
         pTableSinkInfo->uid = mr.me.uid;
         metaReaderClear(&mr);

-        code = doPutIntoCache(pTask->outputInfo.tbSink.pTblInfo, pTableSinkInfo, groupId, id);
+        code = doPutSinkTableInfoIntoCache(pTask->outputInfo.tbSink.pTbInfo, pTableSinkInfo, groupId, id);
       }
     }
   }
@@ -975,6 +976,43 @@ int32_t tqSetDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_
   return code;
 }

+int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode) {
+  int32_t          code = TSDB_CODE_SUCCESS;
+  const char*      id = pTask->id.idStr;
+  STaskOutputInfo* pOutputInfo = &pTask->outputInfo;
+  int32_t          vgId = pTask->pMeta->vgId;
+
+  if (pTask->outputInfo.tbSink.pTagSchema == NULL) {
+    SMetaReader mer1 = {0};
+    metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK);
+
+    code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid);
+    if (code != TSDB_CODE_SUCCESS) {
+      tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId);
+      metaReaderClear(&mer1);
+      return code;
+    }
+
+    pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag);
+    metaReaderClear(&mer1);
+
+    if (pOutputInfo->tbSink.pTagSchema == NULL) {
+      tqError("s-task:%s failed to clone tag schema, code:%s, failed to sink results", id, tstrerror(terrno));
+      return terrno;
+    }
+
+    SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema;
+    SSchema*        pCol1 = &pTagSchema->pSchema[0];
+    if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) {
+      pOutputInfo->tbSink.autoCreateCtb = true;
+    } else {
+      pOutputInfo->tbSink.autoCreateCtb = false;
+    }
+  }
+
+  return code;
+}
+
 void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
   const SArray* pBlocks = (const SArray*)data;
   SVnode*       pVnode = (SVnode*)vnode;
@@ -988,29 +1026,11 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
   int64_t          earlyTs = tsdbGetEarliestTs(pVnode->pTsdb);
   STaskOutputInfo* pOutputInfo = &pTask->outputInfo;

-  if (pTask->outputInfo.tbSink.pTagSchema == NULL) {
-    SMetaReader mer1 = {0};
-    metaReaderDoInit(&mer1, pVnode->pMeta, META_READER_LOCK);
-
-    code = metaReaderGetTableEntryByUid(&mer1, pOutputInfo->tbSink.stbUid);
-    if (code != TSDB_CODE_SUCCESS) {
-      tqError("s-task:%s vgId:%d failed to get the dst stable, failed to sink results", id, vgId);
-      metaReaderClear(&mer1);
-      return;
-    }
-
-    pOutputInfo->tbSink.pTagSchema = tCloneSSchemaWrapper(&mer1.me.stbEntry.schemaTag);
-    metaReaderClear(&mer1);
-
-    SSchemaWrapper* pTagSchema = pOutputInfo->tbSink.pTagSchema;
-    SSchema*        pCol1 = &pTagSchema->pSchema[0];
-    if (pTagSchema->nCols == 1 && pCol1->type == TSDB_DATA_TYPE_UBIGINT && strcmp(pCol1->name, "group_id") == 0) {
-      pOutputInfo->tbSink.autoCreateCtb = true;
-    } else {
-      pOutputInfo->tbSink.autoCreateCtb = false;
-    }
-  }
+  code = checkTagSchema(pTask, pVnode);
+  if (code != TSDB_CODE_SUCCESS) {
+    return;
+  }

   bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks);
   if (!onlySubmitData) {
     tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
@@ -1033,45 +1053,127 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
       } else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
         continue;
       } else {
-        pTask->execInfo.sink.numOfBlocks += 1;
-
-        SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
-        if (submitReq.aSubmitTbData == NULL) {
-          code = terrno;
-          tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(code));
-          return;
-        }
-
-        SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP};
-        code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
-        if (code != TSDB_CODE_SUCCESS) {
-          tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName);
-          continue;
-        }
-
-        code = tqSetDstTableDataPayload(suid, pTSchema, i, pDataBlock, &tbData, earlyTs, id);
-        if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
-          if (tbData.pCreateTbReq != NULL) {
-            tdDestroySVCreateTbReq(tbData.pCreateTbReq);
-            (void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, pDataBlock->info.id.groupId, id);
-            tbData.pCreateTbReq = NULL;
-          }
-          continue;
-        }
-
-        void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData);
-        if (p == NULL) {
-          tqDebug("vgId:%d, s-task:%s failed to build submit msg, data lost", vgId, id);
-        }
-
-        code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1);
-        if (code) {  // failed and continue
-          tqDebug("vgId:%d, s-task:%s submit msg failed, data lost", vgId, id);
-        }
+        code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs);
       }
     }
   } else {
     tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, merge submit msg", vgId, id, numOfBlocks);
+    if (streamTaskShouldStop(pTask)) {
+      return;
+    }
+
+    reubuildAndSendMultiResBlock(pTask, pBlocks, pVnode, earlyTs);
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+bool hasOnlySubmitData(const SArray* pBlocks, int32_t numOfBlocks) {
+  for (int32_t i = 0; i < numOfBlocks; ++i) {
+    SSDataBlock* p = taosArrayGet(pBlocks, i);
+    if (p == NULL) {
+      continue;
+    }
+
+    if (p->info.type == STREAM_DELETE_RESULT || p->info.type == STREAM_CREATE_CHILD_TABLE) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+int32_t doPutSinkTableInfoIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId,
+                                    const char* id) {
+  int32_t code = tSimpleHashPut(pSinkTableMap, &groupId, sizeof(uint64_t), &pTableSinkInfo, POINTER_BYTES);
+  if (code != TSDB_CODE_SUCCESS) {
+    taosMemoryFreeClear(pTableSinkInfo);
+  } else {
+    tqDebug("s-task:%s new dst table:%s(uid:%" PRIu64 ") added into cache, total:%d", id, pTableSinkInfo->name.data,
+            pTableSinkInfo->uid, tSimpleHashGetSize(pSinkTableMap));
+  }
+
+  return code;
+}
+
+bool doGetSinkTableInfoFromCache(SSHashObj* pTableInfoMap, uint64_t groupId, STableSinkInfo** pInfo) {
+  void* pVal = tSimpleHashGet(pTableInfoMap, &groupId, sizeof(uint64_t));
+  if (pVal) {
+    *pInfo = *(STableSinkInfo**)pVal;
+    return true;
+  }
+
+  return false;
+}
+
+int32_t doRemoveSinkTableInfoInCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) {
+  if (tSimpleHashGetSize(pSinkTableMap) == 0) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  int32_t code = tSimpleHashRemove(pSinkTableMap, &groupId, sizeof(groupId));
+  if (code == 0) {
+    tqDebug("s-task:%s remove cached table meta for groupId:%" PRId64, id, groupId);
+  } else {
+    tqError("s-task:%s failed to remove table meta from hashmap, groupId:%" PRId64, id, groupId);
+  }
+
+  return code;
+}
+
+int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
+                                int64_t suid) {
+  SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))};
+  if (deleteReq.deleteReqs == NULL) {
+    return terrno;
+  }
+
+  int32_t code =
+      tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr, IS_NEW_SUBTB_RULE(pTask));
+  if (code != TSDB_CODE_SUCCESS) {
+    return code;
+  }
+
+  if (taosArrayGetSize(deleteReq.deleteReqs) == 0) {
+    taosArrayDestroy(deleteReq.deleteReqs);
+    return TSDB_CODE_SUCCESS;
+  }
+
+  int32_t len;
+  tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
+  if (code != TSDB_CODE_SUCCESS) {
+    qError("s-task:%s failed to encode delete request", pTask->id.idStr);
+    return code;
+  }
+
+  SEncoder encoder = {0};
+  void*    serializedDeleteReq = rpcMallocCont(len + sizeof(SMsgHead));
+  void*    abuf = POINTER_SHIFT(serializedDeleteReq, sizeof(SMsgHead));
+  tEncoderInit(&encoder, abuf, len);
+  code = tEncodeSBatchDeleteReq(&encoder, &deleteReq);
+  tEncoderClear(&encoder);
+  taosArrayDestroy(deleteReq.deleteReqs);
+
+  if (code) {
+    return code;
+  }
+
+  ((SMsgHead*)serializedDeleteReq)->vgId = TD_VID(pVnode);
+
+  SRpcMsg msg = {.msgType = TDMT_VND_BATCH_DEL, .pCont = serializedDeleteReq, .contLen = len + sizeof(SMsgHead)};
+  if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
+    tqDebug("failed to put delete req into write-queue since %s", terrstr());
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs) {
+  int32_t     code = 0;
+  const char* id = pTask->id.idStr;
+  int32_t     vgId = pTask->pMeta->vgId;
+  int32_t     numOfBlocks = taosArrayGetSize(pBlocks);
+  int64_t     suid = pTask->outputInfo.tbSink.stbUid;
+  STSchema*   pTSchema = pTask->outputInfo.tbSink.pTSchema;
+  char*       stbFullName = pTask->outputInfo.tbSink.stbFullName;

   SHashObj* pTableIndexMap =
       taosHashInit(numOfBlocks, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
@@ -1085,12 +1187,6 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
   bool hasSubmit = false;
   for (int32_t i = 0; i < numOfBlocks; i++) {
-    if (streamTaskShouldStop(pTask)) {
-      taosHashCleanup(pTableIndexMap);
-      tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
-      return;
-    }
-
     SSDataBlock* pDataBlock = taosArrayGet(pBlocks, i);
     if (pDataBlock == NULL) {
       continue;
@@ -1118,7 +1214,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
       if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
         if (tbData.pCreateTbReq != NULL) {
           tdDestroySVCreateTbReq(tbData.pCreateTbReq);
-          (void) doRemoveFromCache(pTask->outputInfo.tbSink.pTblInfo, groupId, id);
+          (void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, groupId, id);
           tbData.pCreateTbReq = NULL;
         }
         continue;
@@ -1172,93 +1268,51 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
     tqDebug("vgId:%d, s-task:%s write results completed", vgId, id);
   }
 }
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-bool hasOnlySubmitData(const SArray* pBlocks, int32_t numOfBlocks) {
-  for (int32_t i = 0; i < numOfBlocks; ++i) {
-    SSDataBlock* p = taosArrayGet(pBlocks, i);
-    if (p == NULL) {
-      continue;
-    }
-
-    if (p->info.type == STREAM_DELETE_RESULT || p->info.type == STREAM_CREATE_CHILD_TABLE) {
-      return false;
-    }
-  }
-
-  return true;
-}
-
-int32_t doPutIntoCache(SSHashObj* pSinkTableMap, STableSinkInfo* pTableSinkInfo, uint64_t groupId, const char* id) {
-  int32_t code = tSimpleHashPut(pSinkTableMap, &groupId, sizeof(uint64_t), &pTableSinkInfo, POINTER_BYTES);
-  if (code != TSDB_CODE_SUCCESS) {
-    taosMemoryFreeClear(pTableSinkInfo);
-  } else {
-    tqDebug("s-task:%s new dst table:%s(uid:%" PRIu64 ") added into cache, total:%d", id, pTableSinkInfo->name.data,
-            pTableSinkInfo->uid, tSimpleHashGetSize(pSinkTableMap));
-  }
-
-  return code;
-}
-
-int32_t doRemoveFromCache(SSHashObj* pSinkTableMap, uint64_t groupId, const char* id) {
-  if (tSimpleHashGetSize(pSinkTableMap) == 0) {
-    return TSDB_CODE_SUCCESS;
-  }
-
-  int32_t code = tSimpleHashRemove(pSinkTableMap, &groupId, sizeof(groupId));
-  if (code == 0) {
-    tqDebug("s-task:%s remove cached table meta for groupId:%" PRId64, id, groupId);
-  } else {
-    tqError("s-task:%s failed to remove table meta from hashmap, groupId:%" PRId64, id, groupId);
-  }
-
-  return code;
-}
-
-int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock* pDataBlock, SStreamTask* pTask,
-                                int64_t suid) {
-  SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))};
-  if (deleteReq.deleteReqs == NULL) {
-    return terrno;
-  }
-
-  int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr,
-                                  pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER && pTask->subtableWithoutMd5 != 1);
-  if (code != TSDB_CODE_SUCCESS) {
-    return code;
-  }
-
-  if (taosArrayGetSize(deleteReq.deleteReqs) == 0) {
-    taosArrayDestroy(deleteReq.deleteReqs);
-    return TSDB_CODE_SUCCESS;
-  }
-
-  int32_t len;
-  tEncodeSize(tEncodeSBatchDeleteReq, &deleteReq, len, code);
-  if (code != TSDB_CODE_SUCCESS) {
-    qError("s-task:%s failed to encode delete request", pTask->id.idStr);
-    return code;
-  }
-
-  SEncoder encoder = {0};
-  void*    serializedDeleteReq = rpcMallocCont(len + sizeof(SMsgHead));
-  void*    abuf = POINTER_SHIFT(serializedDeleteReq, sizeof(SMsgHead));
-  tEncoderInit(&encoder, abuf, len);
-  code = tEncodeSBatchDeleteReq(&encoder, &deleteReq);
-  tEncoderClear(&encoder);
-  taosArrayDestroy(deleteReq.deleteReqs);
-
-  if (code) {
-    return code;
-  }
-
-  ((SMsgHead*)serializedDeleteReq)->vgId = TD_VID(pVnode);
-
-  SRpcMsg msg = {.msgType = TDMT_VND_BATCH_DEL, .pCont = serializedDeleteReq, .contLen = len + sizeof(SMsgHead)};
-  if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
-    tqDebug("failed to put delete req into write-queue since %s", terrstr());
-  }
-
-  return TSDB_CODE_SUCCESS;
-}
+
+int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode,
+                             int64_t earlyTs) {
+  int32_t     code = 0;
+  STSchema*   pTSchema = pTask->outputInfo.tbSink.pTSchema;
+  int64_t     suid = pTask->outputInfo.tbSink.stbUid;
+  const char* id = pTask->id.idStr;
+  int32_t     vgId = TD_VID(pVnode);
+  char*       stbFullName = pTask->outputInfo.tbSink.stbFullName;
+
+  pTask->execInfo.sink.numOfBlocks += 1;
+
+  SSubmitReq2 submitReq = {.aSubmitTbData = taosArrayInit(1, sizeof(SSubmitTbData))};
+  if (submitReq.aSubmitTbData == NULL) {
+    tqError("s-task:%s vgId:%d failed to prepare submit msg in sink task, code:%s", id, vgId, tstrerror(terrno));
+    return terrno;
+  }
+
+  SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = TD_REQ_FROM_APP};
+  code = setDstTableDataUid(pVnode, pTask, pDataBlock, stbFullName, &tbData);
+  if (code != TSDB_CODE_SUCCESS) {
+    tqError("vgId:%d s-task:%s dst-table not exist, stb:%s discard stream results", vgId, id, stbFullName);
+    return code;
+  }
+
+  code = tqSetDstTableDataPayload(suid, pTSchema, index, pDataBlock, &tbData, earlyTs, id);
+  if (code != TSDB_CODE_SUCCESS || tbData.aRowP == NULL) {
+    if (tbData.pCreateTbReq != NULL) {
+      tdDestroySVCreateTbReq(tbData.pCreateTbReq);
+      (void)doRemoveSinkTableInfoInCache(pTask->outputInfo.tbSink.pTbInfo, pDataBlock->info.id.groupId, id);
+      tbData.pCreateTbReq = NULL;
+    }
+    return code;
+  }
+
+  void* p = taosArrayPush(submitReq.aSubmitTbData, &tbData);
+  if (p == NULL) {
+    tqDebug("vgId:%d, s-task:%s failed to build submit msg, code:%s, data lost", vgId, id, tstrerror(terrno));
+    return terrno;
+  }
+
+  code = doBuildAndSendSubmitMsg(pVnode, pTask, &submitReq, 1);
+  if (code) {  // failed and continue
+    tqDebug("vgId:%d, s-task:%s submit msg failed, code:%s data lost", vgId, id, tstrerror(code));
+  }
+
+  return code;
+}

View File

@@ -692,7 +692,7 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
   STaskId       id = {.streamId = pReq->streamId, .taskId = pReq->taskId};
   SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
   if ((ppTask != NULL) && ((*ppTask) != NULL)) {
-    streamMetaAcquireOneTask(*ppTask);
+    int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask);
     SStreamTask* pTask = *ppTask;

     if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
@@ -1119,10 +1119,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t
   int32_t vgId = pMeta->vgId;
   int32_t code = 0;

-  if (pTask == NULL) {
-    return -1;
-  }
-
   streamTaskResume(pTask);
   ETaskStatus status = streamTaskGetStatus(pTask).state;
@@ -1150,7 +1146,6 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t
     }
   }

-  streamMetaReleaseTask(pMeta, pTask);
   return code;
 }
@@ -1173,6 +1168,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
   code = tqProcessTaskResumeImpl(handle, pTask, sversion, pReq->igUntreated, fromVnode);
   if (code != 0) {
+    streamMetaReleaseTask(pMeta, pTask);
     return code;
   }
@@ -1186,6 +1182,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
     streamMutexUnlock(&pHTask->lock);
     code = tqProcessTaskResumeImpl(handle, pHTask, sversion, pReq->igUntreated, fromVnode);
+    streamMetaReleaseTask(pMeta, pHTask);
   }

   return code;

View File

@@ -602,14 +602,14 @@ int32_t tsdbTFileSetInitRef(STsdb *pTsdb, const STFileSet *fset1, STFileSet **fs
     SSttLvl *lvl;
     code = tsdbSttLvlInitRef(pTsdb, lvl1, &lvl);
     if (code) {
-      taosMemoryFree(lvl);
+      tsdbSttLvlClear(&lvl);
       tsdbTFileSetClear(fset);
       return code;
     }

     code = TARRAY2_APPEND(fset[0]->lvlArr, lvl);
     if (code) {
-      taosMemoryFree(lvl);
+      tsdbSttLvlClear(&lvl);
      tsdbTFileSetClear(fset);
      return code;
    }

View File

@@ -855,6 +855,7 @@ static int32_t loadFileBlockBrinInfo(STsdbReader* pReader, SArray* pIndexList, S
   STableBlockScanInfo** p = taosArrayGetLast(pTableScanInfoList);
   if (p == NULL) {
     clearBrinBlockIter(&iter);
+    tsdbError("invalid param, empty in tablescanInfoList, %s", pReader->idStr);
     return TSDB_CODE_INVALID_PARA;
   }
@@ -5256,7 +5257,7 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
   // NOTE: the following codes is used to perform test for suspend/resume for tsdbReader when it blocks the commit
   // the data should be ingested in round-robin and all the child tables should be createted before ingesting data
   // the version range of query will be used to identify the correctness of suspend/resume functions.
-  // this function will blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files
+  // this function will be blocked before loading the SECOND block from vnode-buffer, and restart itself from sst-files
 #if SUSPEND_RESUME_TEST
   if (!pReader->status.suspendInvoked && !pReader->status.loadFromFile) {
     tsem_wait(&pReader->resumeAfterSuspend);
@@ -5909,6 +5910,7 @@ int32_t tsdbGetTableSchema(SMeta* pMeta, int64_t uid, STSchema** pSchema, int64_
   } else if (mr.me.type == TSDB_NORMAL_TABLE) {  // do nothing
   } else {
     code = TSDB_CODE_INVALID_PARA;
+    tsdbError("invalid mr.me.type:%d, code:%s", mr.me.type, tstrerror(code));
     metaReaderClear(&mr);
     return code;
   }

View File

@@ -45,6 +45,7 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
                                    .retentionPeriod = -1,
                                    .rollPeriod = 0,
                                    .segSize = 0,
+                                   .committed = 0,
                                    .retentionSize = -1,
                                    .level = TAOS_WAL_WRITE,
                                    .clearFiles = 0,

View File

@@ -257,6 +257,7 @@ int vnodeLoadInfo(const char *dir, SVnodeInfo *pInfo) {
   code = vnodeDecodeInfo(pData, pInfo);
   TSDB_CHECK_CODE(code, lino, _exit);

+  pInfo->config.walCfg.committed = pInfo->state.committed;
 _exit:
   if (code) {
     if (pFile) {

View File

@@ -633,47 +633,44 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
       }
       break;
     case TDMT_STREAM_TASK_DEPLOY: {
-      int32_t code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len);
-      if (code != TSDB_CODE_SUCCESS) {
-        terrno = code;
+      if ((code = tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len)) != TSDB_CODE_SUCCESS) {
         goto _err;
       }
     } break;
     case TDMT_STREAM_TASK_DROP: {
-      if (tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) {
+      if ((code = tqProcessTaskDropReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) {
         goto _err;
       }
     } break;
     case TDMT_STREAM_TASK_UPDATE_CHKPT: {
-      if (tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen) < 0) {
+      if ((code = tqProcessTaskUpdateCheckpointReq(pVnode->pTq, pMsg->pCont, pMsg->contLen)) < 0) {
         goto _err;
       }
     } break;
     case TDMT_STREAM_CONSEN_CHKPT: {
-      if (pVnode->restored) {
-        if (tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg) < 0) {
-          goto _err;
-        }
+      if (pVnode->restored && (code = tqProcessTaskConsenChkptIdReq(pVnode->pTq, pMsg)) < 0) {
+        goto _err;
       }
     } break;
     case TDMT_STREAM_TASK_PAUSE: {
       if (pVnode->restored && vnodeIsLeader(pVnode) &&
-          tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
+          (code = tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) {
         goto _err;
       }
     } break;
     case TDMT_STREAM_TASK_RESUME: {
       if (pVnode->restored && vnodeIsLeader(pVnode) &&
-          tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
+          (code = tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen)) < 0) {
         goto _err;
       }
     } break;
     case TDMT_VND_STREAM_TASK_RESET: {
-      if (pVnode->restored && vnodeIsLeader(pVnode)) {
-        if (tqProcessTaskResetReq(pVnode->pTq, pMsg) < 0) {
-          goto _err;
-        }
+      if (pVnode->restored && vnodeIsLeader(pVnode) &&
+          (code = tqProcessTaskResetReq(pVnode->pTq, pMsg)) < 0) {
+        goto _err;
       }
     } break;
     case TDMT_VND_ALTER_CONFIRM:
       needCommit = pVnode->config.hashChange;
@@ -693,10 +690,10 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg
     case TDMT_VND_DROP_INDEX:
       vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp);
       break;
-    case TDMT_VND_STREAM_CHECK_POINT_SOURCE:
+    case TDMT_VND_STREAM_CHECK_POINT_SOURCE:  // always return true
       tqProcessTaskCheckPointSourceReq(pVnode->pTq, pMsg, pRsp);
       break;
-    case TDMT_VND_STREAM_TASK_UPDATE:
+    case TDMT_VND_STREAM_TASK_UPDATE:  // always return true
       tqProcessTaskUpdateReq(pVnode->pTq, pMsg);
       break;
     case TDMT_VND_COMPACT:
@@ -752,7 +749,7 @@ _exit:

 _err:
   vError("vgId:%d, process %s request failed since %s, ver:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
-         tstrerror(terrno), ver);
+         tstrerror(code), ver);
   return code;
 }

View File

@@ -551,7 +551,7 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
               (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
     }

-    *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, sizeof(type) - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s",
+    *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s",
                       ((i > 0) ? ", " : ""), pSchema->name, type);
   }
 }

View File

@@ -278,7 +278,7 @@ static bool checkNullRow(SExprSupp* pExprSup, SSDataBlock* pSrcBlock, int32_t in
}

static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock,
-                                  SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo, bool genAfterBlock) {
+                                  SSDataBlock* pSrcBlock, int32_t index, bool beforeTs, SExecTaskInfo* pTaskInfo) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t lino = 0;
  int32_t rows = pResBlock->info.rows;
@@ -427,7 +427,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
          break;
        }
-       if (start.key == INT64_MIN || end.key == INT64_MIN || genAfterBlock) {
+       if (start.key == INT64_MIN || end.key == INT64_MIN) {
          colDataSetNULL(pDst, rows);
          break;
        }
@@ -463,13 +463,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
          break;
        }
-       if (genAfterBlock && rows == 0) {
-         hasInterp = false;
-         break;
-       }
        SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot);
-       if (pkey->isNull == false && !genAfterBlock) {
+       if (pkey->isNull == false) {
          code = colDataSetVal(pDst, rows, pkey->pData, false);
          QUERY_CHECK_CODE(code, lino, _end);
        } else {
@@ -841,7 +836,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
      int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1);
      if (nextTs > pSliceInfo->current) {
        while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) {
-         if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo, false) &&
+         if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, false, pTaskInfo) &&
              pSliceInfo->fillType == TSDB_FILL_LINEAR) {
            break;
          } else {
@@ -869,7 +864,7 @@ static void doTimesliceImpl(SOperatorInfo* pOperator, STimeSliceOperatorInfo* pS
        doKeepLinearInfo(pSliceInfo, pBlock, i);
        while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) {
-         if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo, false) &&
+         if (!genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, pBlock, i, true, pTaskInfo) &&
              pSliceInfo->fillType == TSDB_FILL_LINEAR) {
            break;
          } else {
@@ -914,12 +909,13 @@ static void genInterpAfterDataBlock(STimeSliceOperatorInfo* pSliceInfo, SOperato
  SSDataBlock* pResBlock = pSliceInfo->pRes;
  SInterval*   pInterval = &pSliceInfo->interval;

- if (pSliceInfo->pPrevGroupKey == NULL) {
+ if (pSliceInfo->fillType == TSDB_FILL_NEXT || pSliceInfo->fillType == TSDB_FILL_LINEAR ||
+     pSliceInfo->pPrevGroupKey == NULL) {
    return;
  }

  while (pSliceInfo->current <= pSliceInfo->win.ekey) {
-   (void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo, true);
+   (void)genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pResBlock, NULL, index, false, pOperator->pTaskInfo);
    pSliceInfo->current =
        taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
  }
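The net effect of dropping the genAfterBlock path: for FILL(NEXT) and FILL(LINEAR), INTERP no longer emits rows past the last source timestamp, because there is no "next" value to borrow there (the test updates further down, e.g. checkRows 19 -> 18, reflect this). An illustrative standalone loop, with hypothetical names, showing the row-count difference:

    #include <stdio.h>

    /* Sketch: walk interpolation points and stop once the point passes the
     * last data timestamp when the fill mode needs a future value. */
    enum { FILL_PREV, FILL_NEXT, FILL_LINEAR };

    static int emittedRows(int fill, long lastTs, long rangeEnd, long step) {
      int rows = 0;
      for (long t = 0; t <= rangeEnd; t += step) {
        if (t > lastTs && (fill == FILL_NEXT || fill == FILL_LINEAR)) break;
        rows++;
      }
      return rows;
    }

    int main(void) {
      /* range 0..18s, last row at 15s: FILL(NEXT) yields 16 rows, FILL(PREV) 19 */
      printf("next:%d prev:%d\n", emittedRows(FILL_NEXT, 15, 18, 1),
             emittedRows(FILL_PREV, 15, 18, 1));
      return 0;
    }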

View File

@@ -224,19 +224,18 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index
  *index = -1;

- if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal) {
+ if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v)) {
    return TSDB_CODE_SUCCESS;
  }

  // divide a range of [dMinVal, dMaxVal] into 1024 buckets
  double span = pBucket->range.dMaxVal - pBucket->range.dMinVal;
- if (span < pBucket->numOfSlots) {
-   int32_t delta = (int32_t)(v - pBucket->range.dMinVal);
-   *index = (delta % pBucket->numOfSlots);
+ if (fabs(span) < DBL_EPSILON) {
+   *index = 0;
  } else {
    double slotSpan = span / pBucket->numOfSlots;
    *index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan);
-   if (v == pBucket->range.dMaxVal) {
+   if (fabs(v - pBucket->range.dMaxVal) < DBL_EPSILON) {
      *index -= 1;
    }
  }
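Two floating-point fixes land here: NaN used to slip past the range check (NaN compares false against any bound), and exact `==` on doubles is replaced by an epsilon comparison. A self-contained illustration with hypothetical names:

    #include <float.h>
    #include <math.h>
    #include <stdio.h>

    /* Sketch of the repaired slot hashing: reject NaN up front, treat a
     * zero-width range as a single slot, and map the exact maximum into the
     * last slot via an epsilon test instead of ==. */
    static int slotIndex(double v, double minVal, double maxVal, int numOfSlots) {
      if (v > maxVal || v < minVal || isnan(v)) return -1;
      double span = maxVal - minVal;
      if (fabs(span) < DBL_EPSILON) return 0;        /* degenerate range: one slot */
      int idx = (int)((v - minVal) / (span / numOfSlots));
      if (fabs(v - maxVal) < DBL_EPSILON) idx -= 1;  /* max value belongs to last slot */
      return idx;
    }

    int main(void) {
      printf("%d %d %d\n",
             slotIndex(nan(""), 0, 10, 1024),  /* NaN -> -1 */
             slotIndex(5.0, 5.0, 5.0, 1024),   /* zero span -> 0 */
             slotIndex(10.0, 0, 10, 1024));    /* max -> 1023 */
      return 0;
    }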
@@ -583,48 +582,52 @@ int32_t getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction
        *result = getIdenticalDataVal(pMemBucket, i);
        return TSDB_CODE_SUCCESS;
      }

      // try next round
-     pMemBucket->times += 1;
-     // qDebug("MemBucket:%p, start next round data bucketing, time:%d", pMemBucket, pMemBucket->times);
-
-     pMemBucket->range = pSlot->range;
-     pMemBucket->total = 0;
-
-     resetSlotInfo(pMemBucket);
-
-     int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1);
+     tMemBucket *tmpBucket = NULL;
+     int32_t     code = tMemBucketCreate(pMemBucket->bytes, pMemBucket->type, pSlot->range.dMinVal,
+                                         pSlot->range.dMaxVal, false, &tmpBucket);
+     if (TSDB_CODE_SUCCESS != code) {
+       tMemBucketDestroy(&tmpBucket);
+       return code;
+     }
+     int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times);

      SArray* list;
      void   *p = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
      if (p != NULL) {
        list = *(SArray **)p;
        if (list == NULL || list->size <= 0) {
+         tMemBucketDestroy(&tmpBucket);
          return -1;
        }
      } else {
+       tMemBucketDestroy(&tmpBucket);
        return -1;
      }

      for (int32_t f = 0; f < list->size; ++f) {
        int32_t *pageId = taosArrayGet(list, f);
        if (NULL == pageId) {
+         tMemBucketDestroy(&tmpBucket);
          return TSDB_CODE_OUT_OF_RANGE;
        }

        SFilePage *pg = getBufPage(pMemBucket->pBuffer, *pageId);
        if (pg == NULL) {
+         tMemBucketDestroy(&tmpBucket);
          return terrno;
        }

-       int32_t code = tMemBucketPut(pMemBucket, pg->data, (int32_t)pg->num);
+       code = tMemBucketPut(tmpBucket, pg->data, (int32_t)pg->num);
        if (code != TSDB_CODE_SUCCESS) {
+         tMemBucketDestroy(&tmpBucket);
          return code;
        }
        setBufPageDirty(pg, true);
        releaseBufPage(pMemBucket->pBuffer, pg);
      }

-     return getPercentileImpl(pMemBucket, count - num, fraction, result);
+     code = getPercentileImpl(tmpBucket, count - num, fraction, result);
+     tMemBucketDestroy(&tmpBucket);
+     return code;
    }
  } else {
    num += pSlot->info.size;
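The rewrite replaces in-place re-bucketing with a fresh temporary bucket per round, and every early return now destroys it. A goto-cleanup shape achieves the same guarantee with a single exit point; a minimal sketch with hypothetical helpers (not the tMemBucket API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch: one cleanup label, so no error path can leak the round's bucket. */
    typedef struct { int filled; } Bucket;

    static int  bucketCreate(Bucket **pp) { *pp = calloc(1, sizeof(Bucket)); return *pp ? 0 : -1; }
    static void bucketDestroy(Bucket **pp) { free(*pp); *pp = NULL; }  /* NULL-safe */
    static int  bucketFill(Bucket *p) { p->filled = 1; return 0; }
    static int  bucketQuery(const Bucket *p, double *r) { *r = p->filled ? 42.0 : 0.0; return 0; }

    static int percentileRound(double *result) {
      Bucket *tmp = NULL;
      int code = bucketCreate(&tmp);
      if (code != 0) goto _end;
      if ((code = bucketFill(tmp)) != 0) goto _end;
      code = bucketQuery(tmp, result);
    _end:
      bucketDestroy(&tmp);  /* runs on every path */
      return code;
    }

    int main(void) {
      double r = 0;
      int code = percentileRound(&r);
      printf("code=%d result=%.1f\n", code, r);
      return code;
    }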

View File

@@ -887,7 +887,8 @@ _err:
}

static int32_t addParamToLogicConditionNode(SLogicConditionNode* pCond, SNode* pParam) {
- if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType) {
+ if (QUERY_NODE_LOGIC_CONDITION == nodeType(pParam) && pCond->condType == ((SLogicConditionNode*)pParam)->condType &&
+     ((SLogicConditionNode*)pParam)->condType != LOGIC_COND_TYPE_NOT) {
    int32_t code = nodesListAppendList(pCond->pParameterList, ((SLogicConditionNode*)pParam)->pParameterList);
    ((SLogicConditionNode*)pParam)->pParameterList = NULL;
    nodesDestroyNode(pParam);
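Why same-type flattening must skip NOT: splicing a child's parameter list into a parent of the same type is sound for AND/OR (AND(AND(a,b),c) equals AND(a,b,c)), but for NOT it collapses a negation level, since NOT(NOT(a)) is a, not NOT(a). A tiny self-contained illustration (hypothetical evaluator, not the parser's node types):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch: AND flattening preserves the result; NOT flattening does not. */
    static bool evalAnd(const bool *v, int n) {
      bool r = true;
      for (int i = 0; i < n; i++) r = r && v[i];
      return r;
    }

    int main(void) {
      bool a = true, b = true, c = false;
      bool nested[] = {evalAnd((bool[]){a, b}, 2), c};
      bool flat[] = {a, b, c};
      /* AND(AND(a,b),c) == AND(a,b,c): both print 0 */
      printf("and nested=%d flat=%d\n", evalAnd(nested, 2), evalAnd(flat, 3));
      /* NOT(NOT(a)) != NOT(a): flattening would turn the first into the second */
      printf("not(not(a))=%d not(a)=%d\n", !!a, !a);
      return 0;
    }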

View File

@@ -10609,6 +10609,19 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm
                                   "Non window query only support scalar function, aggregate function is not allowed");
  }

+ if (NULL != pStmt->pOptions->pDelay) {
+   SValueNode* pVal = (SValueNode*)pStmt->pOptions->pDelay;
+   int64_t     minDelay = 0;
+   char*       str = "5s";
+   if (DEAL_RES_ERROR != translateValue(pCxt, pVal) &&
+       TSDB_CODE_SUCCESS ==
+           parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) {
+     if (pVal->datum.i < minDelay) {
+       return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY,
+                                      "stream max delay must be bigger than 5 seconds");
+     }
+   }
+ }
  return TSDB_CODE_SUCCESS;
}
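The check normalizes the user's MAX_DELAY literal and the "5s" floor into the same unit before comparing. A sketch of that validation with a hypothetical duration parser (the real one is parseNatualDuration, which also handles precision):

    #include <inttypes.h>
    #include <stdio.h>

    /* Sketch: convert both sides to milliseconds, reject below the 5s floor. */
    static int64_t durationToMs(int64_t value, char unit) {
      switch (unit) {
        case 'a': return value;               /* milliseconds */
        case 's': return value * 1000;
        case 'm': return value * 60 * 1000;
        default:  return -1;
      }
    }

    int main(void) {
      const int64_t minDelayMs = durationToMs(5, 's');  /* the 5s floor */
      int64_t candidates[][2] = {{4000, 'a'}, {4, 's'}, {5000, 'a'}, {5, 's'}};
      for (int i = 0; i < 4; i++) {
        int64_t ms = durationToMs(candidates[i][0], (char)candidates[i][1]);
        printf("%" PRId64 "%c -> %s\n", candidates[i][0], (char)candidates[i][1],
               ms < minDelayMs ? "rejected" : "accepted");
      }
      return 0;
    }

This matches the sim cases added later in this commit: 4000a and 4s fail with sql_error, 5000a and 5s succeed.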

View File

@@ -4679,6 +4679,9 @@ EDealRes fltReviseRewriter(SNode **pNode, void *pContext) {
      cell = cell->pNext;
    }

+   if (node->condType == LOGIC_COND_TYPE_NOT) {
+     stat->scalarMode = true;
+   }
    return DEAL_RES_CONTINUE;
  }

View File

@@ -299,7 +299,7 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) {
    return;
  }

- /*SStreamTask* p = */ streamMetaAcquireOneTask(pTask);  // add task ref here
+ int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);  // add task ref here
  streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs());

  int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);

View File

@@ -347,7 +347,8 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock
  if (old == 0) {
    int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
    stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref);
-   streamMetaAcquireOneTask(pTask);
+
+   int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
    streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
                   "trigger-recv-monitor");
    pTmrInfo->launchChkptId = pActiveInfo->activeId;

View File

@@ -1162,7 +1162,7 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) {
  if (old == 0) {
    int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1);
    stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref);
-   streamMetaAcquireOneTask(pTask);
+   int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
    streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId,
                   "chkpt-ready-monitor");

View File

@@ -753,12 +753,17 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task
  return code;
}

-void streamMetaAcquireOneTask(SStreamTask* pTask) {
+int32_t streamMetaAcquireOneTask(SStreamTask* pTask) {
  int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
  stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref);
+ return ref;
}

void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) {
+ if (pTask == NULL) {
+   return;
+ }
+
  int32_t taskId = pTask->id.taskId;
  int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1);
@@ -862,7 +867,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t
  ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
  if (ppTask) {
    pTask = *ppTask;
-   // it is an fill-history task, remove the related stream task's id that points to it
+   // it is a fill-history task, remove the related stream task's id that points to it
    if (pTask->info.fillHistory == 0) {
      int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1);
    }
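Two small hardening steps in the refcount discipline: acquire now returns the post-increment count so callers can log or assert on it, and release tolerates NULL so cleanup paths need no guard of their own. A standalone sketch with a hypothetical task struct (C11 atomics standing in for TDengine's atomic_* wrappers):

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct {
      atomic_int refCnt;
    } Task;

    static int taskAcquire(Task *t) {
      return atomic_fetch_add(&t->refCnt, 1) + 1;  /* report the new count */
    }

    static void taskRelease(Task *t) {
      if (t == NULL) return;  /* NULL-safe: tolerate release on failed-acquire paths */
      int ref = atomic_fetch_sub(&t->refCnt, 1) - 1;
      if (ref == 0) { /* last owner would free the task here */ }
    }

    int main(void) {
      Task t = {1};
      printf("ref after acquire:%d\n", taskAcquire(&t));  /* 2 */
      taskRelease(&t);
      taskRelease(NULL);  /* safe no-op */
      return 0;
    }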

View File

@@ -22,7 +22,7 @@ static void streamTaskSchedHelper(void* param, void* tmrId);
void streamSetupScheduleTrigger(SStreamTask* pTask) {
  int64_t delaySchema = pTask->info.delaySchedParam;
  if (delaySchema != 0 && pTask->info.fillHistory == 0) {
-   int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1);
+   int32_t ref = streamMetaAcquireOneTask(pTask);
    stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref,
            pTask->info.delaySchedParam);
@@ -63,7 +63,11 @@ int32_t streamTaskSchedTask(SMsgCb* pMsgCb, int32_t vgId, int64_t streamId, int3
  pRunReq->reqType = execType;

  SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
- return tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg);
+ int32_t code = tmsgPutToQueue(pMsgCb, STREAM_QUEUE, &msg);
+ if (code) {
+   stError("vgId:%d failed to put msg into stream queue, code:%s, %x", vgId, tstrerror(code), taskId);
+ }
+ return code;
}

void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleTime = 0; }
@@ -76,7 +80,7 @@ void streamTaskResumeInFuture(SStreamTask* pTask) {
          pTask->status.schedIdleTime, ref);

  // add one ref count for task
- streamMetaAcquireOneTask(pTask);
+ int32_t unusedRetRef = streamMetaAcquireOneTask(pTask);
  streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer,
                 pTask->pMeta->vgId, "resume-task-tmr");
}

View File

@@ -477,6 +477,7 @@ int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal
  if (!pStr) {
    if (onlyCache && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) {
      (*pWinCode) = TSDB_CODE_FAILED;
+     goto _end;
    }
    (*pWinCode) = streamStateGetParName_rocksdb(pState, groupId, pVal);
    if ((*pWinCode) == TSDB_CODE_SUCCESS && tSimpleHashGetSize(pState->parNameMap) < MAX_TABLE_NAME_NUM) {

View File

@@ -258,10 +258,12 @@ void tFreeStreamTask(SStreamTask* pTask) {
  if (pTask->inputq.queue) {
    streamQueueClose(pTask->inputq.queue, pTask->id.taskId);
+   pTask->inputq.queue = NULL;
  }

  if (pTask->outputq.queue) {
    streamQueueClose(pTask->outputq.queue, pTask->id.taskId);
+   pTask->outputq.queue = NULL;
  }

  if (pTask->exec.qmsg) {
@@ -275,6 +277,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
  if (pTask->exec.pWalReader != NULL) {
    walCloseReader(pTask->exec.pWalReader);
+   pTask->exec.pWalReader = NULL;
  }

  streamClearChkptReadyMsg(pTask->chkInfo.pActiveInfo);
@@ -286,7 +289,7 @@ void tFreeStreamTask(SStreamTask* pTask) {
  if (pTask->outputInfo.type == TASK_OUTPUT__TABLE) {
    tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pSchemaWrapper);
    taosMemoryFree(pTask->outputInfo.tbSink.pTSchema);
-   tSimpleHashCleanup(pTask->outputInfo.tbSink.pTblInfo);
+   tSimpleHashCleanup(pTask->outputInfo.tbSink.pTbInfo);
    tDeleteSchemaWrapper(pTask->outputInfo.tbSink.pTagSchema);
  } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) {
    taosArrayDestroy(pTask->outputInfo.shuffleDispatcher.dbInfo.pVgroupInfos);
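The teardown hardening here is the classic NULL-after-free idiom: clear each handle right after closing it so a repeated pass over the same teardown path becomes a harmless no-op instead of a double free. A minimal sketch with hypothetical names:

    #include <stdlib.h>

    typedef struct {
      void *queue;
    } Resources;

    static void queueClose(void *q) { free(q); }

    static void freeResources(Resources *r) {
      if (r->queue) {
        queueClose(r->queue);
        r->queue = NULL;  /* guard against re-entry / repeated teardown */
      }
    }

    int main(void) {
      Resources r = {malloc(16)};
      freeResources(&r);
      freeResources(&r);  /* second call is safe: the pointer was cleared */
      return 0;
    }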

View File

@@ -501,9 +501,10 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
  if (pTrans == NULL) {
    ETaskStatus s = pSM->current.state;
-   if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP &&
-       s != TASK_STATUS__UNINIT && s != TASK_STATUS__READY) {
-     stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name, GET_EVT_NAME(pSM->prev.evt));
+   if (s != TASK_STATUS__DROPPING && s != TASK_STATUS__PAUSE && s != TASK_STATUS__STOP && s != TASK_STATUS__UNINIT &&
+       s != TASK_STATUS__READY) {
+     stError("s-task:%s invalid task status:%s on handling event:%s success", id, pSM->current.name,
+             GET_EVT_NAME(pSM->prev.evt));
    }

    // the pSM->prev.evt may be 0, so print string is not appropriate.
@@ -521,11 +522,15 @@ int32_t streamTaskOnHandleEventSuccess(SStreamTaskSM* pSM, EStreamTaskEvent even
    return TSDB_CODE_STREAM_INVALID_STATETRANS;
  }

+ // repeat pause will not overwrite the previous pause state
+ if (pSM->current.state != TASK_STATUS__PAUSE || pTrans->next.state != TASK_STATUS__PAUSE) {
    keepPrevInfo(pSM);
    pSM->current = pTrans->next;
- pSM->pActiveTrans = NULL;
+ } else {
+   stDebug("s-task:%s repeat pause evt recv, not update prev status", id);
+ }
+
+ pSM->pActiveTrans = NULL;

  // todo remove it
  // todo: handle the error code
  // on success callback, add into lock if necessary, or maybe we should add an option for this?
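The rule this hunk adds: a PAUSE event arriving while the task is already paused must not refresh the "previous state" snapshot, otherwise a later RESUME would restore PAUSE instead of the state from before the first pause. A toy state machine (hypothetical states, not the real SStreamTaskSM) showing the guard:

    #include <stdio.h>

    typedef enum { ST_READY, ST_PAUSE } State;

    typedef struct {
      State current, prev;
    } SM;

    static void onPause(SM *sm) {
      if (sm->current != ST_PAUSE) {  /* repeat pause: keep the old snapshot */
        sm->prev = sm->current;
        sm->current = ST_PAUSE;
      }
    }

    static void onResume(SM *sm) { sm->current = sm->prev; }

    int main(void) {
      SM sm = {ST_READY, ST_READY};
      onPause(&sm);
      onPause(&sm);  /* without the guard, prev would now be PAUSE */
      onResume(&sm);
      printf("resumed to %s\n", sm.current == ST_READY ? "READY" : "PAUSE");
      return 0;
    }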

View File

@@ -56,7 +56,7 @@ void streamTmrStart(TAOS_TMR_CALLBACK fp, int32_t mseconds, void* pParam, void*
    }
  }

- stDebug("vgId:%d start %s tmr succ", vgId, pMsg);
+ stTrace("vgId:%d start %s tmr succ", vgId, pMsg);
}

void streamTmrStop(tmr_h tmrId) {

View File

@@ -24,7 +24,6 @@ extern "C" {
#define TIMER_MAX_MS 0x7FFFFFFF
#define PING_TIMER_MS 5000
-#define HEARTBEAT_TICK_NUM 20

typedef struct SSyncEnv {
  uint8_t isStart;

View File

@@ -977,9 +977,10 @@ static int32_t syncHbTimerStart(SSyncNode* pSyncNode, SSyncTimer* pSyncTimer) {
    pData->logicClock = pSyncTimer->logicClock;
    pData->execTime = tsNow + pSyncTimer->timerMS;

-   sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, pData->rid, pData->destId.addr);
+   sTrace("vgId:%d, start hb timer, rid:%" PRId64 " addr:%" PRId64 " at %d", pSyncNode->vgId, pData->rid,
+          pData->destId.addr, pSyncTimer->timerMS);

-   TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM, (void*)(pData->rid),
+   TAOS_CHECK_RETURN(taosTmrReset(pSyncTimer->timerCb, pSyncTimer->timerMS, (void*)(pData->rid),
                                   syncEnv()->pTimerManager, &pSyncTimer->pTimer));
  } else {
    code = TSDB_CODE_SYN_INTERNAL_ERROR;
@@ -2711,7 +2712,8 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
    return;
  }

- sTrace("vgId:%d, eq peer hb timer, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid, pData->destId.addr);
+ sTrace("vgId:%d, peer hb timer execution, rid:%" PRId64 " addr:%" PRId64, pSyncNode->vgId, hbDataRid,
+        pData->destId.addr);

  if (pSyncNode->totalReplicaNum > 1) {
    int64_t timerLogicClock = atomic_load_64(&pSyncTimer->logicClock);
@@ -2753,13 +2755,12 @@ static void syncNodeEqPeerHeartbeatTimer(void* param, void* tmrId) {
        if (ret != 0) {
          sError("vgId:%d, failed to send heartbeat since %s", pSyncNode->vgId, tstrerror(ret));
        }
-     } else {
      }

      if (syncIsInit()) {
-       // sTrace("vgId:%d, reset peer hb timer", pSyncNode->vgId);
-       if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS / HEARTBEAT_TICK_NUM,
-                                (void*)hbDataRid, syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) {
+       sTrace("vgId:%d, reset peer hb timer at %d", pSyncNode->vgId, pSyncTimer->timerMS);
+       if ((code = taosTmrReset(syncNodeEqPeerHeartbeatTimer, pSyncTimer->timerMS, (void*)hbDataRid,
+                                syncEnv()->pTimerManager, &pSyncTimer->pTimer)) != 0) {
          sError("vgId:%d, reset peer hb timer error, %s", pSyncNode->vgId, tstrerror(code));
          syncNodeRelease(pSyncNode);
          syncHbTimerDataRelease(pData);
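Removing HEARTBEAT_TICK_NUM changes the re-arm cadence of these timers from timerMS/20 to timerMS itself. A quick arithmetic check, assuming (as an illustration only) a timer configured at the 5000 ms PING_TIMER_MS defined in the header above:

    #include <stdio.h>

    #define PING_TIMER_MS 5000
    #define OLD_HEARTBEAT_TICK_NUM 20  /* the divisor this commit removes */

    int main(void) {
      printf("old cadence: %d ms\n", PING_TIMER_MS / OLD_HEARTBEAT_TICK_NUM); /* 250 ms */
      printf("new cadence: %d ms\n", PING_TIMER_MS);                          /* 5000 ms */
      return 0;
    }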

View File

@@ -1446,6 +1446,9 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
        return ret;
      }

      ofpCell = tdbPageGetCell(ofp, 0);
+     if (ofpCell == NULL) {
+       return TSDB_CODE_INVALID_DATA_FMT;
+     }

      if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
        bytes = nLeft;

View File

@@ -282,6 +282,17 @@ static int32_t walRebuildFileInfoSet(SArray* metaLogList, SArray* actualLogList)
}

static void walAlignVersions(SWal* pWal) {
+ if (pWal->cfg.committed > 0 && pWal->cfg.committed != pWal->vers.snapshotVer) {
+   wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is different from committed:%" PRId64
+         " in vnode/mnode. align with it.",
+         pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->cfg.committed);
+   pWal->vers.snapshotVer = pWal->cfg.committed;
+ }
+ if (pWal->vers.snapshotVer < 0 && pWal->vers.firstVer > 0) {
+   wWarn("vgId:%d, snapshotVer:%" PRId64 " in wal is an invalid value. align it with firstVer:%" PRId64 ".",
+         pWal->cfg.vgId, pWal->vers.snapshotVer, pWal->vers.firstVer);
+   pWal->vers.snapshotVer = pWal->vers.firstVer;
+ }
  if (pWal->vers.firstVer > pWal->vers.snapshotVer + 1) {
    wWarn("vgId:%d, firstVer:%" PRId64 " is larger than snapshotVer:%" PRId64 " + 1. align with it.", pWal->cfg.vgId,
          pWal->vers.firstVer, pWal->vers.snapshotVer);
@@ -400,6 +411,17 @@ static int32_t walTrimIdxFile(SWal* pWal, int32_t fileIdx) {
  TAOS_RETURN(TSDB_CODE_SUCCESS);
}

+void printFileSet(SArray* fileSet) {
+  int32_t sz = taosArrayGetSize(fileSet);
+  for (int32_t i = 0; i < sz; i++) {
+    SWalFileInfo* pFileInfo = taosArrayGet(fileSet, i);
+    wInfo("firstVer:%" PRId64 ", lastVer:%" PRId64 ", fileSize:%" PRId64 ", syncedOffset:%" PRId64 ", createTs:%" PRId64
+          ", closeTs:%" PRId64,
+          pFileInfo->firstVer, pFileInfo->lastVer, pFileInfo->fileSize, pFileInfo->syncedOffset, pFileInfo->createTs,
+          pFileInfo->closeTs);
+  }
+}
+
int32_t walCheckAndRepairMeta(SWal* pWal) {
  // load log files, get first/snapshot/last version info
  int32_t code = 0;
@@ -460,6 +482,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
  taosArraySort(actualLog, compareWalFileInfo);

+ wInfo("vgId:%d, wal path:%s, actual log file num:%d", pWal->cfg.vgId, pWal->path,
+       (int32_t)taosArrayGetSize(actualLog));
+ printFileSet(actualLog);
+
  int     metaFileNum = taosArrayGetSize(pWal->fileInfoSet);
  int     actualFileNum = taosArrayGetSize(actualLog);
  int64_t firstVerPrev = pWal->vers.firstVer;
@@ -474,6 +500,10 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
    TAOS_RETURN(code);
  }

+ wInfo("vgId:%d, wal path:%s, meta log file num:%d", pWal->cfg.vgId, pWal->path,
+       (int32_t)taosArrayGetSize(pWal->fileInfoSet));
+ printFileSet(pWal->fileInfoSet);
+
  int32_t sz = taosArrayGetSize(pWal->fileInfoSet);

  // scan and determine the lastVer
@@ -533,6 +563,7 @@ int32_t walCheckAndRepairMeta(SWal* pWal) {
  // repair ts of files
  TAOS_CHECK_RETURN(walRepairLogFileTs(pWal, &updateMeta));
+ printFileSet(pWal->fileInfoSet);

  // update meta file
  if (updateMeta) {
    TAOS_CHECK_RETURN(walSaveMeta(pWal));
@@ -1124,6 +1155,10 @@ int32_t walLoadMeta(SWal* pWal) {
  (void)taosCloseFile(&pFile);
  taosMemoryFree(buf);

+ wInfo("vgId:%d, load meta file: %s, fileInfoSet size:%d", pWal->cfg.vgId, fnameStr,
+       (int32_t)taosArrayGetSize(pWal->fileInfoSet));
+ printFileSet(pWal->fileInfoSet);
+
  TAOS_RETURN(code);
}
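The two new alignment rules in walAlignVersions are simple invariants applied on open: a commit index recorded by the vnode/mnode wins over a stale snapshotVer, and snapshotVer must never stay negative when log files exist. A plain-struct sketch of the same logic (hypothetical types, no logging):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
      int64_t committed;   /* commit index from vnode/mnode config */
      int64_t snapshotVer; /* from wal meta */
      int64_t firstVer;    /* first version on disk */
    } WalVers;

    static void alignVersions(WalVers *w) {
      if (w->committed > 0 && w->committed != w->snapshotVer) {
        w->snapshotVer = w->committed;  /* external commit index wins */
      }
      if (w->snapshotVer < 0 && w->firstVer > 0) {
        w->snapshotVer = w->firstVer;   /* repair an invalid snapshot version */
      }
    }

    int main(void) {
      WalVers w = {.committed = 100, .snapshotVer = -1, .firstVer = 90};
      alignVersions(&w);
      printf("snapshotVer=%lld\n", (long long)w.snapshotVer);  /* 100 */
      return 0;
    }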

View File

@@ -91,6 +91,7 @@ static int32_t walInitLock(SWal *pWal) {
}

SWal *walOpen(const char *path, SWalCfg *pCfg) {
+ int32_t code = 0;
  SWal *pWal = taosMemoryCalloc(1, sizeof(SWal));
  if (pWal == NULL) {
    terrno = TAOS_SYSTEM_ERROR(errno);
@@ -160,17 +161,20 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) {
  pWal->writeHead.magic = WAL_MAGIC;

  // load meta
- if (walLoadMeta(pWal) < 0) {
-   wInfo("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(terrno));
+ code = walLoadMeta(pWal);
+ if (code < 0) {
+   wWarn("vgId:%d, failed to load meta since %s", pWal->cfg.vgId, tstrerror(code));
  }

- if (walCheckAndRepairMeta(pWal) < 0) {
-   wError("vgId:%d, cannot open wal since repair meta file failed", pWal->cfg.vgId);
+ code = walCheckAndRepairMeta(pWal);
+ if (code < 0) {
+   wError("vgId:%d, cannot open wal since repair meta file failed since %s", pWal->cfg.vgId, tstrerror(code));
    goto _err;
  }

- if (walCheckAndRepairIdx(pWal) < 0) {
-   wError("vgId:%d, cannot open wal since repair idx file failed", pWal->cfg.vgId);
+ code = walCheckAndRepairIdx(pWal);
+ if (code < 0) {
+   wError("vgId:%d, cannot open wal since repair idx file failed since %s", pWal->cfg.vgId, tstrerror(code));
    goto _err;
  }

View File

@@ -127,6 +127,7 @@ class WalRetentionEnv : public ::testing::Test {
    SWalCfg cfg;
    cfg.rollPeriod = -1;
    cfg.segSize = -1;
+   cfg.committed = -1;
    cfg.retentionPeriod = -1;
    cfg.retentionSize = 0;
    cfg.rollPeriod = 0;
cfg.rollPeriod = 0; cfg.rollPeriod = 0;

View File

@@ -200,6 +200,7 @@ void* taosArrayPop(SArray* pArray) {
void* taosArrayGet(const SArray* pArray, size_t index) {
  if (NULL == pArray) {
    terrno = TSDB_CODE_INVALID_PARA;
+   uError("failed to return value from array of null ptr");
    return NULL;
  }

View File

@@ -13,6 +13,7 @@ from frame.srvCtl import *
from frame.caseBase import *
from frame import *
from frame.autogen import *
+from frame import epath

# from frame.server.dnodes import *
# from frame.server.cluster import *
@@ -20,7 +21,9 @@ from frame.autogen import *

class TDTestCase(TBase):
    def init(self, conn, logSql, replicaVar=1):
+       updatecfgDict = {'dDebugFlag': 131}
        super(TDTestCase, self).init(conn, logSql, replicaVar=1, checkColName="c1")
+
        self.valgrind = 0
        self.db = "test"
        self.stb = "meters"
@@ -50,9 +53,36 @@ class TDTestCase(TBase):
        tdSql.error("create encrypt_key '12345678abcdefghi'")
        tdSql.error("create database test ENCRYPT_ALGORITHM 'sm4'")

+   def recreate_dnode_encrypt_key(self):
+       """
+       Description: From the jira TS-5507, the encrypt key can be recreated.
+       create:
+           2024-09-23 created by Charles
+       update:
+           None
+       """
+       # taosd path
+       taosd_path = epath.binPath()
+       tdLog.info(f"taosd_path: {taosd_path}")
+       # dnode2 path
+       dnode2_path = tdDnodes.getDnodeDir(2)
+       dnode2_data_path = os.sep.join([dnode2_path, "data"])
+       dnode2_cfg_path = os.sep.join([dnode2_path, "cfg"])
+       tdLog.info(f"dnode2_path: {dnode2_data_path}")
+       # stop dnode2
+       tdDnodes.stoptaosd(2)
+       tdLog.info("stop dnode2")
+       # delete dnode2 data
+       cmd = f"rm -rf {dnode2_data_path}"
+       os.system(cmd)
+       # recreate the encrypt key for dnode2
+       os.system(f"{os.sep.join([taosd_path, 'taosd'])} -y '1234567890' -c {dnode2_cfg_path}")
+       tdLog.info("test case: recreate the encrypt key for dnode2 passed")
+
    def run(self):
        self.create_encrypt_db_error()
        self.create_encrypt_db()
+       self.recreate_dnode_encrypt_key()

    def stop(self):
        tdSql.close()

View File

@@ -18,6 +18,7 @@ import time
import socket
import json
import toml
+import subprocess
from frame.boundary import DataBoundary
import taos
from frame.log import *
@@ -1830,6 +1831,51 @@ class TDCom:
            if i == 1:
                self.record_history_ts = ts_value

+   def generate_query_result(self, inputfile, test_case):
+       if not os.path.exists(inputfile):
+           tdLog.exit(f"Input file '{inputfile}' does not exist.")
+       else:
+           self.query_result_file = f"./temp_{test_case}.result"
+           os.system(f"taos -f {inputfile} | grep -v 'Query OK' | grep -v 'Copyright' | grep -v 'Welcome to the TDengine Command' > {self.query_result_file}")
+           return self.query_result_file
+
+   def compare_result_files(self, file1, file2):
+       try:
+           # use subprocess.run to execute diff (Linux/macOS) or fc (Windows)
+           if platform.system().lower() != 'windows':
+               cmd = 'diff'
+               result = subprocess.run([cmd, "-u", "--color", file1, file2], text=True, capture_output=True)
+           else:
+               cmd = 'fc'
+               result = subprocess.run([cmd, file1, file2], text=True, capture_output=True)
+           # if the output is not empty, print the differences and file names; otherwise the files are identical
+           if result.stdout:
+               tdLog.debug(f"Differences between {file1} and {file2}")
+               tdLog.notice(f"\r\n{result.stdout}")
+               return False
+           else:
+               return True
+       except FileNotFoundError:
+           tdLog.debug("The 'diff' command is not found. Please make sure it's installed and available in your PATH.")
+       except Exception as e:
+           tdLog.debug(f"An error occurred: {e}")
+
+   def compare_testcase_result(self, inputfile, expected_file, test_case):
+       test_result_file = self.generate_query_result(inputfile, test_case)
+       if self.compare_result_files(expected_file, test_result_file):
+           tdLog.info("Test passed: Result files are identical.")
+           os.system(f"rm -f {test_result_file}")
+       else:
+           caller = inspect.getframeinfo(inspect.stack()[1][0])
+           tdLog.exit(f"{caller.filename}(line:{caller.lineno}) failed: sqlfile is {inputfile}, expect_file:{expected_file} != result_file:{test_result_file}")
+
def is_json(msg):
    if isinstance(msg, str):
        try:
@@ -1864,4 +1910,6 @@ def dict2toml(in_dict: dict, file:str):
    with open(file, 'w') as f:
        toml.dump(in_dict, f)

+
tdCom = TDCom()

View File

@@ -0,0 +1,114 @@
taos> select pi()
pi() |
============================
3.141592653589793 |
taos> select pi() + 1
pi() + 1 |
============================
4.141592653589793 |
taos> select pi() - 1
pi() - 1 |
============================
2.141592653589793 |
taos> select pi() * 2
pi() * 2 |
============================
6.283185307179586 |
taos> select pi() / 2
pi() / 2 |
============================
1.570796326794897 |
taos> select pi() from ts_4893.meters limit 5
pi() |
============================
3.141592653589793 |
3.141592653589793 |
3.141592653589793 |
3.141592653589793 |
3.141592653589793 |
taos> select pi() + 1 from ts_4893.meters limit 1
pi() + 1 |
============================
4.141592653589793 |
taos> select pi() - 1 from ts_4893.meters limit 1
pi() - 1 |
============================
2.141592653589793 |
taos> select pi() * 2 from ts_4893.meters limit 1
pi() * 2 |
============================
6.283185307179586 |
taos> select pi() / 2 from ts_4893.meters limit 1
pi() / 2 |
============================
1.570796326794897 |
taos> select pi() + pi() from ts_4893.meters limit 1
pi() + pi() |
============================
6.283185307179586 |
taos> select pi() - pi() from ts_4893.meters limit 1
pi() - pi() |
============================
0.000000000000000 |
taos> select pi() * pi() from ts_4893.meters limit 1
pi() * pi() |
============================
9.869604401089358 |
taos> select pi() / pi() from ts_4893.meters limit 1
pi() / pi() |
============================
1.000000000000000 |
taos> select pi() + id from ts_4893.meters order by ts limit 5
pi() + id |
============================
3.141592653589793 |
4.141592653589793 |
5.141592653589793 |
6.141592653589793 |
7.141592653589793 |
taos> select abs(pi())
abs(pi()) |
============================
3.141592653589793 |
taos> select pow(pi(), 2)
pow(pi(), 2) |
============================
9.869604401089358 |
taos> select sqrt(pi())
sqrt(pi()) |
============================
1.772453850905516 |
taos> select cast(pi() as int)
cast(pi() as int) |
====================
3 |
taos> select pi()
pi() |
============================
3.141592653589793 |
taos> select substring_index(null, '.', 2)
substring_index(null, '.', 2) |
================================
NULL |

View File

@@ -1,20 +1,21 @@
-select pi();
-select pi() + 1;
-select pi() - 1;
-select pi() * 2;
-select pi() / 2;
-select pi() from ts_4893.meters limit 5;
-select pi() + 1 from ts_4893.meters limit 1;
-select pi() - 1 from ts_4893.meters limit 1;
-select pi() * 2 from ts_4893.meters limit 1;
-select pi() / 2 from ts_4893.meters limit 1;
-select pi() + pi() from ts_4893.meters limit 1;
-select pi() - pi() from ts_4893.meters limit 1;
-select pi() * pi() from ts_4893.meters limit 1;
-select pi() / pi() from ts_4893.meters limit 1;
-select pi() + id from ts_4893.meters order by ts limit 5;
-select abs(pi());
-select pow(pi(), 2);
-select sqrt(pi());
-select cast(pi() as int);
-select pi();
+select pi()
+select pi() + 1
+select pi() - 1
+select pi() * 2
+select pi() / 2
+select pi() from ts_4893.meters limit 5
+select pi() + 1 from ts_4893.meters limit 1
+select pi() - 1 from ts_4893.meters limit 1
+select pi() * 2 from ts_4893.meters limit 1
+select pi() / 2 from ts_4893.meters limit 1
+select pi() + pi() from ts_4893.meters limit 1
+select pi() - pi() from ts_4893.meters limit 1
+select pi() * pi() from ts_4893.meters limit 1
+select pi() / pi() from ts_4893.meters limit 1
+select pi() + id from ts_4893.meters order by ts limit 5
+select abs(pi())
+select pow(pi(), 2)
+select sqrt(pi())
+select cast(pi() as int)
+select pi()
+select substring_index(null, '.', 2)

View File

@@ -17,14 +17,15 @@ import random

import taos
import frame
-import frame.etool
-from frame.etool import *

from frame.log import *
from frame.cases import *
from frame.sql import *
from frame.caseBase import *
+from frame import *
+from frame import etool
+from frame.common import *

class TDTestCase(TBase):
    updatecfgDict = {
@@ -84,8 +85,16 @@ class TDTestCase(TBase):
                tdSql.error(err_statement)
                err_statement = ''

+   def test_normal_query_new(self, testCase):
+       # read sql from .sql file and execute
+       tdLog.info(f"test normal query.")
+       self.sqlFile = etool.curFile(__file__, f"in/{testCase}.in")
+       self.ansFile = etool.curFile(__file__, f"ans/{testCase}_1.csv")
+
+       tdCom.compare_testcase_result(self.sqlFile, self.ansFile, testCase)
+
    def test_pi(self):
-       self.test_normal_query("pi")
+       self.test_normal_query_new("pi")

    def test_round(self):
        self.test_normal_query("round")

View File

@@ -10,7 +10,7 @@
# army-test
#
,,y,army,./pytest.sh python3 ./test.py -f multi-level/mlevel_basic.py -N 3 -L 3 -D 2
-,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py
+,,y,army,./pytest.sh python3 ./test.py -f db-encrypt/basic.py -N 3 -M 3
,,n,army,python3 ./test.py -f storage/s3/s3Basic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py

View File

@@ -225,6 +225,7 @@ python3 test.py -f query/distinctOneColTb.py
python3 ./test.py -f query/filter.py
python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
+python3 ./test.py -f query/not.py
python3 ./test.py -f query/queryError.py
python3 ./test.py -f query/filterAllIntTypes.py
python3 ./test.py -f query/filterFloatAndDouble.py

View File

@@ -139,6 +139,7 @@ python3 ./test.py -f query/querySort.py
python3 ./test.py -f query/queryJoin.py
python3 ./test.py -f query/filterCombo.py
python3 ./test.py -f query/queryNormal.py
+python3 ./test.py -f query/not.py
python3 ./test.py -f query/select_last_crash.py
python3 ./test.py -f query/queryNullValueTest.py
python3 ./test.py -f query/queryInsertValue.py

View File

@@ -133,4 +133,17 @@ if $data13 != -111 then
  goto loop1
endi

+print step 2====================
+sql create database test vgroups 1;
+sql use test;
+sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+
+sql_error create stream streams1 trigger max_delay 4000a ignore update 0 ignore expired 0 into streamtST1 as select _wstart, count(*) from st interval(5s);
+sql_error create stream streams2 trigger max_delay 4s ignore update 0 ignore expired 0 into streamtST2 as select _wstart, count(*) from st interval(5s);
+sql create stream streams3 trigger max_delay 5000a ignore update 0 ignore expired 0 into streamtST3 as select _wstart, count(*) from st interval(5s);
+sql create stream streams4 trigger max_delay 5s ignore update 0 ignore expired 0 into streamtST4 as select _wstart, count(*) from st interval(5s);
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@@ -48,15 +48,15 @@ sql create table t1 using st tags(1);
sql create table t2 using st tags(2);

sql create stream stream2 trigger window_close into streamt2 as select _wstart, sum(a) from st interval(10s);
-sql create stream stream3 trigger max_delay 1s into streamt3 as select _wstart, sum(a) from st interval(10s);
+sql create stream stream3 trigger max_delay 5s into streamt3 as select _wstart, sum(a) from st interval(10s);
sql create stream stream4 trigger window_close into streamt4 as select _wstart, sum(a) from t1 interval(10s);
-sql create stream stream5 trigger max_delay 1s into streamt5 as select _wstart, sum(a) from t1 interval(10s);
+sql create stream stream5 trigger max_delay 5s into streamt5 as select _wstart, sum(a) from t1 interval(10s);
sql create stream stream6 trigger window_close into streamt6 as select _wstart, sum(a) from st session(ts, 10s);
-sql create stream stream7 trigger max_delay 1s into streamt7 as select _wstart, sum(a) from st session(ts, 10s);
+sql create stream stream7 trigger max_delay 5s into streamt7 as select _wstart, sum(a) from st session(ts, 10s);
sql create stream stream8 trigger window_close into streamt8 as select _wstart, sum(a) from t1 session(ts, 10s);
-sql create stream stream9 trigger max_delay 1s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s);
+sql create stream stream9 trigger max_delay 5s into streamt9 as select _wstart, sum(a) from t1 session(ts, 10s);
sql create stream stream10 trigger window_close into streamt10 as select _wstart, sum(a) from t1 state_window(b);
-sql create stream stream11 trigger max_delay 1s into streamt11 as select _wstart, sum(a) from t1 state_window(b);
+sql create stream stream11 trigger max_delay 5s into streamt11 as select _wstart, sum(a) from t1 state_window(b);

run tsim/stream/checkTaskStatus.sim
@@ -138,12 +138,12 @@ if $rows != 2 then
  goto loop1
endi

-print step 1 max delay 2s
+print step 1 max delay 5s

sql create database test3 vgroups 4;
sql use test3;
sql create table t1(ts timestamp, a int, b int , c int, d double);
-sql create stream stream13 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 interval(10s);
+sql create stream stream13 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 interval(10s);

run tsim/stream/checkTaskStatus.sim
@@ -172,8 +172,8 @@ $now02 = $data02
$now12 = $data12

-print step1 max delay 2s......... sleep 3s
-sleep 3000
+print step1 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt13;
@@ -188,7 +188,7 @@ if $data12 != $now12 then
  return -1
endi

-print step 2 max delay 2s
+print step 2 max delay 5s

sql create database test4 vgroups 4;
sql use test4;
@@ -197,7 +197,7 @@ sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,t
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);

-sql create stream stream14 trigger max_delay 2s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s);
+sql create stream stream14 trigger max_delay 5s into streamt14 as select _wstart, sum(a), now from st partition by tbname interval(10s);

run tsim/stream/checkTaskStatus.sim
@@ -234,8 +234,8 @@ $now12 = $data12
$now22 = $data22
$now32 = $data32

-print step2 max delay 2s......... sleep 3s
-sleep 3000
+print step2 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt14 order by 2;
print $data00 $data01 $data02
@@ -264,8 +264,8 @@ if $data32 != $now32 then
  return -1
endi

-print step2 max delay 2s......... sleep 3s
-sleep 3000
+print step2 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt14 order by 2;
print $data00 $data01 $data02
@@ -294,12 +294,12 @@ if $data32 != $now32 then
  return -1
endi

-print step 2 max delay 2s
+print step 2 max delay 5s

sql create database test15 vgroups 4;
sql use test15;
sql create table t1(ts timestamp, a int, b int , c int, d double);
-sql create stream stream15 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s);
+sql create stream stream15 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 session(ts, 10s);

run tsim/stream/checkTaskStatus.sim
@@ -328,8 +328,8 @@ $now02 = $data02
$now12 = $data12

-print step1 max delay 2s......... sleep 3s
-sleep 3000
+print step1 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt13;
@@ -344,8 +344,8 @@ if $data12 != $now12 then
  return -1
endi

-print step1 max delay 2s......... sleep 3s
-sleep 3000
+print step1 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt13;
@@ -362,12 +362,12 @@ endi

print session max delay over

-print step 3 max delay 2s
+print step 3 max delay 5s

sql create database test16 vgroups 4;
sql use test16;
sql create table t1(ts timestamp, a int, b int , c int, d double);
-sql create stream stream16 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 state_window(a);
+sql create stream stream16 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 state_window(a);

run tsim/stream/checkTaskStatus.sim
@@ -396,8 +396,8 @@ $now02 = $data02
$now12 = $data12

-print step1 max delay 2s......... sleep 3s
-sleep 3000
+print step1 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt13;
@@ -412,8 +412,8 @@ if $data12 != $now12 then
  return -1
endi

-print step1 max delay 2s......... sleep 3s
-sleep 3000
+print step1 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt13;
@@ -430,12 +430,12 @@ endi

print state max delay over

-print step 4 max delay 2s
+print step 4 max delay 5s

sql create database test17 vgroups 4;
sql use test17;
sql create table t1(ts timestamp, a int, b int , c int, d double);
-sql create stream stream17 trigger max_delay 2s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9;
+sql create stream stream17 trigger max_delay 5s into streamt13 as select _wstart, sum(a), now from t1 event_window start with a = 1 end with a = 9;

run tsim/stream/checkTaskStatus.sim
@@ -467,8 +467,8 @@ $now02 = $data02
$now12 = $data12

-print step1 max delay 2s......... sleep 3s
-sleep 3000
+print step1 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt13;
@@ -483,8 +483,8 @@ if $data12 != $now12 then
  return -1
endi

-print step1 max delay 2s......... sleep 3s
-sleep 3000
+print step1 max delay 5s......... sleep 6s
+sleep 6000

sql select * from streamt13;

View File

@@ -907,7 +907,7 @@ class TDTestCase:
        ## {. . .}
        tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
-       tdSql.checkRows(13)
+       tdSql.checkRows(12)
        tdSql.checkData(0, 0, 5)
        tdSql.checkData(1, 0, 5)
        tdSql.checkData(2, 0, 10)
@@ -920,7 +920,6 @@ class TDTestCase:
        tdSql.checkData(9, 0, 15)
        tdSql.checkData(10, 0, 15)
        tdSql.checkData(11, 0, 15)
-       tdSql.checkData(12, 0, None)

        ## {} ...
        tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:01', '2020-02-01 00:00:04') every(1s) fill(next)")
@@ -958,12 +957,10 @@ class TDTestCase:
        ## ..{.}
        tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:13', '2020-02-01 00:00:17') every(1s) fill(next)")
-       tdSql.checkRows(5)
+       tdSql.checkRows(3)
        tdSql.checkData(0, 0, 15)
        tdSql.checkData(1, 0, 15)
        tdSql.checkData(2, 0, 15)
-       tdSql.checkData(3, 0, None)
-       tdSql.checkData(4, 0, None)

        ## ... {}
        tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(next)")
@@ -1275,7 +1272,7 @@ class TDTestCase:
        tdSql.checkData(8, 1, True)

        tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:04', '2020-02-01 00:00:16') every(1s) fill(next)")
-       tdSql.checkRows(13)
+       tdSql.checkRows(12)
        tdSql.checkCols(3)

        tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
@@ -1290,7 +1287,6 @@ class TDTestCase:
        tdSql.checkData(9, 0, '2020-02-01 00:00:13.000')
        tdSql.checkData(10, 0, '2020-02-01 00:00:14.000')
        tdSql.checkData(11, 0, '2020-02-01 00:00:15.000')
-       tdSql.checkData(12, 0, '2020-02-01 00:00:16.000')

        tdSql.checkData(0, 1, True)
        tdSql.checkData(1, 1, False)
@@ -1304,7 +1300,6 @@ class TDTestCase:
        tdSql.checkData(9, 1, True)
        tdSql.checkData(10, 1, True)
        tdSql.checkData(11, 1, False)
-       tdSql.checkData(12, 1, True)

        tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:05', '2020-02-01 00:00:15') every(2s) fill(next)")
        tdSql.checkRows(6)
@@ -1682,13 +1677,9 @@ class TDTestCase:
        ## | . | { | .} |
        tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(next)")
-       tdSql.checkRows(6)
+       tdSql.checkRows(2)
        tdSql.checkData(0, 0, 15)
        tdSql.checkData(1, 0, 15)
-       tdSql.checkData(2, 0, None)
-       tdSql.checkData(3, 0, None)
-       tdSql.checkData(4, 0, None)
-       tdSql.checkData(5, 0, None)

        # test fill linear
@@ -2741,7 +2732,7 @@ class TDTestCase:
            tdSql.checkData(4, i, 15)

        tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)")
-       tdSql.checkRows(5)
+       tdSql.checkRows(3)
        tdSql.checkCols(4)

        for i in range (tdSql.queryCols):
@@ -2837,7 +2828,7 @@ class TDTestCase:

        # test fill next
        tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(next)")
-       tdSql.checkRows(19)
+       tdSql.checkRows(18)
        tdSql.checkCols(3)

        tdSql.checkData(0, 0, '2020-02-02 00:00:00.000')
@@ -2860,7 +2851,6 @@ class TDTestCase:
        tdSql.checkData(15, 2, None)
        tdSql.checkData(16, 2, None)
        tdSql.checkData(17, 2, None)
-       tdSql.checkData(18, 2, None)

        tdSql.checkData(17, 0, '2020-02-02 00:00:17.000')
@@ -3091,7 +3081,7 @@ class TDTestCase:

        # test fill linear
        tdSql.query(f"select _irowts,_isfilled,interp(c0) from {dbname}.{tbname2} range('2020-02-02 00:00:00', '2020-02-02 00:00:18') every(1s) fill(linear)")
-       tdSql.checkRows(18)
+       tdSql.checkRows(17)
        tdSql.checkCols(3)

        tdSql.checkData(0, 0, '2020-02-02 00:00:01.000')
@@ -3113,9 +3103,8 @@ class TDTestCase:
        tdSql.checkData(14, 2, None)
        tdSql.checkData(15, 2, None)
        tdSql.checkData(16, 2, None)
-       tdSql.checkData(17, 2, None)

-       tdSql.checkData(17, 0, '2020-02-02 00:00:18.000')
+       tdSql.checkData(16, 0, '2020-02-02 00:00:17.000')

        tdLog.printNoPrefix("==========step13:test error cases")
@@ -3231,7 +3220,7 @@ class TDTestCase:
        tdSql.checkData(17, 1, True)

        tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
-       tdSql.checkRows(19)
+       tdSql.checkRows(18)

        tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
        tdSql.checkData(0, 1, True)
@@ -3254,12 +3243,9 @@ class TDTestCase:
        tdSql.checkData(15, 2, 15)
        tdSql.checkData(16, 2, 17)
        tdSql.checkData(17, 2, 17)
-       tdSql.checkData(18, 2, None)

        tdSql.checkData(17, 0, '2020-02-01 00:00:17.000')
        tdSql.checkData(17, 1, False)
-       tdSql.checkData(18, 0, '2020-02-01 00:00:18.000')
-       tdSql.checkData(18, 1, True)

        tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname} range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
        tdSql.checkRows(17)
@@ -3376,24 +3362,24 @@ class TDTestCase:

        tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
-       tdSql.checkRows(57)
+       tdSql.checkRows(48)

-       for i in range(0, 19):
+       for i in range(0, 14):
            tdSql.checkData(i, 0, 'ctb1')

-       for i in range(19, 38):
+       for i in range(14, 30):
            tdSql.checkData(i, 0, 'ctb2')

-       for i in range(38, 57):
+       for i in range(30, 48):
            tdSql.checkData(i, 0, 'ctb3')

        tdSql.checkData(0, 1, '2020-02-01 00:00:00.000')
-       tdSql.checkData(18, 1, '2020-02-01 00:00:18.000')
-       tdSql.checkData(19, 1, '2020-02-01 00:00:00.000')
-       tdSql.checkData(37, 1, '2020-02-01 00:00:18.000')
-       tdSql.checkData(38, 1, '2020-02-01 00:00:00.000')
-       tdSql.checkData(56, 1, '2020-02-01 00:00:18.000')
+       tdSql.checkData(13, 1, '2020-02-01 00:00:13.000')
+       tdSql.checkData(14, 1, '2020-02-01 00:00:00.000')
+       tdSql.checkData(29, 1, '2020-02-01 00:00:15.000')
+       tdSql.checkData(30, 1, '2020-02-01 00:00:00.000')
+       tdSql.checkData(47, 1, '2020-02-01 00:00:17.000')

        for i in range(0, 2):
            tdSql.checkData(i, 3, 1)
@@ -3404,33 +3390,24 @@ class TDTestCase:
        for i in range(8, 14):
            tdSql.checkData(i, 3, 13)

-       for i in range(14, 19):
-           tdSql.checkData(i, 3, None)
-
-       for i in range(19, 23):
+       for i in range(14, 18):
            tdSql.checkData(i, 3, 3)

-       for i in range(23, 29):
+       for i in range(18, 24):
            tdSql.checkData(i, 3, 9)

-       for i in range(29, 35):
+       for i in range(24, 30):
tdSql.checkData(i, 3, 15) tdSql.checkData(i, 3, 15)
for i in range(35, 38): for i in range(30, 36):
tdSql.checkData(i, 3, None)
for i in range(38, 44):
tdSql.checkData(i, 3, 5) tdSql.checkData(i, 3, 5)
for i in range(44, 50): for i in range(36, 42):
tdSql.checkData(i, 3, 11) tdSql.checkData(i, 3, 11)
for i in range(50, 56): for i in range(42, 48):
tdSql.checkData(i, 3, 17) tdSql.checkData(i, 3, 17)
for i in range(56, 57):
tdSql.checkData(i, 3, None)
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
tdSql.checkRows(39) tdSql.checkRows(39)
@ -3473,7 +3450,7 @@ class TDTestCase:
tdSql.checkRows(90) tdSql.checkRows(90)
tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
tdSql.checkRows(171) tdSql.checkRows(90)
tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.query(f"select c0, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by c0 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
tdSql.checkRows(9) tdSql.checkRows(9)
@ -3490,7 +3467,7 @@ class TDTestCase:
tdSql.checkRows(48) tdSql.checkRows(48)
tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)") tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(next)")
tdSql.checkRows(57) tdSql.checkRows(48)
tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)") tdSql.query(f"select t1, _irowts, _isfilled, interp(c0) from {dbname}.{stbname} partition by t1 range('2020-02-01 00:00:00', '2020-02-01 00:00:18') every(1s) fill(linear)")
tdSql.checkRows(39) tdSql.checkRows(39)
@ -4386,7 +4363,7 @@ class TDTestCase:
tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{tbname_null} range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)")
tdSql.checkRows(11) tdSql.checkRows(9)
tdSql.checkData(0, 1, False) tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True) tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, False) tdSql.checkData(2, 1, False)
@ -4396,8 +4373,6 @@ class TDTestCase:
tdSql.checkData(6, 1, True) tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, False) tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, False) tdSql.checkData(8, 1, False)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, True)
tdSql.checkData(0, 2, 1) tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 2, 3) tdSql.checkData(1, 2, 3)
@ -4408,13 +4383,11 @@ class TDTestCase:
tdSql.checkData(6, 2, 8) tdSql.checkData(6, 2, 8)
tdSql.checkData(7, 2, 8) tdSql.checkData(7, 2, 8)
tdSql.checkData(8, 2, 9) tdSql.checkData(8, 2, 9)
tdSql.checkData(9, 2, None)
tdSql.checkData(10, 2, None)
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)") tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_null} where c0 is not null range('2020-02-02 00:00:01', '2020-02-02 00:00:11') every(1s) fill(next)")
tdSql.checkRows(11) tdSql.checkRows(9)
tdSql.checkData(0, 1, False) tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True) tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, False) tdSql.checkData(2, 1, False)
@ -4424,9 +4397,6 @@ class TDTestCase:
tdSql.checkData(6, 1, True) tdSql.checkData(6, 1, True)
tdSql.checkData(7, 1, False) tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, False) tdSql.checkData(8, 1, False)
tdSql.checkData(9, 1, True)
tdSql.checkData(10, 1, True)
tdSql.checkData(0, 2, 1) tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 2, 3) tdSql.checkData(1, 2, 3)
@ -4437,8 +4407,6 @@ class TDTestCase:
tdSql.checkData(6, 2, 8) tdSql.checkData(6, 2, 8)
tdSql.checkData(7, 2, 8) tdSql.checkData(7, 2, 8)
tdSql.checkData(8, 2, 9) tdSql.checkData(8, 2, 9)
tdSql.checkData(9, 2, None)
tdSql.checkData(10, 2, None)
# super table # super table
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
@ -4475,7 +4443,7 @@ class TDTestCase:
tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") tdSql.query(f"select _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
tdSql.checkRows(9) tdSql.checkRows(8)
tdSql.checkData(0, 1, False) tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True) tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True) tdSql.checkData(2, 1, True)
@ -4484,7 +4452,6 @@ class TDTestCase:
tdSql.checkData(5, 1, True) tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, False) tdSql.checkData(6, 1, False)
tdSql.checkData(7, 1, False) tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, True)
tdSql.checkData(0, 2, 1) tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 2, 9) tdSql.checkData(1, 2, 9)
@ -4494,12 +4461,11 @@ class TDTestCase:
tdSql.checkData(5, 2, 13) tdSql.checkData(5, 2, 13)
tdSql.checkData(6, 2, 13) tdSql.checkData(6, 2, 13)
tdSql.checkData(7, 2, 15) tdSql.checkData(7, 2, 15)
tdSql.checkData(8, 2, None)
tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
tdSql.checkRows(9) tdSql.checkRows(8)
tdSql.checkData(0, 1, False) tdSql.checkData(0, 1, False)
tdSql.checkData(1, 1, True) tdSql.checkData(1, 1, True)
tdSql.checkData(2, 1, True) tdSql.checkData(2, 1, True)
@ -4508,7 +4474,6 @@ class TDTestCase:
tdSql.checkData(5, 1, True) tdSql.checkData(5, 1, True)
tdSql.checkData(6, 1, False) tdSql.checkData(6, 1, False)
tdSql.checkData(7, 1, False) tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, True)
tdSql.checkData(0, 2, 1) tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 2, 9) tdSql.checkData(1, 2, 9)
@ -4518,37 +4483,36 @@ class TDTestCase:
tdSql.checkData(5, 2, 13) tdSql.checkData(5, 2, 13)
tdSql.checkData(6, 2, 13) tdSql.checkData(6, 2, 13)
tdSql.checkData(7, 2, 15) tdSql.checkData(7, 2, 15)
tdSql.checkData(8, 2, None)
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0, 1) from {dbname}.{stbname_null} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
tdSql.checkRows(18) tdSql.checkRows(15)
for i in range(0, 9): for i in range(0, 7):
tdSql.checkData(i, 0, 'ctb1_null') tdSql.checkData(i, 0, 'ctb1_null')
for i in range(9, 18): for i in range(7, 15):
tdSql.checkData(i, 0, 'ctb2_null') tdSql.checkData(i, 0, 'ctb2_null')
tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') tdSql.checkData(0, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') tdSql.checkData(6, 1, '2020-02-01 00:00:13.000')
tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') tdSql.checkData(7, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') tdSql.checkData(14, 1, '2020-02-01 00:00:15.000')
tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)") tdSql.query(f"select tbname, _irowts, _isfilled, interp(c0) from {dbname}.{stbname_null} where c0 is not null partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:17') every(2s) fill(next)")
tdSql.checkRows(18) tdSql.checkRows(15)
for i in range(0, 9): for i in range(0, 7):
tdSql.checkData(i, 0, 'ctb1_null') tdSql.checkData(i, 0, 'ctb1_null')
for i in range(9, 18): for i in range(7, 15):
tdSql.checkData(i, 0, 'ctb2_null') tdSql.checkData(i, 0, 'ctb2_null')
tdSql.checkData(0, 1, '2020-02-01 00:00:01.000') tdSql.checkData(0, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(8, 1, '2020-02-01 00:00:17.000') tdSql.checkData(6, 1, '2020-02-01 00:00:13.000')
tdSql.checkData(9, 1, '2020-02-01 00:00:01.000') tdSql.checkData(7, 1, '2020-02-01 00:00:01.000')
tdSql.checkData(17, 1, '2020-02-01 00:00:17.000') tdSql.checkData(14, 1, '2020-02-01 00:00:15.000')
# fill linear # fill linear
# normal table # normal table
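Taken together, the assertion updates above encode one behavior change (as read from this diff, not from release notes): interp() with fill(next) no longer pads NULL rows past the newest sample, so result sets now end at the last row that has data. A minimal before/after sketch in the same tdSql idiom, reusing the first changed case above:

    # range end (2020-02-15) lies beyond the newest row; fill(next) previously
    # emitted 6 rows (the trailing 4 of them NULL) and now returns only the 2 real ones
    tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(next)")
    tdSql.checkRows(2)   # was: tdSql.checkRows(6)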
View File
@@ -0,0 +1,132 @@
from util.log import *
from util.cases import *
from util.sql import *
from util.common import tdCom
import numpy as np
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.dbname = "db"
self.rowNum = 10
self.ts = 1537146000000
def notConditionTest(self):
dbname = "nottest"
stbname = "st1"
tdsql = tdCom.newTdSql()
tdsql.execute(f"create database if not exists {dbname}")
stype = ["INT", "INT UNSIGNED", "BIGINT", "BIGINT UNSIGNED", "DOUBLE", "FLOAT", "SMALLINT", "SMALLINT UNSIGNED", "TINYINT", "TINYINT UNSIGNED"]
for type_name in stype:
tdsql.execute(f"drop table if exists {dbname}.{stbname}")
tdsql.execute(f"create table if not exists {dbname}.{stbname} (ts timestamp, v1 {type_name}) tags(t1 {type_name})")
tdsql.execute(f"insert into {dbname}.sub_1 using {dbname}.{stbname} tags(1) values({self.ts}, 10)")
tdsql.execute(f"insert into {dbname}.sub_2 using {dbname}.{stbname} tags(2) values({self.ts + 1000}, 20)")
tdsql.execute(f"insert into {dbname}.sub_3 using {dbname}.{stbname} tags(3) values({self.ts + 2000}, 30)")
# Test case 1: NOT IN
tdsql.query(f"select t1, * from {dbname}.{stbname} where t1 not in (1, 2) order by t1")
tdsql.checkRows(1)
tdsql.checkData(0, 0, 3)
# Test case 2: NOT BETWEEN
tdsql.query(f"select * from {dbname}.{stbname} where v1 not between 10 and 20 order by t1")
tdsql.checkRows(1)
tdsql.checkData(0, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not(v1 not between 10 and 20) order by t1")
tdsql.checkRows(2)
# Test case 4: NOT EQUAL
tdsql.query(f"select * from {dbname}.{stbname} where v1 != 20 order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 30)
# Test case 8: NOT (v1 < 20 OR v1 > 30)
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 > 30) order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 20)
tdsql.checkData(1, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 < 20 or v1 >= 30) order by t1")
tdsql.checkRows(1)
# Test case 9: NOT (t1 != 1)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) order by t1")
tdsql.checkRows(1)
tdsql.checkData(0, 1, 10)
tdsql.query(f"select * from {dbname}.{stbname} where (t1 != 1) or not (v1 == 20) order by t1")
tdsql.checkRows(3)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 20)
tdsql.checkData(2, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not((t1 != 1) or not (v1 == 20)) order by t1")
tdsql.checkRows(0)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 20) order by t1")
tdsql.checkRows(0)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1) and not (v1 != 20)) order by t1")
tdsql.checkRows(3)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 != 1) and not (v1 != 10) order by t1")
tdsql.checkRows(1)
tdsql.checkData(0, 1, 10)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 > 2) order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 20)
tdsql.query(f"select * from {dbname}.{stbname} where not (t1 == 2) order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not (v1 > 10 and v1 < 30) order by t1")
tdsql.checkRows(2)
tdsql.checkData(0, 1, 10)
tdsql.checkData(1, 1, 30)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 > 30)) order by t1")
tdsql.checkRows(1)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 < 20 or v1 >= 30)) order by t1")
tdsql.checkRows(2)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 != 1)) order by t1")
tdsql.checkRows(2)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 > 2)) order by t1")
tdsql.checkRows(1)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (t1 == 2)) order by t1")
tdsql.checkRows(1)
tdsql.query(f"select * from {dbname}.{stbname} where not(not (v1 > 10 and v1 < 30)) order by t1")
tdsql.checkRows(1)
def run(self):
dbname = "db"
tdSql.prepare()
self.notConditionTest()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
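The new case is wired into the CI runner lists later in this commit; for a local run the same driver invocation applies:

    python3 ./test.py -f 2-query/not.py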
View File
@@ -0,0 +1,66 @@
import random
import string
from util.log import *
from util.cases import *
from util.sql import *
from util.common import *
from util.sqlset import *
import numpy as np
class TDTestCase:
updatecfgDict = {'slowLogThresholdTest': ''}
updatecfgDict["slowLogThresholdTest"] = 0
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def getPath(self, tool="taosBenchmark"):
if (platform.system().lower() == 'windows'):
tool = tool + ".exe"
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
paths = []
for root, dirs, files in os.walk(projPath):
if ((tool) in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
paths.append(os.path.join(root, tool))
break
if (len(paths) == 0):
tdLog.exit("taosBenchmark not found!")
return
else:
tdLog.info("taosBenchmark found in %s" % paths[0])
return paths[0]
def taosBenchmark(self, param):
binPath = self.getPath()
cmd = f"{binPath} {param}"
tdLog.info(cmd)
os.system(cmd)
def testSlowQuery(self):
self.taosBenchmark(" -d db -t 2 -v 2 -n 1000000 -y")
sql = "select count(*) from db.meters"
for i in range(10):
tdSql.query(sql)
tdSql.checkData(0, 0, 2 * 1000000)
def run(self):
self.testSlowQuery()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
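With slowLogThresholdTest forced to 0, every query presumably gets classified as slow, so the repeated count(*) above exercises the server's slow-query logging path end to end. The runner entry added later in this commit invokes it as:

    python3 ./test.py -f 2-query/slow_query_basic.py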
View File
@@ -24,45 +24,6 @@ class TDTestCase:
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor())
-       #tdSql.init(conn.cursor(), logSql)  # output sql.txt file
-
-   # def consume_TS_4674_Test(self):
-   #
-   #     os.system("nohup taosBenchmark -y -B 1 -t 4 -S 1000 -n 1000000 -i 1000 -v 1 -a 3 > /dev/null 2>&1 &")
-   #     time.sleep()
-   #     tdSql.execute(f'create topic topic_all with meta as database test')
-   #     consumer_dict = {
-   #         "group.id": "g1",
-   #         "td.connect.user": "root",
-   #         "td.connect.pass": "taosdata",
-   #         "auto.offset.reset": "earliest",
-   #     }
-   #     consumer = Consumer(consumer_dict)
-   #
-   #     try:
-   #         consumer.subscribe(["topic_all"])
-   #     except TmqError:
-   #         tdLog.exit(f"subscribe error")
-   #
-   #     try:
-   #         while True:
-   #             res = consumer.poll(5)
-   #             if not res:
-   #                 print(f"null")
-   #                 continue
-   #             val = res.value()
-   #             if val is None:
-   #                 print(f"null")
-   #                 continue
-   #             cnt = 0;
-   #             for block in val:
-   #                 cnt += len(block.fetchall())
-   #
-   #             print(f"block {cnt} rows")
-   #
-   #     finally:
-   #         consumer.close()
-
    def get_leader(self):
        tdLog.debug("get leader")
        tdSql.query("show vnodes")
@@ -74,19 +35,20 @@ class TDTestCase:
    def balance_vnode(self):
        leader_before = self.get_leader()
-       while True:
-           leader_after = -1
        tdSql.query("balance vgroup leader")
        while True:
+           leader_after = -1
+           tdLog.debug("balancing vgroup leader")
+           while True:
+               tdLog.debug("get new vgroup leader")
                leader_after = self.get_leader()
                if leader_after != -1 :
-                   break;
+                   break
                else:
                    time.sleep(1)
            if leader_after != leader_before:
                tdLog.debug("leader changed")
-               break;
+               break
            else :
                time.sleep(1)
@@ -115,7 +77,7 @@ class TDTestCase:
        except TmqError:
            tdLog.exit(f"subscribe error")
-       cnt = 0;
+       cnt = 0
        balance = False
        try:
            while True:
View File
@@ -92,7 +92,7 @@ class TDTestCase:
    def run(self):
        for fill_history_value in [None, 1]:
            for watermark in [None, random.randint(20, 30)]:
-               self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(1, 3)}s", fill_history_value=fill_history_value)
+               self.watermark_max_delay_session(session=random.randint(10, 15), watermark=watermark, max_delay=f"{random.randint(5, 8)}s", fill_history_value=fill_history_value)
    def stop(self):
        tdSql.close()
View File
@@ -245,6 +245,8 @@
 python3 ./test.py -f 2-query/min.py -P
 python3 ./test.py -f 2-query/min.py -P -R
 python3 ./test.py -f 2-query/normal.py -P
 python3 ./test.py -f 2-query/normal.py -P -R
+python3 ./test.py -f 2-query/not.py -P
+python3 ./test.py -f 2-query/not.py -P -R
 python3 ./test.py -f 2-query/mode.py -P
 python3 ./test.py -f 2-query/mode.py -P -R
 python3 ./test.py -f 2-query/Now.py -P
@@ -427,6 +429,7 @@
 python3 ./test.py -f 2-query/Today.py -P -Q 2
 python3 ./test.py -f 2-query/max.py -P -Q 2
 python3 ./test.py -f 2-query/min.py -P -Q 2
 python3 ./test.py -f 2-query/normal.py -P -Q 2
+python3 ./test.py -f 2-query/not.py -P -Q 2
 python3 ./test.py -f 2-query/mode.py -P -Q 2
 python3 ./test.py -f 2-query/count.py -P -Q 2
 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 2
@@ -526,6 +529,7 @@
 python3 ./test.py -f 2-query/Today.py -P -Q 3
 python3 ./test.py -f 2-query/max.py -P -Q 3
 python3 ./test.py -f 2-query/min.py -P -Q 3
 python3 ./test.py -f 2-query/normal.py -P -Q 3
+python3 ./test.py -f 2-query/not.py -P -Q 3
 python3 ./test.py -f 2-query/mode.py -P -Q 3
 python3 ./test.py -f 2-query/count.py -P -Q 3
 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 3
@@ -624,6 +628,7 @@
 python3 ./test.py -f 2-query/Today.py -P -Q 4
 python3 ./test.py -f 2-query/max.py -P -Q 4
 python3 ./test.py -f 2-query/min.py -P -Q 4
 python3 ./test.py -f 2-query/normal.py -P -Q 4
+python3 ./test.py -f 2-query/not.py -P -Q 4
 python3 ./test.py -f 2-query/mode.py -P -Q 4
 python3 ./test.py -f 2-query/count.py -P -Q 4
 python3 ./test.py -f 2-query/countAlwaysReturnValue.py -P -Q 4
View File
@@ -925,3 +925,4 @@ python3 ./test.py -f 99-TDcase/TD-20582.py
 python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3
 python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3
 python3 ./test.py -f eco-system/meta/database/keep_time_offset.py
+python3 ./test.py -f 2-query/slow_query_basic.py
View File
@@ -8,15 +8,14 @@
     "confirm_parameter_prompt": "no",
     "continue_if_fail": "yes",
     "databases": "dbrate",
-    "query_times": 20,
+    "query_times": 5,
     "query_mode": "taosc",
     "specified_table_query": {
         "query_interval": 0,
-        "concurrent": 10,
+        "threads": 10,
         "sqls": [
             {
-                "sql": "select count(*) from meters",
-                "result": "./query_result.txt"
+                "sql": "select count(*) from meters"
             }
         ]
     }
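Net effect on this query profile (the new side of the hunk, assembled here for readability): query_times drops to 5, the concurrency knob is now spelled threads, and no per-query result file is written:

    "specified_table_query": {
        "query_interval": 0,
        "threads": 10,
        "sqls": [
            {
                "sql": "select count(*) from meters"
            }
        ]
    }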
View File
@@ -16,18 +16,20 @@
 {
     "dbinfo": {
         "name": "dbrate",
+        "vgroups": 1,
         "drop": "yes",
-        "vgroups": 2
+        "wal_retention_size": 1,
+        "wal_retention_period": 1
     },
     "super_tables": [
         {
             "name": "meters",
             "child_table_exists": "no",
-            "childtable_count": 10,
+            "childtable_count": 1,
             "childtable_prefix": "d",
             "insert_mode": "@STMT_MODE",
             "interlace_rows": @INTERLACE_MODE,
-            "insert_rows": 100000,
+            "insert_rows": 10000,
             "timestamp_step": 1,
             "start_timestamp": "2020-10-01 00:00:00.000",
             "auto_create_table": "no",
View File
@@ -34,28 +34,6 @@ def exec(command, show=True):
         print(f"exec {command}\n")
     return os.system(command)
-# run return output and error
-def run(command, timeout = 60, show=True):
-    if(show):
-        print(f"run {command} timeout={timeout}s\n")
-    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    process.wait(timeout)
-    output = process.stdout.read().decode(encoding="gbk")
-    error = process.stderr.read().decode(encoding="gbk")
-    return output, error
-
-# return list after run
-def runRetList(command, timeout=10, first=True):
-    output,error = run(command, timeout)
-    if first:
-        return output.splitlines()
-    else:
-        return error.splitlines()
-
 def readFileContext(filename):
     file = open(filename)
     context = file.read()
@@ -78,6 +56,27 @@ def appendFileContext(filename, context):
     except:
         print(f"appand file error context={context} .")
+# run return output and error
+def run(command, show=True):
+    # out to file
+    out = "out.txt"
+    err = "err.txt"
+    ret = exec(command + f" 1>{out} 2>{err}", True)
+    # read from file
+    output = readFileContext(out)
+    error = readFileContext(err)
+    return output, error
+
+# return list after run
+def runRetList(command, first=True):
+    output,error = run(command)
+    if first:
+        return output.splitlines()
+    else:
+        return error.splitlines()
+
 def getFolderSize(folder):
     total_size = 0
     for dirpath, dirnames, filenames in os.walk(folder):
@@ -134,8 +133,6 @@ def getMatch(datatype, algo):
 def generateJsonFile(stmt, interlace):
-    print(f"doTest stmt: {stmt} interlace_rows={interlace}\n")
-
     # replace datatype
     context = readFileContext(templateFile)
     # replace compress
@@ -202,12 +199,23 @@ def writeTemplateInfo(resultFile):
     vgroups = findContextValue(context, "vgroups")
     childCount = findContextValue(context, "childtable_count")
     insertRows = findContextValue(context, "insert_rows")
-    line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n"
+    bindVGroup = findContextValue(context, "thread_bind_vgroup")
+    nThread = findContextValue(context, "thread_count")
+    batch = findContextValue(context, "num_of_records_per_req")
+    if bindVGroup.lower().find("yes") != -1:
+        nThread = vgroups
+    line = f"thread_bind_vgroup = {bindVGroup}\n"
+    line += f"vgroups = {vgroups}\n"
+    line += f"childtable_count = {childCount}\n"
+    line += f"insert_rows = {insertRows}\n"
+    line += f"insertThreads = {nThread}\n"
+    line += f"batchSize = {batch}\n\n"
     print(line)
     appendFileContext(resultFile, line)
-def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
+def totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed):
     global Number
     # flush
     command = 'taos -s "flush database dbrate;"'
@@ -220,7 +228,7 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
     # read compress rate
     command = 'taos -s "show table distributed dbrate.meters\G;"'
     rets = runRetList(command)
-    print(rets)
+    #print(rets)
     str1 = rets[5]
     arr = str1.split(" ")
@@ -234,7 +242,6 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
     str2 = arr[6]
     pos = str2.find("=[")
     rate = str2[pos+2:]
-    print("rate =" + rate)
     # total data file size
     #dataSize = getFolderSize(f"{dataDir}/vnode/")
@@ -242,42 +249,79 @@ def totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed):
     # appand to file
+    # %("No", "stmtMode", "interlaceRows", "spent", "spent-real", "writeSpeed", "write-real", "query-QPS", "dataSize", "rate")
     Number += 1
-    context = "%10s %10s %15s %10s %10s %30s %15s\n"%( Number, stmt, interlace, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed)
+    context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%(
+        Number, stmt, interlace, spent + "s", spentReal + "s", writeSpeed + " r/s", writeReal + " r/s",
+        min, avg, p90, p99, max + "ms",
+        querySpeed, str(totalSize) + " MB", rate + "%")
     showLog(context)
     appendFileContext(resultFile, context)
+def cutEnd(line, start, endChar):
+    pos = line.find(endChar, start)
+    if pos == -1:
+        return line[start:]
+    return line[start : pos]
+
+def findValue(context, pos, key, endChar, command):
+    pos = context.find(key, pos)
+    if pos == -1:
+        print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
+        exit(1)
+    pos += len(key)
+    value = cutEnd(context, pos, endChar)
+    return (value, pos)
+
 def testWrite(jsonFile):
     command = f"taosBenchmark -f {jsonFile}"
     output, context = run(command, 60000)
+    print(context)
     # SUCC: Spent 0.960248 (real 0.947154) seconds to insert rows: 100000 with 1 thread(s) into dbrate 104139.76 (real 105579.45) records/second
-    # find second real
-    pos = context.find("(real ")
+    # spent
+    key = "Spent "
+    pos = -1
+    pos1 = 0
+    while pos1 != -1:  # find last "Spent "
+        pos1 = context.find(key, pos1)
+        if pos1 != -1:
+            pos = pos1  # update last found
+            pos1 += len(key)
     if pos == -1:
-        print(f"error, run command={command} output not found first \"(real\" keyword. error={context}")
+        print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
         exit(1)
-    pos = context.find("(real ", pos + 5)
+    pos += len(key)
+    spent = cutEnd(context, pos, ".")
+
+    # spent-real
+    spentReal, pos = findValue(context, pos, "(real ", ".", command)
+
+    # writeSpeed
+    key = "into "
+    pos = context.find(key, pos)
     if pos == -1:
-        print(f"error, run command={command} output not found second \"(real\" keyword. error={context}")
+        print(f"error, run command={command} output not found \"{key}\" keyword. context={context}")
         exit(1)
+    pos += len(key)
+    writeSpeed, pos = findValue(context, pos, " ", ".", command)
+    # writeReal
+    writeReal, pos = findValue(context, pos, "(real ", ".", command)
-    pos += 5
-    length = len(context)
-    while pos < length and context[pos] == ' ':
-        pos += 1
-    end = context.find(".", pos)
-    if end == -1:
-        print(f"error, run command={command} output not found second \".\" keyword. error={context}")
-        exit(1)
-    speed = context[pos: end]
-    #print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n")
-    return speed
+    # delay
+    min, pos = findValue(context, pos, "min: ", ",", command)
+    avg, pos = findValue(context, pos, "avg: ", ",", command)
+    p90, pos = findValue(context, pos, "p90: ", ",", command)
+    p99, pos = findValue(context, pos, "p99: ", ",", command)
+    max, pos = findValue(context, pos, "max: ", "ms", command)
+
+    return (spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max)
 def testQuery():
     command = f"taosBenchmark -f json/query.json"
-    lines = runRetList(command, 60000)
+    lines = runRetList(command)
     # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485
     speed = None
@@ -308,13 +352,13 @@ def doTest(stmt, interlace, resultFile):
     # run taosBenchmark
     t1 = time.time()
-    writeSpeed = testWrite(jsonFile)
+    spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max = testWrite(jsonFile)
     t2 = time.time()
     # total write speed
     querySpeed = testQuery()
     # total compress rate
-    totalCompressRate(stmt, interlace, resultFile, writeSpeed, querySpeed)
+    totalCompressRate(stmt, interlace, resultFile, spent, spentReal, writeSpeed, writeReal, min, avg, p90, p99, max, querySpeed)
 def main():
@@ -333,7 +377,17 @@ def main():
     # json info
     writeTemplateInfo(resultFile)
     # head
-    context = "\n%10s %10s %15s %10s %10s %30s %15s\n"%("No", "stmtMode", "interlaceRows", "dataSize", "rate", "writeSpeed", "query-QPS")
+    '''
+    context = "%3s %8s %10s %10s %10s %15s %15s %10s %10s %10s %10s %10s %8s %8s %8s\n"%(
+        "No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real",
+        "min", "avg", "p90", "p99", "max",
+        "query-QPS", "dataSize", "rate")
+    '''
+    context = "%2s %8s %10s %10s %16s %16s %12s %12s %12s %12s %12s %12s %10s %10s %10s\n"%(
+        "No", "stmtMode", "interlace", "spent", "spent-real", "writeSpeed", "write-real",
+        "min", "avg", "p90", "p99", "max",
+        "query-QPS", "dataSize", "rate")
     appendFileContext(resultFile, context)
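One subtlety in the parsing above: cutEnd() stops at the first occurrence of endChar, so every numeric field is truncated at its decimal point. A small self-contained check (a sketch; cutEnd restated from the diff above, sample line taken from the SUCC comment):

    def cutEnd(line, start, endChar):
        # return the substring from start up to (not including) the first endChar
        pos = line.find(endChar, start)
        return line[start:] if pos == -1 else line[start:pos]

    line = ("SUCC: Spent 0.960248 (real 0.947154) seconds to insert rows: 100000 "
            "with 1 thread(s) into dbrate 104139.76 (real 105579.45) records/second")
    pos = line.rfind("Spent ") + len("Spent ")   # mirrors the "find last Spent" loop
    print(cutEnd(line, pos, "."))                # -> "0", i.e. whole seconds only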
View File
@@ -8,15 +8,14 @@
     "confirm_parameter_prompt": "no",
     "continue_if_fail": "yes",
     "databases": "dbrate",
-    "query_times": 20,
+    "query_times": 5,
     "query_mode": "taosc",
     "specified_table_query": {
         "query_interval": 0,
-        "concurrent": 10,
+        "threads": 10,
         "sqls": [
             {
-                "sql": "select * from meters",
-                "result": "./query_res0.txt"
+                "sql": "select * from meters"
             }
         ]
     }
View File
@@ -34,28 +34,6 @@ def exec(command, show=True):
     print(f"exec {command}\n")
     return os.system(command)
-# run return output and error
-def run(command, timeout = 60, show=True):
-    if(show):
-        print(f"run {command} timeout={timeout}s\n")
-    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    process.wait(timeout)
-    output = process.stdout.read().decode(encoding="gbk")
-    error = process.stderr.read().decode(encoding="gbk")
-    return output, error
-
-# return list after run
-def runRetList(command, timeout=10, first=True):
-    output,error = run(command, timeout)
-    if first:
-        return output.splitlines()
-    else:
-        return error.splitlines()
-
 def readFileContext(filename):
     file = open(filename)
     context = file.read()
@@ -78,6 +56,27 @@ def appendFileContext(filename, context):
     except:
         print(f"appand file error context={context} .")
+# run return output and error
+def run(command, show=True):
+    # out to file
+    out = "out.txt"
+    err = "err.txt"
+    ret = exec(command + f" 1>{out} 2>{err}", True)
+    # read from file
+    output = readFileContext(out)
+    error = readFileContext(err)
+    return output, error
+
+# return list after run
+def runRetList(command, first=True):
+    output,error = run(command)
+    if first:
+        return output.splitlines()
+    else:
+        return error.splitlines()
+
 def getFolderSize(folder):
     total_size = 0
     for dirpath, dirnames, filenames in os.walk(folder):
@@ -134,8 +133,6 @@ def getMatch(datatype, algo):
 def generateJsonFile(algo):
-    print(f"doTest algo: {algo} \n")
-
     # replace datatype
     context = readFileContext(templateFile)
     # replace compress
@@ -192,30 +189,34 @@ def findContextValue(context, label):
     ends = [',','}',']', 0]
     while context[end] not in ends:
         end += 1
-    print(f"start = {start} end={end}\n")
     return context[start:end]
 def writeTemplateInfo(resultFile):
     # create info
     context = readFileContext(templateFile)
+    dbname = findContextValue(context, "name")
     vgroups = findContextValue(context, "vgroups")
     childCount = findContextValue(context, "childtable_count")
     insertRows = findContextValue(context, "insert_rows")
     line = f"vgroups = {vgroups}\nchildtable_count = {childCount}\ninsert_rows = {insertRows}\n\n"
     print(line)
     appendFileContext(resultFile, line)
+    return dbname
 def totalCompressRate(algo, resultFile, writeSpeed, querySpeed):
     global Number
-    # flush
-    command = 'taos -s "flush database dbrate;"'
-    rets = exec(command)
-    command = 'taos -s "compact database dbrate;"'
-    rets = exec(command)
-    waitCompactFinish(60)
+    loop = 30
+    while loop > 0:
+        loop -= 1
+        # flush database
+        command = 'taos -s "flush database dbrate;"'
+        exec(command)
+        time.sleep(1)
     # read compress rate
     command = 'taos -s "show table distributed dbrate.meters\G;"'
@@ -235,13 +236,14 @@ def totalCompressRate(algo, resultFile, writeSpeed, querySpeed):
     pos = str2.find("=[")
     rate = str2[pos+2:]
     print("rate =" + rate)
+        if rate != "0.00":
+            break
     # total data file size
     #dataSize = getFolderSize(f"{dataDir}/vnode/")
     #dataSizeMB = int(dataSize/1024/1024)
     # appand to file
     Number += 1
     context = "%10s %10s %10s %10s %30s %15s\n"%( Number, algo, str(totalSize)+" MB", rate+"%", writeSpeed + " Records/second", querySpeed)
     showLog(context)
@@ -273,18 +275,22 @@ def testWrite(jsonFile):
     speed = context[pos: end]
     #print(f"write pos ={pos} end={end} speed={speed}\n output={context} \n")
+    # flush database
+    command = 'taos -s "flush database dbrate;"'
+    exec(command)
+
     return speed
 def testQuery():
     command = f"taosBenchmark -f json/query.json"
-    lines = runRetList(command, 60000)
+    lines = runRetList(command)
     # INFO: Spend 6.7350 second completed total queries: 10, the QPS of all threads: 1.485
     speed = None
-    for i in range(20, len(lines)):
+    for i in range(0, len(lines)):
         # find second real
-        context = lines[26]
+        context = lines[i]
         pos = context.find("the QPS of all threads:")
         if pos == -1 :
             continue
         pos += 24
@@ -300,8 +306,6 @@ def testQuery():
 def doTest(algo, resultFile):
     print(f"doTest algo: {algo} \n")
-    #cleanAndStartTaosd()
-
     # json
     jsonFile = generateJsonFile(algo)