commit 9b72d890c8
Merge branch '3.0' into fix/TD-27455
@@ -355,7 +355,7 @@ pipeline {
       }
       parallel {
         stage('check docs') {
-          agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
+          agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
           steps {
             check_docs()
           }
@@ -401,7 +401,7 @@ pipeline {
         }
       }
       stage('linux test') {
-        agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 "}
+        agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "}
         options { skipDefaultCheckout() }
         when {
          changeRequest()
@@ -1,5 +1,5 @@
 cmake_minimum_required(VERSION 3.0)
-set(CMAKE_VERBOSE_MAKEFILE FALSE)
+set(CMAKE_VERBOSE_MAKEFILE TRUE)
 set(TD_BUILD_TAOSA_INTERNAL FALSE)

 #set output directory
@@ -159,6 +159,7 @@ ELSE ()
     CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
     CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
     CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
+    CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)

     IF (COMPILER_SUPPORT_SSE42)
       SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
@@ -166,11 +167,11 @@ ELSE ()
     ENDIF()

     IF ("${SIMD_SUPPORT}" MATCHES "true")
-      IF (COMPILER_SUPPORT_FMA)
+      IF (COMPILER_SUPPORT_FMA)
         SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
         SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
-      ENDIF()
-      IF (COMPILER_SUPPORT_AVX)
+      ENDIF()
+      IF (COMPILER_SUPPORT_AVX)
         SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
         SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
       ENDIF()
@@ -183,7 +184,13 @@ ELSE ()
     IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
       SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
       SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
-      MESSAGE(STATUS "avx512 supported by gcc")
+      MESSAGE(STATUS "avx512f/avx512bmi supported by compiler")
     ENDIF()
+
+    IF (COMPILER_SUPPORT_AVX512VL)
+      SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
+      SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
+      MESSAGE(STATUS "avx512vl supported by compiler")
+    ENDIF()
   ENDIF()
@@ -414,7 +414,8 @@ typedef enum ENodeType {
   QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT,
   QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
   QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
-  QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL
+  QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL,
+  QUERY_NODE_END
 } ENodeType;

 typedef struct {
@@ -67,7 +67,7 @@ enum {

 enum { // WARN: new msg should be appended to segment tail
 #endif
-  TD_NEW_MSG_SEG(TDMT_DND_MSG)
+  TD_NEW_MSG_SEG(TDMT_DND_MSG) // 0<<8
   TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_DROP_MNODE, "dnode-drop-mnode", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_CREATE_QNODE, "dnode-create-qnode", NULL, NULL)
@@ -87,7 +87,7 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL)

-  TD_NEW_MSG_SEG(TDMT_MND_MSG)
+  TD_NEW_MSG_SEG(TDMT_MND_MSG) // 1<<8
   TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "alter-acct", NULL, NULL)
@@ -194,7 +194,7 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_MND_VIEW_META, "view-meta", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)

-  TD_NEW_MSG_SEG(TDMT_VND_MSG)
+  TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8
   TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_CREATE_TABLE, "create-table", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_ALTER_TABLE, "alter-table", NULL, NULL)
@@ -243,7 +243,7 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_VND_DISABLE_WRITE, "vnode-disable-write", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL)

-  TD_NEW_MSG_SEG(TDMT_SCH_MSG)
+  TD_NEW_MSG_SEG(TDMT_SCH_MSG) // 3<<8
   TD_DEF_MSG_TYPE(TDMT_SCH_QUERY, "query", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SCH_MERGE_QUERY, "merge-query", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SCH_QUERY_CONTINUE, "query-continue", NULL, NULL)
@@ -258,13 +258,13 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)


-  TD_NEW_MSG_SEG(TDMT_STREAM_MSG)
-  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DEPLOY, "stream-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp)
+  TD_NEW_MSG_SEG(TDMT_STREAM_MSG) //4 << 8
+  TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DEPLOY, "stream-task-deploy", SStreamTaskDeployReq, SStreamTaskDeployRsp) //1025 1026
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DROP, "stream-task-drop", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RUN, "stream-task-run", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DISPATCH, "stream-task-dispatch", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_UNUSED1, "stream-unused1", NULL, NULL)
-  TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL) //1035 1036
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT_READY, "stream-checkpoint-ready", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
@@ -274,10 +274,10 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_STREAM_HTASK_DROP, "stream-htask-drop", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)

-  TD_NEW_MSG_SEG(TDMT_MON_MSG)
+  TD_NEW_MSG_SEG(TDMT_MON_MSG) //5 << 8
   TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)

-  TD_NEW_MSG_SEG(TDMT_SYNC_MSG)
+  TD_NEW_MSG_SEG(TDMT_SYNC_MSG) //6 << 8
   TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT, "sync-timer", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT_ELECTION, "sync-elect", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_PING_REPLY, "sync-ping-reply", NULL, NULL) // no longer used
@@ -308,7 +308,7 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)

-  TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
+  TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG) //7 << 8
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY_FINISH, "vnode-stream-scan-history-finish", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
@@ -317,7 +317,7 @@ enum { // WARN: new msg should be appended to segment tail
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)

-  TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG)
+  TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG) //8 << 8
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_SUBSCRIBE, "vnode-tmq-subscribe", SMqRebVgReq, SMqRebVgRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_DELETE_SUB, "vnode-tmq-delete-sub", SMqVDeleteReq, SMqVDeleteRsp)
   TD_DEF_MSG_TYPE(TDMT_VND_TMQ_COMMIT_OFFSET, "vnode-tmq-commit-offset", STqOffset, STqOffset)
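The segment annotations added above (// 0<<8 through // 8<<8, and the // 1025 1026 style pairs) follow from how the message-type enum is laid out. A minimal sketch, not the real macros: it assumes TD_NEW_MSG_SEG reserves one enum slot at segment << 8 and each TD_DEF_MSG_TYPE then consumes two consecutive slots (request, response); that assumption reproduces the annotated numbers.

#include <assert.h>

enum { kSegStream = 4 };  // TDMT_STREAM_MSG is the fifth segment, base 4 << 8 == 1024

// Request id of the nthEntry-th (1-based) message type inside a segment;
// its response id is reqIdOf(...) + 1 under the assumed two-slots-per-type layout.
static int reqIdOf(int seg, int nthEntry) { return (seg << 8) + 2 * nthEntry - 1; }

int main(void) {
  assert(reqIdOf(kSegStream, 1) == 1025);  // stream-task-deploy req (rsp: 1026)
  assert(reqIdOf(kSegStream, 6) == 1035);  // stream-retrieve req (rsp: 1036)
  return 0;
}

Under that layout, appending new types only at a segment's tail (per the WARN comment) keeps every previously assigned value stable across releases.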
@@ -1,18 +0,0 @@
-//
-// Created by mingming wanng on 2023/11/15.
-//
-
-#ifndef TDENGINE_STREAM_H
-#define TDENGINE_STREAM_H
-
-#define STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID (-1)
-#define STREAM_EXEC_START_ALL_TASKS_ID (-2)
-#define STREAM_EXEC_RESTART_ALL_TASKS_ID (-3)
-
-typedef struct STaskUpdateEntry {
-  int64_t streamId;
-  int32_t taskId;
-  int32_t transId;
-} STaskUpdateEntry;
-
-#endif  // TDENGINE_STREAM_H
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TDENGINE_TQ_COMMON_H
+#define TDENGINE_TQ_COMMON_H
+
+// message process
+int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart);
+int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored);
+int32_t tqStreamTaskProcessDispatchReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessDispatchRsp(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessScanHistoryFinishReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader);
+int32_t tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg);
+int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* msg, int32_t msgLen, bool isLeader, bool restored);
+int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen);
+int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader);
+int32_t startStreamTasks(SStreamMeta* pMeta);
+int32_t resetStreamTaskStatus(SStreamMeta* pMeta);
+
+#endif  // TDENGINE_TQ_COMMON_H
@@ -110,6 +110,7 @@ int64_t nodesMakeAllocatorWeakRef(int64_t allocatorId);
 int64_t nodesReleaseAllocatorWeakRef(int64_t allocatorId);
 void nodesDestroyAllocator(int64_t allocatorId);

+int32_t getNodeSize(ENodeType type);
 SNode* nodesMakeNode(ENodeType type);
 void nodesDestroyNode(SNode* pNode);
 void nodesFree(void* p);
@@ -50,6 +50,10 @@ extern "C" {
     (_t)->hTaskInfo.id.streamId = 0; \
   } while (0)

+#define STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID (-1)
+#define STREAM_EXEC_START_ALL_TASKS_ID (-2)
+#define STREAM_EXEC_RESTART_ALL_TASKS_ID (-3)
+
 typedef struct SStreamTask SStreamTask;
 typedef struct SStreamQueue SStreamQueue;
 typedef struct SStreamTaskSM SStreamTaskSM;
@@ -664,6 +668,8 @@ typedef struct STaskStatusEntry {
   int32_t relatedHTask;        // has related fill-history task
   int64_t activeCheckpointId;  // current active checkpoint id
   bool    checkpointFailed;    // denote if the checkpoint is failed or not
+  bool    inputQChanging;      // inputQ is changing or not
+  int64_t inputQUnchangeCounter;
   double  inputQUsed;          // in MiB
   double  inputRate;
   double  sinkQuota;           // existed quota size for sink task
@@ -65,6 +65,7 @@ typedef struct TdFile *TdFilePtr;
 #define TD_FILE_STREAM 0x0100  // Only support taosFprintfFile, taosGetLineFile, taosEOFFile
 #define TD_FILE_WRITE_THROUGH 0x0200
+#define TD_FILE_CLOEXEC 0x0400

 TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions);
 TdFilePtr taosCreateFile(const char *path, int32_t tdFileOptions);
@@ -139,6 +139,8 @@ int32_t getWordLength(char type);
 int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type);
 int32_t tsDecompressFloatImplAvx512(const char *const input, const int32_t nelements, char *const output);
 int32_t tsDecompressFloatImplAvx2(const char *const input, const int32_t nelements, char *const output);
+int32_t tsDecompressTimestampAvx512(const char* const input, const int32_t nelements, char *const output, bool bigEndian);
+int32_t tsDecompressTimestampAvx2(const char* const input, const int32_t nelements, char *const output, bool bigEndian);

 /*************************************************************************
  *                  STREAM COMPRESSION
@@ -38,3 +38,4 @@ source /etc/profile
 ${csudo}mkdir -p ${corePath} ||:
 ${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
+${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||:
 ${csudo}echo "kernel.core_pattern = ${corePath}/core_%e-%p" >> /etc/sysctl.conf ||:
@@ -876,6 +876,7 @@ int taos_get_current_db(TAOS *taos, char *database, int len, int *required) {
     code = 0;
   }
   taosThreadMutexUnlock(&pTscObj->mutex);
+  releaseTscObj(*(int64_t *)taos);
   return code;
 }
@@ -126,9 +126,9 @@ void queryCallback(void* param, void* res, int32_t code) {
   taos_fetch_raw_block_a(res, fetchCallback, param);
 }

-void createNewTable(TAOS* pConn, int32_t index) {
+void createNewTable(TAOS* pConn, int32_t index, int32_t numOfRows, int64_t startTs, const char* pVarchar) {
   char str[1024] = {0};
-  sprintf(str, "create table tu%d using st2 tags(%d)", index, index);
+  sprintf(str, "create table if not exists tu%d using st2 tags(%d)", index, index);

   TAOS_RES* pRes = taos_query(pConn, str);
   if (taos_errno(pRes) != 0) {
@@ -136,22 +136,43 @@ void createNewTable(TAOS* pConn, int32_t index) {
   }
   taos_free_result(pRes);

-  for (int32_t i = 0; i < 10000; i += 20) {
-    char sql[1024] = {0};
-    sprintf(sql,
-            "insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
-            "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
-            "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
-            "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)",
-            index, i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7,
-            i + 7, i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14,
-            i + 14, i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19);
-    TAOS_RES* p = taos_query(pConn, sql);
-    if (taos_errno(p) != 0) {
-      printf("failed to insert data, reason:%s\n", taos_errstr(p));
-    }
-
-    taos_free_result(p);
-  }
+  if (startTs == 0) {
+    for (int32_t i = 0; i < numOfRows; i += 20) {
+      char sql[1024] = {0};
+      sprintf(sql,
+              "insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
+              "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
+              "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
+              "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)",
+              index, i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7,
+              i + 7, i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14,
+              i + 14, i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19);
+      TAOS_RES* p = taos_query(pConn, sql);
+      if (taos_errno(p) != 0) {
+        printf("failed to insert data, reason:%s\n", taos_errstr(p));
+      }
+
+      taos_free_result(p);
+    }
+  } else {
+    for (int32_t i = 0; i < numOfRows; i += 20) {
+      char sql[1024*50] = {0};
+      sprintf(sql,
+              "insert into tu%d values(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, "
+              "%d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, "
+              "'%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')",
+              index, startTs, i, pVarchar, startTs + 1, i + 1, pVarchar, startTs + 2, i + 2, pVarchar, startTs + 3, i + 3, pVarchar, startTs + 4, i + 4,
+              pVarchar, startTs + 5, i + 5, pVarchar, startTs + 6, i + 6, pVarchar, startTs + 7, i + 7, pVarchar, startTs + 8, i + 8, pVarchar, startTs + 9, i + 9,
+              pVarchar, startTs + 10, i + 10, pVarchar, startTs + 11, i + 11, pVarchar, startTs + 12, i + 12, pVarchar, startTs + 13, i + 13, pVarchar, startTs + 14,
+              i + 14, pVarchar, startTs + 15, i + 15, pVarchar, startTs + 16, i + 16, pVarchar, startTs + 17, i + 17, pVarchar, startTs + 18, i + 18,
+              pVarchar, startTs + 19, i + 19, pVarchar);
+      TAOS_RES* p = taos_query(pConn, sql);
+      if (taos_errno(p) != 0) {
+        printf("failed to insert data, reason:%s\n", taos_errstr(p));
+      }
+
+      taos_free_result(p);
+    }
+  }
 }
@@ -808,14 +829,7 @@ TEST(clientCase, projection_query_tables) {
   TAOS_RES* pRes = taos_query(pConn, "use abc1");
   taos_free_result(pRes);

-  pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
-  if (taos_errno(pRes) != 0) {
-    printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
-  }
-
-  taos_free_result(pRes);
-
-  pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
+  pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int, f varchar(4096)) tags(a int)");
   if (taos_errno(pRes) != 0) {
     printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
   }
@@ -828,28 +842,32 @@ TEST(clientCase, projection_query_tables) {
   taos_free_result(pRes);

+  int64_t start = 1685959190000;
+  const char* pstr =
+      "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefgh"
+      "ijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnop"
+      "qrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx"
+      "yzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdef"
+      "ghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz!@#$%^&&*&^^%$#@!qQWERTYUIOPASDFGHJKL:"
+      "QWERTYUIOP{}";
+
-  int32_t code = -1;
-  for(int32_t i = 0; i < 1000000; ++i) {
-    char t[512] = {0};
+  for(int32_t i = 0; i < 10000; ++i) {
+    char str[1024] = {0};
+    sprintf(str, "create table if not exists tu%d using st2 tags(%d)", i, i);

-    sprintf(t, "insert into t1 values(now, %d)", i);
-    while(1) {
-      void* p = taos_query(pConn, t);
-      code = taos_errno(p);
-      taos_free_result(p);
-      if (code != 0) {
-        printf("insert data error, retry\n");
-      } else {
-        break;
-      }
-    }
+    TAOS_RES* px = taos_query(pConn, str);
+    if (taos_errno(px) != 0) {
+      printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
+    }
+    taos_free_result(px);
   }

+  for(int32_t j = 0; j < 5000; ++j) {
+    start += 20;
+    for (int32_t i = 0; i < 10000; ++i) {
+      createNewTable(pConn, i, 20, start, pstr);
+    }
+  }
-
-  for (int32_t i = 0; i < 1; ++i) {
-    printf("create table :%d\n", i);
-    createNewTable(pConn, i);
-  }
   //
   //  pRes = taos_query(pConn, "select * from tu");
   //  if (taos_errno(pRes) != 0) {
@@ -140,7 +140,7 @@ static const SSysDbTableSchema userStbsSchema[] = {
     {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
     {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
     {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
-    {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+    {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
     {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
     {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
     {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
@@ -193,10 +193,6 @@ int32_t smPutNodeMsgToStreamQueue(SSnodeMgmt *pMgmt, SRpcMsg *pMsg) {
   SSingleWorker *pWorker = &pMgmt->streamWorker;

   dTrace("msg:%p, put into worker %s", pMsg, pWorker->name);
-  if (pMsg->msgType == TDMT_STREAM_TASK_DISPATCH) {
-    sndEnqueueStreamDispatch(pMgmt->pSnode, pMsg);
-  } else {
-    taosWriteQitem(pWorker->queue, pMsg);
-  }
+  taosWriteQitem(pWorker->queue, pMsg);
   return 0;
 }
@@ -206,11 +206,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
       break;
     case STREAM_QUEUE:
       dGTrace("vgId:%d, msg:%p put into vnode-stream queue", pVnode->vgId, pMsg);
-      if (pMsg->msgType == TDMT_STREAM_TASK_DISPATCH) {
-        vnodeEnqueueStreamMsg(pVnode->pImpl, pMsg);
-      } else {
-        taosWriteQitem(pVnode->pStreamQ, pMsg);
-      }
+      taosWriteQitem(pVnode->pStreamQ, pMsg);
       break;
     case FETCH_QUEUE:
       dGTrace("vgId:%d, msg:%p put into vnode-fetch queue", pVnode->vgId, pMsg);
@@ -52,7 +52,7 @@ int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
 int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);

 int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pName, const char* pSrcDb, const char* pDstDb);
-bool streamTransConflictOtherTrans(SMnode *pMnode, const char *pSrcDb, const char *pDstDb);
+bool streamTransConflictOtherTrans(SMnode *pMnode, const char *pSrcDb, const char *pDstDb, bool lock);

 // for sma
 // TODO refactor
@@ -742,14 +742,14 @@ static int32_t checkForNumOfStreams(SMnode *pMnode, SStreamObj *pStreamObj) { /
 }

 static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
-  SMnode * pMnode = pReq->info.node;
-  int32_t code = -1;
-  SStreamObj * pStream = NULL;
-  SDbObj * pDb = NULL;
-  SCMCreateStreamReq createStreamReq = {0};
-  SStreamObj streamObj = {0};
-  char* sql = NULL;
+  SMnode *pMnode = pReq->info.node;
+  int32_t code = -1;
+  SStreamObj *pStream = NULL;
+  SStreamObj streamObj = {0};
+  char *sql = NULL;
+  int32_t sqlLen = 0;
+
+  SCMCreateStreamReq createStreamReq = {0};
   if (tDeserializeSCMCreateStreamReq(pReq->pCont, pReq->contLen, &createStreamReq) != 0) {
     terrno = TSDB_CODE_INVALID_MSG;
     goto _OVER;
@@ -780,8 +780,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
     goto _OVER;
   }

-  int32_t sqlLen = 0;
-  if(createStreamReq.sql != NULL){
+  if (createStreamReq.sql != NULL) {
     sqlLen = strlen(createStreamReq.sql);
     sql = taosMemoryMalloc(sqlLen + 1);
     memset(sql, 0, sqlLen + 1);
@@ -868,14 +867,13 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
   // reuse this function for stream

   if (sql != NULL && sqlLen > 0) {
-    auditRecord(pReq, pMnode->clusterId, "createStream", dbname.dbname, name.dbname, sql,
-                sqlLen);
-  }
-  else{
+    auditRecord(pReq, pMnode->clusterId, "createStream", dbname.dbname, name.dbname, sql, sqlLen);
+  } else {
     char detail[1000] = {0};
     sprintf(detail, "dbname:%s, stream name:%s", dbname.dbname, name.dbname);
     auditRecord(pReq, pMnode->clusterId, "createStream", dbname.dbname, name.dbname, detail, strlen(detail));
   }

 _OVER:
   if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) {
     mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr());
@@ -884,7 +882,7 @@ _OVER:
   mndReleaseStream(pMnode, pStream);
   tFreeSCMCreateStreamReq(&createStreamReq);
   tFreeStreamObj(&streamObj);
-  if(sql != NULL){
+  if (sql != NULL) {
     taosMemoryFreeClear(sql);
   }
   return code;
@@ -1335,7 +1333,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
   }

   // check if it is conflict with other trans in both sourceDb and targetDb.
-  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb);
+  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb, true);
   if (conflict) {
     sdbRelease(pMnode->pSdb, pStream);
     tFreeMDropStreamReq(&dropReq);
@@ -1818,7 +1816,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) {
   }

   // check if it is conflict with other trans in both sourceDb and targetDb.
-  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb);
+  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb, true);
   if (conflict) {
     sdbRelease(pMnode->pSdb, pStream);
     return -1;
@@ -1953,7 +1951,7 @@ static int32_t mndProcessResumeStreamReq(SRpcMsg *pReq) {
   }

   // check if it is conflict with other trans in both sourceDb and targetDb.
-  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb);
+  bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb, true);
   if (conflict) {
     sdbRelease(pMnode->pSdb, pStream);
     return -1;
@@ -2741,7 +2739,7 @@ static int32_t mndResetStatusFromCheckpoint(SMnode *pMnode, int32_t transId) {
       break;
     }

-    bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb);
+    bool conflict = streamTransConflictOtherTrans(pMnode, pStream->sourceDb, pStream->targetDb, false);
     if (conflict) {
       mError("stream:%s other trans exists in DB:%s & %s failed to start reset-status trans",
              pStream->name, pStream->sourceDb, pStream->targetDb);
@@ -2777,7 +2775,7 @@ static SStreamTask* mndGetStreamTask(STaskId* pId, SStreamObj* pStream) {

 static bool needDropRelatedFillhistoryTask(STaskStatusEntry *pTaskEntry, SStreamExecInfo *pExecNode) {
   if (pTaskEntry->status == TASK_STATUS__STREAM_SCAN_HISTORY && pTaskEntry->statusLastDuration >= 10) {
-    if (fabs(pTaskEntry->inputQUsed) <= DBL_EPSILON) {
+    if (!pTaskEntry->inputQChanging && pTaskEntry->inputQUnchangeCounter > 10) {
       int32_t numOfReady = 0;
       int32_t numOfTotal = 0;
       for (int32_t k = 0; k < taosArrayGetSize(pExecNode->pTaskList); ++k) {
@@ -2920,6 +2918,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
   bool snodeChanged = false;
   for (int32_t i = 0; i < req.numOfTasks; ++i) {
     STaskStatusEntry *p = taosArrayGet(req.pTaskStatus, i);
+
     STaskStatusEntry *pTaskEntry = taosHashGet(execInfo.pTaskMap, &p->id, sizeof(p->id));
     if (pTaskEntry == NULL) {
       mError("s-task:0x%" PRIx64 " not found in mnode task list", p->id.taskId);
@@ -2928,8 +2927,22 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {

     if (pTaskEntry->stage != p->stage && pTaskEntry->stage != -1) {
       updateStageInfo(pTaskEntry, p->stage);
-      if(pTaskEntry->nodeId == SNODE_HANDLE) snodeChanged = true;
+      if(pTaskEntry->nodeId == SNODE_HANDLE) {
+        snodeChanged = true;
+      }
+    } else {
+      // task is idle for more than 50 sec.
+      if (fabs(pTaskEntry->inputQUsed - p->inputQUsed) <= DBL_EPSILON) {
+        if (!pTaskEntry->inputQChanging) {
+          pTaskEntry->inputQUnchangeCounter++;
+        } else {
+          pTaskEntry->inputQChanging = false;
+        }
+      } else {
+        pTaskEntry->inputQChanging = true;
+        pTaskEntry->inputQUnchangeCounter = 0;
+      }
     }

     streamTaskStatusCopy(pTaskEntry, p);
     if (p->activeCheckpointId != 0) {
       if (activeCheckpointId != 0) {
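The heartbeat bookkeeping above pairs with the new STaskStatusEntry fields (inputQChanging, inputQUnchangeCounter): needDropRelatedFillhistoryTask now fires once the input queue has been flat for more than 10 consecutive heartbeats. The "idle for more than 50 sec." comment holds if those heartbeats arrive roughly every 5 seconds, which is an assumption here; the interval is defined outside this diff. A standalone sketch of the counter logic:

#include <float.h>
#include <math.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
  double  inputQUsed;             // MiB buffered in the input queue
  bool    inputQChanging;         // usage moved since the previous heartbeat
  int64_t inputQUnchangeCounter;  // consecutive "no movement" heartbeats
} TaskQStats;

// Mirrors the bookkeeping in mndProcessStreamHb; the real code copies the
// reported inputQUsed afterwards via streamTaskStatusCopy().
static void onHeartbeat(TaskQStats *s, double reportedQUsed) {
  if (fabs(s->inputQUsed - reportedQUsed) <= DBL_EPSILON) {
    if (!s->inputQChanging) {
      s->inputQUnchangeCounter++;  // queue still flat: extend the idle streak
    } else {
      s->inputQChanging = false;   // first flat heartbeat after movement
    }
  } else {
    s->inputQChanging = true;      // queue moved: restart the streak
    s->inputQUnchangeCounter = 0;
  }
  s->inputQUsed = reportedQUsed;
}

// needDropRelatedFillhistoryTask's trigger: more than 10 flat heartbeats,
// i.e. roughly 10 * 5s >= 50s of idleness under the assumed interval.
static bool looksIdle(const TaskQStats *s) {
  return !s->inputQChanging && s->inputQUnchangeCounter > 10;
}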
@@ -35,17 +35,15 @@ int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pName, const char* pS
 }

 int32_t clearFinishedTrans(SMnode* pMnode) {
-  SArray* pList = taosArrayInit(4, sizeof(SKeyInfo));
   size_t keyLen = 0;
+  SArray* pList = taosArrayInit(4, sizeof(SKeyInfo));
+  void* pIter = NULL;

   taosThreadMutexLock(&execInfo.lock);

-  void* pIter = NULL;
   while ((pIter = taosHashIterate(execInfo.transMgmt.pDBTrans, pIter)) != NULL) {
-    SStreamTransInfo *pEntry = (SStreamTransInfo *)pIter;
-    STrans* pTrans = mndAcquireTrans(pMnode, pEntry->transId);
+    SStreamTransInfo* pEntry = (SStreamTransInfo*)pIter;
+
+    // let's clear the finished trans
+    STrans* pTrans = mndAcquireTrans(pMnode, pEntry->transId);
     if (pTrans == NULL) {
       void* pKey = taosHashGetKey(pEntry, &keyLen);
       // key is the name of src/dst db name
@@ -60,44 +58,55 @@ int32_t clearFinishedTrans(SMnode* pMnode) {
   }

   size_t num = taosArrayGetSize(pList);
-  for(int32_t i = 0; i < num; ++i) {
+  for (int32_t i = 0; i < num; ++i) {
     SKeyInfo* pKey = taosArrayGet(pList, i);
     taosHashRemove(execInfo.transMgmt.pDBTrans, pKey->pKey, pKey->keyLen);
   }

-  mDebug("clear %d finished stream-trans, remained:%d", (int32_t) num, taosHashGetSize(execInfo.transMgmt.pDBTrans));
   taosThreadMutexUnlock(&execInfo.lock);
+  mDebug("clear %d finished stream-trans, remained:%d", (int32_t)num, taosHashGetSize(execInfo.transMgmt.pDBTrans));

   terrno = TSDB_CODE_SUCCESS;
   taosArrayDestroy(pList);
   return 0;
 }

-bool streamTransConflictOtherTrans(SMnode* pMnode, const char* pSrcDb, const char* pDstDb) {
-  clearFinishedTrans(pMnode);
+bool streamTransConflictOtherTrans(SMnode* pMnode, const char* pSrcDb, const char* pDstDb, bool lock) {
+  if (lock) {
+    taosThreadMutexLock(&execInfo.lock);
+  }

-  taosThreadMutexLock(&execInfo.lock);
   int32_t num = taosHashGetSize(execInfo.transMgmt.pDBTrans);
   if (num <= 0) {
-    taosThreadMutexUnlock(&execInfo.lock);
+    if (lock) {
+      taosThreadMutexUnlock(&execInfo.lock);
+    }
     return false;
   }

+  clearFinishedTrans(pMnode);
+
   SStreamTransInfo *pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, pSrcDb, strlen(pSrcDb));
   if (pEntry != NULL) {
-    taosThreadMutexUnlock(&execInfo.lock);
+    if (lock) {
+      taosThreadMutexUnlock(&execInfo.lock);
+    }
     mWarn("conflict with other transId:%d in Db:%s, trans:%s", pEntry->transId, pSrcDb, pEntry->name);
     return true;
   }

   pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, pDstDb, strlen(pDstDb));
   if (pEntry != NULL) {
-    taosThreadMutexUnlock(&execInfo.lock);
+    if (lock) {
+      taosThreadMutexUnlock(&execInfo.lock);
+    }
     mWarn("conflict with other transId:%d in Db:%s, trans:%s", pEntry->transId, pSrcDb, pEntry->name);
     return true;
   }

-  taosThreadMutexUnlock(&execInfo.lock);
+  if (lock) {
+    taosThreadMutexUnlock(&execInfo.lock);
+  }

   return false;
 }
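The new bool lock parameter exists so that a caller which already holds execInfo.lock (mndResetStatusFromCheckpoint passes false above) does not re-acquire it: taking a default, non-recursive pthread mutex twice on the same thread would deadlock. A minimal sketch of the pattern, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t gLock = PTHREAD_MUTEX_INITIALIZER;

// Hypothetical reduction: locking is optional so the function can be called
// both from unlocked contexts (lock=true) and from under the caller's lock.
static bool hasConflict(bool lock) {
  if (lock) pthread_mutex_lock(&gLock);
  bool conflict = false;  // ... inspect shared state here ...
  if (lock) pthread_mutex_unlock(&gLock);
  return conflict;
}

static void callerAlreadyHoldingLock(void) {
  pthread_mutex_lock(&gLock);
  (void)hasConflict(false);  // skip re-lock: would deadlock on a default mutex
  pthread_mutex_unlock(&gLock);
}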
@@ -708,9 +708,8 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
   int32_t numOfAlterViews = taosHashGetSize(pUser->alterViews);
   int32_t numOfTopics = taosHashGetSize(pUser->topics);
   int32_t numOfUseDbs = taosHashGetSize(pUser->useDbs);
-  int32_t size = sizeof(SUserObj) + USER_RESERVE_SIZE +
-                 (numOfReadDbs + numOfWriteDbs + numOfUseDbs) * TSDB_DB_FNAME_LEN + numOfTopics * TSDB_TOPIC_FNAME_LEN +
-                 ipWhiteReserve;
+  int32_t size = sizeof(SUserObj) + USER_RESERVE_SIZE + (numOfReadDbs + numOfWriteDbs) * TSDB_DB_FNAME_LEN +
+                 numOfTopics * TSDB_TOPIC_FNAME_LEN + ipWhiteReserve;

   char *stb = taosHashIterate(pUser->readTbs, NULL);
   while (stb != NULL) {
@@ -720,7 +719,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
     size += keyLen;

     size_t valueLen = 0;
-    valueLen = strlen(stb);
+    valueLen = strlen(stb) + 1;
     size += sizeof(int32_t);
     size += valueLen;
     stb = taosHashIterate(pUser->readTbs, stb);
@@ -734,7 +733,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
     size += keyLen;

     size_t valueLen = 0;
-    valueLen = strlen(stb);
+    valueLen = strlen(stb) + 1;
     size += sizeof(int32_t);
     size += valueLen;
     stb = taosHashIterate(pUser->writeTbs, stb);
@@ -748,7 +747,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
     size += keyLen;

     size_t valueLen = 0;
-    valueLen = strlen(stb);
+    valueLen = strlen(stb) + 1;
     size += sizeof(int32_t);
     size += valueLen;
     stb = taosHashIterate(pUser->alterTbs, stb);
@@ -762,7 +761,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
     size += keyLen;

     size_t valueLen = 0;
-    valueLen = strlen(stb);
+    valueLen = strlen(stb) + 1;
     size += sizeof(int32_t);
     size += valueLen;
     stb = taosHashIterate(pUser->readViews, stb);
@@ -776,7 +775,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
     size += keyLen;

     size_t valueLen = 0;
-    valueLen = strlen(stb);
+    valueLen = strlen(stb) + 1;
     size += sizeof(int32_t);
     size += valueLen;
     stb = taosHashIterate(pUser->writeViews, stb);
@@ -790,11 +789,21 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
     size += keyLen;

     size_t valueLen = 0;
-    valueLen = strlen(stb);
+    valueLen = strlen(stb) + 1;
     size += sizeof(int32_t);
     size += valueLen;
     stb = taosHashIterate(pUser->alterViews, stb);
   }

+  int32_t *useDb = taosHashIterate(pUser->useDbs, NULL);
+  while (useDb != NULL) {
+    size_t keyLen = 0;
+    void *key = taosHashGetKey(useDb, &keyLen);
+    size += sizeof(int32_t);
+    size += keyLen;
+    size += sizeof(int32_t);
+    useDb = taosHashIterate(pUser->useDbs, useDb);
+  }
+
   SSdbRaw *pRaw = sdbAllocRaw(SDB_USER, USER_VER_NUMBER, size);
   if (pRaw == NULL) goto _OVER;
@@ -925,7 +934,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
     stb = taosHashIterate(pUser->alterViews, stb);
   }

-  int32_t *useDb = taosHashIterate(pUser->useDbs, NULL);
+  useDb = taosHashIterate(pUser->useDbs, NULL);
   while (useDb != NULL) {
     size_t keyLen = 0;
     void *key = taosHashGetKey(useDb, &keyLen);
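The repeated strlen(stb) to strlen(stb) + 1 change makes the capacity estimate cover the terminating NUL, assuming the encoder writes the NUL byte along with each string; without the +1, every encoded string would overrun the estimate by one byte. A small sketch of that accounting (encodedStrSize is a hypothetical helper):

#include <stdint.h>
#include <string.h>

// Bytes reserved for one encoded string field, assuming the encoder emits a
// length prefix followed by the string bytes including the terminating NUL.
static size_t encodedStrSize(const char *s) {
  size_t valueLen = strlen(s) + 1;    // payload including the NUL
  return sizeof(int32_t) + valueLen;  // length prefix + payload
}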
@@ -17,4 +17,5 @@ target_link_libraries(
   PRIVATE stream
   PRIVATE wal
   PRIVATE index
+  PRIVATE tqCommon
 )
@@ -16,9 +16,8 @@
 #include "rsync.h"
 #include "executor.h"
 #include "sndInt.h"
-#include "tstream.h"
+#include "tqCommon.h"
 #include "tuuid.h"
-#include "stream.h"

 #define sndError(...) \
   do {                \
@@ -41,45 +40,6 @@
   }                   \
   } while (0)

-void sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg) {
-  char *msgStr = pMsg->pCont;
-  char *msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
-  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
-  int32_t code = 0;
-
-  SStreamDispatchReq req;
-  SDecoder decoder;
-  tDecoderInit(&decoder, msgBody, msgLen);
-  if (tDecodeStreamDispatchReq(&decoder, &req) < 0) {
-    code = TSDB_CODE_MSG_DECODE_ERROR;
-    tDecoderClear(&decoder);
-    goto FAIL;
-  }
-
-  tDecoderClear(&decoder);
-
-  SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId);
-  if (pTask) {
-    SRpcMsg rsp = { .info = pMsg->info, .code = 0 };
-    streamProcessDispatchMsg(pTask, &req, &rsp);
-    tDeleteStreamDispatchReq(&req);
-    streamMetaReleaseTask(pSnode->pMeta, pTask);
-    rpcFreeCont(pMsg->pCont);
-    taosFreeQitem(pMsg);
-    return;
-  } else {
-    tDeleteStreamDispatchReq(&req);
-    return;
-  }
-
-FAIL:
-  if (pMsg->info.handle == NULL) return;
-  SRpcMsg rsp = { .code = code, .info = pMsg->info};
-  tmsgSendRsp(&rsp);
-  rpcFreeCont(pMsg->pCont);
-  taosFreeQitem(pMsg);
-}
-
 int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer) {
   ASSERT(pTask->info.taskLevel == TASK_LEVEL__AGG && taosArrayGetSize(pTask->upstreamInfo.pList) != 0);
   int32_t code = streamTaskInit(pTask, pSnode->pMeta, &pSnode->msgCb, nextProcessVer);
@@ -130,159 +90,29 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer
     pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1;
     sndInfo("s-task:%s restore from the checkpointId:%" PRId64 " ver:%" PRId64 " nextProcessVer:%" PRId64, pTask->id.idStr,
             pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
   } else {
     if (pTask->chkInfo.nextProcessVer == -1) {
       pTask->chkInfo.nextProcessVer = 0;
     }
   }

   char* p = NULL;
   streamTaskGetStatus(pTask, &p);

-  sndInfo("snode:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
-          " nextProcessVer:%" PRId64 " child id:%d, level:%d, status:%s fill-history:%d, trigger:%" PRId64 " ms",
-          SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
-          pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, pTask->info.triggerParam);
+  if (pTask->info.fillHistory) {
+    sndInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
+            " nextProcessVer:%" PRId64
+            " child id:%d, level:%d, status:%s fill-history:%d, related stream task:0x%x trigger:%" PRId64 " ms",
+            SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
+            pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory,
+            (int32_t)pTask->streamTaskId.taskId, pTask->info.triggerParam);
+  } else {
+    sndInfo("vgId:%d expand stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64
+            " nextProcessVer:%" PRId64
+            " child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64 " ms",
+            SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer,
+            pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory,
+            (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.triggerParam);
+  }
   return 0;
 }

-int32_t sndStartStreamTasks(SSnode* pSnode) {
-  int32_t code = TSDB_CODE_SUCCESS;
-  int32_t vgId = SNODE_HANDLE;
-  SStreamMeta* pMeta = pSnode->pMeta;
-
-  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
-  sndDebug("vgId:%d start to check all %d stream task(s) downstream status", vgId, numOfTasks);
-  if (numOfTasks == 0) {
-    return TSDB_CODE_SUCCESS;
-  }
-
-  SArray* pTaskList = NULL;
-  streamMetaWLock(pMeta);
-  pTaskList = taosArrayDup(pMeta->pTaskList, NULL);
-  taosHashClear(pMeta->startInfo.pReadyTaskSet);
-  taosHashClear(pMeta->startInfo.pFailedTaskSet);
-  pMeta->startInfo.startTs = taosGetTimestampMs();
-  streamMetaWUnLock(pMeta);
-
-  // broadcast the check downstream tasks msg
-  for (int32_t i = 0; i < numOfTasks; ++i) {
-    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
-    SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId);
-    if (pTask == NULL) {
-      continue;
-    }
-
-    // fill-history task can only be launched by related stream tasks.
-    if (pTask->info.fillHistory == 1) {
-      streamMetaReleaseTask(pMeta, pTask);
-      continue;
-    }
-
-    if (pTask->status.downstreamReady == 1) {
-      if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
-        sndDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task",
-                 pTask->id.idStr);
-        streamLaunchFillHistoryTask(pTask);
-      }
-
-      streamMetaUpdateTaskDownstreamStatus(pTask, pTask->execInfo.init, pTask->execInfo.start, true);
-      streamMetaReleaseTask(pMeta, pTask);
-      continue;
-    }
-
-    EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(pTask)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT;
-    int32_t ret = streamTaskHandleEvent(pTask->status.pSM, event);
-    if (ret != TSDB_CODE_SUCCESS) {
-      code = ret;
-    }
-
-    streamMetaReleaseTask(pMeta, pTask);
-  }
-
-  taosArrayDestroy(pTaskList);
-  return code;
-}
-
-int32_t sndResetStreamTaskStatus(SSnode* pSnode) {
-  SStreamMeta* pMeta = pSnode->pMeta;
-  int32_t vgId = pMeta->vgId;
-  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
-
-  sndDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, numOfTasks);
-  if (numOfTasks == 0) {
-    return TSDB_CODE_SUCCESS;
-  }
-
-  for (int32_t i = 0; i < numOfTasks; ++i) {
-    SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
-
-    STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
-    SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
-    streamTaskResetStatus(*pTask);
-  }
-
-  return 0;
-}
-
-int32_t sndRestartStreamTasks(SSnode* pSnode) {
-  SStreamMeta* pMeta = pSnode->pMeta;
-  int32_t vgId = pMeta->vgId;
-  int32_t code = 0;
-  int64_t st = taosGetTimestampMs();
-
-  while(1) {
-    int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1);
-    if (startVal == 0) {
-      break;
-    }
-
-    sndDebug("vgId:%d in start stream tasks procedure, wait for 500ms and recheck", vgId);
-    taosMsleep(500);
-  }
-
-  terrno = 0;
-  sndInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d", vgId,
-          pMeta->updateInfo.transId);
-
-  while (streamMetaTaskInTimer(pMeta)) {
-    sndDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
-    taosMsleep(100);
-  }
-
-  streamMetaWLock(pMeta);
-
-  code = streamMetaReopen(pMeta);
-  if (code != TSDB_CODE_SUCCESS) {
-    sndError("vgId:%d failed to reopen stream meta", vgId);
-    streamMetaWUnLock(pMeta);
-    code = terrno;
-    return code;
-  }
-
-  streamMetaInitBackend(pMeta);
-  int64_t el = taosGetTimestampMs() - st;
-
-  sndInfo("vgId:%d close&reload state elapsed time:%.3fs", vgId, el/1000.);
-
-  code = streamMetaLoadAllTasks(pMeta);
-  if (code != TSDB_CODE_SUCCESS) {
-    sndError("vgId:%d failed to load stream tasks, code:%s", vgId, tstrerror(terrno));
-    streamMetaWUnLock(pMeta);
-    code = terrno;
-    return code;
-  }
-  sndInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
-  sndResetStreamTaskStatus(pSnode);
-
-  streamMetaWUnLock(pMeta);
-  sndStartStreamTasks(pSnode);
-
-  code = terrno;
-  return code;
-}
-
 SSnode *sndOpen(const char *path, const SSnodeOpt *pOption) {
   SSnode *pSnode = taosMemoryCalloc(1, sizeof(SSnode));
   if (pSnode == NULL) {
@@ -318,8 +148,8 @@ FAIL:
 }

 int32_t sndInit(SSnode * pSnode) {
-  sndResetStreamTaskStatus(pSnode);
-  sndStartStreamTasks(pSnode);
+  resetStreamTaskStatus(pSnode->pMeta);
+  startStreamTasks(pSnode->pMeta);
   return 0;
 }
@ -332,578 +162,30 @@ void sndClose(SSnode *pSnode) {
|
|||
taosMemoryFree(pSnode);
|
||||
}
|
||||
|
||||
int32_t sndGetLoad(SSnode *pSnode, SSnodeLoad *pLoad) { return 0; }
|
||||
|
||||
int32_t sndStartStreamTaskAsync(SSnode* pSnode, bool restart) {
|
||||
SStreamMeta* pMeta = pSnode->pMeta;
|
||||
int32_t vgId = pMeta->vgId;
|
||||
|
||||
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
|
||||
if (numOfTasks == 0) {
|
||||
sndDebug("vgId:%d no stream tasks existed to run", vgId);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
|
||||
if (pRunReq == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
sndError("vgId:%d failed to create msg to start wal scanning to launch stream tasks, code:%s", vgId, terrstr());
|
||||
return -1;
|
||||
}
|
||||
|
||||
sndDebug("vgId:%d start all %d stream task(s) async", vgId, numOfTasks);
|
||||
pRunReq->head.vgId = vgId;
|
||||
pRunReq->streamId = 0;
|
||||
pRunReq->taskId = restart? STREAM_EXEC_RESTART_ALL_TASKS_ID:STREAM_EXEC_START_ALL_TASKS_ID;
|
||||
|
||||
SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
|
||||
tmsgPutToQueue(&pSnode->msgCb, STREAM_QUEUE, &msg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskDeployReq(SSnode *pSnode, char *msg, int32_t msgLen) {
|
||||
int32_t code;
|
||||
|
||||
// 1.deserialize msg and build task
|
||||
SStreamTask *pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
|
||||
if (pTask == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, (uint8_t *)msg, msgLen);
|
||||
code = tDecodeStreamTask(&decoder, pTask);
|
||||
if (code < 0) {
|
||||
tDecoderClear(&decoder);
|
||||
taosMemoryFree(pTask);
|
||||
return -1;
|
||||
}
|
||||
|
||||
tDecoderClear(&decoder);
|
||||
|
||||
ASSERT(pTask->info.taskLevel == TASK_LEVEL__AGG);
|
||||
|
||||
// 2.save task
|
||||
streamMetaWLock(pSnode->pMeta);
|
||||
|
||||
bool added = false;
|
||||
code = streamMetaRegisterTask(pSnode->pMeta, -1, pTask, &added);
|
||||
if (code < 0) {
|
||||
streamMetaWUnLock(pSnode->pMeta);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t numOfTasks = streamMetaGetNumOfTasks(pSnode->pMeta);
|
||||
streamMetaWUnLock(pSnode->pMeta);
|
||||
|
||||
char* p = NULL;
|
||||
streamTaskGetStatus(pTask, &p);
|
||||
|
||||
sndDebug("snode:%d s-task:%s is deployed on snode and add into meta, status:%s, numOfTasks:%d", SNODE_HANDLE,
|
||||
pTask->id.idStr, p, numOfTasks);
|
||||
|
||||
EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(pTask)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT;
|
||||
streamTaskHandleEvent(pTask->status.pSM, event);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskDropReq(SSnode *pSnode, char *msg, int32_t msgLen) {
|
||||
SVDropStreamTaskReq *pReq = (SVDropStreamTaskReq *)msg;
|
||||
sndDebug("snode:%d receive msg to drop stream task:0x%x", pSnode->pMeta->vgId, pReq->taskId);
|
||||
streamMetaUnregisterTask(pSnode->pMeta, pReq->streamId, pReq->taskId);
|
||||
|
||||
// commit the update
|
||||
streamMetaWLock(pSnode->pMeta);
|
||||
int32_t numOfTasks = streamMetaGetNumOfTasks(pSnode->pMeta);
|
||||
sndDebug("vgId:%d task:0x%x dropped, remain tasks:%d", pSnode->pMeta->vgId, pReq->taskId, numOfTasks);
|
||||
|
||||
if (streamMetaCommit(pSnode->pMeta) < 0) {
|
||||
// persist to disk
|
||||
}
|
||||
streamMetaWUnLock(pSnode->pMeta);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskRunReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
SStreamTaskRunReq *pReq = pMsg->pCont;
|
||||
|
||||
int32_t taskId = pReq->taskId;
|
||||
|
||||
if (taskId == STREAM_EXEC_START_ALL_TASKS_ID) {
|
||||
sndStartStreamTasks(pSnode);
|
||||
return 0;
|
||||
} else if (taskId == STREAM_EXEC_RESTART_ALL_TASKS_ID) {
|
||||
sndRestartStreamTasks(pSnode);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->streamId, pReq->taskId);
|
||||
if (pTask) {
|
||||
streamExecTask(pTask);
|
||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskDispatchReq(SSnode *pSnode, SRpcMsg *pMsg, bool exec) {
|
||||
char *msgStr = pMsg->pCont;
|
||||
char *msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
|
||||
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
|
||||
SStreamDispatchReq req;
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, (uint8_t *)msgBody, msgLen);
|
||||
tDecodeStreamDispatchReq(&decoder, &req);
|
||||
tDecoderClear(&decoder);
|
||||
|
||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId);
|
||||
if (pTask) {
|
||||
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
|
||||
streamProcessDispatchMsg(pTask, &req, &rsp);
|
||||
tDeleteStreamDispatchReq(&req);
|
||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||
return 0;
|
||||
} else {
|
||||
tDeleteStreamDispatchReq(&req);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
char *msgStr = pMsg->pCont;
|
||||
char *msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
|
||||
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
|
||||
SStreamRetrieveReq req;
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, msgBody, msgLen);
|
||||
tDecodeStreamRetrieveReq(&decoder, &req);
|
||||
tDecoderClear(&decoder);
|
||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.dstTaskId);
|
||||
|
||||
if (pTask) {
|
||||
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
|
||||
streamProcessRetrieveReq(pTask, &req, &rsp);
|
||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||
tDeleteStreamRetrieveReq(&req);
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
SStreamDispatchRsp *pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||
|
||||
pRsp->upstreamTaskId = htonl(pRsp->upstreamTaskId);
|
||||
pRsp->streamId = htobe64(pRsp->streamId);
|
||||
pRsp->downstreamTaskId = htonl(pRsp->downstreamTaskId);
|
||||
pRsp->downstreamNodeId = htonl(pRsp->downstreamNodeId);
|
||||
pRsp->stage = htobe64(pRsp->stage);
|
||||
pRsp->msgId = htonl(pRsp->msgId);
|
||||
|
||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pRsp->streamId, pRsp->upstreamTaskId);
|
||||
if (pTask) {
|
||||
streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
|
||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskRetrieveRsp(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
//
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskScanHistoryFinishReq(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
char *msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
|
||||
|
||||
// deserialize
|
||||
SStreamScanHistoryFinishReq req;
|
||||
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, msg, msgLen);
|
||||
tDecodeStreamScanHistoryFinishReq(&decoder, &req);
|
||||
tDecoderClear(&decoder);
|
||||
|
||||
// find task
|
||||
SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.downstreamTaskId);
|
||||
if (pTask == NULL) {
|
||||
return -1;
|
||||
}
|
||||
// do process request
|
||||
if (streamProcessScanHistoryFinishReq(pTask, &req, &pMsg->info) < 0) {
|
||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||
return -1;
|
||||
}
|
||||
|
||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t sndProcessTaskScanHistoryFinishRsp(SSnode *pSnode, SRpcMsg *pMsg) {
|
||||
char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
|
||||
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
|
||||
|
||||
// deserialize
|
||||
SStreamCompleteHistoryMsg req = {0};
|
||||
|
||||
SDecoder decoder;
|
||||
tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
|
||||
tDecodeCompleteHistoryDataMsg(&decoder, &req);
|
||||
tDecoderClear(&decoder);
|
||||
|
||||
SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.upstreamTaskId);
|
||||
if (pTask == NULL) {
|
||||
sndError("vgId:%d process scan history finish rsp, failed to find task:0x%x, it may be destroyed",
|
||||
pSnode->pMeta->vgId, req.upstreamTaskId);
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t remain = atomic_sub_fetch_32(&pTask->notReadyTasks, 1);
|
||||
if (remain > 0) {
|
||||
sndDebug("s-task:%s scan-history finish rsp received from downstream task:0x%x, unfinished remain:%d",
|
||||
pTask->id.idStr, req.downstreamId, remain);
|
||||
} else {
|
||||
sndDebug(
|
||||
"s-task:%s scan-history finish rsp received from downstream task:0x%x, all downstream tasks rsp scan-history "
|
||||
"completed msg",
|
||||
pTask->id.idStr, req.downstreamId);
|
||||
streamProcessScanHistoryFinishRsp(pTask);
|
||||
}
|
||||
|
||||
streamMetaReleaseTask(pSnode->pMeta, pTask);
|
||||
return 0;
|
||||
}

// the downstream task has completed the stream task checkpoint procedure; start handling the rsp by executing the task
int32_t sndProcessTaskCheckpointReadyMsg(SSnode *pSnode, SRpcMsg* pMsg) {
  SStreamMeta* pMeta = pSnode->pMeta;
  char*        msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t      len = pMsg->contLen - sizeof(SMsgHead);
  int32_t      code = 0;

  SStreamCheckpointReadyMsg req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, len);
  if (tDecodeStreamCheckpointReadyMsg(&decoder, &req) < 0) {
    code = TSDB_CODE_MSG_DECODE_ERROR;
    tDecoderClear(&decoder);
    return code;
  }
  tDecoderClear(&decoder);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.upstreamTaskId);
  if (pTask == NULL) {
    sndError("vgId:%d failed to find s-task:0x%x, it may have been destroyed already", pMeta->vgId, req.downstreamTaskId);
    return code;
  }

  sndDebug("snode vgId:%d s-task:%s received the checkpoint ready msg from task:0x%x (vgId:%d), handle it", pMeta->vgId,
           pTask->id.idStr, req.downstreamTaskId, req.downstreamNodeId);

  streamProcessCheckpointReadyMsg(pTask);
  streamMetaReleaseTask(pMeta, pTask);
  return code;
}

int32_t sndProcessStreamTaskCheckReq(SSnode *pSnode, SRpcMsg *pMsg) {
  char   *msgStr = pMsg->pCont;
  char   *msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

  SStreamTaskCheckReq req;
  SDecoder            decoder;

  tDecoderInit(&decoder, (uint8_t *)msgBody, msgLen);
  tDecodeStreamTaskCheckReq(&decoder, &req);
  tDecoderClear(&decoder);

  int32_t taskId = req.downstreamTaskId;

  SStreamTaskCheckRsp rsp = {
      .reqId = req.reqId,
      .streamId = req.streamId,
      .childId = req.childId,
      .downstreamNodeId = req.downstreamNodeId,
      .downstreamTaskId = req.downstreamTaskId,
      .upstreamNodeId = req.upstreamNodeId,
      .upstreamTaskId = req.upstreamTaskId,
  };

  SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, taskId);

  if (pTask != NULL) {
    rsp.status = streamTaskCheckStatus(pTask, req.upstreamTaskId, req.upstreamNodeId, req.stage, &rsp.oldStage);
    streamMetaReleaseTask(pSnode->pMeta, pTask);
    char* p = NULL;
    streamTaskGetStatus(pTask, &p);
    sndDebug("s-task:%s status:%s, recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), ready:%d",
             pTask->id.idStr, p, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
  } else {
    rsp.status = TASK_DOWNSTREAM_NOT_READY;
    sndDebug("recv task check(taskId:0x%x not built yet) req(reqId:0x%" PRIx64 ") from task:0x%x (vgId:%d), rsp status %d",
             taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
  }

  SEncoder encoder;
  int32_t  code;
  int32_t  len;

  tEncodeSize(tEncodeStreamTaskCheckRsp, &rsp, len, code);
  if (code < 0) {
    sndError("vgId:%d failed to encode task check rsp, task:0x%x", pSnode->pMeta->vgId, taskId);
    return -1;
  }

  void *buf = rpcMallocCont(sizeof(SMsgHead) + len);
  ((SMsgHead *)buf)->vgId = htonl(req.upstreamNodeId);

  void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
  tEncoderInit(&encoder, (uint8_t *)abuf, len);
  tEncodeStreamTaskCheckRsp(&encoder, &rsp);
  tEncoderClear(&encoder);

  SRpcMsg rspMsg = {.code = 0, .pCont = buf, .contLen = sizeof(SMsgHead) + len, .info = pMsg->info};

  tmsgSendRsp(&rspMsg);
  return 0;
}
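
// The reply sequence above follows a recurring pattern: tEncodeSize to size
// the payload, rpcMallocCont for the head plus payload, encode, then
// tmsgSendRsp. A minimal sketch of the same steps as a standalone helper;
// the function name sndSendCheckRspSketch is hypothetical and not part of
// the original sources:
static int32_t sndSendCheckRspSketch(SStreamTaskCheckRsp *pRsp, int32_t dstNodeId, SRpcHandleInfo info) {
  int32_t code = 0;
  int32_t len = 0;
  tEncodeSize(tEncodeStreamTaskCheckRsp, pRsp, len, code);  // compute the encoded payload size first
  if (code < 0) {
    return -1;
  }

  void *buf = rpcMallocCont(sizeof(SMsgHead) + len);
  if (buf == NULL) {
    return -1;  // out of memory
  }
  ((SMsgHead *)buf)->vgId = htonl(dstNodeId);  // the head carries the destination vgId in network byte order

  SEncoder encoder;
  tEncoderInit(&encoder, (uint8_t *)POINTER_SHIFT(buf, sizeof(SMsgHead)), len);
  tEncodeStreamTaskCheckRsp(&encoder, pRsp);
  tEncoderClear(&encoder);

  SRpcMsg rspMsg = {.code = 0, .pCont = buf, .contLen = sizeof(SMsgHead) + len, .info = info};
  tmsgSendRsp(&rspMsg);
  return 0;
}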

int32_t sndProcessStreamTaskCheckRsp(SSnode* pSnode, SRpcMsg* pMsg) {
  char*   pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t len = pMsg->contLen - sizeof(SMsgHead);

  int32_t             code;
  SStreamTaskCheckRsp rsp;

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)pReq, len);
  code = tDecodeStreamTaskCheckRsp(&decoder, &rsp);

  if (code < 0) {
    tDecoderClear(&decoder);
    return -1;
  }

  tDecoderClear(&decoder);
  sndDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d",
           rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status);

  SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, rsp.streamId, rsp.upstreamTaskId);
  if (pTask == NULL) {
    sndError("tq failed to locate the stream task:0x%x (vgId:%d), it may have been destroyed", rsp.upstreamTaskId,
             pSnode->pMeta->vgId);
    return -1;
  }

  code = streamProcessCheckRsp(pTask, &rsp);
  streamMetaReleaseTask(pSnode->pMeta, pTask);
  return code;
}

int32_t sndProcessTaskUpdateReq(SSnode* pSnode, SRpcMsg* pMsg) {
  SStreamMeta* pMeta = pSnode->pMeta;
  int32_t      vgId = SNODE_HANDLE;
  char*        msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t      len = pMsg->contLen - sizeof(SMsgHead);
  SRpcMsg      rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS};

  SStreamTaskNodeUpdateMsg req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, len);
  if (tDecodeStreamTaskUpdateMsg(&decoder, &req) < 0) {
    rsp.code = TSDB_CODE_MSG_DECODE_ERROR;
    sndError("vgId:%d failed to decode task update msg, code:%s", vgId, tstrerror(rsp.code));
    tDecoderClear(&decoder);
    return rsp.code;
  }

  tDecoderClear(&decoder);

  // update the nodeEpset when it exists
  streamMetaWLock(pMeta);

  // the task epset may be updated repeatedly; when replaying the WAL, the task may be in stop status.
  STaskId       id = {.streamId = req.streamId, .taskId = req.taskId};
  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
  if (ppTask == NULL || *ppTask == NULL) {
    sndError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", pMeta->vgId,
             req.taskId);
    rsp.code = TSDB_CODE_SUCCESS;
    streamMetaWUnLock(pMeta);

    taosArrayDestroy(req.pNodeList);
    return rsp.code;
  }

  SStreamTask* pTask = *ppTask;

  if (pMeta->updateInfo.transId != req.transId) {
    pMeta->updateInfo.transId = req.transId;
    sndInfo("s-task:%s receive new trans to update nodeEp msg from mnode, transId:%d", pTask->id.idStr, req.transId);
    // info needs to be kept till the new trans to update the nodeEp arrives.
    taosHashClear(pMeta->updateInfo.pTasks);
  } else {
    sndDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", pTask->id.idStr, req.transId);
  }

  STaskUpdateEntry entry = {.streamId = req.streamId, .taskId = req.taskId, .transId = req.transId};
  void*            exist = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry));
  if (exist != NULL) {
    sndDebug("s-task:%s (vgId:%d) already update in trans:%d, discard the nodeEp update msg", pTask->id.idStr, vgId,
             req.transId);
    rsp.code = TSDB_CODE_SUCCESS;
    streamMetaWUnLock(pMeta);
    taosArrayDestroy(req.pNodeList);
    return rsp.code;
  }

  streamMetaWUnLock(pMeta);

  // the following two functions should not be executed within the scope of the meta lock, to avoid deadlock
  streamTaskUpdateEpsetInfo(pTask, req.pNodeList);
  streamTaskResetStatus(pTask);

  // continue after locking the meta again
  streamMetaWLock(pMeta);

  SStreamTask** ppHTask = NULL;
  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
    ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
    if (ppHTask == NULL || *ppHTask == NULL) {
      sndError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
               pMeta->vgId, req.taskId);
      CLEAR_RELATED_FILLHISTORY_TASK(pTask);
    } else {
      sndDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
      streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList);
    }
  }

  {
    streamMetaSaveTask(pMeta, pTask);
    if (ppHTask != NULL) {
      streamMetaSaveTask(pMeta, *ppHTask);
    }

    if (streamMetaCommit(pMeta) < 0) {
      // persist to disk
    }
  }

  streamTaskStop(pTask);

  // keep the already handled info
  taosHashPut(pMeta->updateInfo.pTasks, &entry, sizeof(entry), NULL, 0);

  if (ppHTask != NULL) {
    streamTaskStop(*ppHTask);
    sndDebug("s-task:%s task nodeEp update completed, streamTask and related fill-history task closed", pTask->id.idStr);
    taosHashPut(pMeta->updateInfo.pTasks, &(*ppHTask)->id, sizeof(pTask->id), NULL, 0);
  } else {
    sndDebug("s-task:%s task nodeEp update completed, streamTask closed", pTask->id.idStr);
  }

  rsp.code = 0;

  // possibly only handle the stream task.
  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
  int32_t updateTasks = taosHashGetSize(pMeta->updateInfo.pTasks);

  pMeta->startInfo.tasksWillRestart = 1;

  if (updateTasks < numOfTasks) {
    sndDebug("vgId:%d closed tasks:%d, unclosed:%d, all tasks will be started when nodeEp update completed", vgId,
             updateTasks, (numOfTasks - updateTasks));
    streamMetaWUnLock(pMeta);
  } else {
    sndDebug("vgId:%d all %d task(s) nodeEp updated and closed", vgId, numOfTasks);
#if 1
    sndStartStreamTaskAsync(pSnode, true);
    streamMetaWUnLock(pMeta);
#else
    streamMetaWUnLock(pMeta);

    // For debug purposes only.
    // The following procedure consumes many CPU resources, resulting in the re-election of the leader
    // with high probability. So we employ it as a test case for the stream processing framework, with
    // checkpoint/restart/nodeUpdate etc.
    while (1) {
      int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1);
      if (startVal == 0) {
        break;
      }

      tqDebug("vgId:%d in start stream tasks procedure, wait for 500ms and recheck", vgId);
      taosMsleep(500);
    }

    while (streamMetaTaskInTimer(pMeta)) {
      tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
      taosMsleep(100);
    }

    streamMetaWLock(pMeta);

    int32_t code = streamMetaReopen(pMeta);
    if (code != 0) {
      tqError("vgId:%d failed to reopen stream meta", vgId);
      streamMetaWUnLock(pMeta);
      taosArrayDestroy(req.pNodeList);
      return -1;
    }

    if (streamMetaLoadAllTasks(pTq->pStreamMeta) < 0) {
      tqError("vgId:%d failed to load stream tasks", vgId);
      streamMetaWUnLock(pMeta);
      taosArrayDestroy(req.pNodeList);
      return -1;
    }

    if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
      tqInfo("vgId:%d start all stream tasks after all being updated", vgId);
      tqResetStreamTaskStatus(pTq);
      tqStartStreamTaskAsync(pTq, false);
    } else {
      tqInfo("vgId:%d, follower node not start stream tasks", vgId);
    }
    streamMetaWUnLock(pMeta);
#endif
  }

  taosArrayDestroy(req.pNodeList);
  return rsp.code;
}
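
// Locking discipline in the nodeEp-update path above (descriptive comment
// only): the meta write lock is taken to look the task up and de-duplicate by
// transId, released before streamTaskUpdateEpsetInfo()/streamTaskResetStatus()
// (which must not run under the lock, to avoid deadlock), then re-acquired to
// persist via streamMetaSaveTask()/streamMetaCommit() and to record the
// handled entry in updateInfo.pTasks.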

int32_t sndProcessStreamMsg(SSnode *pSnode, SRpcMsg *pMsg) {
  switch (pMsg->msgType) {
    case TDMT_STREAM_TASK_RUN:
      return sndProcessTaskRunReq(pSnode, pMsg);
      return tqStreamTaskProcessRunReq(pSnode->pMeta, pMsg, true);
    case TDMT_STREAM_TASK_DISPATCH:
      return sndProcessTaskDispatchReq(pSnode, pMsg, true);
      return tqStreamTaskProcessDispatchReq(pSnode->pMeta, pMsg);
    case TDMT_STREAM_TASK_DISPATCH_RSP:
      return sndProcessTaskDispatchRsp(pSnode, pMsg);
      return tqStreamTaskProcessDispatchRsp(pSnode->pMeta, pMsg);
    case TDMT_STREAM_RETRIEVE:
      return sndProcessTaskRetrieveReq(pSnode, pMsg);
    case TDMT_STREAM_RETRIEVE_RSP:
      return sndProcessTaskRetrieveRsp(pSnode, pMsg);
      return tqStreamTaskProcessRetrieveReq(pSnode->pMeta, pMsg);
    case TDMT_STREAM_RETRIEVE_RSP:  // 1036
      break;
    case TDMT_VND_STREAM_SCAN_HISTORY_FINISH:
      return sndProcessTaskScanHistoryFinishReq(pSnode, pMsg);
      return tqStreamTaskProcessScanHistoryFinishReq(pSnode->pMeta, pMsg);
    case TDMT_VND_STREAM_SCAN_HISTORY_FINISH_RSP:
      return sndProcessTaskScanHistoryFinishRsp(pSnode, pMsg);
      return tqStreamTaskProcessScanHistoryFinishRsp(pSnode->pMeta, pMsg);
    case TDMT_VND_STREAM_TASK_CHECK:
      return sndProcessStreamTaskCheckReq(pSnode, pMsg);
      return tqStreamTaskProcessCheckReq(pSnode->pMeta, pMsg);
    case TDMT_VND_STREAM_TASK_CHECK_RSP:
      return sndProcessStreamTaskCheckRsp(pSnode, pMsg);
      return tqStreamTaskProcessCheckRsp(pSnode->pMeta, pMsg, true);
    case TDMT_STREAM_TASK_CHECKPOINT_READY:
      return sndProcessTaskCheckpointReadyMsg(pSnode, pMsg);
      return tqStreamTaskProcessCheckpointReadyMsg(pSnode->pMeta, pMsg);
    default:
      sndError("invalid snode msg:%d", pMsg->msgType);
      ASSERT(0);
  }
  return 0;

@@ -914,14 +196,13 @@ int32_t sndProcessWriteMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg *pRsp) {
    case TDMT_STREAM_TASK_DEPLOY: {
      void   *pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
      int32_t len = pMsg->contLen - sizeof(SMsgHead);
      return sndProcessTaskDeployReq(pSnode, pReq, len);
      return tqStreamTaskProcessDeployReq(pSnode->pMeta, -1, pReq, len, true, true);
    }

    case TDMT_STREAM_TASK_DROP:
      return sndProcessTaskDropReq(pSnode, pMsg->pCont, pMsg->contLen);
      return tqStreamTaskProcessDropReq(pSnode->pMeta, pMsg->pCont, pMsg->contLen);
    case TDMT_VND_STREAM_TASK_UPDATE:
      sndProcessTaskUpdateReq(pSnode, pMsg);
      break;
      return tqStreamTaskProcessUpdateReq(pSnode->pMeta, &pSnode->msgCb, pMsg, true);
    default:
      ASSERT(0);
  }

@@ -1,4 +1,5 @@
# vnode
add_subdirectory(src/tqCommon)
add_library(vnode STATIC "")
set(
    VNODE_SOURCE_FILES

@@ -117,6 +118,7 @@ if (${BUILD_CONTRIB})
    PUBLIC "inc"
    PUBLIC "src/inc"
    PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
    PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode"
    PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
else()

@@ -125,6 +127,7 @@ else()
    PUBLIC "inc"
    PUBLIC "src/inc"
    PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
    PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode"
)
if (${TD_LINUX})
    target_include_directories(

@@ -138,11 +141,6 @@ else()
    endif()
endif()

target_include_directories(
    vnode
    PUBLIC "${TD_SOURCE_DIR}/include/dnode/vnode"
)

target_link_libraries(
    vnode
    PUBLIC os

@@ -163,6 +161,7 @@ target_link_libraries(
    PUBLIC transport
    PUBLIC stream
    PUBLIC index
    PUBLIC tqCommon
)

IF (TD_GRANT)

@@ -253,8 +253,6 @@ bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char *idstr);
int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);

int32_t vnodeEnqueueStreamMsg(SVnode *pVnode, SRpcMsg *pMsg);

// sma
int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);

@@ -153,7 +153,6 @@ char* tqOffsetBuildFName(const char* path, int32_t fVer);
int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);

// tqStream
int32_t tqResetStreamTaskStatus(STQ* pTq);
int32_t tqStopStreamTasks(STQ* pTq);

// tq util

@@ -93,7 +93,11 @@ typedef struct SQueryNode SQueryNode;
#define VNODE_RSMA2_DIR "rsma2"
#define VNODE_TQ_STREAM "stream"

#if SUSPEND_RESUME_TEST  // only for test purpose
#define VNODE_BUFPOOL_SEGMENTS 1
#else
#define VNODE_BUFPOOL_SEGMENTS 3
#endif

#define VND_INFO_FNAME "vnode.json"
#define VND_INFO_FNAME_TMP "vnode_tmp.json"

@@ -234,7 +238,6 @@ int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg);

int32_t tqStartStreamTaskAsync(STQ* pTq, bool restart);
int32_t tqRestartStreamTasks(STQ* pTq);
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver);
int32_t tqScanWal(STQ* pTq);

@@ -263,7 +266,7 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t version, char* msg, int32_t msg
int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);

@@ -14,8 +14,8 @@
 */

#include "tq.h"
#include "stream.h"
#include "vnd.h"
#include "tqCommon.h"

typedef struct {
  int8_t inited;

@@ -891,175 +891,15 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
}

int32_t tqProcessTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
  char*        msgStr = pMsg->pCont;
  char*        msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t      msgLen = pMsg->contLen - sizeof(SMsgHead);
  SStreamMeta* pMeta = pTq->pStreamMeta;

  SStreamTaskCheckReq req;
  SDecoder            decoder;

  tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
  tDecodeStreamTaskCheckReq(&decoder, &req);
  tDecoderClear(&decoder);

  int32_t taskId = req.downstreamTaskId;

  SStreamTaskCheckRsp rsp = {
      .reqId = req.reqId,
      .streamId = req.streamId,
      .childId = req.childId,
      .downstreamNodeId = req.downstreamNodeId,
      .downstreamTaskId = req.downstreamTaskId,
      .upstreamNodeId = req.upstreamNodeId,
      .upstreamTaskId = req.upstreamTaskId,
  };

  // only the leader node handles the check request
  if (pMeta->role == NODE_ROLE_FOLLOWER) {
    tqError(
        "s-task:0x%x invalid check msg from upstream:0x%x(vgId:%d), vgId:%d is follower, not handle check status msg",
        taskId, req.upstreamTaskId, req.upstreamNodeId, pMeta->vgId);
    rsp.status = TASK_DOWNSTREAM_NOT_LEADER;
  } else {
    SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, taskId);
    if (pTask != NULL) {
      rsp.status = streamTaskCheckStatus(pTask, req.upstreamTaskId, req.upstreamNodeId, req.stage, &rsp.oldStage);
      streamMetaReleaseTask(pMeta, pTask);

      char* p = NULL;
      streamTaskGetStatus(pTask, &p);
      tqDebug("s-task:%s status:%s, stage:%" PRId64 " recv task check req(reqId:0x%" PRIx64
              ") task:0x%x (vgId:%d), check_status:%d",
              pTask->id.idStr, p, rsp.oldStage, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
    } else {
      rsp.status = TASK_DOWNSTREAM_NOT_READY;
      tqDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(reqId:0x%" PRIx64
              ") from task:0x%x (vgId:%d), rsp check_status %d",
              req.streamId, taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
    }
  }

  return streamSendCheckRsp(pMeta, &req, &rsp, &pMsg->info, taskId);
  return tqStreamTaskProcessCheckReq(pTq->pStreamMeta, pMsg);
}

int32_t tqProcessTaskCheckRsp(STQ* pTq, SRpcMsg* pMsg) {
  char*   pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t len = pMsg->contLen - sizeof(SMsgHead);
  int32_t vgId = pTq->pStreamMeta->vgId;

  int32_t             code;
  SStreamTaskCheckRsp rsp;

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)pReq, len);
  code = tDecodeStreamTaskCheckRsp(&decoder, &rsp);
  if (code < 0) {
    terrno = TSDB_CODE_INVALID_MSG;
    tDecoderClear(&decoder);
    tqError("vgId:%d failed to parse check rsp msg, code:%s", vgId, tstrerror(terrno));
    return -1;
  }

  tDecoderClear(&decoder);
  tqDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d",
          rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status);

  if (!vnodeIsRoleLeader(pTq->pVnode)) {
    tqError("vgId:%d not leader, task:0x%x not handle the check rsp, downstream:0x%x (vgId:%d)", vgId,
            rsp.upstreamTaskId, rsp.downstreamTaskId, rsp.downstreamNodeId);
    return code;
  }

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, rsp.streamId, rsp.upstreamTaskId);
  if (pTask == NULL) {
    tqError("tq failed to locate the stream task:0x%" PRIx64 "-0x%x (vgId:%d), it may have been destroyed or stopped",
            rsp.streamId, rsp.upstreamTaskId, pTq->pStreamMeta->vgId);
    terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST;
    return -1;
  }

  code = streamProcessCheckRsp(pTask, &rsp);
  streamMetaReleaseTask(pTq->pStreamMeta, pTask);
  return code;
  return tqStreamTaskProcessCheckRsp(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode));
}

int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
  int32_t code = 0;
  int32_t vgId = TD_VID(pTq->pVnode);

  if (tsDisableStream) {
    tqInfo("vgId:%d stream disabled, not deploy stream tasks", vgId);
    return code;
  }

  tqDebug("vgId:%d receive new stream task deploy msg, start to build stream task", vgId);

  // 1. deserialize msg and build task
  int32_t      size = sizeof(SStreamTask);
  SStreamTask* pTask = taosMemoryCalloc(1, size);
  if (pTask == NULL) {
    tqError("vgId:%d failed to create stream task due to out of memory, alloc size:%d", vgId, size);
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
  code = tDecodeStreamTask(&decoder, pTask);
  tDecoderClear(&decoder);

  if (code != TSDB_CODE_SUCCESS) {
    taosMemoryFree(pTask);
    return TSDB_CODE_INVALID_MSG;
  }

  SStreamMeta* pStreamMeta = pTq->pStreamMeta;

  // 2. save the task; use the latest commit version as the initial start version of the stream task.
  int32_t taskId = pTask->id.taskId;
  int64_t streamId = pTask->id.streamId;
  bool    added = false;

  streamMetaWLock(pStreamMeta);
  code = streamMetaRegisterTask(pStreamMeta, sversion, pTask, &added);
  int32_t numOfTasks = streamMetaGetNumOfTasks(pStreamMeta);
  streamMetaWUnLock(pStreamMeta);

  if (code < 0) {
    tqError("failed to add s-task:0x%x into vgId:%d meta, total:%d, code:%s", taskId, vgId, numOfTasks,
            tstrerror(code));
    tFreeStreamTask(pTask);
    return code;
  }

  // once added into the meta store, pTask must not be referenced again, since it may already have been
  // destroyed by another thread
  if (added) {
    // only handled in the leader node
    if (vnodeIsRoleLeader(pTq->pVnode)) {
      tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks);
      SStreamTask* p = streamMetaAcquireTask(pStreamMeta, streamId, taskId);

      bool restored = pTq->pVnode->restored;
      if (p != NULL && restored && p->info.fillHistory == 0) {
        EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(p)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT;
        streamTaskHandleEvent(p->status.pSM, event);
      } else if (!restored) {
        tqWarn("s-task:%s not launched since vnode(vgId:%d) not ready", p->id.idStr, vgId);
      }

      if (p != NULL) {
        streamMetaReleaseTask(pStreamMeta, p);
      }
    } else {
      tqDebug("vgId:%d not leader, not launch stream task s-task:0x%x", vgId, taskId);
    }
  } else {
    tqWarn("vgId:%d failed to add s-task:0x%x, since already exists in meta store", vgId, taskId);
    tFreeStreamTask(pTask);
  }

  return code;
  return tqStreamTaskProcessDeployReq(pTq->pStreamMeta, sversion, msg, msgLen, vnodeIsRoleLeader(pTq->pVnode), pTq->pVnode->restored);
}
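
// Deploy flow in the removed body above (descriptive comment only): decode
// the task, register it in the stream meta under the write lock, and only on
// the leader, and only once the vnode is restored, fire TASK_EVENT_INIT (or
// TASK_EVENT_INIT_STREAM_SCANHIST when a related fill-history task exists) to
// begin checking downstream readiness.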

static void doStartFillhistoryStep2(SStreamTask* pTask, SStreamTask* pStreamTask, STQ* pTq) {

@@ -1241,190 +1081,39 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {

// only the agg tasks and the sink tasks will receive this message from upstream tasks
int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg) {
  char*   msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

  // deserialize
  SStreamScanHistoryFinishReq req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
  tDecodeStreamScanHistoryFinishReq(&decoder, &req);
  tDecoderClear(&decoder);

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId);
  if (pTask == NULL) {
    tqError("vgId:%d process scan history finish msg, failed to find task:0x%x, it may be destroyed",
            pTq->pStreamMeta->vgId, req.downstreamTaskId);
    return -1;
  }

  tqDebug("s-task:%s receive scan-history finish msg from task:0x%x", pTask->id.idStr, req.upstreamTaskId);

  int32_t code = streamProcessScanHistoryFinishReq(pTask, &req, &pMsg->info);
  streamMetaReleaseTask(pTq->pStreamMeta, pTask);
  return code;
  return tqStreamTaskProcessScanHistoryFinishReq(pTq->pStreamMeta, pMsg);
}

int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg) {
  char*   msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

  // deserialize
  SStreamCompleteHistoryMsg req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
  tDecodeCompleteHistoryDataMsg(&decoder, &req);
  tDecoderClear(&decoder);

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.upstreamTaskId);
  if (pTask == NULL) {
    tqError("vgId:%d process scan history finish rsp, failed to find task:0x%x, it may be destroyed",
            pTq->pStreamMeta->vgId, req.upstreamTaskId);
    return -1;
  }

  int32_t remain = atomic_sub_fetch_32(&pTask->notReadyTasks, 1);
  if (remain > 0) {
    tqDebug("s-task:%s scan-history finish rsp received from downstream task:0x%x, unfinished remain:%d",
            pTask->id.idStr, req.downstreamId, remain);
  } else {
    tqDebug(
        "s-task:%s scan-history finish rsp received from downstream task:0x%x, all downstream tasks rsp scan-history "
        "completed msg",
        pTask->id.idStr, req.downstreamId);
    streamProcessScanHistoryFinishRsp(pTask);
  }

  streamMetaReleaseTask(pTq->pStreamMeta, pTask);
  return 0;
  return tqStreamTaskProcessScanHistoryFinishRsp(pTq->pStreamMeta, pMsg);
}

int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
  SStreamTaskRunReq* pReq = pMsg->pCont;

  int32_t taskId = pReq->taskId;
  int32_t vgId = TD_VID(pTq->pVnode);

  if (taskId == STREAM_EXEC_EXTRACT_DATA_IN_WAL_ID) {  // all tasks extract submit data from the WAL
    tqScanWal(pTq);
    return 0;
  } else if (taskId == STREAM_EXEC_START_ALL_TASKS_ID) {
    tqStartStreamTasks(pTq);
    return 0;
  } else if (taskId == STREAM_EXEC_RESTART_ALL_TASKS_ID) {
    tqRestartStreamTasks(pTq);
    return 0;
  }

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, taskId);
  if (pTask != NULL) {  // even in halt status, the data in inputQ must be processed
    char* p = NULL;
    if (streamTaskReadyToRun(pTask, &p)) {
      tqDebug("vgId:%d s-task:%s start to process block from inputQ, next checked ver:%" PRId64, vgId, pTask->id.idStr,
              pTask->chkInfo.nextProcessVer);
      streamExecTask(pTask);
    } else {
      int8_t status = streamTaskSetSchedStatusInactive(pTask);
      tqDebug("vgId:%d s-task:%s ignore run req since not in ready state, status:%s, sched-status:%d", vgId,
              pTask->id.idStr, p, status);
    }

    streamMetaReleaseTask(pTq->pStreamMeta, pTask);
  int32_t code = tqStreamTaskProcessRunReq(pTq->pStreamMeta, pMsg, vnodeIsRoleLeader(pTq->pVnode));
  if (code == 0 && taskId > 0) {
    tqScanWalAsync(pTq, false);
    return 0;
  } else {  // NOTE: pTask->status.schedStatus is not updated since it is not handled by the run exec.
    // todo add one function to handle this
    tqError("vgId:%d failed to find s-task, taskId:0x%x may have been dropped", vgId, taskId);
    return -1;
  }
  return code;
}

int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
  char*   msgStr = pMsg->pCont;
  char*   msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

  SStreamDispatchReq req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
  tDecodeStreamDispatchReq(&decoder, &req);
  tDecoderClear(&decoder);

  tqDebug("s-task:0x%x recv dispatch msg from 0x%x(vgId:%d)", req.taskId, req.upstreamTaskId, req.upstreamNodeId);

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId);
  if (pTask) {
    SRpcMsg rsp = {.info = pMsg->info, .code = 0};
    streamProcessDispatchMsg(pTask, &req, &rsp);
    tDeleteStreamDispatchReq(&req);
    streamMetaReleaseTask(pTq->pStreamMeta, pTask);
    return 0;
  } else {
    tqError("vgId:%d failed to find task:0x%x to handle the dispatch req, it may have been destroyed already",
            pTq->pStreamMeta->vgId, req.taskId);
    tDeleteStreamDispatchReq(&req);
    return -1;
  }
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg) {
  return tqStreamTaskProcessDispatchReq(pTq->pStreamMeta, pMsg);
}

int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
  SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));

  int32_t vgId = pTq->pStreamMeta->vgId;
  pRsp->upstreamTaskId = htonl(pRsp->upstreamTaskId);
  pRsp->streamId = htobe64(pRsp->streamId);
  pRsp->downstreamTaskId = htonl(pRsp->downstreamTaskId);
  pRsp->downstreamNodeId = htonl(pRsp->downstreamNodeId);
  pRsp->stage = htobe64(pRsp->stage);
  pRsp->msgId = htonl(pRsp->msgId);

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pRsp->streamId, pRsp->upstreamTaskId);
  if (pTask) {
    streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
    streamMetaReleaseTask(pTq->pStreamMeta, pTask);
    return TSDB_CODE_SUCCESS;
  } else {
    tqDebug("vgId:%d failed to handle the dispatch rsp, since find task:0x%x failed", vgId, pRsp->upstreamTaskId);
    terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST;
    return terrno;
  }
  return tqStreamTaskProcessDispatchRsp(pTq->pStreamMeta, pMsg);
}

int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen) {
  SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;

  int32_t      vgId = TD_VID(pTq->pVnode);
  SStreamMeta* pMeta = pTq->pStreamMeta;
  tqDebug("vgId:%d receive msg to drop s-task:0x%x", vgId, pReq->taskId);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
  if (pTask != NULL) {
    // drop the related fill-history task first
    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
      STaskId* pHTaskId = &pTask->hTaskInfo.id;
      streamMetaUnregisterTask(pMeta, pHTaskId->streamId, pHTaskId->taskId);
      tqDebug("vgId:%d drop fill-history task:0x%x dropped firstly", vgId, (int32_t)pHTaskId->taskId);
    }
    streamMetaReleaseTask(pMeta, pTask);
  }

  // drop the stream task now
  streamMetaUnregisterTask(pMeta, pReq->streamId, pReq->taskId);

  // commit the update
  streamMetaWLock(pMeta);
  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
  tqDebug("vgId:%d task:0x%x dropped, remain tasks:%d", vgId, pReq->taskId, numOfTasks);

  if (streamMetaCommit(pMeta) < 0) {
    // persist to disk
  }
  streamMetaWUnLock(pMeta);

  return 0;
  return tqStreamTaskProcessDropReq(pTq->pStreamMeta, msg, msgLen);
}

int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {

@@ -1533,30 +1222,7 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
}

int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
  char*    msgStr = pMsg->pCont;
  char*    msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t  msgLen = pMsg->contLen - sizeof(SMsgHead);
  SDecoder decoder;

  SStreamRetrieveReq req;
  tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
  tDecodeStreamRetrieveReq(&decoder, &req);
  tDecoderClear(&decoder);

  int32_t      vgId = pTq->pStreamMeta->vgId;
  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.dstTaskId);
  if (pTask == NULL) {
    tqError("vgId:%d process retrieve req, failed to acquire task:0x%x, it may have been dropped already", vgId,
            req.dstTaskId);
    return -1;
  }

  SRpcMsg rsp = {.info = pMsg->info, .code = 0};
  streamProcessRetrieveReq(pTask, &req, &rsp);

  streamMetaReleaseTask(pTq->pStreamMeta, pTask);
  tDeleteStreamRetrieveReq(&req);
  return 0;
  return tqStreamTaskProcessRetrieveReq(pTq->pStreamMeta, pMsg);
}

int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) {

@@ -1564,89 +1230,6 @@ int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) {
  return 0;
}

// todo refactor.
int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
  STQ*    pTq = pVnode->pTq;
  int32_t vgId = pVnode->config.vgId;

  SMsgHead* msgStr = pMsg->pCont;
  char*     msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t   msgLen = pMsg->contLen - sizeof(SMsgHead);
  int32_t   code = 0;

  SStreamDispatchReq req;
  SDecoder           decoder;
  tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
  if (tDecodeStreamDispatchReq(&decoder, &req) < 0) {
    code = TSDB_CODE_MSG_DECODE_ERROR;
    tDecoderClear(&decoder);
    goto FAIL;
  }
  tDecoderClear(&decoder);

  int32_t taskId = req.taskId;
  tqDebug("vgId:%d receive dispatch msg to s-task:0x%" PRIx64 "-0x%x", vgId, req.streamId, taskId);

  // for test purpose
  // if (req.type == STREAM_INPUT__CHECKPOINT_TRIGGER) {
  //   code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
  //   goto FAIL;
  // }

  SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, taskId);
  if (pTask != NULL) {
    SRpcMsg rsp = {.info = pMsg->info, .code = 0};
    streamProcessDispatchMsg(pTask, &req, &rsp);
    tDeleteStreamDispatchReq(&req);
    streamMetaReleaseTask(pTq->pStreamMeta, pTask);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
    return 0;
  } else {
    tDeleteStreamDispatchReq(&req);
  }

  code = TSDB_CODE_STREAM_TASK_NOT_EXIST;

FAIL:
  if (pMsg->info.handle == NULL) {
    tqError("s-task:0x%x vgId:%d msg handle is null, abort enqueue dispatch msg", taskId, vgId);
    return -1;
  }

  SMsgHead* pRspHead = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp));
  if (pRspHead == NULL) {
    SRpcMsg rsp = {.code = TSDB_CODE_OUT_OF_MEMORY, .info = pMsg->info};
    tqError("s-task:0x%x send dispatch error rsp, code:%s", taskId, tstrerror(code));
    tmsgSendRsp(&rsp);
    rpcFreeCont(pMsg->pCont);
    taosFreeQitem(pMsg);
    return -1;
  }

  pRspHead->vgId = htonl(req.upstreamNodeId);
  ASSERT(pRspHead->vgId != 0);

  SStreamDispatchRsp* pRsp = POINTER_SHIFT(pRspHead, sizeof(SMsgHead));
  pRsp->streamId = htobe64(req.streamId);
  pRsp->upstreamTaskId = htonl(req.upstreamTaskId);
  pRsp->upstreamNodeId = htonl(req.upstreamNodeId);
  pRsp->downstreamNodeId = htonl(pVnode->config.vgId);
  pRsp->downstreamTaskId = htonl(req.taskId);
  pRsp->msgId = htonl(req.msgId);
  pRsp->stage = htobe64(req.stage);
  pRsp->inputStatus = TASK_OUTPUT_STATUS__NORMAL;

  int32_t len = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
  SRpcMsg rsp = {.code = code, .info = pMsg->info, .contLen = len, .pCont = pRspHead};
  tqError("s-task:0x%x send dispatch error rsp, code:%s", taskId, tstrerror(code));

  tmsgSendRsp(&rsp);
  rpcFreeCont(pMsg->pCont);
  taosFreeQitem(pMsg);
  return -1;
}
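
// Byte-order note (descriptive comment only): every multi-byte field written
// into the outgoing SStreamDispatchRsp above is converted to network order
// (htonl for the 32-bit ids, htobe64 for streamId and stage); the receiving
// side, e.g. tqProcessTaskDispatchRsp() earlier in this file, applies the
// mirror conversions before using the fields.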

int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp) {
  int32_t      vgId = TD_VID(pTq->pVnode);
  SStreamMeta* pMeta = pTq->pStreamMeta;

@@ -1776,220 +1359,11 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)

// the downstream task has completed the stream task checkpoint procedure; start handling the rsp by executing the task
int32_t tqProcessTaskCheckpointReadyMsg(STQ* pTq, SRpcMsg* pMsg) {
  int32_t      vgId = TD_VID(pTq->pVnode);
  SStreamMeta* pMeta = pTq->pStreamMeta;
  char*        msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t      len = pMsg->contLen - sizeof(SMsgHead);
  int32_t      code = 0;

  SStreamCheckpointReadyMsg req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, len);
  if (tDecodeStreamCheckpointReadyMsg(&decoder, &req) < 0) {
    code = TSDB_CODE_MSG_DECODE_ERROR;
    tDecoderClear(&decoder);
    return code;
  }
  tDecoderClear(&decoder);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.upstreamTaskId);
  if (pTask == NULL) {
    tqError("vgId:%d failed to find s-task:0x%x, it may have been destroyed already", vgId, req.downstreamTaskId);
    return code;
  }

  tqDebug("vgId:%d s-task:%s received the checkpoint ready msg from task:0x%x (vgId:%d), handle it", vgId,
          pTask->id.idStr, req.downstreamTaskId, req.downstreamNodeId);

  streamProcessCheckpointReadyMsg(pTask);
  streamMetaReleaseTask(pMeta, pTask);
  return code;
  return tqStreamTaskProcessCheckpointReadyMsg(pTq->pStreamMeta, pMsg);
}

int32_t tqProcessTaskUpdateReq(STQ* pTq, SRpcMsg* pMsg) {
  SStreamMeta* pMeta = pTq->pStreamMeta;
  int32_t      vgId = TD_VID(pTq->pVnode);
  char*        msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t      len = pMsg->contLen - sizeof(SMsgHead);
  SRpcMsg      rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS};

  SStreamTaskNodeUpdateMsg req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, len);
  if (tDecodeStreamTaskUpdateMsg(&decoder, &req) < 0) {
    rsp.code = TSDB_CODE_MSG_DECODE_ERROR;
    tqError("vgId:%d failed to decode task update msg, code:%s", vgId, tstrerror(rsp.code));
    tDecoderClear(&decoder);
    return rsp.code;
  }

  tDecoderClear(&decoder);

  // update the nodeEpset when it exists
  streamMetaWLock(pMeta);

  // the task epset may be updated repeatedly; when replaying the WAL, the task may be in stop status.
  STaskId       id = {.streamId = req.streamId, .taskId = req.taskId};
  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
  if (ppTask == NULL || *ppTask == NULL) {
    tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", pMeta->vgId,
            req.taskId);
    rsp.code = TSDB_CODE_SUCCESS;
    streamMetaWUnLock(pMeta);

    taosArrayDestroy(req.pNodeList);
    return rsp.code;
  }

  SStreamTask* pTask = *ppTask;

  if (pMeta->updateInfo.transId != req.transId) {
    pMeta->updateInfo.transId = req.transId;
    tqInfo("s-task:%s receive new trans to update nodeEp msg from mnode, transId:%d", pTask->id.idStr, req.transId);
    // info needs to be kept till the new trans to update the nodeEp arrives.
    taosHashClear(pMeta->updateInfo.pTasks);
  } else {
    tqDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", pTask->id.idStr, req.transId);
  }

  STaskUpdateEntry entry = {.streamId = req.streamId, .taskId = req.taskId, .transId = req.transId};
  void*            exist = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry));
  if (exist != NULL) {
    tqDebug("s-task:%s (vgId:%d) already update in trans:%d, discard the nodeEp update msg", pTask->id.idStr, vgId,
            req.transId);
    rsp.code = TSDB_CODE_SUCCESS;
    streamMetaWUnLock(pMeta);
    taosArrayDestroy(req.pNodeList);
    return rsp.code;
  }

  streamMetaWUnLock(pMeta);

  // the following two functions should not be executed within the scope of the meta lock, to avoid deadlock
  streamTaskUpdateEpsetInfo(pTask, req.pNodeList);
  streamTaskResetStatus(pTask);

  // continue after locking the meta again
  streamMetaWLock(pMeta);

  SStreamTask** ppHTask = NULL;
  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
    ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
    if (ppHTask == NULL || *ppHTask == NULL) {
      tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
              pMeta->vgId, req.taskId);
      CLEAR_RELATED_FILLHISTORY_TASK(pTask);
    } else {
      tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
      streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList);
    }
  }

  {
    streamMetaSaveTask(pMeta, pTask);
    if (ppHTask != NULL) {
      streamMetaSaveTask(pMeta, *ppHTask);
    }

    if (streamMetaCommit(pMeta) < 0) {
      // persist to disk
    }
  }

  streamTaskStop(pTask);

  // keep the already handled info
  taosHashPut(pMeta->updateInfo.pTasks, &entry, sizeof(entry), NULL, 0);

  if (ppHTask != NULL) {
    streamTaskStop(*ppHTask);
    tqDebug("s-task:%s task nodeEp update completed, streamTask and related fill-history task closed", pTask->id.idStr);
    taosHashPut(pMeta->updateInfo.pTasks, &(*ppHTask)->id, sizeof(pTask->id), NULL, 0);
  } else {
    tqDebug("s-task:%s task nodeEp update completed, streamTask closed", pTask->id.idStr);
  }

  rsp.code = 0;

  // possibly only handle the stream task.
  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
  int32_t updateTasks = taosHashGetSize(pMeta->updateInfo.pTasks);

  pMeta->startInfo.tasksWillRestart = 1;

  if (updateTasks < numOfTasks) {
    tqDebug("vgId:%d closed tasks:%d, unclosed:%d, all tasks will be started when nodeEp update completed", vgId,
            updateTasks, (numOfTasks - updateTasks));
    streamMetaWUnLock(pMeta);
  } else {
    if (!pTq->pVnode->restored) {
      tqDebug("vgId:%d vnode restore not completed, not restart the tasks, clear the start after nodeUpdate flag",
              vgId);
      pMeta->startInfo.tasksWillRestart = 0;
      streamMetaWUnLock(pMeta);
    } else {
      tqDebug("vgId:%d all %d task(s) nodeEp updated and closed", vgId, numOfTasks);

#if 1
      tqStartStreamTaskAsync(pTq, true);
      streamMetaWUnLock(pMeta);
#else
      streamMetaWUnLock(pMeta);

      // For debug purposes only.
      // The following procedure consumes many CPU resources, resulting in the re-election of the leader
      // with high probability. So we employ it as a test case for the stream processing framework, with
      // checkpoint/restart/nodeUpdate etc.
      while (1) {
        int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1);
        if (startVal == 0) {
          break;
        }

        tqDebug("vgId:%d in start stream tasks procedure, wait for 500ms and recheck", vgId);
        taosMsleep(500);
      }

      while (streamMetaTaskInTimer(pMeta)) {
        tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
        taosMsleep(100);
      }

      streamMetaWLock(pMeta);

      int32_t code = streamMetaReopen(pMeta);
      if (code != 0) {
        tqError("vgId:%d failed to reopen stream meta", vgId);
        streamMetaWUnLock(pMeta);
        taosArrayDestroy(req.pNodeList);
        return -1;
      }

      streamMetaInitBackend(pMeta);

      if (streamMetaLoadAllTasks(pTq->pStreamMeta) < 0) {
        tqError("vgId:%d failed to load stream tasks", vgId);
        streamMetaWUnLock(pMeta);
        taosArrayDestroy(req.pNodeList);
        return -1;
      }

      if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
        tqInfo("vgId:%d start all stream tasks after all being updated", vgId);
        tqResetStreamTaskStatus(pTq);
        tqStartStreamTaskAsync(pTq, false);
      } else {
        tqInfo("vgId:%d, follower node not start stream tasks", vgId);
      }
      streamMetaWUnLock(pMeta);
#endif
    }
  }

  taosArrayDestroy(req.pNodeList);
  return rsp.code;
  return tqStreamTaskProcessUpdateReq(pTq->pStreamMeta, &pTq->pVnode->msgCb, pMsg, pTq->pVnode->restored);
}

int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) {

@@ -2041,6 +1415,9 @@ int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) {
  SStreamTaskId id = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId};
  streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &id);

  // clear the scheduler status
  streamTaskSetSchedStatusInactive(pTask);
  tqDebug("s-task:%s set scheduler status:%d after drop fill-history task", pTask->id.idStr, pTask->status.schedStatus);
  streamMetaReleaseTask(pMeta, pTask);
  return TSDB_CODE_SUCCESS;
}

@@ -15,7 +15,6 @@

#include "tq.h"
#include "vnd.h"
#include "stream.h"

#define MAX_REPEAT_SCAN_THRESHOLD 3
#define SCAN_WAL_IDLE_DURATION    100

@@ -61,154 +60,6 @@ int32_t tqScanWal(STQ* pTq) {
  return 0;
}

int32_t tqStartStreamTasks(STQ* pTq) {
  int32_t      code = TSDB_CODE_SUCCESS;
  int32_t      vgId = TD_VID(pTq->pVnode);
  SStreamMeta* pMeta = pTq->pStreamMeta;

  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
  tqDebug("vgId:%d start to check all %d stream task(s) downstream status", vgId, numOfTasks);
  if (numOfTasks == 0) {
    return TSDB_CODE_SUCCESS;
  }

  SArray* pTaskList = NULL;
  streamMetaWLock(pMeta);
  pTaskList = taosArrayDup(pMeta->pTaskList, NULL);
  taosHashClear(pMeta->startInfo.pReadyTaskSet);
  taosHashClear(pMeta->startInfo.pFailedTaskSet);
  pMeta->startInfo.startTs = taosGetTimestampMs();
  streamMetaWUnLock(pMeta);

  // broadcast the check downstream tasks msg
  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
    SStreamTask*   pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId);
    if (pTask == NULL) {
      continue;
    }

    // fill-history tasks can only be launched by their related stream tasks.
    if (pTask->info.fillHistory == 1) {
      streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

    if (pTask->status.downstreamReady == 1) {
      if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
        tqDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task",
                pTask->id.idStr);
        streamLaunchFillHistoryTask(pTask);
      }

      streamMetaUpdateTaskDownstreamStatus(pTask, pTask->execInfo.init, pTask->execInfo.start, true);
      streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

    EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(pTask)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT;
    int32_t          ret = streamTaskHandleEvent(pTask->status.pSM, event);
    if (ret != TSDB_CODE_SUCCESS) {
      code = ret;
    }

    streamMetaReleaseTask(pMeta, pTask);
  }

  taosArrayDestroy(pTaskList);
  return code;
}
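
// Why the task list is duplicated above (descriptive comment only):
// pMeta->pTaskList is copied with taosArrayDup() while the write lock is
// held and the lock is released immediately afterwards, so the broadcast
// loop can acquire and release individual tasks without holding the meta
// lock for the whole scan.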

int32_t tqRestartStreamTasks(STQ* pTq) {
  SStreamMeta* pMeta = pTq->pStreamMeta;
  int32_t      vgId = pMeta->vgId;
  int32_t      code = 0;
  int64_t      st = taosGetTimestampMs();

  while (1) {
    int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1);
    if (startVal == 0) {
      break;
    }

    tqDebug("vgId:%d in start stream tasks procedure, wait for 500ms and recheck", vgId);
    taosMsleep(500);
  }

  terrno = 0;
  tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d", vgId,
         pMeta->updateInfo.transId);

  while (streamMetaTaskInTimer(pMeta)) {
    tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
    taosMsleep(100);
  }

  streamMetaWLock(pMeta);
  code = streamMetaReopen(pMeta);
  if (code != TSDB_CODE_SUCCESS) {
    tqError("vgId:%d failed to reopen stream meta", vgId);
    streamMetaWUnLock(pMeta);
    code = terrno;
    return code;
  }

  streamMetaInitBackend(pMeta);
  int64_t el = taosGetTimestampMs() - st;

  tqInfo("vgId:%d close&reload state elapsed time:%.3fs", vgId, el / 1000.);

  code = streamMetaLoadAllTasks(pTq->pStreamMeta);
  if (code != TSDB_CODE_SUCCESS) {
    tqError("vgId:%d failed to load stream tasks, code:%s", vgId, tstrerror(terrno));
    streamMetaWUnLock(pMeta);
    code = terrno;
    return code;
  }

  if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
    tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
    tqResetStreamTaskStatus(pTq);

    streamMetaWUnLock(pMeta);
    tqStartStreamTasks(pTq);
  } else {
    streamMetaResetStartInfo(&pMeta->startInfo);
    streamMetaWUnLock(pMeta);
    tqInfo("vgId:%d, follower node not start stream tasks", vgId);
  }

  code = terrno;
  return code;
}

int32_t tqStartStreamTaskAsync(STQ* pTq, bool restart) {
  SStreamMeta* pMeta = pTq->pStreamMeta;
  int32_t      vgId = pMeta->vgId;

  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
  if (numOfTasks == 0) {
    tqDebug("vgId:%d no stream tasks existed to run", vgId);
    return 0;
  }

  SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
  if (pRunReq == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    tqError("vgId:%d failed to create msg to start wal scanning to launch stream tasks, code:%s", vgId, terrstr());
    return -1;
  }

  tqDebug("vgId:%d start all %d stream task(s) async", vgId, numOfTasks);
  pRunReq->head.vgId = vgId;
  pRunReq->streamId = 0;
  pRunReq->taskId = restart ? STREAM_EXEC_RESTART_ALL_TASKS_ID : STREAM_EXEC_START_ALL_TASKS_ID;

  SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
  tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg);
  return 0;
}
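
// Usage, as seen at the call sites in this file: tqStartStreamTaskAsync(pTq,
// true) enqueues STREAM_EXEC_RESTART_ALL_TASKS_ID and therefore a restart of
// all tasks, while tqStartStreamTaskAsync(pTq, false) enqueues a plain start;
// the run-req then comes back through tqProcessTaskRunReq() on the stream queue.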

int32_t tqScanWalAsync(STQ* pTq, bool ckPause) {
  int32_t      vgId = TD_VID(pTq->pVnode);
  SStreamMeta* pMeta = pTq->pStreamMeta;

@@ -302,27 +153,6 @@ int32_t tqStopStreamTasks(STQ* pTq) {
  return 0;
}

int32_t tqResetStreamTaskStatus(STQ* pTq) {
  SStreamMeta* pMeta = pTq->pStreamMeta;
  int32_t      vgId = TD_VID(pTq->pVnode);
  int32_t      numOfTasks = taosArrayGetSize(pMeta->pTaskList);

  tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, numOfTasks);
  if (numOfTasks == 0) {
    return TSDB_CODE_SUCCESS;
  }

  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);

    STaskId       id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
    SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
    streamTaskResetStatus(*pTask);
  }

  return 0;
}

int32_t setWalReaderStartOffset(SStreamTask* pTask, int32_t vgId) {
  // seek the stored version and extract data from WAL
  int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);

@@ -0,0 +1,20 @@
aux_source_directory(. TQ_SOURCE_FILES)
add_library(tqCommon STATIC ${TQ_SOURCE_FILES})
target_include_directories(
    tqCommon
    PUBLIC "../inc"
    PUBLIC "../../inc"
)

target_link_libraries(
    tqCommon
    PRIVATE stream
    PRIVATE common
    PRIVATE transport
    PRIVATE executor
    PRIVATE index
    PRIVATE qcom
    PRIVATE qworker
    PRIVATE sync
    PRIVATE tfs
)
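
# Note: vnode consumes this helper library through the `PUBLIC tqCommon`
# entry in vnode/CMakeLists.txt.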

@@ -0,0 +1,809 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "tstream.h"
#include "tmsgcb.h"
#include "tq.h"

typedef struct STaskUpdateEntry {
  int64_t streamId;
  int32_t taskId;
  int32_t transId;
} STaskUpdateEntry;

int32_t tqStreamTaskStartAsync(SStreamMeta* pMeta, SMsgCb* cb, bool restart) {
  int32_t vgId = pMeta->vgId;

  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
  if (numOfTasks == 0) {
    tqDebug("vgId:%d no stream tasks existed to run", vgId);
    return 0;
  }

  SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
  if (pRunReq == NULL) {
    terrno = TSDB_CODE_OUT_OF_MEMORY;
    tqError("vgId:%d failed to create msg to start wal scanning to launch stream tasks, code:%s", vgId, terrstr());
    return -1;
  }

  tqDebug("vgId:%d start all %d stream task(s) async", vgId, numOfTasks);
  pRunReq->head.vgId = vgId;
  pRunReq->streamId = 0;
  pRunReq->taskId = restart ? STREAM_EXEC_RESTART_ALL_TASKS_ID : STREAM_EXEC_START_ALL_TASKS_ID;

  SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
  tmsgPutToQueue(cb, STREAM_QUEUE, &msg);
  return 0;
}
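
// The SMsgCb parameter is what makes this helper shareable: the snode handler
// passes &pSnode->msgCb and the vnode tq path passes &pTq->pVnode->msgCb, so
// the same code enqueues the run-req on whichever module's stream queue owns
// the tasks.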

int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored) {
  int32_t vgId = pMeta->vgId;
  char*   msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t len = pMsg->contLen - sizeof(SMsgHead);
  SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS};

  SStreamTaskNodeUpdateMsg req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, len);
  if (tDecodeStreamTaskUpdateMsg(&decoder, &req) < 0) {
    rsp.code = TSDB_CODE_MSG_DECODE_ERROR;
    tqError("vgId:%d failed to decode task update msg, code:%s", vgId, tstrerror(rsp.code));
    tDecoderClear(&decoder);
    return rsp.code;
  }

  tDecoderClear(&decoder);

  // update the nodeEpset when it exists
  streamMetaWLock(pMeta);

  // the task epset may be updated repeatedly; when replaying the WAL, the task may be in stop status.
  STaskId       id = {.streamId = req.streamId, .taskId = req.taskId};
  SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
  if (ppTask == NULL || *ppTask == NULL) {
    tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", pMeta->vgId,
            req.taskId);
    rsp.code = TSDB_CODE_SUCCESS;
    streamMetaWUnLock(pMeta);

    taosArrayDestroy(req.pNodeList);
    return rsp.code;
  }

  SStreamTask* pTask = *ppTask;

  if (pMeta->updateInfo.transId != req.transId) {
    pMeta->updateInfo.transId = req.transId;
    tqInfo("s-task:%s receive new trans to update nodeEp msg from mnode, transId:%d", pTask->id.idStr, req.transId);
    // info needs to be kept until the new trans to update the nodeEp arrives.
    taosHashClear(pMeta->updateInfo.pTasks);
  } else {
    tqDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", pTask->id.idStr, req.transId);
  }

  STaskUpdateEntry entry = {.streamId = req.streamId, .taskId = req.taskId, .transId = req.transId};
  void*            exist = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry));
  if (exist != NULL) {
    tqDebug("s-task:%s (vgId:%d) already updated in trans:%d, discard the nodeEp update msg", pTask->id.idStr, vgId,
            req.transId);
    rsp.code = TSDB_CODE_SUCCESS;
    streamMetaWUnLock(pMeta);
    taosArrayDestroy(req.pNodeList);
    return rsp.code;
  }

  streamMetaWUnLock(pMeta);

  // the following two functions should not be executed within the scope of the meta lock, to avoid deadlock
  streamTaskUpdateEpsetInfo(pTask, req.pNodeList);
  streamTaskResetStatus(pTask);

  // continue after locking the meta again
  streamMetaWLock(pMeta);

  SStreamTask** ppHTask = NULL;
  if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
    ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id));
    if (ppHTask == NULL || *ppHTask == NULL) {
      tqError("vgId:%d failed to acquire fill-history task:0x%x when handling update, it may have been dropped already",
              pMeta->vgId, req.taskId);
      CLEAR_RELATED_FILLHISTORY_TASK(pTask);
    } else {
      tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr);
      streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList);
    }
  }

  {
    streamMetaSaveTask(pMeta, pTask);
    if (ppHTask != NULL) {
      streamMetaSaveTask(pMeta, *ppHTask);
    }

    if (streamMetaCommit(pMeta) < 0) {
      // persist to disk
    }
  }

  streamTaskStop(pTask);

  // keep the already handled info
  taosHashPut(pMeta->updateInfo.pTasks, &entry, sizeof(entry), NULL, 0);

  if (ppHTask != NULL) {
    streamTaskStop(*ppHTask);
    tqDebug("s-task:%s task nodeEp update completed, streamTask and related fill-history task closed", pTask->id.idStr);
    taosHashPut(pMeta->updateInfo.pTasks, &(*ppHTask)->id, sizeof(pTask->id), NULL, 0);
  } else {
    tqDebug("s-task:%s task nodeEp update completed, streamTask closed", pTask->id.idStr);
  }

  rsp.code = 0;

  // possibly only handle the stream task.
  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
  int32_t updateTasks = taosHashGetSize(pMeta->updateInfo.pTasks);

  pMeta->startInfo.tasksWillRestart = 1;

  if (updateTasks < numOfTasks) {
    tqDebug("vgId:%d closed tasks:%d, unclosed:%d, all tasks will be started when nodeEp update completed", vgId,
            updateTasks, (numOfTasks - updateTasks));
    streamMetaWUnLock(pMeta);
  } else {
    if (!restored) {
      tqDebug("vgId:%d vnode restore not completed, not restart the tasks, clear the start-after-nodeUpdate flag", vgId);
      pMeta->startInfo.tasksWillRestart = 0;
      streamMetaWUnLock(pMeta);
    } else {
      tqDebug("vgId:%d all %d task(s) nodeEp updated and closed", vgId, numOfTasks);
#if 1
      tqStreamTaskStartAsync(pMeta, cb, true);
      streamMetaWUnLock(pMeta);
#else
      streamMetaWUnLock(pMeta);

      // For debug purposes only.
      // The following procedure consumes a lot of CPU resources and, with high probability, results in the
      // re-election of the leader. So we employ it as a test case for the stream processing framework, covering
      // checkpoint/restart/nodeUpdate etc.
      while (1) {
        int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1);
        if (startVal == 0) {
          break;
        }

        tqDebug("vgId:%d in start stream tasks procedure, wait for 500ms and recheck", vgId);
        taosMsleep(500);
      }

      while (streamMetaTaskInTimer(pMeta)) {
        tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
        taosMsleep(100);
      }

      streamMetaWLock(pMeta);

      int32_t code = streamMetaReopen(pMeta);
      if (code != 0) {
        tqError("vgId:%d failed to reopen stream meta", vgId);
        streamMetaWUnLock(pMeta);
        taosArrayDestroy(req.pNodeList);
        return -1;
      }

      streamMetaInitBackend(pMeta);

      if (streamMetaLoadAllTasks(pTq->pStreamMeta) < 0) {
        tqError("vgId:%d failed to load stream tasks", vgId);
        streamMetaWUnLock(pMeta);
        taosArrayDestroy(req.pNodeList);
        return -1;
      }

      if (vnodeIsRoleLeader(pTq->pVnode) && !tsDisableStream) {
        tqInfo("vgId:%d start all stream tasks after all being updated", vgId);
        resetStreamTaskStatus(pTq->pStreamMeta);
        tqStartStreamTaskAsync(pTq, false);
      } else {
        tqInfo("vgId:%d, follower node not start stream tasks", vgId);
      }
      streamMetaWUnLock(pMeta);
#endif
    }
  }

  taosArrayDestroy(req.pNodeList);
  return rsp.code;
}
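
The update handler above dedupes repeated nodeEp transactions by using the raw bytes of an STaskUpdateEntry as a hash key. That is only reliable when every byte of the key, padding included, is deterministic, so zeroing the struct before filling it is the safe pattern. A sketch with a hypothetical mirror of the entry type, not the TDengine definition itself:

#include <stdint.h>
#include <string.h>

typedef struct {
  int64_t streamId;
  int32_t taskId;
  int32_t transId;
} TaskUpdateKey;  /* hypothetical mirror of STaskUpdateEntry */

/* byte-wise hashing/memcmp of the key only works if padding is deterministic */
static void makeKey(TaskUpdateKey* k, int64_t sid, int32_t tid, int32_t trans) {
  memset(k, 0, sizeof(*k));  /* clear any padding bytes before use as a key */
  k->streamId = sid;
  k->taskId = tid;
  k->transId = trans;
}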

int32_t tqStreamTaskProcessDispatchReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
  char*   msgStr = pMsg->pCont;
  char*   msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

  SStreamDispatchReq req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
  if (tDecodeStreamDispatchReq(&decoder, &req) < 0) {
    tDecoderClear(&decoder);
    return TSDB_CODE_MSG_DECODE_ERROR;
  }
  tDecoderClear(&decoder);

  tqDebug("s-task:0x%x recv dispatch msg from 0x%x(vgId:%d)", req.taskId, req.upstreamTaskId, req.upstreamNodeId);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.taskId);
  if (pTask) {
    SRpcMsg rsp = {.info = pMsg->info, .code = 0};
    if (streamProcessDispatchMsg(pTask, &req, &rsp) != 0) {
      return -1;
    }
    tDeleteStreamDispatchReq(&req);
    streamMetaReleaseTask(pMeta, pTask);
    return 0;
  } else {
    tqError("vgId:%d failed to find task:0x%x to handle the dispatch req, it may have been destroyed already",
            pMeta->vgId, req.taskId);

    SMsgHead* pRspHead = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp));
    if (pRspHead == NULL) {
      terrno = TSDB_CODE_OUT_OF_MEMORY;
      tqError("s-task:0x%x send dispatch error rsp, out of memory", req.taskId);
      return -1;
    }

    pRspHead->vgId = htonl(req.upstreamNodeId);
    ASSERT(pRspHead->vgId != 0);

    SStreamDispatchRsp* pRsp = POINTER_SHIFT(pRspHead, sizeof(SMsgHead));
    pRsp->streamId = htobe64(req.streamId);
    pRsp->upstreamTaskId = htonl(req.upstreamTaskId);
    pRsp->upstreamNodeId = htonl(req.upstreamNodeId);
    pRsp->downstreamNodeId = htonl(pMeta->vgId);
    pRsp->downstreamTaskId = htonl(req.taskId);
    pRsp->msgId = htonl(req.msgId);
    pRsp->stage = htobe64(req.stage);
    pRsp->inputStatus = TASK_OUTPUT_STATUS__NORMAL;

    int32_t len = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
    SRpcMsg rsp = {.code = TSDB_CODE_STREAM_TASK_NOT_EXIST, .info = pMsg->info, .contLen = len, .pCont = pRspHead};
    tqError("s-task:0x%x send dispatch error rsp, no task", req.taskId);

    tmsgSendRsp(&rsp);
    tDeleteStreamDispatchReq(&req);

    return 0;
  }
}
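
Every multi-byte field of the error response above is converted to network byte order before it leaves the node: htonl for the 32-bit ids and htobe64 for the 64-bit stream id and stage. A standalone round-trip sketch, assuming glibc's <endian.h> for the 64-bit helpers:

#include <arpa/inet.h>  /* htonl / ntohl */
#include <endian.h>     /* htobe64 / be64toh (glibc) */
#include <assert.h>
#include <stdint.h>

int main(void) {
  uint32_t taskId = 0x1234u;
  uint64_t streamId = 0x1122334455667788ULL;

  uint32_t wireTask = htonl(taskId);        /* encode for the wire */
  uint64_t wireStream = htobe64(streamId);

  assert(ntohl(wireTask) == taskId);        /* decoding restores host order */
  assert(be64toh(wireStream) == streamId);
  return 0;
}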

int32_t tqStreamTaskProcessDispatchRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) {
  SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));

  int32_t vgId = pMeta->vgId;
  pRsp->upstreamTaskId = htonl(pRsp->upstreamTaskId);
  pRsp->streamId = htobe64(pRsp->streamId);
  pRsp->downstreamTaskId = htonl(pRsp->downstreamTaskId);
  pRsp->downstreamNodeId = htonl(pRsp->downstreamNodeId);
  pRsp->stage = htobe64(pRsp->stage);
  pRsp->msgId = htonl(pRsp->msgId);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pRsp->streamId, pRsp->upstreamTaskId);
  if (pTask) {
    streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
    streamMetaReleaseTask(pMeta, pTask);
    return TSDB_CODE_SUCCESS;
  } else {
    tqDebug("vgId:%d failed to handle the dispatch rsp, since find task:0x%x failed", vgId, pRsp->upstreamTaskId);
    terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST;
    return terrno;
  }
}

int32_t tqStreamTaskProcessRetrieveReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
  char*    msgStr = pMsg->pCont;
  char*    msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t  msgLen = pMsg->contLen - sizeof(SMsgHead);
  SDecoder decoder;

  SStreamRetrieveReq req;
  tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
  tDecodeStreamRetrieveReq(&decoder, &req);
  tDecoderClear(&decoder);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.dstTaskId);
  if (pTask == NULL) {
    tqError("vgId:%d process retrieve req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId,
            req.dstTaskId);
    return -1;
  }

  SRpcMsg rsp = {.info = pMsg->info, .code = 0};
  streamProcessRetrieveReq(pTask, &req, &rsp);

  streamMetaReleaseTask(pMeta, pTask);
  tDeleteStreamRetrieveReq(&req);
  return 0;
}
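
Each handler in this file follows the same decoder lifecycle: init the decoder over the message body, decode the request, and clear the decoder on every exit path, success or failure. A self-contained sketch of that shape, with stand-in types rather than the real SDecoder:

#include <stdint.h>
#include <stddef.h>

typedef struct {
  const uint8_t* buf;
  int32_t        len;
} Decoder;  /* hypothetical stand-in for SDecoder */

static void decoderInit(Decoder* d, const uint8_t* b, int32_t n) { d->buf = b; d->len = n; }
static void decoderClear(Decoder* d) { d->buf = NULL; d->len = 0; }
static int32_t decodeReq(Decoder* d, int32_t* out) { return (d->len >= 4) ? (*out = d->buf[0], 0) : -1; }

int32_t handleMsg(const uint8_t* body, int32_t len) {
  Decoder decoder;
  int32_t req = 0;
  decoderInit(&decoder, body, len);
  if (decodeReq(&decoder, &req) < 0) {
    decoderClear(&decoder);  /* clear on the error path */
    return -1;
  }
  decoderClear(&decoder);    /* and on success, before acting on req */
  return 0;
}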

int32_t tqStreamTaskProcessScanHistoryFinishReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
  char*   msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

  // deserialize
  SStreamScanHistoryFinishReq req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
  tDecodeStreamScanHistoryFinishReq(&decoder, &req);
  tDecoderClear(&decoder);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.downstreamTaskId);
  if (pTask == NULL) {
    tqError("vgId:%d process scan history finish msg, failed to find task:0x%x, it may be destroyed",
            pMeta->vgId, req.downstreamTaskId);
    return -1;
  }

  tqDebug("s-task:%s receive scan-history finish msg from task:0x%x", pTask->id.idStr, req.upstreamTaskId);

  int32_t code = streamProcessScanHistoryFinishReq(pTask, &req, &pMsg->info);
  streamMetaReleaseTask(pMeta, pTask);
  return code;
}

int32_t tqStreamTaskProcessScanHistoryFinishRsp(SStreamMeta* pMeta, SRpcMsg* pMsg) {
  char*   msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

  // deserialize
  SStreamCompleteHistoryMsg req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
  tDecodeCompleteHistoryDataMsg(&decoder, &req);
  tDecoderClear(&decoder);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.upstreamTaskId);
  if (pTask == NULL) {
    tqError("vgId:%d process scan history finish rsp, failed to find task:0x%x, it may be destroyed",
            pMeta->vgId, req.upstreamTaskId);
    return -1;
  }

  int32_t remain = atomic_sub_fetch_32(&pTask->notReadyTasks, 1);
  if (remain > 0) {
    tqDebug("s-task:%s scan-history finish rsp received from downstream task:0x%x, unfinished remain:%d",
            pTask->id.idStr, req.downstreamId, remain);
  } else {
    tqDebug(
        "s-task:%s scan-history finish rsp received from downstream task:0x%x, all downstream tasks rsp scan-history "
        "completed msg",
        pTask->id.idStr, req.downstreamId);
    streamProcessScanHistoryFinishRsp(pTask);
  }

  streamMetaReleaseTask(pMeta, pTask);
  return 0;
}

int32_t tqStreamTaskProcessCheckReq(SStreamMeta* pMeta, SRpcMsg* pMsg) {
  char*   msgStr = pMsg->pCont;
  char*   msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
  int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

  SStreamTaskCheckReq req;
  SDecoder            decoder;

  tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen);
  tDecodeStreamTaskCheckReq(&decoder, &req);
  tDecoderClear(&decoder);

  int32_t taskId = req.downstreamTaskId;

  SStreamTaskCheckRsp rsp = {
      .reqId = req.reqId,
      .streamId = req.streamId,
      .childId = req.childId,
      .downstreamNodeId = req.downstreamNodeId,
      .downstreamTaskId = req.downstreamTaskId,
      .upstreamNodeId = req.upstreamNodeId,
      .upstreamTaskId = req.upstreamTaskId,
  };

  // only the leader node handles the check request
  if (pMeta->role == NODE_ROLE_FOLLOWER) {
    tqError("s-task:0x%x invalid check msg from upstream:0x%x(vgId:%d), vgId:%d is follower, not handle check status msg",
            taskId, req.upstreamTaskId, req.upstreamNodeId, pMeta->vgId);
    rsp.status = TASK_DOWNSTREAM_NOT_LEADER;
  } else {
    SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, taskId);
    if (pTask != NULL) {
      rsp.status = streamTaskCheckStatus(pTask, req.upstreamTaskId, req.upstreamNodeId, req.stage, &rsp.oldStage);

      char* p = NULL;
      streamTaskGetStatus(pTask, &p);
      tqDebug("s-task:%s status:%s, stage:%" PRId64 " recv task check req(reqId:0x%" PRIx64 ") task:0x%x (vgId:%d), check_status:%d",
              pTask->id.idStr, p, rsp.oldStage, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);

      // release the task only after its status has been queried and logged
      streamMetaReleaseTask(pMeta, pTask);
    } else {
      rsp.status = TASK_DOWNSTREAM_NOT_READY;
      tqDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(reqId:0x%" PRIx64
              ") from task:0x%x (vgId:%d), rsp check_status %d",
              req.streamId, taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
    }
  }

  return streamSendCheckRsp(pMeta, &req, &rsp, &pMsg->info, taskId);
}

int32_t tqStreamTaskProcessCheckRsp(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader) {
  char*   pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t len = pMsg->contLen - sizeof(SMsgHead);
  int32_t vgId = pMeta->vgId;

  int32_t             code;
  SStreamTaskCheckRsp rsp;

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)pReq, len);
  code = tDecodeStreamTaskCheckRsp(&decoder, &rsp);
  if (code < 0) {
    terrno = TSDB_CODE_INVALID_MSG;
    tDecoderClear(&decoder);
    tqError("vgId:%d failed to parse check rsp msg, code:%s", vgId, tstrerror(terrno));
    return -1;
  }

  tDecoderClear(&decoder);
  tqDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d",
          rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status);

  if (!isLeader) {
    tqError("vgId:%d not leader, task:0x%x not handle the check rsp, downstream:0x%x (vgId:%d)", vgId,
            rsp.upstreamTaskId, rsp.downstreamTaskId, rsp.downstreamNodeId);
    return code;
  }

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, rsp.streamId, rsp.upstreamTaskId);
  if (pTask == NULL) {
    tqError("tq failed to locate the stream task:0x%" PRIx64 "-0x%x (vgId:%d), it may have been destroyed or stopped",
            rsp.streamId, rsp.upstreamTaskId, vgId);
    terrno = TSDB_CODE_STREAM_TASK_NOT_EXIST;
    return -1;
  }

  code = streamProcessCheckRsp(pTask, &rsp);
  streamMetaReleaseTask(pMeta, pTask);
  return code;
}

int32_t tqStreamTaskProcessCheckpointReadyMsg(SStreamMeta* pMeta, SRpcMsg* pMsg) {
  int32_t vgId = pMeta->vgId;
  char*   msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
  int32_t len = pMsg->contLen - sizeof(SMsgHead);
  int32_t code = 0;

  SStreamCheckpointReadyMsg req = {0};

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, len);
  if (tDecodeStreamCheckpointReadyMsg(&decoder, &req) < 0) {
    code = TSDB_CODE_MSG_DECODE_ERROR;
    tDecoderClear(&decoder);
    return code;
  }
  tDecoderClear(&decoder);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, req.streamId, req.upstreamTaskId);
  if (pTask == NULL) {
    tqError("vgId:%d failed to find s-task:0x%x, it may have been destroyed already", vgId, req.downstreamTaskId);
    return code;
  }

  tqDebug("vgId:%d s-task:%s received the checkpoint ready msg from task:0x%x (vgId:%d), handle it", vgId,
          pTask->id.idStr, req.downstreamTaskId, req.downstreamNodeId);

  streamProcessCheckpointReadyMsg(pTask);
  streamMetaReleaseTask(pMeta, pTask);
  return code;
}

int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, int64_t sversion, char* msg, int32_t msgLen, bool isLeader, bool restored) {
  int32_t code = 0;
  int32_t vgId = pMeta->vgId;

  if (tsDisableStream) {
    tqInfo("vgId:%d stream disabled, not deploy stream tasks", vgId);
    return code;
  }

  tqDebug("vgId:%d receive new stream task deploy msg, start to build stream task", vgId);

  // 1. deserialize msg and build task
  int32_t      size = sizeof(SStreamTask);
  SStreamTask* pTask = taosMemoryCalloc(1, size);
  if (pTask == NULL) {
    tqError("vgId:%d failed to create stream task due to out of memory, alloc size:%d", vgId, size);
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  SDecoder decoder;
  tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
  code = tDecodeStreamTask(&decoder, pTask);
  tDecoderClear(&decoder);

  if (code != TSDB_CODE_SUCCESS) {
    taosMemoryFree(pTask);
    return TSDB_CODE_INVALID_MSG;
  }

  // 2. save task; use the latest commit version as the initial start version of the stream task.
  int32_t taskId = pTask->id.taskId;
  int64_t streamId = pTask->id.streamId;
  bool    added = false;

  streamMetaWLock(pMeta);
  code = streamMetaRegisterTask(pMeta, sversion, pTask, &added);
  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
  streamMetaWUnLock(pMeta);

  if (code < 0) {
    tqError("failed to add s-task:0x%x into vgId:%d meta, total:%d, code:%s", taskId, vgId, numOfTasks, tstrerror(code));
    tFreeStreamTask(pTask);
    return code;
  }

  // once it has been added into the meta store, pTask must not be referenced again, since it may already have been
  // destroyed by other threads
  if (added) {
    // only handled in the leader node
    if (isLeader) {
      tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks);
      SStreamTask* p = streamMetaAcquireTask(pMeta, streamId, taskId);

      if (p != NULL && restored && p->info.fillHistory == 0) {
        EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(p)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT;
        streamTaskHandleEvent(p->status.pSM, event);
      } else if (!restored) {
        tqWarn("s-task:0x%x not launched since vnode(vgId:%d) not ready", taskId, vgId);
      }

      if (p != NULL) {
        streamMetaReleaseTask(pMeta, p);
      }
    } else {
      tqDebug("vgId:%d not leader, not launch stream task s-task:0x%x", vgId, taskId);
    }
  } else {
    tqWarn("vgId:%d failed to add s-task:0x%x, since already exists in meta store", vgId, taskId);
    tFreeStreamTask(pTask);
  }

  return code;
}

int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen) {
  SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;

  int32_t vgId = pMeta->vgId;
  tqDebug("vgId:%d receive msg to drop s-task:0x%x", vgId, pReq->taskId);

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
  if (pTask != NULL) {
    // drop the related fill-history task first
    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
      STaskId* pHTaskId = &pTask->hTaskInfo.id;
      streamMetaUnregisterTask(pMeta, pHTaskId->streamId, pHTaskId->taskId);
      tqDebug("vgId:%d related fill-history task:0x%x dropped first", vgId, (int32_t)pHTaskId->taskId);
    }
    streamMetaReleaseTask(pMeta, pTask);
  }

  // drop the stream task now
  streamMetaUnregisterTask(pMeta, pReq->streamId, pReq->taskId);

  // commit the update
  streamMetaWLock(pMeta);
  int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta);
  tqDebug("vgId:%d task:0x%x dropped, remain tasks:%d", vgId, pReq->taskId, numOfTasks);

  if (streamMetaCommit(pMeta) < 0) {
    // persist to disk
  }
  streamMetaWUnLock(pMeta);

  return 0;
}

int32_t startStreamTasks(SStreamMeta* pMeta) {
  int32_t code = TSDB_CODE_SUCCESS;
  int32_t vgId = pMeta->vgId;

  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
  tqDebug("vgId:%d start to check all %d stream task(s) downstream status", vgId, numOfTasks);
  if (numOfTasks == 0) {
    return TSDB_CODE_SUCCESS;
  }

  SArray* pTaskList = NULL;
  streamMetaWLock(pMeta);
  pTaskList = taosArrayDup(pMeta->pTaskList, NULL);
  taosHashClear(pMeta->startInfo.pReadyTaskSet);
  taosHashClear(pMeta->startInfo.pFailedTaskSet);
  pMeta->startInfo.startTs = taosGetTimestampMs();
  streamMetaWUnLock(pMeta);

  // broadcast the check downstream tasks msg
  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
    SStreamTask*   pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId);
    if (pTask == NULL) {
      continue;
    }

    // a fill-history task can only be launched by its related stream task.
    if (pTask->info.fillHistory == 1) {
      streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

    if (pTask->status.downstreamReady == 1) {
      if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
        tqDebug("s-task:%s downstream ready, no need to check downstream, check only related fill-history task",
                pTask->id.idStr);
        streamLaunchFillHistoryTask(pTask);
      }

      streamMetaUpdateTaskDownstreamStatus(pTask, pTask->execInfo.init, pTask->execInfo.start, true);
      streamMetaReleaseTask(pMeta, pTask);
      continue;
    }

    EStreamTaskEvent event = (HAS_RELATED_FILLHISTORY_TASK(pTask)) ? TASK_EVENT_INIT_STREAM_SCANHIST : TASK_EVENT_INIT;
    int32_t          ret = streamTaskHandleEvent(pTask->status.pSM, event);
    if (ret != TSDB_CODE_SUCCESS) {
      code = ret;
    }

    streamMetaReleaseTask(pMeta, pTask);
  }

  taosArrayDestroy(pTaskList);
  return code;
}
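
startStreamTasks holds the meta write lock only long enough to duplicate the task list and reset the start info; the slow per-task checks then iterate the private copy lock-free. A pthread-based sketch of the same snapshot-under-lock pattern:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  int* items;
  int  n;
} List;

static pthread_mutex_t gLock = PTHREAD_MUTEX_INITIALIZER;

/* copy the shared list under the lock, then release it before the slow work */
static List snapshot(const List* shared) {
  List copy;
  pthread_mutex_lock(&gLock);
  copy.n = shared->n;
  copy.items = malloc(sizeof(int) * (size_t)copy.n);
  memcpy(copy.items, shared->items, sizeof(int) * (size_t)copy.n);
  pthread_mutex_unlock(&gLock);
  return copy;  /* the caller iterates the copy lock-free, then frees it */
}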

int32_t resetStreamTaskStatus(SStreamMeta* pMeta) {
  int32_t vgId = pMeta->vgId;
  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);

  tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, numOfTasks);
  if (numOfTasks == 0) {
    return TSDB_CODE_SUCCESS;
  }

  for (int32_t i = 0; i < numOfTasks; ++i) {
    SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);

    STaskId       id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
    SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
    streamTaskResetStatus(*pTask);
  }

  return 0;
}

static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
  int32_t vgId = pMeta->vgId;
  int32_t code = 0;
  int64_t st = taosGetTimestampMs();

  while (1) {
    int32_t startVal = atomic_val_compare_exchange_32(&pMeta->startInfo.taskStarting, 0, 1);
    if (startVal == 0) {
      break;
    }

    tqDebug("vgId:%d in start stream tasks procedure, wait for 500ms and recheck", vgId);
    taosMsleep(500);
  }

  terrno = 0;
  tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d", vgId,
         pMeta->updateInfo.transId);

  while (streamMetaTaskInTimer(pMeta)) {
    tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId);
    taosMsleep(100);
  }

  streamMetaWLock(pMeta);
  code = streamMetaReopen(pMeta);
  if (code != TSDB_CODE_SUCCESS) {
    tqError("vgId:%d failed to reopen stream meta", vgId);
    streamMetaWUnLock(pMeta);
    code = terrno;
    return code;
  }

  streamMetaInitBackend(pMeta);
  int64_t el = taosGetTimestampMs() - st;

  tqInfo("vgId:%d close&reload state elapsed time:%.3fs", vgId, el / 1000.0);

  code = streamMetaLoadAllTasks(pMeta);
  if (code != TSDB_CODE_SUCCESS) {
    tqError("vgId:%d failed to load stream tasks, code:%s", vgId, tstrerror(terrno));
    streamMetaWUnLock(pMeta);
    code = terrno;
    return code;
  }

  if (isLeader && !tsDisableStream) {
    tqInfo("vgId:%d restart all stream tasks after all tasks being updated", vgId);
    resetStreamTaskStatus(pMeta);

    streamMetaWUnLock(pMeta);
    startStreamTasks(pMeta);
  } else {
    streamMetaResetStartInfo(&pMeta->startInfo);
    streamMetaWUnLock(pMeta);
    tqInfo("vgId:%d, follower node not start stream tasks", vgId);
  }

  code = terrno;
  return code;
}
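
Both restartStreamTasks above and the disabled debug path earlier gate the start procedure with a compare-and-swap loop on startInfo.taskStarting: only the caller that flips the flag from 0 to 1 proceeds, everyone else sleeps and rechecks. A C11-atomics sketch of that latch, with atomic_compare_exchange_strong standing in for atomic_val_compare_exchange_32:

#include <stdatomic.h>
#include <unistd.h>

static atomic_int taskStarting;  /* 0 = idle, 1 = a start procedure is running */

static void acquireStartLatch(void) {
  int expected = 0;
  while (!atomic_compare_exchange_strong(&taskStarting, &expected, 1)) {
    expected = 0;        /* a failed CAS rewrites 'expected'; reset it */
    usleep(500 * 1000);  /* wait 500ms and recheck, as in the code above */
  }
}

static void releaseStartLatch(void) { atomic_store(&taskStarting, 0); }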

int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader) {
  SStreamTaskRunReq* pReq = pMsg->pCont;

  int32_t taskId = pReq->taskId;
  int32_t vgId = pMeta->vgId;

  if (taskId == STREAM_EXEC_START_ALL_TASKS_ID) {
    startStreamTasks(pMeta);
    return 0;
  } else if (taskId == STREAM_EXEC_RESTART_ALL_TASKS_ID) {
    restartStreamTasks(pMeta, isLeader);
    return 0;
  }

  SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, taskId);
  if (pTask != NULL) {  // even in halt status, the data in inputQ must be processed
    char* p = NULL;
    if (streamTaskReadyToRun(pTask, &p)) {
      tqDebug("vgId:%d s-task:%s start to process block from inputQ, next checked ver:%" PRId64, vgId, pTask->id.idStr,
              pTask->chkInfo.nextProcessVer);
      streamExecTask(pTask);
    } else {
      int8_t status = streamTaskSetSchedStatusInactive(pTask);
      tqDebug("vgId:%d s-task:%s ignore run req since not in ready state, status:%s, sched-status:%d", vgId,
              pTask->id.idStr, p, status);
    }

    streamMetaReleaseTask(pMeta, pTask);
    return 0;
  } else {  // NOTE: pTask->status.schedStatus is not updated, since the req is not handled by the run exec.
    // todo add one function to handle this
    tqError("vgId:%d failed to find s-task, taskId:0x%x may have been dropped", vgId, taskId);
    return -1;
  }
}

@@ -48,7 +48,7 @@ static int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScan
static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
                                     STsdbReader* pReader);

static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SCostSummary* pCost);
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SReadCostSummary* pCost);
static STsdb* getTsdbByRetentions(SVnode* pVnode, SQueryTableDataCond* pCond, SRetention* retentions, const char* idstr,
                                  int8_t* pLevel);
static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level);

@@ -58,6 +58,7 @@ static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbRea
static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo);

static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }

@@ -168,7 +169,7 @@ static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bo
    return TSDB_CODE_SUCCESS;
  }

  SCostSummary* pCost = &pReader->cost;
  SReadCostSummary* pCost = &pReader->cost;

  pIter->pLastBlockReader->uid = 0;
  tMergeTreeClose(&pIter->pLastBlockReader->mergeTree);

@@ -291,11 +292,7 @@ static SSDataBlock* createResBlock(SQueryTableDataCond* pCond, int32_t capacity)
}

static int32_t tsdbInitReaderLock(STsdbReader* pReader) {
  int32_t code = -1;
  qTrace("tsdb/read: %p, pre-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  code = taosThreadMutexInit(&pReader->readerMutex, NULL);

  int32_t code = taosThreadMutexInit(&pReader->readerMutex, NULL);
  qTrace("tsdb/read: %p, post-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  return code;

@@ -324,22 +321,14 @@ static int32_t tsdbAcquireReader(STsdbReader* pReader) {
}

static int32_t tsdbTryAcquireReader(STsdbReader* pReader) {
  int32_t code = -1;
  qTrace("tsdb/read: %p, pre-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  code = taosThreadMutexTryLock(&pReader->readerMutex);

  int32_t code = taosThreadMutexTryLock(&pReader->readerMutex);
  qTrace("tsdb/read: %p, post-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  return code;
}

static int32_t tsdbReleaseReader(STsdbReader* pReader) {
  int32_t code = -1;
  qTrace("tsdb/read: %p, pre-untake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  code = taosThreadMutexUnlock(&pReader->readerMutex);

  int32_t code = taosThreadMutexUnlock(&pReader->readerMutex);
  qTrace("tsdb/read: %p, post-untake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);

  return code;

@@ -432,6 +421,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void
  }

  tsdbInitReaderLock(pReader);
  tsem_init(&pReader->resumeAfterSuspend, 0, 0);

  *ppReader = pReader;
  return code;

@@ -1015,8 +1005,8 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
    // check if the current block has been fully handled
    if (pDumpInfo->rowIndex >= 0 && pDumpInfo->rowIndex < pRecord->numRow) {
      int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex];
      if (outOfTimeWindow(ts,
                          &pReader->info.window)) {  // the remaining data is out of the query time window, ignore current block
      if (outOfTimeWindow(ts, &pReader->info.window)) {
        // the remaining data is out of the query time window, ignore current block
        setBlockAllDumped(pDumpInfo, ts, pReader->info.order);
      }
    } else {

@@ -1123,16 +1113,12 @@ static bool getNeighborBlockOfSameTable(SDataBlockIter* pBlockIter, SFileDataBlo
  }

  int32_t step = asc ? 1 : -1;
  // *nextIndex = pBlockInfo->tbBlockIdx + step;
  // *pBlockIndex = *(SBlockIndex*)taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex);
  STableDataBlockIdx* pTableDataBlockIdx =
      taosArrayGet(pTableBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx + step);
  SFileDataBlockInfo* p = taosArrayGet(pBlockIter->blockList, pTableDataBlockIdx->globalIndex);
  memcpy(pRecord, &p->record, sizeof(SBrinRecord));

  *nextIndex = pBlockInfo->tbBlockIdx + step;

  // tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, pIndex->ordinalIndex, pBlock, tGetDataBlk);
  return true;
}

@@ -1376,23 +1362,19 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
    return TSDB_CODE_SUCCESS;
  }

  SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;

  int64_t st = taosGetTimestampUs();
  SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
  int32_t code = buildDataBlockFromBufImpl(pBlockScanInfo, endKey, pReader->resBlockInfo.capacity, pReader);

  blockDataUpdateTsWindow(pBlock, pReader->suppInfo.slotId[0]);
  pBlock->info.id.uid = pBlockScanInfo->uid;
  double el = (taosGetTimestampUs() - st) / 1000.0;
  updateComposedBlockInfo(pReader, el, pBlockScanInfo);

  setComposedBlockFlag(pReader, true);

  double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
  tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%" PRId64 ", brange:%" PRId64
            " - %" PRId64 ", uid:%" PRIu64 ", %s",
            pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey,
            pReader, el, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey,
            pBlockScanInfo->uid, pReader->idStr);

  pReader->cost.buildmemBlock += elapsedTime;
  pReader->cost.buildmemBlock += el;
  return code;
}

@@ -2293,13 +2275,12 @@ static int32_t loadNeighborIfOverlap(SFileDataBlockInfo* pBlockInfo, STableBlock
  return code;
}

static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo) {
void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo) {
  SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;

  pResBlock->info.id.uid = (pBlockScanInfo != NULL) ? pBlockScanInfo->uid : 0;
  pResBlock->info.dataLoad = 1;
  blockDataUpdateTsWindow(pResBlock, pReader->suppInfo.slotId[0]);

  setComposedBlockFlag(pReader, true);

  pReader->cost.composedBlocks += 1;

@@ -2356,7 +2337,6 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
      pBlockScanInfo = *pReader->status.pTableIter;
      if (pReader->pIgnoreTables &&
          taosHashGet(*pReader->pIgnoreTables, &pBlockScanInfo->uid, sizeof(pBlockScanInfo->uid))) {
        // setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->info.order);
        return code;
      }
    }

@@ -2436,7 +2416,7 @@ int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order) {
  return ASCENDING_TRAVERSE(order) ? 0 : taosArrayGetSize(pDelSkyline) - 1;
}

int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SCostSummary* pCost) {
int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SReadCostSummary* pCost) {
  int32_t code = 0;
  int32_t newDelDataInFile = taosArrayGetSize(pBlockScanInfo->pFileDelData);
  if (newDelDataInFile == 0 &&

@@ -2935,6 +2915,8 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
  SReaderStatus* pStatus = &pReader->status;
  STableUidList* pUidList = &pStatus->uidList;

  tsdbDebug("seq load data blocks from cache, %s", pReader->idStr);

  while (1) {
    if (pReader->code != TSDB_CODE_SUCCESS) {
      tsdbWarn("tsdb reader is stopped ASAP, code:%s, %s", strerror(pReader->code), pReader->idStr);

@@ -3043,6 +3025,8 @@ static ERetrieveType doReadDataFromLastFiles(STsdbReader* pReader) {
  SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
  SDataBlockIter* pBlockIter = &pReader->status.blockIter;

  tsdbDebug("seq load data blocks from stt files %s", pReader->idStr);

  while (1) {
    terrno = 0;

@@ -3774,7 +3758,6 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
  int32_t code = TSDB_CODE_SUCCESS;

  do {
    // SRow* pTSRow = NULL;
    TSDBROW row = {.type = -1};
    bool    freeTSRow = false;
    tsdbGetNextRowInMem(pBlockScanInfo, pReader, &row, endKey, &freeTSRow);

@@ -3783,6 +3766,7 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
    }

    if (row.type == TSDBROW_ROW_FMT) {
      int64_t ts = row.pTSRow->ts;
      code = doAppendRowFromTSRow(pBlock, pReader, row.pTSRow, pBlockScanInfo);

      if (freeTSRow) {

@@ -3792,13 +3776,17 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
      if (code) {
        return code;
      }

      pBlockScanInfo->lastProcKey = ts;
    } else {
      code = doAppendRowFromFileBlock(pBlock, pReader, row.pBlockData, row.iRow);
      if (code) {
        break;
      }
      pBlockScanInfo->lastProcKey = row.pBlockData->aTSKEY[row.iRow];
    }

    // no data in buffer, return immediately
    if (!(pBlockScanInfo->iter.hasVal || pBlockScanInfo->iiter.hasVal)) {
      break;

@@ -4107,7 +4095,7 @@ void tsdbReaderClose2(STsdbReader* pReader) {
    tsdbDataFileReaderClose(&pReader->pFileReader);
  }

  SCostSummary* pCost = &pReader->cost;
  SReadCostSummary* pCost = &pReader->cost;
  SFilesetIter* pFilesetIter = &pReader->status.fileIter;
  if (pFilesetIter->pLastBlockReader != NULL) {
    SLastBlockReader* pLReader = pFilesetIter->pLastBlockReader;

@@ -4122,6 +4110,7 @@ void tsdbReaderClose2(STsdbReader* pReader) {
  tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, true);
  pReader->pReadSnap = NULL;

  tsem_destroy(&pReader->resumeAfterSuspend);
  tsdbReleaseReader(pReader);
  tsdbUninitReaderLock(pReader);

@@ -4154,6 +4143,8 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
  SReaderStatus* pStatus = &pReader->status;
  STableBlockScanInfo* pBlockScanInfo = NULL;

  pReader->status.suspendInvoked = true;  // record the suspend status

  if (pStatus->loadFromFile) {
    SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
    if (pBlockInfo != NULL) {

@@ -4167,84 +4158,34 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {

    tsdbDataFileReaderClose(&pReader->pFileReader);

    SCostSummary* pCost = &pReader->cost;
    SReadCostSummary* pCost = &pReader->cost;
    pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &pCost->sttCost);
    pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
    // resetDataBlockScanInfo excluding lastKey
    STableBlockScanInfo** p = NULL;
    int32_t iter = 0;

    while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
      STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;

      pInfo->iterInit = false;
      pInfo->iter.hasVal = false;
      pInfo->iiter.hasVal = false;

      if (pInfo->iter.iter != NULL) {
        pInfo->iter.iter = tsdbTbDataIterDestroy(pInfo->iter.iter);
      }

      if (pInfo->iiter.iter != NULL) {
        pInfo->iiter.iter = tsdbTbDataIterDestroy(pInfo->iiter.iter);
      }

      pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline);
      pInfo->pFileDelData = taosArrayDestroy(pInfo->pFileDelData);
    }
  } else {
    // resetDataBlockScanInfo excluding lastKey
    STableBlockScanInfo** p = NULL;
    int32_t iter = 0;

    while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
      STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;

      pInfo->iterInit = false;
      pInfo->iter.hasVal = false;
      pInfo->iiter.hasVal = false;

      if (pInfo->iter.iter != NULL) {
        pInfo->iter.iter = tsdbTbDataIterDestroy(pInfo->iter.iter);
      }

      if (pInfo->iiter.iter != NULL) {
        pInfo->iiter.iter = tsdbTbDataIterDestroy(pInfo->iiter.iter);
      }

      pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline);
    }

    pBlockScanInfo = pStatus->pTableIter == NULL ? NULL : *pStatus->pTableIter;
    if (pBlockScanInfo) {
      // save lastKey to restore memory iterator
      STimeWindow w = pReader->resBlockInfo.pResBlock->info.window;
      pBlockScanInfo->lastProcKey = ASCENDING_TRAVERSE(pReader->info.order) ? w.ekey : w.skey;

      // reset the current table's data block scan info
      pBlockScanInfo->iterInit = false;

      pBlockScanInfo->iter.hasVal = false;
      pBlockScanInfo->iiter.hasVal = false;
      if (pBlockScanInfo->iter.iter != NULL) {
        pBlockScanInfo->iter.iter = tsdbTbDataIterDestroy(pBlockScanInfo->iter.iter);
      }

      if (pBlockScanInfo->iiter.iter != NULL) {
        pBlockScanInfo->iiter.iter = tsdbTbDataIterDestroy(pBlockScanInfo->iiter.iter);
      }

      pBlockScanInfo->pBlockList = taosArrayDestroy(pBlockScanInfo->pBlockList);
      pBlockScanInfo->pBlockIdxList = taosArrayDestroy(pBlockScanInfo->pBlockIdxList);
      // TODO: keep skyline for reuse
      pBlockScanInfo->delSkyline = taosArrayDestroy(pBlockScanInfo->delSkyline);
    }
  }

  // resetDataBlockScanInfo excluding lastKey
  STableBlockScanInfo** p = NULL;

  int32_t step = ASCENDING_TRAVERSE(pReader->info.order) ? 1 : -1;

  int32_t iter = 0;
  while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
    STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
    clearBlockScanInfo(pInfo);
    pInfo->sttKeyInfo.nextProcKey = pInfo->lastProcKey + step;
  }

  pStatus->uidList.currentIndex = 0;
  initReaderStatus(pStatus);

  tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false);
  pReader->pReadSnap = NULL;
  pReader->flag = READER_STATUS_SUSPEND;

#if SUSPEND_RESUME_TEST
  tsem_post(&pReader->resumeAfterSuspend);
#endif

  tsdbDebug("reader: %p suspended uid %" PRIu64 " in this query %s", pReader, pBlockScanInfo ? pBlockScanInfo->uid : 0,
            pReader->idStr);
  return code;

@@ -4399,6 +4340,16 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {

  SReaderStatus* pStatus = &pReader->status;

  // NOTE: the following code is used to test the suspend/resume of tsdbReader when it blocks the commit.
  // The data should be ingested in round-robin, and all the child tables should be created before ingesting data;
  // the version range of the query is used to verify the correctness of the suspend/resume functions.
  // This function will block before loading the SECOND block from the vnode buffer, and restart itself from the stt files.
#if SUSPEND_RESUME_TEST
  if (!pReader->status.suspendInvoked && !pReader->status.loadFromFile) {
    tsem_wait(&pReader->resumeAfterSuspend);
  }
#endif

  code = tsdbAcquireReader(pReader);
  qTrace("tsdb/read: %p, take read mutex, code: %d", pReader, code);

@@ -210,6 +210,7 @@ void clearBlockScanInfo(STableBlockScanInfo* p) {
  p->iterInit = false;
  p->iter.hasVal = false;
  p->iiter.hasVal = false;
  p->sttKeyInfo.status = STT_FILE_READER_UNINIT;

  if (p->iter.iter != NULL) {
    p->iter.iter = tsdbTbDataIterDestroy(p->iter.iter);

@@ -96,7 +96,7 @@ typedef struct SResultBlockInfo {
  int64_t capacity;
} SResultBlockInfo;

typedef struct SCostSummary {
typedef struct SReadCostSummary {
  int64_t numOfBlocks;
  double  blockLoadTime;
  double  buildmemBlock;

@@ -110,7 +110,7 @@ typedef struct SCostSummary {
  double createScanInfoList;
  double createSkylineIterTime;
  double initLastBlockReader;
} SCostSummary;
} SReadCostSummary;

typedef struct STableUidList {
  uint64_t* tableUidList;  // access the table uid list in uid ascending order

@@ -122,12 +122,6 @@ typedef struct {
  int32_t numOfSttFiles;
} SBlockNumber;

typedef struct SBlockIndex {
  int32_t ordinalIndex;
  int64_t inFileOffset;
  STimeWindow window;  // todo replace it with overlap flag.
} SBlockIndex;

typedef struct SBlockOrderWrapper {
  int64_t uid;
  int64_t offset;

@@ -192,6 +186,7 @@ typedef struct SFileBlockDumpInfo {
} SFileBlockDumpInfo;

typedef struct SReaderStatus {
  bool suspendInvoked;
  bool loadFromFile;       // check file stage
  bool composedDataBlock;  // the returned data block is a composed block or not
  SSHashObj* pTableMap;    // SHash<STableBlockScanInfo>

@@ -220,7 +215,8 @@ struct STsdbReader {
  int32_t type;  // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows
  SBlockLoadSuppInfo suppInfo;
  STsdbReadSnap* pReadSnap;
  SCostSummary cost;
  tsem_t resumeAfterSuspend;
  SReadCostSummary cost;
  SHashObj** pIgnoreTables;
  SSHashObj* pSchemaMap;  // keep the retrieved schema info, to avoid the overhead of repeatedly loading it
  SDataFileReader* pFileReader;  // the file reader

@@ -463,7 +463,6 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
      break;
    }

_exit:
  if (code) {
    vError("vgId:%d, failed to preprocess write request since %s, msg type:%s", TD_VID(pVnode), tstrerror(code),
           TMSG_INFO(pMsg->msgType));

@@ -767,7 +766,7 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
    case TDMT_STREAM_TASK_RUN:
      return tqProcessTaskRunReq(pVnode->pTq, pMsg);
    case TDMT_STREAM_TASK_DISPATCH:
      return tqProcessTaskDispatchReq(pVnode->pTq, pMsg, true);
      return tqProcessTaskDispatchReq(pVnode->pTq, pMsg);
    case TDMT_STREAM_TASK_DISPATCH_RSP:
      return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
    case TDMT_VND_STREAM_TASK_CHECK:

@@ -18,6 +18,7 @@
#include "sync.h"
#include "tsdb.h"
#include "vnd.h"
#include "tqCommon.h"

#define BATCH_ENABLE 0

@@ -569,8 +570,8 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
      vInfo("vgId:%d, sync restore finished, not launch stream tasks, since stream tasks are disabled", vgId);
    } else {
      vInfo("vgId:%d sync restore finished, start to launch stream tasks", pVnode->config.vgId);
      tqResetStreamTaskStatus(pVnode->pTq);
      tqStartStreamTaskAsync(pVnode->pTq, false);
      resetStreamTaskStatus(pVnode->pTq->pStreamMeta);
      tqStreamTaskStartAsync(pMeta, &pVnode->msgCb, false);
    }
  } else {
    vInfo("vgId:%d, sync restore finished, not launch stream tasks since not leader", vgId);

@@ -610,6 +610,13 @@ void streamEventReloadState(SOperatorInfo* pOperator) {
    compactEventWindow(pOperator, &curInfo, pInfo->pSeUpdated, pInfo->pSeDeleted, false);
    qDebug("===stream=== reload state. save result %" PRId64 ", %" PRIu64, curInfo.winInfo.sessionWin.win.skey,
           curInfo.winInfo.sessionWin.groupId);
    if (IS_VALID_SESSION_WIN(curInfo.winInfo)) {
      saveSessionOutputBuf(pAggSup, &curInfo.winInfo);
    }

    if (!curInfo.pWinFlag->endFlag) {
      continue;
    }

    if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
      saveResult(curInfo.winInfo, pInfo->pSeUpdated);

@@ -621,10 +628,6 @@ void streamEventReloadState(SOperatorInfo* pOperator) {
      getSessionHashKey(&curInfo.winInfo.sessionWin, &key);
      tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curInfo.winInfo, sizeof(SResultWindowInfo));
    }

    if (IS_VALID_SESSION_WIN(curInfo.winInfo)) {
      saveSessionOutputBuf(pAggSup, &curInfo.winInfo);
    }
  }
  taosMemoryFree(pBuf);

File diff suppressed because it is too large
File diff suppressed because it is too large

@@ -276,6 +276,7 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
    int32_t code = buildDispatchRsp(pTask, pReq, status, &pRsp->pCont);
    if (code != TSDB_CODE_SUCCESS) {
      stError("s-task:%s failed to build dispatch rsp, msgId:%d, code:%s", id, pReq->msgId, tstrerror(code));
      terrno = code;
      return code;
    }

@@ -252,14 +252,15 @@ static void streamScanHistoryDataImpl(SStreamTask* pTask, SArray* pRes, int32_t*
SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st) {
  ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE);

  void* exec = pTask->exec.pExecutor;
  bool  finished = false;
  void*       exec = pTask->exec.pExecutor;
  bool        finished = false;
  const char* id = pTask->id.idStr;

  qSetStreamOpOpen(exec);

  while (1) {
    if (streamTaskShouldPause(pTask)) {
      stDebug("s-task:%s paused from the scan-history task", pTask->id.idStr);
      stDebug("s-task:%s paused from the scan-history task", id);
      // quit from step1, do not continue to handle step2
      return (SScanhistoryDataInfo){TASK_SCANHISTORY_QUIT, 0};
    }

@@ -267,8 +268,7 @@ SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st) {
    SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
    if (pRes == NULL) {
      terrno = TSDB_CODE_OUT_OF_MEMORY;
      stError("s-task:%s scan-history prepare result block failed, code:%s, retry later", pTask->id.idStr,
              tstrerror(terrno));
      stError("s-task:%s scan-history prepare result block failed, code:%s, retry later", id, tstrerror(terrno));
      continue;
    }

@@ -281,12 +281,12 @@ SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st) {
    }

    // dispatch the generated results
    int32_t code = handleResultBlocks(pTask, pRes, size);
    /*int32_t code = */ handleResultBlocks(pTask, pRes, size);

    int64_t el = taosGetTimestampMs() - st;

    // the downstream task input queue is full, retry in 5s
    if (pTask->inputq.status == TASK_INPUT_STATUS__BLOCKED) {
    if (pTask->inputq.status == TASK_INPUT_STATUS__BLOCKED && (pTask->info.fillHistory == 1)) {
      return (SScanhistoryDataInfo){TASK_SCANHISTORY_REXEC, 5000};
    }

@@ -294,9 +294,9 @@ SScanhistoryDataInfo streamScanHistoryData(SStreamTask* pTask, int64_t st) {
      return (SScanhistoryDataInfo){TASK_SCANHISTORY_CONT, 0};
    }

    if (el >= STREAM_SCAN_HISTORY_TIMESLICE) {
      stDebug("s-task:%s fill-history:%d time slice exhausted, elapsed time:%.2fs, retry in 100ms",
              pTask->id.idStr, pTask->info.fillHistory, el / 1000.0);
    if (el >= STREAM_SCAN_HISTORY_TIMESLICE && (pTask->info.fillHistory == 1)) {
      stDebug("s-task:%s fill-history:%d time slice exhausted, elapsed time:%.2fs, retry in 100ms", id,
              pTask->info.fillHistory, el / 1000.0);
      return (SScanhistoryDataInfo){TASK_SCANHISTORY_REXEC, 100};
    }
  }

@@ -543,7 +543,7 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
 * todo: the batch of blocks should be tuned dynamically, according to the total elapsed time of each batch of blocks;
 * an appropriate batch of blocks should be handled in 5 to 10 sec.
 */
int32_t streamExecForAll(SStreamTask* pTask) {
int32_t doStreamExecTask(SStreamTask* pTask) {
  const char* id = pTask->id.idStr;

  // merge multiple input data if possible in the input queue.

@@ -654,7 +654,7 @@ int32_t streamExecTask(SStreamTask* pTask) {
  int8_t schedStatus = streamTaskSetSchedStatusActive(pTask);
  if (schedStatus == TASK_SCHED_STATUS__WAITING) {
    while (1) {
      int32_t code = streamExecForAll(pTask);
      int32_t code = doStreamExecTask(pTask);
      if (code < 0) {  // todo this status should be removed
        atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__FAILED);
        return -1;

@@ -409,7 +409,6 @@ static void addIntoNodeUpdateList(SStreamTask* pTask, int32_t nodeId) {
int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp) {
  ASSERT(pTask->id.taskId == pRsp->upstreamTaskId);
  const char* id = pTask->id.idStr;
  int32_t vgId = pTask->pMeta->vgId;

  if (streamTaskShouldStop(pTask)) {
    stDebug("s-task:%s should stop, do not do check downstream again", id);

@@ -461,16 +460,25 @@ int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRs
          "s-task:%s vgId:%d self vnode-transfer/leader-change/restart detected, old stage:%" PRId64 ", current stage:%" PRId64 ", "
          "not check wait for downstream task nodeUpdate, and all tasks restart",
          id, pRsp->upstreamNodeId, pRsp->oldStage, pTask->pMeta->stage);
      addIntoNodeUpdateList(pTask, pRsp->upstreamNodeId);
    } else {
      stError(
          "s-task:%s downstream taskId:0x%x (vgId:%d) not leader, self dispatch epset needs to be updated, not check "
          "downstream again, nodeUpdate needed",
          id, pRsp->downstreamTaskId, pRsp->downstreamNodeId);
      addIntoNodeUpdateList(pTask, pRsp->downstreamNodeId);
    }

    addIntoNodeUpdateList(pTask, pRsp->downstreamNodeId);
    streamMetaUpdateTaskDownstreamStatus(pTask, pTask->execInfo.init, taosGetTimestampMs(), false);

    // automatically set the related fill-history task to be failed.
    if (HAS_RELATED_FILLHISTORY_TASK(pTask)) {
      STaskId* pId = &pTask->hTaskInfo.id;

      SStreamTask* pHTask = streamMetaAcquireTask(pTask->pMeta, pId->streamId, pId->taskId);
      streamMetaUpdateTaskDownstreamStatus(pHTask, pHTask->execInfo.init, taosGetTimestampMs(), false);
      streamMetaReleaseTask(pTask->pMeta, pHTask);
    }
  } else {  // TASK_DOWNSTREAM_NOT_READY, let's retry in 100ms
    STaskRecheckInfo* pInfo = createRecheckInfo(pTask, pRsp);

@@ -1072,8 +1080,9 @@ int32_t streamMetaUpdateTaskDownstreamStatus(SStreamTask* pTask, int64_t startTs
  taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs));

  int32_t numOfTotal = streamMetaGetNumOfTasks(pMeta);
  int32_t numOfRecv = taosHashGetSize(pStartInfo->pReadyTaskSet) + taosHashGetSize(pStartInfo->pFailedTaskSet);

  if (taosHashGetSize(pStartInfo->pReadyTaskSet) + taosHashGetSize(pStartInfo->pFailedTaskSet) == numOfTotal) {
  if (numOfRecv == numOfTotal) {
    pStartInfo->readyTs = taosGetTimestampMs();
    pStartInfo->elapsedTime = (pStartInfo->startTs != 0) ? pStartInfo->readyTs - pStartInfo->startTs : 0;

@@ -1087,6 +1096,8 @@ int32_t streamMetaUpdateTaskDownstreamStatus(SStreamTask* pTask, int64_t startTs
    displayStatusInfo(pMeta, pStartInfo->pFailedTaskSet, false);

    streamMetaResetStartInfo(pStartInfo);
  } else {
    stDebug("vgId:%d recv check downstream results:%d, total:%d", pMeta->vgId, numOfRecv, numOfTotal);
  }

  streamMetaWUnLock(pMeta);

@@ -782,4 +782,5 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc)
  pDst->sinkDataSize = pSrc->sinkDataSize;
  pDst->activeCheckpointId = pSrc->activeCheckpointId;
  pDst->checkpointFailed = pSrc->checkpointFailed;
}
}

@@ -957,8 +957,8 @@ static void cliSendCb(uv_write_t* req, int status) {
  SCliMsg* pMsg = !transQueueEmpty(&pConn->cliMsgs) ? transQueueGet(&pConn->cliMsgs, 0) : NULL;
  if (pMsg != NULL) {
    int64_t cost = taosGetTimestampUs() - pMsg->st;
    if (cost > 1000 * 20) {
      tWarn("%s conn %p send cost:%dus, send exception", CONN_GET_INST_LABEL(pConn), pConn, (int)cost);
    if (cost > 1000 * 50) {
      tTrace("%s conn %p send cost:%dus ", CONN_GET_INST_LABEL(pConn), pConn, (int)cost);
    }
  }

@@ -648,6 +648,7 @@ int taosOpenFileNotStream(const char *path, int32_t tdFileOptions) {
  access |= (tdFileOptions & TD_FILE_TEXT) ? O_TEXT : 0;
  access |= (tdFileOptions & TD_FILE_EXCL) ? O_EXCL : 0;
  access |= (tdFileOptions & TD_FILE_CLOEXEC) ? O_CLOEXEC : 0;

  int fd = open(path, access, S_IRWXU | S_IRWXG | S_IRWXO);
  return fd;
}
|
||||
|
|
|
@ -538,66 +538,71 @@ int32_t tsDecompressTimestampImp(const char *const input, const int32_t nelement
|
|||
memcpy(output, input + 1, nelements * longBytes);
|
||||
return nelements * longBytes;
|
||||
} else if (input[0] == 1) { // Decompress
|
||||
int64_t *ostream = (int64_t *)output;
|
||||
if (tsSIMDEnable && tsAVX512Enable) {
|
||||
tsDecompressTimestampAvx512(input, nelements, output, false);
|
||||
} else if (tsSIMDEnable && tsAVX2Enable) {
|
||||
tsDecompressTimestampAvx2(input, nelements, output, false);
|
||||
} else {
|
||||
int64_t *ostream = (int64_t *)output;
|
||||
|
||||
int32_t ipos = 1, opos = 0;
|
||||
int8_t nbytes = 0;
|
||||
int64_t prev_value = 0;
|
||||
int64_t prev_delta = 0;
|
||||
int64_t delta_of_delta = 0;
|
||||
int32_t ipos = 1, opos = 0;
|
||||
int8_t nbytes = 0;
|
||||
int64_t prev_value = 0;
|
||||
int64_t prev_delta = 0;
|
||||
int64_t delta_of_delta = 0;
|
||||
|
||||
while (1) {
|
||||
uint8_t flags = input[ipos++];
|
||||
// Decode dd1
|
||||
uint64_t dd1 = 0;
|
||||
nbytes = flags & INT8MASK(4);
|
||||
if (nbytes == 0) {
|
||||
delta_of_delta = 0;
|
||||
} else {
|
||||
if (is_bigendian()) {
|
||||
memcpy(((char *)(&dd1)) + longBytes - nbytes, input + ipos, nbytes);
|
||||
while (1) {
|
||||
uint8_t flags = input[ipos++];
|
||||
// Decode dd1
|
||||
uint64_t dd1 = 0;
|
||||
nbytes = flags & INT8MASK(4);
|
||||
if (nbytes == 0) {
|
||||
delta_of_delta = 0;
|
||||
} else {
|
||||
memcpy(&dd1, input + ipos, nbytes);
|
||||
if (is_bigendian()) {
|
||||
memcpy(((char *)(&dd1)) + longBytes - nbytes, input + ipos, nbytes);
|
||||
} else {
|
||||
memcpy(&dd1, input + ipos, nbytes);
|
||||
}
|
||||
delta_of_delta = ZIGZAG_DECODE(int64_t, dd1);
|
||||
}
|
||||
delta_of_delta = ZIGZAG_DECODE(int64_t, dd1);
|
||||
}
|
||||
ipos += nbytes;
|
||||
if (opos == 0) {
|
||||
prev_value = delta_of_delta;
|
||||
prev_delta = 0;
|
||||
ostream[opos++] = delta_of_delta;
|
||||
} else {
|
||||
|
||||
ipos += nbytes;
|
||||
if (opos == 0) {
|
||||
prev_value = delta_of_delta;
|
||||
prev_delta = 0;
|
||||
ostream[opos++] = delta_of_delta;
|
||||
} else {
|
||||
prev_delta = delta_of_delta + prev_delta;
|
||||
prev_value = prev_value + prev_delta;
|
||||
ostream[opos++] = prev_value;
|
||||
}
|
||||
if (opos == nelements) return nelements * longBytes;
|
||||
|
||||
// Decode dd2
|
||||
uint64_t dd2 = 0;
|
||||
nbytes = (flags >> 4) & INT8MASK(4);
|
||||
if (nbytes == 0) {
|
||||
delta_of_delta = 0;
|
||||
} else {
|
||||
if (is_bigendian()) {
|
||||
memcpy(((char *)(&dd2)) + longBytes - nbytes, input + ipos, nbytes);
|
||||
} else {
|
||||
memcpy(&dd2, input + ipos, nbytes);
|
||||
}
|
||||
// zigzag_decoding
|
||||
delta_of_delta = ZIGZAG_DECODE(int64_t, dd2);
|
||||
}
|
||||
ipos += nbytes;
|
||||
prev_delta = delta_of_delta + prev_delta;
|
||||
prev_value = prev_value + prev_delta;
|
||||
ostream[opos++] = prev_value;
|
||||
if (opos == nelements) return nelements * longBytes;
|
||||
}
|
||||
if (opos == nelements) return nelements * longBytes;
|
||||
|
||||
// Decode dd2
|
||||
uint64_t dd2 = 0;
|
||||
nbytes = (flags >> 4) & INT8MASK(4);
|
||||
if (nbytes == 0) {
|
||||
delta_of_delta = 0;
|
||||
} else {
|
||||
if (is_bigendian()) {
|
||||
memcpy(((char *)(&dd2)) + longBytes - nbytes, input + ipos, nbytes);
|
||||
} else {
|
||||
memcpy(&dd2, input + ipos, nbytes);
|
||||
}
|
||||
// zigzag_decoding
|
||||
delta_of_delta = ZIGZAG_DECODE(int64_t, dd2);
|
||||
}
|
||||
ipos += nbytes;
|
||||
prev_delta = delta_of_delta + prev_delta;
|
||||
prev_value = prev_value + prev_delta;
|
||||
ostream[opos++] = prev_value;
|
||||
if (opos == nelements) return nelements * longBytes;
|
||||
}
|
||||
|
||||
} else {
|
||||
ASSERT(0);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return nelements * longBytes;
|
||||
}
|
||||
|
||||
/* --------------------------------------------Double Compression ---------------------------------------------- */
|
||||
|
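[Editor's note] For context on the hunk above: the scalar branch implements Gorilla-style delta-of-delta timestamp decoding. Each flag nibble gives the byte width of a zigzag-encoded delta-of-delta, and the decoder accumulates twice, first the delta, then the value. A minimal standalone sketch of that recurrence; the names zigzag_decode64 and dod_decode are illustrative, not from the codebase, and the byte-width framing is omitted:

#include <stddef.h>
#include <stdint.h>

// Zigzag maps 0,1,2,3,... back to 0,-1,1,-2,...; same idea as ZIGZAG_DECODE above.
static int64_t zigzag_decode64(uint64_t v) { return (int64_t)(v >> 1) ^ -(int64_t)(v & 1); }

// Rebuild timestamps from already-unpacked zigzag-encoded delta-of-deltas.
static void dod_decode(const uint64_t *dods, size_t n, int64_t *out) {
  int64_t prev_value = 0, prev_delta = 0;
  for (size_t i = 0; i < n; ++i) {
    int64_t dod = zigzag_decode64(dods[i]);
    if (i == 0) {
      prev_value = dod;          // the first slot carries the raw start value
    } else {
      prev_delta += dod;         // delta = previous delta + delta-of-delta
      prev_value += prev_delta;  // value = previous value + delta
    }
    out[i] = prev_value;
  }
}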
@@ -53,11 +53,8 @@ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements,
  int64_t prevValue = 0;

#if __AVX2__
  while (1) {
    if (_pos == nelements) break;

    uint64_t w = 0;
    memcpy(&w, ip, LONG_BYTES);
  while (_pos < nelements) {
    uint64_t w = *(uint64_t*) ip;

    char selector = (char)(w & INT64MASK(4));       // selector = 4
    char bit = bit_per_integer[(int32_t)selector];  // bit = 3

@@ -114,7 +111,7 @@ int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements,
    __m256i signmask = _mm256_and_si256(_mm256_set1_epi64x(1), zigzagVal);
    signmask = _mm256_sub_epi64(_mm256_setzero_si256(), signmask);

    // get the four zigzag values here
    // get four zigzag values here
    __m256i delta = _mm256_xor_si256(_mm256_srli_epi64(zigzagVal, 1), signmask);

    // calculate the cumulative sum (prefix sum) for each number
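[Editor's note] The prefix-sum comment above is the key step of the AVX2 integer path: four zigzag-decoded deltas sit in one __m256i and must become four running sums before being added to the previous value. A hedged sketch of one way to do that 4-lane in-register prefix sum; this is not necessarily the exact instruction sequence the file uses:

#include <immintrin.h>

// Given [d0, d1, d2, d3] in a __m256i, return [d0, d0+d1, d0+d1+d2, d0+d1+d2+d3].
static inline __m256i prefix_sum_epi64(__m256i x) {
  // Step 1: shift one 64-bit lane up inside each 128-bit half and add:
  // lanes become [d0, d0+d1, d2, d2+d3].
  x = _mm256_add_epi64(x, _mm256_slli_si256(x, 8));
  // Step 2: broadcast lane 1 (d0+d1) to all lanes, zero it in the lower half,
  // and add it so the upper half picks up the carry from the lower half.
  __m256i carry = _mm256_permute4x64_epi64(x, 0x55);
  carry = _mm256_blend_epi32(_mm256_setzero_si256(), carry, 0xF0);
  return _mm256_add_epi64(x, carry);
}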
@@ -250,73 +247,264 @@ int32_t tsDecompressFloatImplAvx2(const char *const input, const int32_t nelemen
  return 0;
}

int32_t tsDecompressTimestampAvx2(const char* const input, const int32_t nelements, char *const output, bool bigEndian) {
int32_t tsDecompressTimestampAvx2(const char *const input, const int32_t nelements, char *const output,
                                  bool bigEndian) {
#if 0
  int64_t *ostream = (int64_t *)output;
  int32_t ipos = 1, opos = 0;
  int8_t nbytes = 0;

  int64_t prevValue = 0;
  int64_t prevDelta = 0;

  int64_t deltaOfDelta = 0;
  int32_t longBytes = LONG_BYTES;
  __m128i prevVal = _mm_setzero_si128();
  __m128i prevDelta = _mm_setzero_si128();

#if __AVX2__
  int32_t batch = nelements >> 1;
  int32_t remainder = nelements & 0x01;
  __mmask16 mask2[16] = {0, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff};

  int32_t batch = nelements >> 2;
  int32_t remainder = nelements & 0x1;

  while (1) {
  int32_t i = 0;
  if (batch > 1) {
    // first loop
    uint8_t flags = input[ipos++];

    // Decode dd1
    uint64_t dd1 = 0;
    nbytes = flags & INT8MASK(4);
    int8_t nbytes1 = flags & INT8MASK(4);  // range of nbytes starts from 0 to 7
    int8_t nbytes2 = (flags >> 4) & INT8MASK(4);

    __m128i data1;
    if (nbytes1 == 0) {
      data1 = _mm_setzero_si128();
    } else {
      memcpy(&data1, (const void*) (input + ipos), nbytes1);
    }

    __m128i data2;
    if (nbytes2 == 0) {
      data2 = _mm_setzero_si128();
    } else {
      memcpy(&data2, (const void*) (input + ipos + nbytes1), nbytes2);
    }

    data2 = _mm_broadcastq_epi64(data2);
    __m128i zzVal = _mm_blend_epi32(data2, data1, 0x03);

    // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1)))
    __m128i signmask = _mm_and_si128(_mm_set1_epi64x(1), zzVal);
    signmask = _mm_sub_epi64(_mm_setzero_si128(), signmask);

    // get two zigzag values here
    __m128i deltaOfDelta = _mm_xor_si128(_mm_srli_epi64(zzVal, 1), signmask);

    __m128i deltaCurrent = _mm_add_epi64(deltaOfDelta, prevDelta);
    deltaCurrent = _mm_add_epi64(_mm_slli_si128(deltaCurrent, 8), deltaCurrent);

    __m128i val = _mm_add_epi64(deltaCurrent, prevVal);
    _mm_storeu_si128((__m128i *)&ostream[opos], val);

    // keep the previous value
    prevVal = _mm_shuffle_epi32 (val, 0xEE);

    // keep the previous delta of delta, for the first item
    prevDelta = _mm_shuffle_epi32(deltaOfDelta, 0xEE);

    opos += 2;
    ipos += nbytes1 + nbytes2;
    i += 1;
  }

  // the remain
  for(; i < batch; ++i) {
    uint8_t flags = input[ipos++];

    int8_t nbytes1 = flags & INT8MASK(4);  // range of nbytes starts from 0 to 7
    int8_t nbytes2 = (flags >> 4) & INT8MASK(4);

    //      __m128i data1 = _mm_maskz_loadu_epi8(mask2[nbytes1], (const void*)(input + ipos));
    //      __m128i data2 = _mm_maskz_loadu_epi8(mask2[nbytes2], (const void*)(input + ipos + nbytes1));
    __m128i data1;
    if (nbytes1 == 0) {
      data1 = _mm_setzero_si128();
    } else {
      int64_t dd = 0;
      memcpy(&dd, (const void*) (input + ipos), nbytes1);
      data1 = _mm_loadu_si64(&dd);
    }

    __m128i data2;
    if (nbytes2 == 0) {
      data2 = _mm_setzero_si128();
    } else {
      int64_t dd = 0;
      memcpy(&dd, (const void*) (input + ipos + nbytes1), nbytes2);
      data2 = _mm_loadu_si64(&dd);
    }

    data2 = _mm_broadcastq_epi64(data2);

    __m128i zzVal = _mm_blend_epi32(data2, data1, 0x03);

    // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1)))
    __m128i signmask = _mm_and_si128(_mm_set1_epi64x(1), zzVal);
    signmask = _mm_sub_epi64(_mm_setzero_si128(), signmask);

    // get two zigzag values here
    __m128i deltaOfDelta = _mm_xor_si128(_mm_srli_epi64(zzVal, 1), signmask);

    __m128i deltaCurrent = _mm_add_epi64(deltaOfDelta, prevDelta);
    deltaCurrent = _mm_add_epi64(_mm_slli_si128(deltaCurrent, 8), deltaCurrent);

    __m128i val = _mm_add_epi64(deltaCurrent, prevVal);
    _mm_storeu_si128((__m128i *)&ostream[opos], val);

    // keep the previous value
    prevVal = _mm_shuffle_epi32 (val, 0xEE);

    // keep the previous delta of delta
    __m128i delta = _mm_add_epi64(_mm_slli_si128(deltaOfDelta, 8), deltaOfDelta);
    prevDelta = _mm_shuffle_epi32(_mm_add_epi64(delta, prevDelta), 0xEE);

    opos += 2;
    ipos += nbytes1 + nbytes2;
  }

  if (remainder > 0) {
    uint64_t dd = 0;
    uint8_t flags = input[ipos++];

    int32_t nbytes = flags & INT8MASK(4);
    int64_t deltaOfDelta = 0;
    if (nbytes == 0) {
      deltaOfDelta = 0;
    } else {
      if (bigEndian) {
        memcpy(((char *)(&dd1)) + longBytes - nbytes, input + ipos, nbytes);
      } else {
        memcpy(&dd1, input + ipos, nbytes);
      }
      deltaOfDelta = ZIGZAG_DECODE(int64_t, dd1);
      //      if (is_bigendian()) {
      //        memcpy(((char *)(&dd1)) + longBytes - nbytes, input + ipos, nbytes);
      //      } else {
      memcpy(&dd, input + ipos, nbytes);
      //      }
      deltaOfDelta = ZIGZAG_DECODE(int64_t, dd);
    }

    ipos += nbytes;
    prevDelta += deltaOfDelta;
    prevValue += prevDelta;
    ostream[opos++] = prevValue;

    if (opos == nelements) {
      return nelements * longBytes;
    }

    // Decode dd2
    uint64_t dd2 = 0;
    nbytes = (flags >> 4) & INT8MASK(4);
    if (nbytes == 0) {
      deltaOfDelta = 0;
    if (opos == 0) {
      ostream[opos++] = deltaOfDelta;
    } else {
      if (bigEndian) {
        memcpy(((char *)(&dd2)) + longBytes - nbytes, input + ipos, nbytes);
      } else {
        memcpy(&dd2, input + ipos, nbytes);
      }
      // zigzag_decoding
      deltaOfDelta = ZIGZAG_DECODE(int64_t, dd2);
    }

    ipos += nbytes;
    prevDelta += deltaOfDelta;
    prevValue += prevDelta;
    ostream[opos++] = prevValue;

    if (opos == nelements) {
      return nelements * longBytes;
      int64_t prevDeltaX = deltaOfDelta + prevDelta[1];
      ostream[opos++] = prevVal[1] + prevDeltaX;
    }
  }
#endif
#endif
  return 0;
}

int32_t tsDecompressTimestampAvx512(const char *const input, const int32_t nelements, char *const output,
                                    bool UNUSED_PARAM(bigEndian)) {
  int64_t *ostream = (int64_t *)output;
  int32_t ipos = 1, opos = 0;

#if __AVX512VL__

  __m128i prevVal = _mm_setzero_si128();
  __m128i prevDelta = _mm_setzero_si128();

  int32_t numOfBatch = nelements >> 1;
  int32_t remainder = nelements & 0x01;
  __mmask16 mask2[16] = {0, 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff};

  int32_t i = 0;
  if (numOfBatch > 1) {
    // first loop
    uint8_t flags = input[ipos++];

    int8_t nbytes1 = flags & INT8MASK(4);  // range of nbytes starts from 0 to 7
    int8_t nbytes2 = (flags >> 4) & INT8MASK(4);

    __m128i data1 = _mm_maskz_loadu_epi8(mask2[nbytes1], (const void*)(input + ipos));
    __m128i data2 = _mm_maskz_loadu_epi8(mask2[nbytes2], (const void*)(input + ipos + nbytes1));
    data2 = _mm_broadcastq_epi64(data2);

    __m128i zzVal = _mm_blend_epi32(data2, data1, 0x03);

    // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1)))
    __m128i signmask = _mm_and_si128(_mm_set1_epi64x(1), zzVal);
    signmask = _mm_sub_epi64(_mm_setzero_si128(), signmask);

    // get two zigzag values here
    __m128i deltaOfDelta = _mm_xor_si128(_mm_srli_epi64(zzVal, 1), signmask);

    __m128i deltaCurrent = _mm_add_epi64(deltaOfDelta, prevDelta);
    deltaCurrent = _mm_add_epi64(_mm_slli_si128(deltaCurrent, 8), deltaCurrent);

    __m128i val = _mm_add_epi64(deltaCurrent, prevVal);
    _mm_storeu_si128((__m128i *)&ostream[opos], val);

    // keep the previous value
    prevVal = _mm_shuffle_epi32 (val, 0xEE);

    // keep the previous delta of delta, for the first item
    prevDelta = _mm_shuffle_epi32(deltaOfDelta, 0xEE);

    opos += 2;
    ipos += nbytes1 + nbytes2;
    i += 1;
  }

  // the remain
  for(; i < numOfBatch; ++i) {
    uint8_t flags = input[ipos++];

    int8_t nbytes1 = flags & INT8MASK(4);  // range of nbytes starts from 0 to 7
    int8_t nbytes2 = (flags >> 4) & INT8MASK(4);

    __m128i data1 = _mm_maskz_loadu_epi8(mask2[nbytes1], (const void*)(input + ipos));
    __m128i data2 = _mm_maskz_loadu_epi8(mask2[nbytes2], (const void*)(input + ipos + nbytes1));
    data2 = _mm_broadcastq_epi64(data2);

    __m128i zzVal = _mm_blend_epi32(data2, data1, 0x03);

    // ZIGZAG_DECODE(T, v) (((v) >> 1) ^ -((T)((v)&1)))
    __m128i signmask = _mm_and_si128(_mm_set1_epi64x(1), zzVal);
    signmask = _mm_sub_epi64(_mm_setzero_si128(), signmask);

    // get two zigzag values here
    __m128i deltaOfDelta = _mm_xor_si128(_mm_srli_epi64(zzVal, 1), signmask);

    __m128i deltaCurrent = _mm_add_epi64(deltaOfDelta, prevDelta);
    deltaCurrent = _mm_add_epi64(_mm_slli_si128(deltaCurrent, 8), deltaCurrent);

    __m128i val = _mm_add_epi64(deltaCurrent, prevVal);
    _mm_storeu_si128((__m128i *)&ostream[opos], val);

    // keep the previous value
    prevVal = _mm_shuffle_epi32 (val, 0xEE);

    // keep the previous delta of delta
    __m128i delta = _mm_add_epi64(_mm_slli_si128(deltaOfDelta, 8), deltaOfDelta);
    prevDelta = _mm_shuffle_epi32(_mm_add_epi64(delta, prevDelta), 0xEE);

    opos += 2;
    ipos += nbytes1 + nbytes2;
  }

  if (remainder > 0) {
    uint64_t dd = 0;
    uint8_t flags = input[ipos++];

    int32_t nbytes = flags & INT8MASK(4);
    int64_t deltaOfDelta = 0;
    if (nbytes == 0) {
      deltaOfDelta = 0;
    } else {
      memcpy(&dd, input + ipos, nbytes);
      deltaOfDelta = ZIGZAG_DECODE(int64_t, dd);
    }

    ipos += nbytes;
    if (opos == 0) {
      ostream[opos++] = deltaOfDelta;
    } else {
      int64_t prevDeltaX = deltaOfDelta + prevDelta[1];
      ostream[opos++] = prevVal[1] + prevDeltaX;
    }
  }

#endif
  return 0;
}
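[Editor's note] For readers following the pair-at-a-time loops above: each flag byte packs two nibble byte-widths, and _mm_maskz_loadu_epi8(mask2[n], p) acts as a branch-free stand-in for "zero a register, then memcpy n bytes". A scalar sketch of the recurrence one iteration advances, mirroring the scalar reference decoder; decode_pair and its parameter names are illustrative only, and big-endian handling is omitted:

#include <stdint.h>
#include <string.h>

static void decode_pair(const uint8_t *in /* flag byte + payload */,
                        int64_t *prev_value, int64_t *prev_delta, int64_t out[2]) {
  uint8_t flags = in[0];
  int nbytes1 = flags & 0x0F;         // byte width of the first delta-of-delta
  int nbytes2 = (flags >> 4) & 0x0F;  // byte width of the second

  uint64_t raw1 = 0, raw2 = 0;  // little-endian, zero-extended loads
  memcpy(&raw1, in + 1, nbytes1);
  memcpy(&raw2, in + 1 + nbytes1, nbytes2);

  int64_t dod1 = (int64_t)(raw1 >> 1) ^ -(int64_t)(raw1 & 1);  // zigzag decode
  int64_t dod2 = (int64_t)(raw2 >> 1) ^ -(int64_t)(raw2 & 1);

  // Two dependent accumulations; the vector code collapses these into a
  // 2-lane shifted add (prefix sum) plus a lane shuffle that carries
  // prevVal/prevDelta across iterations.
  *prev_delta += dod1;
  *prev_value += *prev_delta;
  out[0] = *prev_value;
  *prev_delta += dod2;
  *prev_value += *prev_delta;
  out[1] = *prev_value;
}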
@@ -0,0 +1,94 @@
#include <gtest/gtest.h>
#include <stdlib.h>
#include <tcompression.h>
#include <random>

namespace {}  // namespace

TEST(utilTest, decompress_test) {
  int64_t tsList[10] = {1700000000, 1700000100, 1700000200, 1700000300, 1700000400,
                        1700000500, 1700000600, 1700000700, 1700000800, 1700000900};

  char* pOutput[10 * sizeof(int64_t)] = {0};
  int32_t len = tsCompressTimestamp(tsList, sizeof(tsList), sizeof(tsList) / sizeof(tsList[0]), pOutput, 10, ONE_STAGE_COMP, NULL, 0);

  char* decompOutput[10 * 8] = {0};
  tsDecompressTimestamp(pOutput, len, 10, decompOutput, sizeof(int64_t)*10, ONE_STAGE_COMP, NULL, 0);

  for(int32_t i = 0; i < 10; ++i) {
    std::cout<< ((int64_t*)decompOutput)[i] << std::endl;
  }

  memset(decompOutput, 0, 10*8);
  tsDecompressTimestampAvx512(reinterpret_cast<const char* const>(pOutput), 10,
                              reinterpret_cast<char* const>(decompOutput), false);

  for(int32_t i = 0; i < 10; ++i) {
    std::cout<<((int64_t*)decompOutput)[i] << std::endl;
  }

  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  int64_t tsList1[7] = {1700000000, 1700000000, 1700000000, 1700000000, 1700000000, 1700000000, 1700000900};
  int32_t len1 = tsCompressTimestamp(tsList1, sizeof(tsList1), sizeof(tsList1) / sizeof(tsList1[0]), pOutput, 7, ONE_STAGE_COMP, NULL, 0);

  memset(decompOutput, 0, 10*8);
  tsDecompressTimestampAvx512(reinterpret_cast<const char* const>(pOutput), 7,
                              reinterpret_cast<char* const>(decompOutput), false);

  for(int32_t i = 0; i < 7; ++i) {
    std::cout<<((int64_t*)decompOutput)[i] << std::endl;
  }

  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  int64_t tsList2[1] = {1700000000};
  int32_t len2 = tsCompressTimestamp(tsList2, sizeof(tsList2), sizeof(tsList2) / sizeof(tsList2[0]), pOutput, 1, ONE_STAGE_COMP, NULL, 0);

  memset(decompOutput, 0, 10*8);
  tsDecompressTimestampAvx512(reinterpret_cast<const char* const>(pOutput), 1,
                              reinterpret_cast<char* const>(decompOutput), false);

  for(int32_t i = 0; i < 1; ++i) {
    std::cout<<((int64_t*)decompOutput)[i] << std::endl;
  }
}

TEST(utilTest, decompress_perf_test) {
  int32_t num = 10000;

  int64_t* pList = static_cast<int64_t*>(taosMemoryCalloc(num, sizeof(int64_t)));
  int64_t iniVal = 1700000000;

  uint32_t v = 100;

  for(int32_t i = 0; i < num; ++i) {
    iniVal += taosRandR(&v)%10;
    pList[i] = iniVal;
  }

  char* px = static_cast<char*>(taosMemoryMalloc(num * sizeof(int64_t)));
  int32_t len = tsCompressTimestamp(pList, num * sizeof(int64_t), num, px, num, ONE_STAGE_COMP, NULL, 0);

  char* pOutput = static_cast<char*>(taosMemoryMalloc(num * sizeof(int64_t)));

  int64_t st = taosGetTimestampUs();
  for(int32_t k = 0; k < 10000; ++k) {
    tsDecompressTimestamp(px, len, num, pOutput, sizeof(int64_t) * num, ONE_STAGE_COMP, NULL, 0);
  }

  int64_t el1 = taosGetTimestampUs() - st;
  std::cout << "soft decompress elapsed time:" << el1 << " us" << std::endl;

  memset(pOutput, 0, num * sizeof(int64_t));
  st = taosGetTimestampUs();
  for(int32_t k = 0; k < 10000; ++k) {
    tsDecompressTimestampAvx512(px, num, pOutput, false);
  }

  int64_t el2 = taosGetTimestampUs() - st;
  std::cout << "SIMD decompress elapsed time:" << el2 << " us" << std::endl;

  taosMemoryFree(pList);
  taosMemoryFree(pOutput);
  taosMemoryFree(px);
}
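[Editor's note] The new tests above only print the decoded values; a stricter check would compare the scalar and SIMD decoders element-by-element on randomized input. A hedged sketch reusing the same helpers the test file already calls (tsCompressTimestamp, tsDecompressTimestamp, tsDecompressTimestampAvx512, taosRandR), and assuming <vector> is acceptable in this test file:

#include <vector>

TEST(utilTest, decompress_roundtrip_equivalence) {
  constexpr int32_t kNum = 4096;
  std::vector<int64_t> src(kNum);
  int64_t ts = 1700000000;
  uint32_t seed = 42;
  for (int32_t i = 0; i < kNum; ++i) {
    ts += taosRandR(&seed) % 1000;  // irregular but increasing timestamps
    src[i] = ts;
  }

  std::vector<char> comp(kNum * sizeof(int64_t) + 1);
  int32_t len = tsCompressTimestamp(src.data(), kNum * sizeof(int64_t), kNum,
                                    comp.data(), kNum, ONE_STAGE_COMP, NULL, 0);

  std::vector<int64_t> scalarOut(kNum), simdOut(kNum);
  tsDecompressTimestamp(comp.data(), len, kNum, (char*)scalarOut.data(),
                        kNum * sizeof(int64_t), ONE_STAGE_COMP, NULL, 0);
  tsDecompressTimestampAvx512(comp.data(), kNum, (char*)simdOut.data(), false);

  for (int32_t i = 0; i < kNum; ++i) {
    ASSERT_EQ(scalarOut[i], simdOut[i]) << "mismatch at index " << i;
  }
}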
@@ -1,4 +1,7 @@
python3 ./test.py -f 2-query/table_count_scan.py
python3 ./test.py -f 2-query/pseudo_column.py
python3 ./test.py -f 2-query/ts-range.py
python3 ./test.py -f 2-query/tag_scan.py
python3 ./test.py -f 2-query/show_create_db.py
python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py
python3 ./test.py -f 5-taos-tools/taosbenchmark/custom_col_tag.py

@@ -198,6 +198,7 @@
#,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeTransform-db.py -N 6 -n 3
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 2 -n 1
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py -N 3 -n 3
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata-false.py -N 3 -n 3
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 3 -n 3
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-stb.py -N 3 -n 3
,,y,system-test,./pytest.sh python3 test.py -f 7-tmq/tmqVnodeSplit-column.py -N 3 -n 3

@@ -19,19 +19,18 @@
./test.sh -f tsim/dnode/balance3.sim
./test.sh -f tsim/vnode/replica3_many.sim
./test.sh -f tsim/stable/metrics_idx.sim
./test.sh -f tsim/db/alter_replica_13.sim
./test.sh -f tsim/sync/3Replica1VgElect.sim
./test.sh -f tsim/sync/3Replica5VgElect.sim
./test.sh -f tsim/valgrind/checkError6.sim
./test.sh -f tsim/user/basic.sim
./test.sh -f tsim/user/password.sim
./test.sh -f tsim/user/whitelist.sim
./test.sh -f tsim/user/privilege_db.sim
./test.sh -f tsim/user/privilege_sysinfo.sim
./test.sh -f tsim/user/privilege_topic.sim
./test.sh -f tsim/user/privilege_table.sim
./test.sh -f tsim/user/privilege_create_db.sim
./test.sh -f tsim/db/alter_option.sim
./test.sh -f tsim/db/alter_replica_31.sim
./test.sh -f tsim/db/basic1.sim
./test.sh -f tsim/db/basic2.sim
./test.sh -f tsim/db/basic3.sim

@@ -97,6 +96,7 @@
./test.sh -f tsim/insert/delete0.sim
./test.sh -f tsim/insert/update1_sort_merge.sim
./test.sh -f tsim/insert/update2.sim
./test.sh -f tsim/insert/insert_stb.sim
./test.sh -f tsim/parser/alter__for_community_version.sim
./test.sh -f tsim/parser/alter_column.sim
./test.sh -f tsim/parser/alter_stable.sim

@@ -186,6 +186,7 @@
./test.sh -f tsim/query/session.sim
./test.sh -f tsim/query/join_interval.sim
./test.sh -f tsim/query/join_pk.sim
./test.sh -f tsim/query/count_spread.sim
./test.sh -f tsim/query/unionall_as_table.sim
./test.sh -f tsim/query/multi_order_by.sim
./test.sh -f tsim/query/sys_tbname.sim

@@ -197,10 +198,14 @@
./test.sh -f tsim/query/emptyTsRange_scl.sim
./test.sh -f tsim/query/partitionby.sim
./test.sh -f tsim/query/tableCount.sim
./test.sh -f tsim/query/show_db_table_kind.sim
./test.sh -f tsim/query/bi_star_table.sim
./test.sh -f tsim/query/bi_tag_scan.sim
./test.sh -f tsim/query/tag_scan.sim
./test.sh -f tsim/query/nullColSma.sim
./test.sh -f tsim/query/bug3398.sim
./test.sh -f tsim/query/explain_tsorder.sim
./test.sh -f tsim/query/apercentile.sim
./test.sh -f tsim/qnode/basic1.sim
./test.sh -f tsim/snode/basic1.sim
./test.sh -f tsim/mnode/basic1.sim

@@ -237,52 +242,6 @@
./test.sh -f tsim/table/table.sim
./test.sh -f tsim/table/tinyint.sim
./test.sh -f tsim/table/vgroup.sim
./test.sh -f tsim/stream/basic0.sim -g
./test.sh -f tsim/stream/basic1.sim
./test.sh -f tsim/stream/basic2.sim
./test.sh -f tsim/stream/basic3.sim
./test.sh -f tsim/stream/basic4.sim
./test.sh -f tsim/stream/checkpointInterval0.sim
./test.sh -f tsim/stream/checkStreamSTable1.sim
./test.sh -f tsim/stream/checkStreamSTable.sim
./test.sh -f tsim/stream/deleteInterval.sim
./test.sh -f tsim/stream/deleteSession.sim
./test.sh -f tsim/stream/deleteState.sim
./test.sh -f tsim/stream/distributeInterval0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
./test.sh -f tsim/stream/distributeSession0.sim
./test.sh -f tsim/stream/drop_stream.sim
./test.sh -f tsim/stream/fillHistoryBasic1.sim
./test.sh -f tsim/stream/fillHistoryBasic2.sim
./test.sh -f tsim/stream/fillHistoryBasic3.sim
./test.sh -f tsim/stream/fillIntervalDelete0.sim
./test.sh -f tsim/stream/fillIntervalDelete1.sim
./test.sh -f tsim/stream/fillIntervalLinear.sim
./test.sh -f tsim/stream/fillIntervalPartitionBy.sim
./test.sh -f tsim/stream/fillIntervalPrevNext1.sim
./test.sh -f tsim/stream/fillIntervalPrevNext.sim
./test.sh -f tsim/stream/fillIntervalRange.sim
./test.sh -f tsim/stream/fillIntervalValue.sim
./test.sh -f tsim/stream/ignoreCheckUpdate.sim
./test.sh -f tsim/stream/ignoreExpiredData.sim
./test.sh -f tsim/stream/partitionby1.sim
./test.sh -f tsim/stream/partitionbyColumnInterval.sim
./test.sh -f tsim/stream/partitionbyColumnSession.sim
./test.sh -f tsim/stream/partitionbyColumnState.sim
./test.sh -f tsim/stream/partitionby.sim
./test.sh -f tsim/stream/pauseAndResume.sim
./test.sh -f tsim/stream/schedSnode.sim
./test.sh -f tsim/stream/session0.sim
./test.sh -f tsim/stream/session1.sim
./test.sh -f tsim/stream/sliding.sim
./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/state1.sim
./test.sh -f tsim/stream/triggerInterval0.sim
./test.sh -f tsim/stream/triggerSession0.sim
./test.sh -f tsim/stream/udTableAndTag0.sim
./test.sh -f tsim/stream/udTableAndTag1.sim
./test.sh -f tsim/stream/udTableAndTag2.sim
./test.sh -f tsim/stream/windowClose.sim
./test.sh -f tsim/trans/lossdata1.sim
./test.sh -f tsim/tmq/basic1.sim
./test.sh -f tsim/tmq/basic2.sim

@@ -431,3 +390,8 @@
./test.sh -f tsim/tag/drop_tag.sim
./test.sh -f tsim/tag/tbNameIn.sim
./test.sh -f tmp/monitor.sim
./test.sh -f tsim/tagindex/add_index.sim
./test.sh -f tsim/tagindex/sma_and_tag_index.sim
./test.sh -f tsim/view/view.sim
./test.sh -f tsim/query/cache_last.sim
./test.sh -f tsim/query/const.sim
@@ -27,6 +27,7 @@ class TDTestCase:
        tdSql.init(conn.cursor())
        self.setsql = TDSetSql()
        self.stbname = 'stb'
        self.user_name = 'test'
        self.binary_length = 20 # the length of binary for column_dict
        self.nchar_length = 20  # the length of nchar for column_dict
        self.dbnames = ['db1', 'db2']

@@ -54,12 +55,12 @@ class TDTestCase:
        ]

        self.tbnum = 4
        self.stbnum_grant = 200

    def create_user(self):
        user_name = 'test'
        tdSql.execute(f'create user {user_name} pass "test"')
        tdSql.execute(f'grant read on {self.dbnames[0]}.{self.stbname} with t2 = "Beijing" to {user_name}')
        tdSql.execute(f'grant write on {self.dbnames[1]}.{self.stbname} with t1 = 2 to {user_name}')
        tdSql.execute(f'create user {self.user_name} pass "test"')
        tdSql.execute(f'grant read on {self.dbnames[0]}.{self.stbname} with t2 = "Beijing" to {self.user_name}')
        tdSql.execute(f'grant write on {self.dbnames[1]}.{self.stbname} with t1 = 2 to {self.user_name}')

    def prepare_data(self):
        for db in self.dbnames:

@@ -70,6 +71,8 @@ class TDTestCase:
            tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_list[i]})')
            for j in self.values_list:
                tdSql.execute(f'insert into {self.stbname}_{i} values({j})')
        for i in range(self.stbnum_grant):
            tdSql.execute(f'create table {self.stbname}_grant_{i} (ts timestamp, c0 int) tags(t0 int)')

    def user_read_privilege_check(self, dbname):
        testconn = taos.connect(user='test', password='test')

@@ -128,12 +131,20 @@ class TDTestCase:
            tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured")
        pass

    def user_privilege_grant_check(self):
        for db in self.dbnames:
            tdSql.execute(f"use {db}")
            for i in range(self.stbnum_grant):
                tdSql.execute(f'grant read on {db}.{self.stbname}_grant_{i} to {self.user_name}')
                tdSql.execute(f'grant write on {db}.{self.stbname}_grant_{i} to {self.user_name}')

    def run(self):
        self.prepare_data()
        self.create_user()
        self.user_read_privilege_check(self.dbnames[0])
        self.user_write_privilege_check(self.dbnames[1])
        self.user_privilege_error_check()
        self.user_privilege_grant_check()

    def stop(self):
        tdSql.close()

@@ -75,7 +75,7 @@ class TMQCom:
            if tdSql.getRows() == expectRows:
                break
            else:
                time.sleep(5)
                time.sleep(0.5)

        for i in range(expectRows):
            tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))

@@ -156,7 +156,7 @@ class TMQCom:
            tdLog.info("row: %d"%(actRows))
            if (actRows >= rows):
                loopFlag = 0
            time.sleep(0.02)
            time.sleep(0.5)
        return

    def getStartCommitNotifyFromTmqsim(self,cdbName='cdb',rows=1):

@@ -167,7 +167,7 @@ class TMQCom:
            tdLog.info("row: %d"%(actRows))
            if (actRows >= rows):
                loopFlag = 0
            time.sleep(0.02)
            time.sleep(0.5)
        return

    def create_database(self,tsql, dbName,dropFlag=1,vgroups=4,replica=1):
@@ -0,0 +1,218 @@

import taos
import sys
import time
import socket
import os
import threading
import math

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
from util.cluster import *
sys.path.append("./7-tmq")
from tmqCommon import *

sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck


class TDTestCase:
    def __init__(self):
        self.vgroups = 1
        self.ctbNum = 10
        self.rowsPerTbl = 10000

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor(), False)

    def getDataPath(self):
        selfPath = tdCom.getBuildPath()

        return selfPath + '/../sim/dnode%d/data/vnode/vnode%d/wal/*';

    def prepareTestEnv(self):
        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 1,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 10,
                    'rowsPerTbl': 10000,
                    'batchNum': 10,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 60,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tdCom.drop_all_db()
        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], wal_retention_period=36000,vgroups=paraDict["vgroups"],replica=self.replicaVar)
        tdLog.info("create stb")
        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
        return

    def restartAndRemoveWal(self, deleteWal):
        tdDnodes = cluster.dnodes
        tdSql.query("select * from information_schema.ins_vnodes")
        for result in tdSql.queryResult:
            if result[2] == 'dbt':
                tdLog.debug("dnode is %d"%(result[0]))
                dnodeId = result[0]
                vnodeId = result[1]

                tdDnodes[dnodeId - 1].stoptaosd()
                time.sleep(1)
                dataPath = self.getDataPath()
                dataPath = dataPath%(dnodeId,vnodeId)
                tdLog.debug("dataPath:%s"%dataPath)
                if deleteWal:
                    if os.system('rm -rf ' + dataPath) != 0:
                        tdLog.exit("rm error")

                tdDnodes[dnodeId - 1].starttaosd()
                time.sleep(1)
                break
        tdLog.debug("restart dnode ok")

    def splitVgroups(self):
        tdSql.query("select * from information_schema.ins_vnodes")
        vnodeId = 0
        for result in tdSql.queryResult:
            if result[2] == 'dbt':
                vnodeId = result[1]
                tdLog.debug("vnode is %d"%(vnodeId))
                break
        splitSql = "split vgroup %d" %(vnodeId)
        tdLog.debug("splitSql:%s"%(splitSql))
        tdSql.query(splitSql)
        tdLog.debug("splitSql ok")

    def tmqCase1(self, deleteWal=False):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 1,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb1',
                    'ctbStartIdx': 0,
                    'ctbNum': 10,
                    'rowsPerTbl': 10000,
                    'batchNum': 10,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 120,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        topicNameList = ['topic1']
        # expectRowsList = []
        tmqCom.initConsumerTable()

        tdLog.info("create topics from stb with filter")
        queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
        # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        # tdSql.query(queryString)
        # expectRowsList.append(tdSql.getRows())

        # init consume info, and start tmq_sim, then check consume result
        tdLog.info("insert consume info to consume processor")
        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
        topicList = topicNameList[0]
        ifcheckdata = 1
        ifManualCommit = 1
        keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
        tdLog.info("wait the consume result")

        tdLog.info("create ctb1")
        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])

        tdLog.info("create ctb2")
        paraDict['ctbPrefix'] = "ctb2"
        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])

        tdLog.info("insert ctb1 data")
        pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)

        tmqCom.getStartConsumeNotifyFromTmqsim()
        tmqCom.getStartCommitNotifyFromTmqsim()

        #restart dnode & remove wal
        self.restartAndRemoveWal(deleteWal)

        # split vgroup
        self.splitVgroups()


        tdLog.info("insert ctb2 data")
        pInsertThread1 = tmqCom.asyncInsertDataByInterlace(paraDict)
        pInsertThread.join()
        pInsertThread1.join()

        expectRows = 1
        resultList = tmqCom.selectConsumeResult(expectRows)

        if expectrowcnt / 2 >= resultList[0]:
            tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectrowcnt / 2, resultList[0]))
            tdLog.exit("%d tmq consume rows error!"%consumerId)

        # tmqCom.checkFileContent(consumerId, queryString)

        time.sleep(2)
        for i in range(len(topicNameList)):
            tdSql.query("drop topic %s"%topicNameList[i])

        if deleteWal == True:
            clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240)

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def run(self):
        self.prepareTestEnv()
        self.tmqCase1(False)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -207,8 +207,6 @@ class TDTestCase:
    def run(self):
        self.prepareTestEnv()
        self.tmqCase1(True)
        self.prepareTestEnv()
        self.tmqCase1(False)

    def stop(self):
        tdSql.close()
@@ -0,0 +1,196 @@

import taos
import sys
import time
import socket
import os
import threading
import math

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
from util.cluster import *
sys.path.append("./7-tmq")
from tmqCommon import *

from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck

class TDTestCase:
    def __init__(self):
        self.vgroups = 1
        self.ctbNum = 10000
        self.rowsPerTbl = 10000

    def init(self, conn, logSql, replicaVar=1):
        self.replicaVar = int(replicaVar)
        tdLog.debug(f"start to excute {__file__}")
        tdSql.init(conn.cursor(), False)

    def getDataPath(self):
        selfPath = tdCom.getBuildPath()

        return selfPath + '/../sim/dnode%d/data/vnode/vnode%d/wal/*';

    def prepareTestEnv(self):
        tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 1,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb',
                    'ctbStartIdx': 0,
                    'ctbNum': 10,
                    'rowsPerTbl': 10000,
                    'batchNum': 10,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 60,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        tdCom.drop_all_db()
        tmqCom.initConsumerTable()
        tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], wal_retention_period=36000,vgroups=paraDict["vgroups"],replica=self.replicaVar)
        tdLog.info("create stb")
        tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
        return

    def restartAndRemoveWal(self, deleteWal):
        tdDnodes = cluster.dnodes
        tdSql.query("select * from information_schema.ins_vnodes")
        for result in tdSql.queryResult:
            if result[2] == 'dbt':
                tdLog.debug("dnode is %d"%(result[0]))
                dnodeId = result[0]
                vnodeId = result[1]

                tdDnodes[dnodeId - 1].stoptaosd()
                time.sleep(1)
                dataPath = self.getDataPath()
                dataPath = dataPath%(dnodeId,vnodeId)
                tdLog.debug("dataPath:%s"%dataPath)
                if deleteWal:
                    if os.system('rm -rf ' + dataPath) != 0:
                        tdLog.exit("rm error")

                tdDnodes[dnodeId - 1].starttaosd()
                time.sleep(1)
                break
        tdLog.debug("restart dnode ok")

    def splitVgroups(self):
        tdSql.query("select * from information_schema.ins_vnodes")
        vnodeId = 0
        for result in tdSql.queryResult:
            if result[2] == 'dbt':
                vnodeId = result[1]
                tdLog.debug("vnode is %d"%(vnodeId))
                break
        splitSql = "split vgroup %d" %(vnodeId)
        tdLog.debug("splitSql:%s"%(splitSql))
        tdSql.query(splitSql)
        tdLog.debug("splitSql ok")

    def tmqCase1(self, deleteWal=False):
        tdLog.printNoPrefix("======== test case 1: ")
        paraDict = {'dbName': 'dbt',
                    'dropFlag': 1,
                    'event': '',
                    'vgroups': 1,
                    'stbName': 'stb',
                    'colPrefix': 'c',
                    'tagPrefix': 't',
                    'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
                    'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
                    'ctbPrefix': 'ctb1',
                    'ctbStartIdx': 0,
                    'ctbNum': 10000,
                    'rowsPerTbl': 10000,
                    'batchNum': 10000,
                    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
                    'pollDelay': 5,
                    'showMsg': 1,
                    'showRow': 1,
                    'snapshot': 0}

        paraDict['vgroups'] = self.vgroups
        paraDict['ctbNum'] = self.ctbNum
        print
        paraDict['rowsPerTbl'] = self.rowsPerTbl

        topicNameList = ['topic1']
        # expectRowsList = []
        tmqCom.initConsumerTable()

        tdLog.info("create topics from stb ")
        queryString = "stable %s.%s"%(paraDict['dbName'], paraDict['stbName'])
        # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
        sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
        tdLog.info("create topic sql: %s"%sqlString)
        tdSql.execute(sqlString)
        # tdSql.query(queryString)
        # expectRowsList.append(tdSql.getRows())

        # init consume info, and start tmq_sim, then check consume result
        tdLog.info("insert consume info to consume processor")
        consumerId = 0
        expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
        topicList = topicNameList[0]
        ifcheckdata = 1
        ifManualCommit = 1
        keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest'
        tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        tdLog.info("create ctb1")
        tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
                             ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])

        tdLog.info("insert ctb1 data")
        pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
        pInsertThread.join()


        tdLog.info("start consume processor")
        tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
        tdLog.info("wait the consume result")

        tmqCom.getStartConsumeNotifyFromTmqsim()
        tmqCom.getStartCommitNotifyFromTmqsim()

        expectRows = 1
        tdLog.info("expectRows:%d"%expectRows)
        resultList = tmqCom.selectConsumeResult(expectRows)
        # for i in range(len(topicNameList)):
        #     tdSql.query("drop topic %s"%topicNameList[i])

        if deleteWal == True:
            clusterComCheck.check_vgroups_status(vgroup_numbers=1,db_replica=self.replicaVar,db_name="dbt",count_number=240)
        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def run(self):
        self.prepareTestEnv()
        self.tmqCase1(True)

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

event = threading.Event()

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
@@ -1,3 +1,12 @@
python3 ./test.py -f 2-query/tbname_vgroup.py
python3 ./test.py -f 2-query/stbJoin.py
python3 ./test.py -f 2-query/stbJoin.py -Q 2
python3 ./test.py -f 2-query/stbJoin.py -Q 3
python3 ./test.py -f 2-query/stbJoin.py -Q 4
python3 ./test.py -f 2-query/hint.py
python3 ./test.py -f 2-query/hint.py -Q 2
python3 ./test.py -f 2-query/hint.py -Q 3
python3 ./test.py -f 2-query/hint.py -Q 4
python3 ./test.py -f 2-query/nestedQuery.py
python3 ./test.py -f 2-query/nestedQuery_str.py
python3 ./test.py -f 2-query/nestedQuery_math.py

@@ -18,7 +27,30 @@ python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4
python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4
python3 ./test.py -f 2-query/nestedQuery_26.py -Q 4
python3 ./test.py -f 2-query/interval_limit_opt.py -Q 4
python3 ./test.py -f 2-query/interval_unit.py
python3 ./test.py -f 2-query/interval_unit.py -Q 2
python3 ./test.py -f 2-query/interval_unit.py -Q 3
python3 ./test.py -f 2-query/interval_unit.py -Q 4
python3 ./test.py -f 2-query/partition_by_col.py -Q 4
python3 ./test.py -f 2-query/partition_by_col.py -Q 3
python3 ./test.py -f 2-query/partition_by_col.py -Q 2
python3 ./test.py -f 2-query/partition_by_col.py
python3 ./test.py -f 2-query/partition_by_col_agg.py
python3 ./test.py -f 2-query/partition_by_col_agg.py -Q 2
python3 ./test.py -f 2-query/partition_by_col_agg.py -Q 3
python3 ./test.py -f 2-query/partition_by_col_agg.py -Q 4
python3 ./test.py -f 2-query/interval_limit_opt_2.py -Q 4
python3 ./test.py -f 2-query/interval_limit_opt_2.py -Q 3
python3 ./test.py -f 2-query/interval_limit_opt_2.py -Q 2
python3 ./test.py -f 2-query/interval_limit_opt_2.py
python3 ./test.py -f 2-query/func_to_char_timestamp.py
python3 ./test.py -f 2-query/func_to_char_timestamp.py -Q 2
python3 ./test.py -f 2-query/func_to_char_timestamp.py -Q 3
python3 ./test.py -f 2-query/func_to_char_timestamp.py -Q 4
python3 ./test.py -f 2-query/last_cache_scan.py
python3 ./test.py -f 2-query/last_cache_scan.py -Q 2
python3 ./test.py -f 2-query/last_cache_scan.py -Q 3
python3 ./test.py -f 2-query/last_cache_scan.py -Q 4
python3 ./test.py -f 7-tmq/tmqShow.py
python3 ./test.py -f 7-tmq/tmqDropStb.py
python3 ./test.py -f 7-tmq/subscribeStb0.py

@@ -29,11 +61,13 @@ python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3
python3 ./test.py -f 7-tmq/ins_topics_test.py
python3 ./test.py -f 7-tmq/tmqMaxTopic.py
python3 ./test.py -f 7-tmq/tmqParamsTest.py
python3 ./test.py -f 7-tmq/tmqParamsTest.py -R
python3 ./test.py -f 7-tmq/tmqClientConsLog.py
python3 ./test.py -f 7-tmq/tmqMaxGroupIds.py
python3 ./test.py -f 7-tmq/tmqConsumeDiscontinuousData.py
python3 ./test.py -f 7-tmq/tmqOffset.py
python3 ./test.py -f 7-tmq/tmqDropConsumer.py
python3 ./test.py -f 1-insert/insert_stb.py
python3 ./test.py -f 1-insert/delete_stable.py
python3 ./test.py -f 2-query/out_of_order.py -Q 3
python3 ./test.py -f 2-query/out_of_order.py

@@ -61,10 +95,14 @@ python3 ./test.py -f 2-query/slimit.py -R
python3 ./test.py -f 2-query/slimit.py -Q 2
python3 ./test.py -f 2-query/slimit.py -Q 3
python3 ./test.py -f 2-query/slimit.py -Q 4
python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3
python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3
python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3
python3 ./test.py -f 3-enterprise/restore/restoreQnode.py -N 5 -M 3
python3 ./test.py -f 2-query/ts-4233.py
python3 ./test.py -f 2-query/ts-4233.py -Q 2
python3 ./test.py -f 2-query/ts-4233.py -Q 3
python3 ./test.py -f 2-query/ts-4233.py -Q 4
python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False
python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False
python3 ./test.py -f 3-enterprise/restore/restoreMnode.py -N 5 -M 3 -i False
python3 ./test.py -f 3-enterprise/restore/restoreQnode.py -N 5 -M 3 -i False
python3 ./test.py -f 7-tmq/create_wrong_topic.py
python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3
python3 ./test.py -f 7-tmq/basic5.py

@@ -112,16 +150,30 @@ python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py
python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py
python3 ./test.py -f 7-tmq/tmq_taosx.py
python3 ./test.py -f 7-tmq/tmq_replay.py
python3 ./test.py -f 7-tmq/tmqSeekAndCommit.py
python3 ./test.py -f 7-tmq/tmq_offset.py
python3 ./test.py -f 7-tmq/tmqDataPrecisionUnit.py
python3 ./test.py -f 7-tmq/raw_block_interface_test.py
python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5
python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3
python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3
python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -i True
python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3 -i True
python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 2 -n 1
python3 test.py -f 7-tmq/tmqVnodeTransform-stb.py -N 6 -n 3
python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 2 -n 1
python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select-duplicatedata.py -N 3 -n 3
python3 test.py -f 7-tmq/tmqVnodeSplit-stb-select.py -N 3 -n 3
python3 test.py -f 7-tmq/tmqVnodeSplit-stb.py -N 3 -n 3
python3 test.py -f 7-tmq/tmqVnodeSplit-column.py -N 3 -n 3
python3 test.py -f 7-tmq/tmqVnodeSplit-db.py -N 3 -n 3
python3 test.py -f 7-tmq/tmqVnodeReplicate.py -M 3 -N 3 -n 3
python3 ./test.py -f 99-TDcase/TD-19201.py
python3 ./test.py -f 99-TDcase/TD-21561.py
python3 ./test.py -f 99-TDcase/TS-3404.py
python3 ./test.py -f 99-TDcase/TS-3581.py
python3 ./test.py -f 99-TDcase/TS-3311.py
python3 ./test.py -f 99-TDcase/TS-3821.py
python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6
python3 ./test.py -f 0-others/taosShell.py
python3 ./test.py -f 0-others/taosShellError.py

@@ -139,10 +191,19 @@ python3 ./test.py -f 0-others/user_privilege_show.py
python3 ./test.py -f 0-others/user_privilege_all.py
python3 ./test.py -f 0-others/fsync.py
python3 ./test.py -f 0-others/multilevel.py
python3 ./test.py -f 0-others/ttl.py
python3 ./test.py -f 0-others/ttlChangeOnWrite.py
python3 ./test.py -f 0-others/compress_tsz1.py
python3 ./test.py -f 0-others/compress_tsz2.py
python3 ./test.py -f 0-others/view/non_marterial_view/test_view.py
python3 ./test.py -f 0-others/compatibility.py
python3 ./test.py -f 0-others/tag_index_basic.py
python3 ./test.py -N 3 -f 0-others/walRetention.py
python3 ./test.py -f 0-others/splitVGroupRep1.py -N 3
python3 ./test.py -f 0-others/splitVGroupRep3.py -N 3
python3 ./test.py -f 0-others/timeRangeWise.py -N 3
python3 ./test.py -f 0-others/delete_check.py
python3 ./test.py -f 0-others/test_hot_refresh_configurations.py
python3 ./test.py -f 1-insert/alter_database.py
python3 ./test.py -f 1-insert/alter_replica.py -N 3
python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py

@@ -190,6 +251,7 @@ python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 3
python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4
python3 ./test.py -f 1-insert/precisionUS.py
python3 ./test.py -f 1-insert/precisionNS.py
python3 ./test.py -f 1-insert/test_ts4219.py
python3 ./test.py -f 0-others/show.py
python3 ./test.py -f 0-others/show_tag_index.py
python3 ./test.py -f 0-others/information_schema.py

@@ -325,6 +387,8 @@ python3 ./test.py -f 2-query/smaTest.py
python3 ./test.py -f 2-query/smaTest.py -R
python3 ./test.py -f 0-others/sma_index.py
python3 ./test.py -f 2-query/sml_TS-3724.py
python3 ./test.py -f 2-query/sml-TD19291.py
python3 ./test.py -f 2-query/varbinary.py
python3 ./test.py -f 2-query/sml.py
python3 ./test.py -f 2-query/sml.py -R
python3 ./test.py -f 2-query/spread.py

@@ -426,7 +490,6 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3 -n 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3
python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1
python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6
python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 6 -M 3

@@ -731,7 +794,9 @@ python3 ./test.py -f 2-query/out_of_order.py -R
python3 ./test.py -f 2-query/blockSMA.py -Q 4
python3 ./test.py -f 2-query/projectionDesc.py -Q 4
python3 ./test.py -f 2-query/odbc.py
python3 ./test.py -f 2-query/fill_with_group.py
python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4
python3 ./test.py -f 99-TDcase/TD-20582.py
python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3
python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3
python3 ./test.py -f eco-system/meta/database/keep_time_offset.py