Merge branch '3.0' into case/TD_28027_3.0_New

menshibin 2024-01-20 16:28:44 +08:00 committed by GitHub
commit 6d4d88c381
98 changed files with 1704 additions and 764 deletions

View File

@@ -226,9 +226,10 @@ Please note the `taoskeeper` needs to be installed and running to create the `lo
 | Attribute     | Description |
 | ------------- | --------------------------------------------------------------------------------------------------------------- |
 | Applicable    | Client only |
-| Meaning       | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
-| Value Range   | 0 means including the function name, 1 means not including the function name. |
+| Meaning       | When the Last, First, and LastRow functions are queried and no alias is specified, the alias is automatically set to the column name (excluding the function name). Therefore, if the order by clause refers to the column name, it automatically refers to the function applied to that column. |
+| Value Range   | 1 means automatically setting the alias to the column name (excluding the function name), 0 means not automatically setting the alias. |
 | Default Value | 0 |
+| Notes         | When multiple of the above functions act on the same column at the same time and no alias is specified, an order by clause that refers to that column name causes a column-selection ambiguity, because the aliases of multiple columns are the same. |
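The ambiguity described in the Notes row above is easy to reproduce from the C client. The following is a minimal, illustrative sketch that is not part of this commit: the connection handle, database, table `d0`, and column `val` are made-up names, and only standard TDengine client calls (`taos_query`, `taos_errno`, `taos_errstr`, `taos_free_result`) are used. With this parameter set to 1 and no aliases given, `last(val)` and `first(val)` both receive the alias `val`, so the `ORDER BY val` below cannot pick a column.

```c
#include <stdio.h>
#include <taos.h>

/* Assumes `conn` is already connected and the current database contains a
 * table d0(ts TIMESTAMP, val INT). Purely illustrative; names are made up. */
static void order_by_ambiguity_demo(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "SELECT last(val), first(val) FROM d0 ORDER BY val");
  if (taos_errno(res) != 0) {
    /* Both result columns carry the alias "val", so the ORDER BY is ambiguous. */
    printf("query rejected: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```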
 ## Locale Parameters

View File

@@ -215,9 +215,10 @@ taos -C
 | Attribute     | Description |
 | ------------- | ----------------------------------------------------------- |
 | Applicable    | Client only |
-| Meaning       | When the Last, First, or LastRow function is queried, whether the returned column name contains the function name. |
-| Value Range   | 0 means the function name is included, 1 means it is not included. |
+| Meaning       | When the Last, First, or LastRow function is queried and no alias is specified, the alias is automatically set to the column name (excluding the function name), so an order by clause that refers to the column name automatically refers to the function applied to that column. |
+| Value Range   | 1 means automatically setting the alias to the column name (excluding the function name), 0 means not automatically setting the alias. |
 | Default Value | 0 |
+| Notes         | When multiple of the above functions act on the same column at the same time and no alias is specified, an order by clause that refers to that column name causes a column-selection conflict, because the aliases of multiple columns are the same. |
 ### countAlwaysReturnValue

View File

@@ -262,6 +262,9 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf, c
 int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
                                     tb_uid_t suid);
+bool    alreadyAddGroupId(char* ctbName);
+bool    isAutoTableName(char* ctbName);
+void    buildCtbNameAddGruopId(char* ctbName, uint64_t groupId);
 char*   buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
 int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);

View File

@@ -32,8 +32,9 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve
                                      bool isLeader, bool restored);
 int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen);
 int32_t tqStreamTaskProcessRunReq(SStreamMeta* pMeta, SRpcMsg* pMsg, bool isLeader);
-int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta, int32_t* numOfTasks);
+int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta);
 int32_t tqStartTaskCompleteCallback(SStreamMeta* pMeta);
+int32_t tqStreamTasksGetTotalNum(SStreamMeta* pMeta);
 int32_t tqStreamTaskProcessTaskResetReq(SStreamMeta* pMeta, SRpcMsg* pMsg);
 int32_t tqStreamTaskProcessTaskPauseReq(SStreamMeta* pMeta, char* pMsg);
 int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* pMsg, bool fromVnode);

View File

@@ -249,6 +249,10 @@ typedef struct SPoint {
 int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2,
                                       int32_t inputType);
+#define LEASTSQUARES_DOUBLE_ITEM_LENGTH 25
+#define LEASTSQUARES_BUFF_LENGTH        128
+#define DOUBLE_PRECISION_DIGITS         "16e"
 #ifdef __cplusplus
 }
 #endif
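A note on the `DOUBLE_PRECISION_DIGITS` macro added above: it is a string, not a number, which only makes sense once it is pasted into a printf-style format through adjacent string-literal concatenation. The sketch below shows that idiom; the surrounding format string, buffer, and usage are illustrative assumptions, not code taken from this commit.

```c
#include <stdio.h>

#define LEASTSQUARES_DOUBLE_ITEM_LENGTH 25
#define LEASTSQUARES_BUFF_LENGTH        128
#define DOUBLE_PRECISION_DIGITS         "16e"

int main(void) {
  double slope = 1.5, intercept = -0.25;
  char   buf[LEASTSQUARES_BUFF_LENGTH] = {0};

  /* "%." DOUBLE_PRECISION_DIGITS is concatenated by the compiler into "%.16e",
   * i.e. scientific notation with 16 digits after the decimal point, so each
   * formatted value stays within LEASTSQUARES_DOUBLE_ITEM_LENGTH characters. */
  snprintf(buf, sizeof(buf),
           "{slop:%." DOUBLE_PRECISION_DIGITS ", intercept:%." DOUBLE_PRECISION_DIGITS "}",
           slope, intercept);
  printf("%s\n", buf);
  return 0;
}
```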

View File

@@ -156,6 +156,7 @@ typedef struct SProjectLogicNode {
   SNodeList* pProjections;
   char       stmtName[TSDB_TABLE_NAME_LEN];
   bool       ignoreGroupId;
+  bool       inputIgnoreGroup;
 } SProjectLogicNode;
 typedef struct SIndefRowsFuncLogicNode {
@@ -449,6 +450,7 @@ typedef struct SProjectPhysiNode {
   SNodeList* pProjections;
   bool       mergeDataBlock;
   bool       ignoreGroupId;
+  bool       inputIgnoreGroup;
 } SProjectPhysiNode;
 typedef struct SIndefRowsFuncPhysiNode {

View File

@@ -21,12 +21,12 @@
 extern "C" {
 #endif
+#include "systable.h"
 #include "tarray.h"
 #include "thash.h"
 #include "tlog.h"
 #include "tmsg.h"
 #include "tmsgcb.h"
-#include "systable.h"
 typedef enum {
   JOB_TASK_STATUS_NULL = 0,
@@ -90,7 +90,6 @@ typedef struct SExecResult {
   void* res;
 } SExecResult;
 #pragma pack(push, 1)
 typedef struct SCTableMeta {
   uint64_t uid;
@@ -100,7 +99,6 @@ typedef struct SCTableMeta {
 } SCTableMeta;
 #pragma pack(pop)
 #pragma pack(push, 1)
 typedef struct STableMeta {
   // BEGIN: KEEP THIS PART SAME WITH SCTableMeta
@@ -137,8 +135,8 @@ typedef struct SDBVgInfo {
   int8_t    hashMethod;
   int32_t   numOfTable;  // DB's table num, unit is TSDB_TABLE_NUM_UNIT
   int64_t   stateTs;
   SHashObj* vgHash;      // key:vgId, value:SVgroupInfo
   SArray*   vgArray;     // SVgroupInfo
 } SDBVgInfo;
 typedef struct SUseDbOutput {
@@ -173,6 +171,7 @@ typedef struct SDataBuf {
   void*    pData;
   uint32_t len;
   void*    handle;
+  int64_t  handleRefId;
   SEpSet*  pEpSet;
 } SDataBuf;
@@ -283,7 +282,7 @@ void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType*
 int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
 int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst);
 void    freeVgInfo(SDBVgInfo* vgInfo);
-void    freeDbCfgInfo(SDbCfgInfo *pInfo);
+void    freeDbCfgInfo(SDbCfgInfo* pInfo);
 extern int32_t (*queryBuildMsg[TDMT_MAX])(void* input, char** msg, int32_t msgSize, int32_t* msgLen,
                                           void* (*mallocFp)(int64_t));
@@ -314,7 +313,9 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
   ((_code) == TSDB_CODE_SYN_NOT_LEADER || (_code) == TSDB_CODE_SYN_RESTORING || (_code) == TSDB_CODE_SYN_INTERNAL_ERROR)
 #define SYNC_OTHER_LEADER_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_MNODE_NOT_FOUND)
-#define NO_RET_REDIRECT_ERROR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED)
+#define NO_RET_REDIRECT_ERROR(_code) \
+  ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || \
+   (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED)
 #define NEED_REDIRECT_ERROR(_code) \
   (NO_RET_REDIRECT_ERROR(_code) || SYNC_UNKNOWN_LEADER_REDIRECT_ERROR(_code) || \

View File

@@ -62,9 +62,10 @@ typedef struct SStreamTask SStreamTask;
 typedef struct SStreamQueue SStreamQueue;
 typedef struct SStreamTaskSM SStreamTaskSM;
-#define SSTREAM_TASK_VER 2
+#define SSTREAM_TASK_VER 3
 #define SSTREAM_TASK_INCOMPATIBLE_VER 1
 #define SSTREAM_TASK_NEED_CONVERT_VER 2
+#define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3
 enum {
   STREAM_STATUS__NORMAL = 0,
@@ -689,9 +690,9 @@ typedef struct STaskStatusEntry {
   int64_t verStart;            // start version in WAL, only valid for source task
   int64_t verEnd;              // end version in WAL, only valid for source task
   int64_t processedVer;        // only valid for source task
-  int64_t activeCheckpointId;  // current active checkpoint id
+  int64_t checkpointId;        // current active checkpoint id
   int32_t chkpointTransId;     // checkpoint trans id
-  bool    checkpointFailed;    // denote if the checkpoint is failed or not
+  int8_t  checkpointFailed;    // denote if the checkpoint is failed or not
   bool    inputQChanging;      // inputQ is changing or not
   int64_t inputQUnchangeCounter;
   double  inputQUsed;          // in MiB
@@ -874,6 +875,8 @@ void streamMetaStartHb(SStreamMeta* pMeta);
 bool    streamMetaTaskInTimer(SStreamMeta* pMeta);
 int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
                                       int64_t endTs, bool ready);
+int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta);
 void    streamMetaRLock(SStreamMeta* pMeta);
 void    streamMetaRUnLock(SStreamMeta* pMeta);
 void    streamMetaWLock(SStreamMeta* pMeta);
@@ -886,6 +889,7 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta);
 int32_t streamMetaStopAllTasks(SStreamMeta* pMeta);
 int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
 bool    streamMetaAllTasksReady(const SStreamMeta* pMeta);
+tmr_h   streamTimerGetInstance();
 // checkpoint
 int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);

View File

@@ -744,6 +744,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_PAR_INVALID_VIEW_QUERY        TAOS_DEF_ERROR_CODE(0, 0x266C)
 #define TSDB_CODE_PAR_COL_QUERY_MISMATCH        TAOS_DEF_ERROR_CODE(0, 0x266D)
 #define TSDB_CODE_PAR_VIEW_CONFLICT_WITH_TABLE  TAOS_DEF_ERROR_CODE(0, 0x266E)
+#define TSDB_CODE_PAR_ORDERBY_AMBIGUOUS         TAOS_DEF_ERROR_CODE(0, 0x266F)
 #define TSDB_CODE_PAR_INTERNAL_ERROR            TAOS_DEF_ERROR_CODE(0, 0x26FF)
 //planner

View File

@@ -155,6 +155,7 @@ typedef struct STscObj {
   int8_t   biMode;
   int32_t  acctId;
   uint32_t connId;
+  int32_t  appHbMgrIdx;
   int64_t  id;         // ref ID returned by taosAddRef
   TdThreadMutex mutex; // used to protect the operation on db
   int32_t  numOfReqs;  // number of sqlObj bound to this connection

View File

@@ -283,6 +283,7 @@ void *createTscObj(const char *user, const char *auth, const char *db, int32_t c
   pObj->connType = connType;
   pObj->pAppInfo = pAppInfo;
+  pObj->appHbMgrIdx = pAppInfo->pAppHbMgr->idx;
   tstrncpy(pObj->user, user, sizeof(pObj->user));
   memcpy(pObj->pass, auth, TSDB_PASSWORD_LEN);

View File

@@ -30,7 +30,7 @@ typedef struct {
   };
 } SHbParam;
-static SClientHbMgr clientHbMgr = {0};
+SClientHbMgr clientHbMgr = {0};
 static int32_t hbCreateThread();
 static void    hbStopThread();
@@ -1294,9 +1294,8 @@ void hbMgrCleanUp() {
   taosThreadMutexLock(&clientHbMgr.lock);
   appHbMgrCleanup();
-  taosArrayDestroy(clientHbMgr.appHbMgrs);
+  clientHbMgr.appHbMgrs = taosArrayDestroy(clientHbMgr.appHbMgrs);
   taosThreadMutexUnlock(&clientHbMgr.lock);
-  clientHbMgr.appHbMgrs = NULL;
 }
 int hbRegisterConnImpl(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, int64_t clusterId) {
@@ -1335,19 +1334,18 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
 }
 void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) {
-  SAppHbMgr *pAppHbMgr = pTscObj->pAppInfo->pAppHbMgr;
-  SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
-  if (pReq) {
-    tFreeClientHbReq(pReq);
-    taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
-    taosHashRelease(pAppHbMgr->activeInfo, pReq);
-  }
-  if (NULL == pReq) {
-    return;
-  }
-  atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
+  taosThreadMutexLock(&clientHbMgr.lock);
+  SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, pTscObj->appHbMgrIdx);
+  if (pAppHbMgr) {
+    SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+    if (pReq) {
+      tFreeClientHbReq(pReq);
+      taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+      taosHashRelease(pAppHbMgr->activeInfo, pReq);
+      atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
+    }
+  }
+  taosThreadMutexUnlock(&clientHbMgr.lock);
 }
 // set heart beat thread quit mode , if quicByKill 1 then kill thread else quit from inner

View File

@@ -285,8 +285,8 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
 int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
   SRetrieveTableRsp* pRsp = NULL;
   int8_t  biMode = atomic_load_8(&pRequest->pTscObj->biMode);
   int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, biMode);
   if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
     code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
   }
@@ -324,7 +324,8 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
     return;
   }
-  int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp, atomic_load_8(&pRequest->pTscObj->biMode));
+  int32_t code = qExecCommand(&pRequest->pTscObj->id, pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp,
+                              atomic_load_8(&pRequest->pTscObj->biMode));
   if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
     code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
   }
@@ -492,7 +493,8 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
     pResInfo->userFields[i].bytes = pSchema[i].bytes;
     pResInfo->userFields[i].type = pSchema[i].type;
-    if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR || pSchema[i].type == TSDB_DATA_TYPE_VARBINARY || pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) {
+    if (pSchema[i].type == TSDB_DATA_TYPE_VARCHAR || pSchema[i].type == TSDB_DATA_TYPE_VARBINARY ||
+        pSchema[i].type == TSDB_DATA_TYPE_GEOMETRY) {
       pResInfo->userFields[i].bytes -= VARSTR_HEADER_SIZE;
     } else if (pSchema[i].type == TSDB_DATA_TYPE_NCHAR || pSchema[i].type == TSDB_DATA_TYPE_JSON) {
       pResInfo->userFields[i].bytes = (pResInfo->userFields[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
@@ -1103,9 +1105,9 @@ static int32_t asyncExecSchQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaDat
                                  SSqlCallbackWrapper* pWrapper) {
   int32_t code = TSDB_CODE_SUCCESS;
   pRequest->type = pQuery->msgType;
   SArray*     pMnodeList = NULL;
   SQueryPlan* pDag = NULL;
   int64_t     st = taosGetTimestampUs();
   if (!pRequest->parseOnly) {
     pMnodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
@@ -1278,7 +1280,7 @@ int32_t removeMeta(STscObj* pTscObj, SArray* tbList, bool isView) {
   if (isView) {
     for (int32_t i = 0; i < tbNum; ++i) {
       SName* pViewName = taosArrayGet(tbList, i);
       char   dbFName[TSDB_DB_FNAME_LEN];
       tNameGetFullDbName(pViewName, dbFName);
       catalogRemoveViewMeta(pCatalog, dbFName, 0, pViewName->tname, 0);
     }
@@ -1510,8 +1512,12 @@ int32_t doProcessMsgFromServer(void* param) {
   updateTargetEpSet(pSendInfo, pTscObj, pMsg, pEpSet);
-  SDataBuf buf = {
-      .msgType = pMsg->msgType, .len = pMsg->contLen, .pData = NULL, .handle = pMsg->info.handle, .pEpSet = pEpSet};
+  SDataBuf buf = {.msgType = pMsg->msgType,
+                  .len = pMsg->contLen,
+                  .pData = NULL,
+                  .handle = pMsg->info.handle,
+                  .handleRefId = pMsg->info.refId,
+                  .pEpSet = pEpSet};
   if (pMsg->contLen > 0) {
     buf.pData = taosMemoryCalloc(1, pMsg->contLen);
@@ -2538,13 +2544,13 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
   if (code != TSDB_CODE_SUCCESS) {
     pRequest->code = code;
     taosMemoryFreeClear(pResultInfo->pData);
-    pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0);
+    pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0);
     return;
   }
   if (pRequest->code != TSDB_CODE_SUCCESS) {
     taosMemoryFreeClear(pResultInfo->pData);
-    pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, 0);
+    pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, 0);
     return;
   }
@@ -2565,12 +2571,12 @@ static void fetchCallback(void* pResult, void* param, int32_t code) {
     atomic_add_fetch_64((int64_t*)&pActivity->fetchBytes, pRequest->body.resInfo.payloadLen);
   }
-  pRequest->body.fetchFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, pResultInfo->numOfRows);
+  pRequest->body.fetchFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, pResultInfo->numOfRows);
 }
 void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param) {
   pRequest->body.fetchFp = fp;
-  ((SSyncQueryParam *)pRequest->body.interParam)->userParam = param;
+  ((SSyncQueryParam*)pRequest->body.interParam)->userParam = param;
   SReqResultInfo* pResultInfo = &pRequest->body.resInfo;
@@ -2611,7 +2617,7 @@ void taosAsyncFetchImpl(SRequestObj* pRequest, __taos_async_fn_t fp, void* param
 void doRequestCallback(SRequestObj* pRequest, int32_t code) {
   pRequest->inCallback = true;
   int64_t this = pRequest->self;
-  pRequest->body.queryFp(((SSyncQueryParam *)pRequest->body.interParam)->userParam, pRequest, code);
+  pRequest->body.queryFp(((SSyncQueryParam*)pRequest->body.interParam)->userParam, pRequest, code);
   SRequestObj* pReq = acquireRequest(this);
   if (pReq != NULL) {
     pReq->inCallback = false;
@@ -2619,11 +2625,11 @@ void doRequestCallback(SRequestObj* pRequest, int32_t code) {
   }
 }
-int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser, SParseSqlRes* pRes) {
+int32_t clientParseSql(void* param, const char* dbName, const char* sql, bool parseOnly, const char* effectiveUser,
+                       SParseSqlRes* pRes) {
 #ifndef TD_ENTERPRISE
   return TSDB_CODE_SUCCESS;
 #else
   return clientParseSqlImpl(param, dbName, sql, parseOnly, effectiveUser, pRes);
 #endif
 }

View File

@@ -26,6 +26,8 @@
 #include "tname.h"
 #include "tversion.h"
+extern SClientHbMgr clientHbMgr;
 static void setErrno(SRequestObj* pRequest, int32_t code) {
   pRequest->code = code;
   terrno = code;
@@ -63,8 +65,9 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
   STscObj* pTscObj = pRequest->pTscObj;
-  if (NULL == pTscObj->pAppInfo || NULL == pTscObj->pAppInfo->pAppHbMgr) {
-    setErrno(pRequest, TSDB_CODE_TSC_DISCONNECTED);
+  if (NULL == pTscObj->pAppInfo) {
+    code = TSDB_CODE_TSC_DISCONNECTED;
+    setErrno(pRequest, code);
     tsem_post(&pRequest->body.rspSem);
     goto End;
   }
@@ -95,7 +98,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
   }
   if (connectRsp.epSet.numOfEps == 0) {
-    setErrno(pRequest, TSDB_CODE_APP_ERROR);
+    code = TSDB_CODE_APP_ERROR;
+    setErrno(pRequest, code);
     tsem_post(&pRequest->body.rspSem);
     goto End;
   }
@@ -142,7 +146,18 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
   pTscObj->authVer = connectRsp.authVer;
   pTscObj->whiteListInfo.ver = connectRsp.whiteListVer;
-  hbRegisterConn(pTscObj->pAppInfo->pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);
+  taosThreadMutexLock(&clientHbMgr.lock);
+  SAppHbMgr* pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, pTscObj->appHbMgrIdx);
+  if (pAppHbMgr) {
+    hbRegisterConn(pAppHbMgr, pTscObj->id, connectRsp.clusterId, connectRsp.connType);
+  } else {
+    taosThreadMutexUnlock(&clientHbMgr.lock);
+    code = TSDB_CODE_TSC_DISCONNECTED;
+    setErrno(pRequest, code);
+    tsem_post(&pRequest->body.rspSem);
+    goto End;
+  }
+  taosThreadMutexUnlock(&clientHbMgr.lock);
   tscDebug("0x%" PRIx64 " clusterId:%" PRId64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
            pTscObj->pAppInfo->numOfConns);

View File

@@ -2118,6 +2118,30 @@ _end:
   return TSDB_CODE_SUCCESS;
 }
+void buildCtbNameAddGruopId(char* ctbName, uint64_t groupId){
+  char tmp[TSDB_TABLE_NAME_LEN] = {0};
+  snprintf(tmp, TSDB_TABLE_NAME_LEN, "_%"PRIu64, groupId);
+  ctbName[TSDB_TABLE_NAME_LEN - strlen(tmp) - 1] = 0;  // put groupId to the end
+  strcat(ctbName, tmp);
+}
+
+bool isAutoTableName(char* ctbName){
+  return (strlen(ctbName) == 34 && ctbName[0] == 't' && ctbName[1] == '_');
+}
+
+bool alreadyAddGroupId(char* ctbName){
+  size_t len = strlen(ctbName);
+  size_t _location = len - 1;
+  while(_location > 0){
+    if(ctbName[_location] < '0' || ctbName[_location] > '9'){
+      break;
+    }
+    _location--;
+  }
+  return ctbName[_location] == '_' && len - 1 - _location > 15;  // 15 means the min length of groupid
+}
+
 char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
   char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
   if (!pBuf) {
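The three helpers added above implement the new sub-table naming rule used by the stream sink code later in this commit: a destination child-table name gets "_<groupId>" appended, while names that are already auto-generated ("t_" prefix, 34 characters) or that already end in "_" plus a long run of digits are left untouched. The standalone sketch below only illustrates the resulting name shape; the table name, group id, and `TSDB_TABLE_NAME_LEN` value are assumptions, and the helper is a local copy of the logic rather than the TDengine function itself.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TSDB_TABLE_NAME_LEN 193 /* assumed value, taken from TDengine headers */

/* Local copy of the buildCtbNameAddGruopId() logic shown above. */
static void append_group_id(char *ctbName, uint64_t groupId) {
  char tmp[TSDB_TABLE_NAME_LEN] = {0};
  snprintf(tmp, sizeof(tmp), "_%" PRIu64, groupId);
  ctbName[TSDB_TABLE_NAME_LEN - strlen(tmp) - 1] = 0; /* keep room for the suffix */
  strcat(ctbName, tmp);
}

int main(void) {
  char name[TSDB_TABLE_NAME_LEN] = "device_sub_tb";
  append_group_id(name, 5854679137996424000ULL);
  /* Prints device_sub_tb_5854679137996424000; alreadyAddGroupId() later treats a
   * trailing "_" followed by more than 15 digits as "group id already added". */
  printf("%s\n", name);
  return 0;
}
```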

View File

@@ -2374,7 +2374,7 @@ static int32_t mndProcessVgroupChange(SMnode *pMnode, SVgroupChangeInfo *pChange
     sdbRelease(pSdb, pStream);
     if (conflict) {
-      mWarn("nodeUpdate trans in progress, current nodeUpdate ignored");
+      mError("nodeUpdate conflict with other trans, current nodeUpdate ignored");
       sdbCancelFetch(pSdb, pIter);
       return -1;
     }
@@ -2587,14 +2587,22 @@ int32_t removeExpirednodeEntryAndTask(SArray *pNodeSnapshot) {
 // kill all trans in the dst DB
 static void killAllCheckpointTrans(SMnode *pMnode, SVgroupChangeInfo *pChangeInfo) {
+  mDebug("start to clear checkpoints in all Dbs");
   void *pIter = NULL;
   while ((pIter = taosHashIterate(pChangeInfo->pDBMap, pIter)) != NULL) {
     char *pDb = (char *)pIter;
     size_t len = 0;
     void *pKey = taosHashGetKey(pDb, &len);
+    char *p = strndup(pKey, len);
+    mDebug("clear checkpoint trans in Db:%s", p);
     doKillCheckpointTrans(pMnode, pKey, len);
+    taosMemoryFree(p);
   }
+  mDebug("complete clear checkpoints in Dbs");
 }
 // this function runs by only one thread, so it is not multi-thread safe
@@ -2649,7 +2657,7 @@ static int32_t mndProcessNodeCheckReq(SRpcMsg *pMsg) {
       execInfo.pNodeList = pNodeSnapshot;
       execInfo.ts = ts;
     } else {
-      mDebug("unexpect code during create nodeUpdate trans, code:%s", tstrerror(code));
+      mError("unexpected code during create nodeUpdate trans, code:%s", tstrerror(code));
       taosArrayDestroy(pNodeSnapshot);
     }
   } else {
@@ -2844,6 +2852,8 @@ void killTransImpl(SMnode *pMnode, int32_t transId, const char *pDbName) {
     mInfo("kill active transId:%d in Db:%s", transId, pDbName);
     mndKillTrans(pMnode, pTrans);
     mndReleaseTrans(pMnode, pTrans);
+  } else {
+    mError("failed to acquire trans in Db:%s, transId:%d", pDbName, transId);
   }
 }
@@ -2869,15 +2879,7 @@ int32_t doKillCheckpointTrans(SMnode *pMnode, const char *pDBName, size_t len) {
 static int32_t mndResetStatusFromCheckpoint(SMnode *pMnode, int64_t streamId, int32_t transId) {
   int32_t code = TSDB_CODE_SUCCESS;
-  STrans *pTrans = mndAcquireTrans(pMnode, transId);
-  if (pTrans != NULL) {
-    mInfo("kill checkpoint transId:%d to reset task status", transId);
-    mndKillTrans(pMnode, pTrans);
-    mndReleaseTrans(pMnode, pTrans);
-  } else {
-    mError("failed to acquire checkpoint trans:%d", transId);
-  }
+  killTransImpl(pMnode, transId, "");
   SStreamObj *pStream = mndGetStreamObj(pMnode, streamId);
   if (pStream == NULL) {
@@ -3033,7 +3035,7 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
   SStreamHbMsg req = {0};
   bool    checkpointFailed = false;
-  int64_t activeCheckpointId = 0;
+  int64_t checkpointId = 0;
   int64_t streamId = 0;
   int32_t transId = 0;
@@ -3095,14 +3097,17 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
     }
     streamTaskStatusCopy(pTaskEntry, p);
-    if (p->activeCheckpointId != 0) {
-      if (activeCheckpointId != 0) {
-        ASSERT(activeCheckpointId == p->activeCheckpointId);
+    if (p->checkpointId != 0) {
+      if (checkpointId != 0) {
+        ASSERT(checkpointId == p->checkpointId);
       } else {
-        activeCheckpointId = p->activeCheckpointId;
+        checkpointId = p->checkpointId;
       }
       if (p->checkpointFailed) {
+        mError("stream task:0x%" PRIx64 " checkpointId:%" PRIx64 " transId:%d failed, kill it", p->id.taskId,
+               p->checkpointId, p->chkpointTransId);
         checkpointFailed = p->checkpointFailed;
         streamId = p->id.streamId;
         transId = p->chkpointTransId;
@@ -3124,17 +3129,17 @@ int32_t mndProcessStreamHb(SRpcMsg *pReq) {
   // current checkpoint is failed, rollback from the checkpoint trans
   // kill the checkpoint trans and then set all tasks status to be normal
-  if (checkpointFailed && activeCheckpointId != 0) {
+  if (checkpointFailed && checkpointId != 0) {
     bool allReady = true;
     SArray *p = mndTakeVgroupSnapshot(pMnode, &allReady);
     taosArrayDestroy(p);
     if (allReady || snodeChanged) {
       // if the execInfo.activeCheckpoint == 0, the checkpoint is restoring from wal
-      mInfo("checkpointId:%" PRId64 " failed, issue task-reset trans to reset all tasks status", activeCheckpointId);
+      mInfo("checkpointId:%" PRId64 " failed, issue task-reset trans to reset all tasks status", checkpointId);
       mndResetStatusFromCheckpoint(pMnode, streamId, transId);
     } else {
-      mInfo("not all vgroups are ready, wait for next HB from stream tasks");
+      mInfo("not all vgroups are ready, wait for next HB from stream tasks to reset the task status");
     }
   }
@@ -3148,6 +3153,7 @@ void freeCheckpointCandEntry(void *param) {
   SCheckpointCandEntry *pEntry = param;
   taosMemoryFreeClear(pEntry->pName);
 }
 SStreamObj *mndGetStreamObj(SMnode *pMnode, int64_t streamId) {
   void *pIter = NULL;
   SSdb *pSdb = pMnode->pSdb;

View File

@@ -89,7 +89,7 @@ bool mndStreamTransConflictCheck(SMnode* pMnode, int64_t streamUid, const char*
   }
   if (strcmp(tInfo.name, MND_STREAM_CHECKPOINT_NAME) == 0) {
-    if (strcmp(pTransName, MND_STREAM_DROP_NAME) != 0) {
+    if ((strcmp(pTransName, MND_STREAM_DROP_NAME) != 0) && (strcmp(pTransName, MND_STREAM_TASK_RESET_NAME) != 0)) {
       mWarn("conflict with other transId:%d streamUid:0x%" PRIx64 ", trans:%s", tInfo.transId, tInfo.streamUid,
             tInfo.name);
       terrno = TSDB_CODE_MND_TRANS_CONFLICT;

View File

@@ -145,8 +145,7 @@ FAIL:
 }
 int32_t sndInit(SSnode *pSnode) {
-  int32_t numOfTasks = 0;
-  tqStreamTaskResetStatus(pSnode->pMeta, &numOfTasks);
+  streamMetaResetTaskStatus(pSnode->pMeta);
   streamMetaStartAllTasks(pSnode->pMeta);
   return 0;
 }

View File

@@ -107,7 +107,6 @@ struct STQ {
   TTB*         pExecStore;
   TTB*         pCheckStore;
   SStreamMeta* pStreamMeta;
-  void*        tqTimer;
 };
 int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
@@ -146,7 +145,7 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore);
 // tqSink
 int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
-                         const char* pIdStr);
+                         const char* pIdStr, bool newSubTableRule);
 void    tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data);
 // tqOffset
@@ -165,7 +164,7 @@ int32_t setDstTableDataPayload(uint64_t suid, const STSchema* pTSchema, int32_t
 int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, const char* id);
 SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t numOfCols,
-                                       SSDataBlock* pDataBlock, SArray* pTagArray);
+                                       SSDataBlock* pDataBlock, SArray* pTagArray, bool newSubTableRule);
 #ifdef __cplusplus
 }

View File

@@ -703,7 +703,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
       code = terrno;
       TSDB_CHECK_CODE(code, lino, _exit);
     }
-    code = tqBuildDeleteReq(pSma->pVnode->pTq, NULL, output, &deleteReq, "");
+    code = tqBuildDeleteReq(pSma->pVnode->pTq, NULL, output, &deleteReq, "", true);
     TSDB_CHECK_CODE(code, lino, _exit);
     code = tdRSmaProcessDelReq(pSma, suid, pItem->level, &deleteReq);
     TSDB_CHECK_CODE(code, lino, _exit);

View File

@@ -188,7 +188,7 @@ int32_t smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema *
     if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
       pDeleteReq->suid = suid;
       pDeleteReq->deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
-      code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, pDeleteReq, "");
+      code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, pDeleteReq, "", true);
       TSDB_CHECK_CODE(code, lino, _exit);
       continue;
     }
@@ -196,7 +196,7 @@ int32_t smaBlockToSubmit(SVnode *pVnode, const SArray *pBlocks, const STSchema *
     SSubmitTbData tbData = {.suid = suid, .uid = 0, .sver = pTSchema->version, .flags = SUBMIT_REQ_AUTO_CREATE_TABLE,};
     int32_t cid = taosArrayGetSize(pDataBlock->pDataBlock) + 1;
-    tbData.pCreateTbReq = buildAutoCreateTableReq(stbFullName, suid, cid, pDataBlock, tagArray);
+    tbData.pCreateTbReq = buildAutoCreateTableReq(stbFullName, suid, cid, pDataBlock, tagArray, true);
     {
       uint64_t groupId = pDataBlock->info.id.groupId;

View File

@@ -26,18 +26,6 @@ static FORCE_INLINE bool tqIsHandleExec(STqHandle* pHandle) { return TMQ_HANDLE_
 static FORCE_INLINE void tqSetHandleExec(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_EXEC; }
 static FORCE_INLINE void tqSetHandleIdle(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_IDLE; }
-static int32_t tqTimerInit(STQ* pTq) {
-  pTq->tqTimer = taosTmrInit(100, 100, 1000, "TQ");
-  if (pTq->tqTimer == NULL) {
-    return -1;
-  }
-  return 0;
-}
-
-static void tqTimerCleanUp(STQ* pTq) {
-  taosTmrCleanUp(pTq->tqTimer);
-}
 void tqDestroyTqHandle(void* data) {
   STqHandle* pData = (STqHandle*)data;
   qDestroyTask(pData->execHandle.task);
@@ -118,7 +106,6 @@ int32_t tqInitialize(STQ* pTq) {
     return -1;
   }
-  tqTimerInit(pTq);
   return 0;
 }
@@ -149,7 +136,6 @@ void tqClose(STQ* pTq) {
   taosMemoryFree(pTq->path);
   tqMetaClose(pTq);
   streamMetaClose(pTq->pStreamMeta);
-  tqTimerCleanUp(pTq);
   qDebug("end to close tq");
   taosMemoryFree(pTq);

View File

@@ -596,10 +596,12 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol
   if (IS_STR_DATA_TYPE(pColVal->type)) {
     char val[65535 + 2] = {0};
-    if (pColVal->value.pData != NULL) {
-      memcpy(varDataVal(val), pColVal->value.pData, pColVal->value.nData);
+    if(COL_VAL_IS_VALUE(pColVal)){
+      if (pColVal->value.pData != NULL) {
+        memcpy(varDataVal(val), pColVal->value.pData, pColVal->value.nData);
+      }
       varDataSetLen(val, pColVal->value.nData);
-      code = colDataSetVal(pColumnInfoData, rowIndex, val, !COL_VAL_IS_VALUE(pColVal));
+      code = colDataSetVal(pColumnInfoData, rowIndex, val, false);
     } else {
       colDataSetNULL(pColumnInfoData, rowIndex);
     }
@@ -869,22 +871,8 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
         sourceIdx++;
       } else if (pCol->cid == pColData->info.colId) {
         tColDataGetValue(pCol, i, &colVal);
-        if (IS_STR_DATA_TYPE(colVal.type)) {
-          if (colVal.value.pData != NULL) {
-            char val[65535 + 2];
-            memcpy(varDataVal(val), colVal.value.pData, colVal.value.nData);
-            varDataSetLen(val, colVal.value.nData);
-            if (colDataSetVal(pColData, curRow - lastRow, val, !COL_VAL_IS_VALUE(&colVal)) < 0) {
-              goto FAIL;
-            }
-          } else {
-            colDataSetNULL(pColData, curRow - lastRow);
-          }
-        } else {
-          if (colDataSetVal(pColData, curRow - lastRow, (void*)&colVal.value.val, !COL_VAL_IS_VALUE(&colVal)) < 0) {
-            goto FAIL;
-          }
-        }
+        if(doSetVal(pColData, curRow - lastRow, &colVal) != TDB_CODE_SUCCESS){
+          goto FAIL;
+        }
         sourceIdx++;
         targetIdx++;
@@ -967,23 +955,10 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas
       if (colVal.cid < pColData->info.colId) {
         sourceIdx++;
       } else if (colVal.cid == pColData->info.colId) {
-        if (IS_STR_DATA_TYPE(colVal.type)) {
-          if (colVal.value.pData != NULL) {
-            char val[65535 + 2];
-            memcpy(varDataVal(val), colVal.value.pData, colVal.value.nData);
-            varDataSetLen(val, colVal.value.nData);
-            if (colDataSetVal(pColData, curRow - lastRow, val, !COL_VAL_IS_VALUE(&colVal)) < 0) {
-              goto FAIL;
-            }
-          } else {
-            colDataSetNULL(pColData, curRow - lastRow);
-          }
-        } else {
-          if (colDataSetVal(pColData, curRow - lastRow, (void*)&colVal.value.val, !COL_VAL_IS_VALUE(&colVal)) < 0) {
-            goto FAIL;
-          }
-        }
+        if(doSetVal(pColData, curRow - lastRow, &colVal) != TDB_CODE_SUCCESS){
+          goto FAIL;
+        }
         sourceIdx++;
         targetIdx++;
       }
     }

View File

@@ -41,10 +41,10 @@ static bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const ch
 static int32_t initCreateTableMsg(SVCreateTbReq* pCreateTableReq, uint64_t suid, const char* stbFullName, int32_t numOfTags);
 static SArray* createDefaultTagColName();
 static void setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
-                                       int64_t gid);
+                                       int64_t gid, bool newSubTableRule);
 int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
-                         const char* pIdStr) {
+                         const char* pIdStr, bool newSubTableRule) {
   int32_t totalRows = pDataBlock->info.rows;
   SColumnInfoData* pStartTsCol = taosArrayGet(pDataBlock->pDataBlock, START_TS_COLUMN_INDEX);
   SColumnInfoData* pEndTsCol = taosArrayGet(pDataBlock->pDataBlock, END_TS_COLUMN_INDEX);
@@ -68,6 +68,12 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p
     if (varTbName != NULL && varTbName != (void*)-1) {
       name = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN);
       memcpy(name, varDataVal(varTbName), varDataLen(varTbName));
+      if(newSubTableRule &&
+         !isAutoTableName(name) &&
+         !alreadyAddGroupId(name) &&
+         groupId != 0) {
+        buildCtbNameAddGruopId(name, groupId);
+      }
     } else if (stbFullName) {
       name = buildCtbNameByGroupId(stbFullName, groupId);
     } else {
@@ -173,9 +179,18 @@ SArray* createDefaultTagColName() {
 }
 void setCreateTableMsgTableName(SVCreateTbReq* pCreateTableReq, SSDataBlock* pDataBlock, const char* stbFullName,
-                                int64_t gid) {
+                                int64_t gid, bool newSubTableRule) {
   if (pDataBlock->info.parTbName[0]) {
-    pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName);
+    if(newSubTableRule &&
+       !isAutoTableName(pDataBlock->info.parTbName) &&
+       !alreadyAddGroupId(pDataBlock->info.parTbName) &&
+       gid != 0) {
+      pCreateTableReq->name = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN);
+      strcpy(pCreateTableReq->name, pDataBlock->info.parTbName);
+      buildCtbNameAddGruopId(pCreateTableReq->name, gid);
+    }else{
+      pCreateTableReq->name = taosStrdup(pDataBlock->info.parTbName);
+    }
   } else {
     pCreateTableReq->name = buildCtbNameByGroupId(stbFullName, gid);
   }
@@ -247,7 +262,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
       ASSERT(gid == *(int64_t*)pGpIdData);
     }
-    setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid);
+    setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, gid, pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER);
     taosArrayPush(reqs.pArray, pCreateTbReq);
     tqDebug("s-task:%s build create table:%s msg complete", pTask->id.idStr, pCreateTbReq->name);
@@ -357,7 +372,8 @@ int32_t doBuildAndSendDeleteMsg(SVnode* pVnode, char* stbFullName, SSDataBlock*
                                 int64_t suid) {
   SBatchDeleteReq deleteReq = {.suid = suid, .deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq))};
-  int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr);
+  int32_t code = tqBuildDeleteReq(pVnode->pTq, stbFullName, pDataBlock, &deleteReq, pTask->id.idStr,
+                                  pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER);
   if (code != TSDB_CODE_SUCCESS) {
     return code;
   }
@@ -411,7 +427,7 @@ bool isValidDstChildTable(SMetaReader* pReader, int32_t vgId, const char* ctbNam
 }
 SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t numOfCols,
-                                       SSDataBlock* pDataBlock, SArray* pTagArray) {
+                                       SSDataBlock* pDataBlock, SArray* pTagArray, bool newSubTableRule) {
   SVCreateTbReq* pCreateTbReq = taosMemoryCalloc(1, sizeof(SVCreateTbReq));
   if (pCreateTbReq == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -436,7 +452,7 @@ SVCreateTbReq* buildAutoCreateTableReq(const char* stbFullName, int64_t suid, in
   pCreateTbReq->ctb.tagName = createDefaultTagColName();
   // set table name
-  setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, pDataBlock->info.id.groupId);
+  setCreateTableMsgTableName(pCreateTbReq, pDataBlock, stbFullName, pDataBlock->info.id.groupId, newSubTableRule);
   return pCreateTbReq;
 }
@@ -637,6 +653,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
     tqDebug("s-task:%s vgId:%d, gropuId:%" PRIu64 " datablock table name is null, set name:%s", id, vgId, groupId,
             dstTableName);
   } else {
+    tstrncpy(dstTableName, pTableSinkInfo->name.data, pTableSinkInfo->name.len + 1);
     if (pTableSinkInfo->uid != 0) {
       tqDebug("s-task:%s write %d rows into groupId:%" PRIu64 " dstTable:%s(uid:%" PRIu64 ")", id, numOfRows, groupId,
               dstTableName, pTableSinkInfo->uid);
@@ -649,10 +666,17 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
     if (dstTableName[0] == 0) {
       memset(dstTableName, 0, TSDB_TABLE_NAME_LEN);
       buildCtbNameByGroupIdImpl(stbFullName, groupId, dstTableName);
+    }else{
+      if(pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER &&
+         !isAutoTableName(dstTableName) &&
+         !alreadyAddGroupId(dstTableName) &&
+         groupId != 0) {
+        buildCtbNameAddGruopId(dstTableName, groupId);
+      }
     }
     int32_t nameLen = strlen(dstTableName);
-    pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen);
+    pTableSinkInfo = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1);
     if (pTableSinkInfo == NULL) {
       return TSDB_CODE_OUT_OF_MEMORY;
     }
@@ -690,7 +714,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
       pTableData->flags = SUBMIT_REQ_AUTO_CREATE_TABLE;
       pTableData->pCreateTbReq =
-          buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray);
+          buildAutoCreateTableReq(stbFullName, suid, pTSchema->numOfCols + 1, pDataBlock, pTagArray, pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER);
       taosArrayDestroy(pTagArray);
       if (pTableData->pCreateTbReq == NULL) {
@@ -719,7 +743,7 @@ int32_t setDstTableDataUid(SVnode* pVnode, SStreamTask* pTask, SSDataBlock* pDat
     }
   }
-  return TSDB_CODE_SUCCESS;
+  return TDB_CODE_SUCCESS;
 }
 int32_t setDstTableDataPayload(uint64_t suid, const STSchema *pTSchema, int32_t blockIndex, SSDataBlock* pDataBlock,

View File

@@ -95,10 +95,14 @@ int32_t tqScanWalInFuture(STQ* pTq, int32_t numOfTasks, int32_t idleDuration) {
   pParam->pTq = pTq;
   pParam->numOfTasks = numOfTasks;
+  tmr_h pTimer = streamTimerGetInstance();
+  ASSERT(pTimer);
   if (pMeta->scanInfo.scanTimer == NULL) {
-    pMeta->scanInfo.scanTimer = taosTmrStart(doStartScanWal, idleDuration, pParam, pTq->tqTimer);
+    pMeta->scanInfo.scanTimer = taosTmrStart(doStartScanWal, idleDuration, pParam, pTimer);
   } else {
-    taosTmrReset(doStartScanWal, idleDuration, pParam, pTq->tqTimer, &pMeta->scanInfo.scanTimer);
+    taosTmrReset(doStartScanWal, idleDuration, pParam, pTimer, &pMeta->scanInfo.scanTimer);
   }
   return TSDB_CODE_SUCCESS;
@@ -175,11 +179,11 @@ int32_t tqStopStreamTasksAsync(STQ* pTq) {
   SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
   if (pRunReq == NULL) {
     terrno = TSDB_CODE_OUT_OF_MEMORY;
-    tqError("vgId:%d failed to create msg to stop tasks, code:%s", vgId, terrstr());
+    tqError("vgId:%d failed to create msg to stop tasks async, code:%s", vgId, terrstr());
     return -1;
   }
-  tqDebug("vgId:%d create msg to stop tasks", vgId);
+  tqDebug("vgId:%d create msg to stop all tasks async", vgId);
   pRunReq->head.vgId = vgId;
   pRunReq->streamId = 0;

View File

@@ -103,8 +103,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
   STaskId id = {.streamId = req.streamId, .taskId = req.taskId};
   SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
   if (ppTask == NULL || *ppTask == NULL) {
-    tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped already", vgId,
-            req.taskId);
+    tqError("vgId:%d failed to acquire task:0x%x when handling update, it may have been dropped", vgId, req.taskId);
     rsp.code = TSDB_CODE_SUCCESS;
     streamMetaWUnLock(pMeta);
@@ -124,6 +123,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
     tqDebug("s-task:%s recv trans to update nodeEp from mnode, transId:%d", idstr, req.transId);
   }
+  // duplicate update epset msg received, discard this redundant message
   STaskUpdateEntry entry = {.streamId = req.streamId, .taskId = req.taskId, .transId = req.transId};
   void* pReqTask = taosHashGet(pMeta->updateInfo.pTasks, &entry, sizeof(STaskUpdateEntry));
@@ -135,7 +135,6 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
     return rsp.code;
   }
-  // the following two functions should not be executed within the scope of meta lock to avoid deadlock
   streamTaskUpdateEpsetInfo(pTask, req.pNodeList);
   streamTaskResetStatus(pTask);
@@ -159,11 +158,6 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
       streamMetaSaveTask(pMeta, *ppHTask);
     }
-#if 0
-    if (streamMetaCommit(pMeta) < 0) {
-      // persist to disk
-    }
-#endif
   } else {
     tqDebug("s-task:%s vgId:%d not save since restore not finish", idstr, vgId);
   }
@@ -171,7 +165,7 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
   tqDebug("s-task:%s vgId:%d start to stop task after save task", idstr, vgId);
   streamTaskStop(pTask);
-  // keep the already handled info
+  // keep the already updated info
   taosHashPut(pMeta->updateInfo.pTasks, &entry, sizeof(entry), NULL, 0);
   if (ppHTask != NULL) {
@@ -200,6 +194,10 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM
             updateTasks, (numOfTasks - updateTasks));
     streamMetaWUnLock(pMeta);
   } else {
+    if (streamMetaCommit(pMeta) < 0) {
+      // persist to disk
+    }
     if (!restored) {
       tqDebug("vgId:%d vnode restore not completed, not start the tasks, clear the start after nodeUpdate flag", vgId);
       pMeta->startInfo.tasksWillRestart = 0;
@@ -686,16 +684,16 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen
   return 0;
 }
-int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta, int32_t* numOfTasks) {
+int32_t tqStreamTaskResetStatus(SStreamMeta* pMeta) {
   int32_t vgId = pMeta->vgId;
-  *numOfTasks = taosArrayGetSize(pMeta->pTaskList);
-  tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, *numOfTasks);
-  if (*numOfTasks == 0) {
+  int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
+  tqDebug("vgId:%d reset all %d stream task(s) status to be uninit", vgId, numOfTasks);
+  if (numOfTasks == 0) {
     return TSDB_CODE_SUCCESS;
   }
-  for (int32_t i = 0; i < (*numOfTasks); ++i) {
+  for (int32_t i = 0; i < numOfTasks; ++i) {
     SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
     STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
@@ -754,8 +752,7 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) {
   }
   if (isLeader && !tsDisableStream) {
-    int32_t numOfTasks = 0;
-    tqStreamTaskResetStatus(pMeta, &numOfTasks);
+    streamMetaResetTaskStatus(pMeta);
     streamMetaWUnLock(pMeta);
     streamMetaStartAllTasks(pMeta);
@@ -965,8 +962,7 @@ static int32_t tqProcessTaskResumeImpl(void* handle, SStreamTask* pTask, int64_t
   } else if (status == TASK_STATUS__UNINIT) {
     // todo: fill-history task init ?
     if (pTask->info.fillHistory == 0) {
-      EStreamTaskEvent event = /*HAS_RELATED_FILLHISTORY_TASK(pTask) ? TASK_EVENT_INIT_STREAM_SCANHIST : */TASK_EVENT_INIT;
-      streamTaskHandleEvent(pTask->status.pSM, event);
+      streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT);
     }
   }
@@ -991,3 +987,7 @@ int32_t tqStreamTaskProcessTaskResumeReq(void* handle, int64_t sversion, char* m
   return code;
 }
+
+int32_t tqStreamTasksGetTotalNum(SStreamMeta* pMeta) {
+  return taosArrayGetSize(pMeta->pTaskList);
+}
View File
@ -941,7 +941,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
} }
if(lastrowTmpIndexArray != NULL) { if(lastrowTmpIndexArray != NULL) {
mergeLastCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds); mergeLastRowCid(uid, pTsdb, &lastrowTmpColArray, pr, lastrowColIds, lastrowIndex, lastrowSlotIds);
for(int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) { for(int i = 0; i < taosArrayGetSize(lastrowTmpColArray); i++) {
taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastrowTmpIndexArray, i), taosArrayGet(lastrowTmpColArray, i)); taosArrayInsert(pTmpColArray, *(int32_t*)taosArrayGet(lastrowTmpIndexArray, i), taosArrayGet(lastrowTmpColArray, i));
} }
View File
@ -4100,9 +4100,9 @@ void tsdbReaderClose2(STsdbReader* pReader) {
size_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap); size_t numOfTables = tSimpleHashGetSize(pReader->status.pTableMap);
if (pReader->status.pTableMap != NULL) { if (pReader->status.pTableMap != NULL) {
destroyAllBlockScanInfo(pReader->status.pTableMap); destroyAllBlockScanInfo(pReader->status.pTableMap);
clearBlockScanInfoBuf(&pReader->blockInfoBuf);
pReader->status.pTableMap = NULL; pReader->status.pTableMap = NULL;
} }
clearBlockScanInfoBuf(&pReader->blockInfoBuf);
if (pReader->pFileReader != NULL) { if (pReader->pFileReader != NULL) {
tsdbDataFileReaderClose(&pReader->pFileReader); tsdbDataFileReaderClose(&pReader->pFileReader);
@ -4120,8 +4120,10 @@ void tsdbReaderClose2(STsdbReader* pReader) {
taosMemoryFreeClear(pReader->status.uidList.tableUidList); taosMemoryFreeClear(pReader->status.uidList.tableUidList);
qTrace("tsdb/reader-close: %p, untake snapshot", pReader); qTrace("tsdb/reader-close: %p, untake snapshot", pReader);
tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, true); void* p = pReader->pReadSnap;
pReader->pReadSnap = NULL; if ((p == atomic_val_compare_exchange_ptr((void**)&pReader->pReadSnap, p, NULL)) && (p != NULL)) {
tsdbUntakeReadSnap2(pReader, p, true);
}
tsem_destroy(&pReader->resumeAfterSuspend); tsem_destroy(&pReader->resumeAfterSuspend);
tsdbReleaseReader(pReader); tsdbReleaseReader(pReader);
@ -4195,8 +4197,12 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
doSuspendCurrentReader(pReader); doSuspendCurrentReader(pReader);
} }
tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false); // make sure only release once
pReader->pReadSnap = NULL; void* p = pReader->pReadSnap;
if ((p == atomic_val_compare_exchange_ptr((void**)&pReader->pReadSnap, p, NULL)) && (p != NULL)) {
tsdbUntakeReadSnap2(pReader, p, false);
}
if (pReader->bFilesetDelimited) { if (pReader->bFilesetDelimited) {
pReader->status.memTableMinKey = INT64_MAX; pReader->status.memTableMinKey = INT64_MAX;
pReader->status.memTableMaxKey = INT64_MIN; pReader->status.memTableMaxKey = INT64_MIN;
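In both tsdbReaderClose2 and tsdbReaderSuspend2 the snapshot release is now guarded by a compare-and-swap on pReader->pReadSnap, so the snapshot is untaken at most once even if the close and suspend paths race. A minimal sketch of the claim-once pattern, assuming only the atomic_val_compare_exchange_ptr semantics shown in these hunks (returns the previous value, swaps only on match):

// Sketch: atomically claim a pointer so its paired release runs at most once.
static void* claimOnce(void** slot) {
  void* p = *slot;
  if ((p != NULL) && (p == atomic_val_compare_exchange_ptr(slot, p, NULL))) {
    return p;   // this caller won the race and owns the release
  }
  return NULL;  // already claimed elsewhere, or never set
}

// usage, mirroring the suspend path above:
//   void* pSnap = claimOnce((void**)&pReader->pReadSnap);
//   if (pSnap != NULL) {
//     tsdbUntakeReadSnap2(pReader, pSnap, false);
//   }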
View File
@ -570,9 +570,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
vInfo("vgId:%d, sync restore finished, not launch stream tasks, since stream tasks are disabled", vgId); vInfo("vgId:%d, sync restore finished, not launch stream tasks, since stream tasks are disabled", vgId);
} else { } else {
vInfo("vgId:%d sync restore finished, start to launch stream task(s)", pVnode->config.vgId); vInfo("vgId:%d sync restore finished, start to launch stream task(s)", pVnode->config.vgId);
int32_t numOfTasks = 0; int32_t numOfTasks = tqStreamTasksGetTotalNum(pMeta);
tqStreamTaskResetStatus(pMeta, &numOfTasks);
if (numOfTasks > 0) { if (numOfTasks > 0) {
if (pMeta->startInfo.taskStarting == 1) { if (pMeta->startInfo.taskStarting == 1) {
pMeta->startInfo.restartCount += 1; pMeta->startInfo.restartCount += 1;
@ -580,6 +578,7 @@ static void vnodeRestoreFinish(const SSyncFSM *pFsm, const SyncIndex commitIdx)
pMeta->startInfo.restartCount); pMeta->startInfo.restartCount);
} else { } else {
pMeta->startInfo.taskStarting = 1; pMeta->startInfo.taskStarting = 1;
streamMetaWUnLock(pMeta); streamMetaWUnLock(pMeta);
tqStreamTaskStartAsync(pMeta, &pVnode->msgCb, false); tqStreamTaskStartAsync(pMeta, &pVnode->msgCb, false);
return; return;
View File
@ -200,6 +200,7 @@ typedef struct SExchangeInfo {
uint64_t self; uint64_t self;
SLimitInfo limitInfo; SLimitInfo limitInfo;
int64_t openedTs; // start exec time stamp, todo: move to SLoadRemoteDataInfo int64_t openedTs; // start exec time stamp, todo: move to SLoadRemoteDataInfo
char* pTaskId;
} SExchangeInfo; } SExchangeInfo;
typedef struct SScanInfo { typedef struct SScanInfo {
@ -272,7 +273,8 @@ typedef struct STableScanInfo {
SSampleExecInfo sample; // sample execution info SSampleExecInfo sample; // sample execution info
int32_t tableStartIndex; // current group scan start int32_t tableStartIndex; // current group scan start
int32_t tableEndIndex; // current group scan end int32_t tableEndIndex; // current group scan end
int32_t currentGroupIndex; // current group index of groupOffset int32_t currentGroupId;
int32_t currentTable;
int8_t scanMode; int8_t scanMode;
int8_t assignBlockUid; int8_t assignBlockUid;
uint8_t countState; // empty table count state uint8_t countState; // empty table count state
View File
@ -260,14 +260,17 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t len = strlen(id) + 1;
pInfo->pTaskId = taosMemoryCalloc(1, len);
strncpy(pInfo->pTaskId, id, len);
for (int32_t i = 0; i < numOfSources; ++i) { for (int32_t i = 0; i < numOfSources; ++i) {
SSourceDataInfo dataInfo = {0}; SSourceDataInfo dataInfo = {0};
dataInfo.status = EX_SOURCE_DATA_NOT_READY; dataInfo.status = EX_SOURCE_DATA_NOT_READY;
dataInfo.taskId = id; dataInfo.taskId = pInfo->pTaskId;
dataInfo.index = i; dataInfo.index = i;
SSourceDataInfo* pDs = taosArrayPush(pInfo->pSourceDataInfo, &dataInfo); SSourceDataInfo* pDs = taosArrayPush(pInfo->pSourceDataInfo, &dataInfo);
if (pDs == NULL) { if (pDs == NULL) {
taosArrayDestroy(pInfo->pSourceDataInfo); taosArrayDestroyEx(pInfo->pSourceDataInfo, freeSourceDataInfo);
return TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_OUT_OF_MEMORY;
} }
} }
@ -383,6 +386,8 @@ void doDestroyExchangeOperatorInfo(void* param) {
tSimpleHashCleanup(pExInfo->pHashSources); tSimpleHashCleanup(pExInfo->pHashSources);
tsem_destroy(&pExInfo->ready); tsem_destroy(&pExInfo->ready);
taosMemoryFreeClear(pExInfo->pTaskId);
taosMemoryFreeClear(param); taosMemoryFreeClear(param);
} }
@ -782,7 +787,7 @@ int32_t addSingleExchangeSource(SOperatorInfo* pOperator, SExchangeOperatorBasic
if (pIdx->inUseIdx < 0) { if (pIdx->inUseIdx < 0) {
SSourceDataInfo dataInfo = {0}; SSourceDataInfo dataInfo = {0};
dataInfo.status = EX_SOURCE_DATA_NOT_READY; dataInfo.status = EX_SOURCE_DATA_NOT_READY;
dataInfo.taskId = GET_TASKID(pOperator->pTaskInfo); dataInfo.taskId = pExchangeInfo->pTaskId;
dataInfo.index = pIdx->srcIdx; dataInfo.index = pIdx->srcIdx;
dataInfo.pSrcUidList = taosArrayDup(pBasicParam->uidList, NULL); dataInfo.pSrcUidList = taosArrayDup(pBasicParam->uidList, NULL);
dataInfo.srcOpType = pBasicParam->srcOpType; dataInfo.srcOpType = pBasicParam->srcOpType;
View File
@ -646,10 +646,6 @@ int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInf
} }
} }
if (initRemainGroups) {
pTableListInfo->numOfOuputGroups = taosHashGetSize(pTableListInfo->remainGroups);
}
if (tsTagFilterCache) { if (tsTagFilterCache) {
tableList = taosArrayDup(pTableListInfo->pTableList, NULL); tableList = taosArrayDup(pTableListInfo->pTableList, NULL);
pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest),
@ -2142,6 +2138,8 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
pTableListInfo->numOfOuputGroups = numOfTables; pTableListInfo->numOfOuputGroups = numOfTables;
} else if (groupByTbname && pScanNode->groupOrderScan){ } else if (groupByTbname && pScanNode->groupOrderScan){
pTableListInfo->numOfOuputGroups = numOfTables; pTableListInfo->numOfOuputGroups = numOfTables;
} else if (groupByTbname && tsCountAlwaysReturnValue && ((STableScanPhysiNode*)pScanNode)->needCountEmptyTable) {
pTableListInfo->numOfOuputGroups = numOfTables;
} else { } else {
pTableListInfo->numOfOuputGroups = 1; pTableListInfo->numOfOuputGroups = 1;
} }
@ -2159,6 +2157,8 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
return code; return code;
} }
if (pScanNode->groupOrderScan) pTableListInfo->numOfOuputGroups = taosArrayGetSize(pTableListInfo->pTableList);
if (groupSort || pScanNode->groupOrderScan) { if (groupSort || pScanNode->groupOrderScan) {
code = sortTableGroup(pTableListInfo); code = sortTableGroup(pTableListInfo);
} }
View File
@ -1264,7 +1264,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
STableKeyInfo* pTableInfo = tableListGetInfo(pTableListInfo, 0); STableKeyInfo* pTableInfo = tableListGetInfo(pTableListInfo, 0);
uid = pTableInfo->uid; uid = pTableInfo->uid;
ts = INT64_MIN; ts = INT64_MIN;
pScanInfo->tableEndIndex = 0; pScanInfo->currentTable = 0;
} else { } else {
taosRUnLockLatch(&pTaskInfo->lock); taosRUnLockLatch(&pTaskInfo->lock);
qError("no table in table list, %s", id); qError("no table in table list, %s", id);
@ -1278,16 +1278,16 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
pInfo->pTableScanOp->resultInfo.totalRows = 0; pInfo->pTableScanOp->resultInfo.totalRows = 0;
// start from current accessed position // start from current accessed position
// we cannot start from the pScanInfo->tableEndIndex, since the commit offset may cause the rollback of the start // we cannot start from the pScanInfo->currentTable, since the commit offset may cause the rollback of the start
// position, let's find it from the beginning. // position, let's find it from the beginning.
index = tableListFind(pTableListInfo, uid, 0); index = tableListFind(pTableListInfo, uid, 0);
taosRUnLockLatch(&pTaskInfo->lock); taosRUnLockLatch(&pTaskInfo->lock);
if (index >= 0) { if (index >= 0) {
pScanInfo->tableEndIndex = index; pScanInfo->currentTable = index;
} else { } else {
qError("vgId:%d uid:%" PRIu64 " not found in table list, total:%d, index:%d %s", pTaskInfo->id.vgId, uid, qError("vgId:%d uid:%" PRIu64 " not found in table list, total:%d, index:%d %s", pTaskInfo->id.vgId, uid,
numOfTables, pScanInfo->tableEndIndex, id); numOfTables, pScanInfo->currentTable, id);
terrno = TSDB_CODE_PAR_INTERNAL_ERROR; terrno = TSDB_CODE_PAR_INTERNAL_ERROR;
return -1; return -1;
} }
@ -1310,12 +1310,12 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
} }
qDebug("tsdb reader created with offset(snapshot) uid:%" PRId64 " ts:%" PRId64 " table index:%d, total:%d, %s", qDebug("tsdb reader created with offset(snapshot) uid:%" PRId64 " ts:%" PRId64 " table index:%d, total:%d, %s",
uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id); uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id);
} else { } else {
pTaskInfo->storageAPI.tsdReader.tsdSetQueryTableList(pScanBaseInfo->dataReader, &keyInfo, 1); pTaskInfo->storageAPI.tsdReader.tsdSetQueryTableList(pScanBaseInfo->dataReader, &keyInfo, 1);
pTaskInfo->storageAPI.tsdReader.tsdReaderResetStatus(pScanBaseInfo->dataReader, &pScanBaseInfo->cond); pTaskInfo->storageAPI.tsdReader.tsdReaderResetStatus(pScanBaseInfo->dataReader, &pScanBaseInfo->cond);
qDebug("tsdb reader offset seek snapshot to uid:%" PRId64 " ts %" PRId64 " table index:%d numOfTable:%d, %s", qDebug("tsdb reader offset seek snapshot to uid:%" PRId64 " ts %" PRId64 " table index:%d numOfTable:%d, %s",
uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->tableEndIndex, numOfTables, id); uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id);
} }
// restore the key value // restore the key value
View File
@ -27,6 +27,7 @@ typedef struct SProjectOperatorInfo {
SLimitInfo limitInfo; SLimitInfo limitInfo;
bool mergeDataBlocks; bool mergeDataBlocks;
SSDataBlock* pFinalRes; SSDataBlock* pFinalRes;
bool inputIgnoreGroup;
} SProjectOperatorInfo; } SProjectOperatorInfo;
typedef struct SIndefOperatorInfo { typedef struct SIndefOperatorInfo {
@ -109,6 +110,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
pInfo->pFinalRes = createOneDataBlock(pResBlock, false); pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
pInfo->binfo.inputTsOrder = pProjPhyNode->node.inputTsOrder; pInfo->binfo.inputTsOrder = pProjPhyNode->node.inputTsOrder;
pInfo->binfo.outputTsOrder = pProjPhyNode->node.outputTsOrder; pInfo->binfo.outputTsOrder = pProjPhyNode->node.outputTsOrder;
pInfo->inputIgnoreGroup = pProjPhyNode->inputIgnoreGroup;
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM || pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) { if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM || pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
pInfo->mergeDataBlocks = false; pInfo->mergeDataBlocks = false;
@ -300,6 +302,10 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
return pBlock; return pBlock;
} }
if (pProjectInfo->inputIgnoreGroup) {
pBlock->info.id.groupId = 0;
}
int32_t status = discardGroupDataBlock(pBlock, pLimitInfo); int32_t status = discardGroupDataBlock(pBlock, pLimitInfo);
if (status == PROJECT_RETRIEVE_CONTINUE) { if (status == PROJECT_RETRIEVE_CONTINUE) {
continue; continue;
View File
@ -657,33 +657,17 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData,
static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, int32_t* size) { static void initNextGroupScan(STableScanInfo* pInfo, STableKeyInfo** pKeyInfo, int32_t* size) {
pInfo->tableStartIndex = pInfo->tableEndIndex + 1; tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, pKeyInfo, size);
STableListInfo* pTableListInfo = pInfo->base.pTableListInfo; pInfo->tableStartIndex = TARRAY_ELEM_IDX(pInfo->base.pTableListInfo->pTableList, *pKeyInfo);
int32_t numOfTables = tableListGetSize(pTableListInfo);
STableKeyInfo* pStart = (STableKeyInfo*)tableListGetInfo(pTableListInfo, pInfo->tableStartIndex);
if (pTableListInfo->oneTableForEachGroup) { pInfo->tableEndIndex = (pInfo->tableStartIndex + (*size) - 1);
pInfo->tableEndIndex = pInfo->tableStartIndex;
} else if (pTableListInfo->groupOffset) {
pInfo->currentGroupIndex++;
if (pInfo->currentGroupIndex + 1 < pTableListInfo->numOfOuputGroups) {
pInfo->tableEndIndex = pTableListInfo->groupOffset[pInfo->currentGroupIndex + 1] - 1;
} else {
pInfo->tableEndIndex = numOfTables - 1;
}
} else {
pInfo->tableEndIndex = numOfTables - 1;
}
if (!pInfo->needCountEmptyTable) { if (!pInfo->needCountEmptyTable) {
pInfo->countState = TABLE_COUNT_STATE_END; pInfo->countState = TABLE_COUNT_STATE_END;
} else { } else {
pInfo->countState = TABLE_COUNT_STATE_SCAN; pInfo->countState = TABLE_COUNT_STATE_SCAN;
} }
*pKeyInfo = pStart;
*size = pInfo->tableEndIndex - pInfo->tableStartIndex + 1;
} }
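initNextGroupScan now derives the scan range directly from the group list: tableListGetGroupList returns a pointer to the group's first STableKeyInfo plus its size, and TARRAY_ELEM_IDX recovers the start index from that pointer. The idea, sketched with plain pointer arithmetic (the helper below is illustrative, not the real macro):

// Valid only while both pointers refer to elements of the same contiguous array.
static int32_t elemIndexOf(const STableKeyInfo* pBase, const STableKeyInfo* pElem) {
  return (int32_t)(pElem - pBase);
}

// tableStartIndex = elemIndexOf(first element of pTableList, *pKeyInfo);
// tableEndIndex   = tableStartIndex + (*size) - 1;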
void markGroupProcessed(STableScanInfo* pInfo, uint64_t groupId) { void markGroupProcessed(STableScanInfo* pInfo, uint64_t groupId) {
@ -939,7 +923,7 @@ static SSDataBlock* startNextGroupScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStorageAPI* pAPI = &pTaskInfo->storageAPI; SStorageAPI* pAPI = &pTaskInfo->storageAPI;
int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
if (pInfo->tableEndIndex + 1 >= numOfTables) { if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) {
setOperatorCompleted(pOperator); setOperatorCompleted(pOperator);
if (pOperator->dynamicTask) { if (pOperator->dynamicTask) {
taosArrayClear(pInfo->base.pTableListInfo->pTableList); taosArrayClear(pInfo->base.pTableListInfo->pTableList);
@ -978,13 +962,14 @@ static SSDataBlock* groupSeqTableScan(SOperatorInfo* pOperator) {
int32_t num = 0; int32_t num = 0;
STableKeyInfo* pList = NULL; STableKeyInfo* pList = NULL;
if (pInfo->tableEndIndex == -1) { if (pInfo->currentGroupId == -1) {
int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo); int32_t numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
if (pInfo->tableEndIndex + 1 == numOfTables) { if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) {
setOperatorCompleted(pOperator); setOperatorCompleted(pOperator);
return NULL; return NULL;
} }
initNextGroupScan(pInfo, &pList, &num); initNextGroupScan(pInfo, &pList, &num);
ASSERT(pInfo->base.dataReader == NULL); ASSERT(pInfo->base.dataReader == NULL);
@ -1034,7 +1019,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
T_LONG_JMP(pTaskInfo->env, code); T_LONG_JMP(pTaskInfo->env, code);
} }
if (pOperator->status == OP_EXEC_DONE) { if (pOperator->status == OP_EXEC_DONE) {
pInfo->tableEndIndex = -1; pInfo->currentGroupId = -1;
pOperator->status = OP_OPENED; pOperator->status = OP_OPENED;
SSDataBlock* result = NULL; SSDataBlock* result = NULL;
while (true) { while (true) {
@ -1059,23 +1044,23 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
} }
// if no data, switch to next table and continue scan // if no data, switch to next table and continue scan
pInfo->tableEndIndex++; pInfo->currentTable++;
taosRLockLatch(&pTaskInfo->lock); taosRLockLatch(&pTaskInfo->lock);
numOfTables = tableListGetSize(pInfo->base.pTableListInfo); numOfTables = tableListGetSize(pInfo->base.pTableListInfo);
if (pInfo->tableEndIndex >= numOfTables) { if (pInfo->currentTable >= numOfTables) {
qDebug("all table checked in table list, total:%d, return NULL, %s", numOfTables, GET_TASKID(pTaskInfo)); qDebug("all table checked in table list, total:%d, return NULL, %s", numOfTables, GET_TASKID(pTaskInfo));
taosRUnLockLatch(&pTaskInfo->lock); taosRUnLockLatch(&pTaskInfo->lock);
return NULL; return NULL;
} }
tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->tableEndIndex); tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->currentTable);
taosRUnLockLatch(&pTaskInfo->lock); taosRUnLockLatch(&pTaskInfo->lock);
pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, &tInfo, 1); pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, &tInfo, 1);
qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d/%d %s", tInfo.uid, numOfTables, qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d/%d %s", tInfo.uid, numOfTables,
pInfo->tableEndIndex, numOfTables, GET_TASKID(pTaskInfo)); pInfo->currentTable, numOfTables, GET_TASKID(pTaskInfo));
pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond);
pInfo->scanTimes = 0; pInfo->scanTimes = 0;
@ -1168,8 +1153,9 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
goto _error; goto _error;
} }
pInfo->currentGroupId = -1;
pInfo->tableEndIndex = -1; pInfo->tableEndIndex = -1;
pInfo->currentGroupIndex = -1;
pInfo->assignBlockUid = pTableScanNode->assignBlockUid; pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
pInfo->hasGroupByTag = pTableScanNode->pGroupTags ? true : false; pInfo->hasGroupByTag = pTableScanNode->pGroupTags ? true : false;
@ -1264,6 +1250,7 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint6
pTableScanInfo->base.cond.startVersion = 0; pTableScanInfo->base.cond.startVersion = 0;
pTableScanInfo->base.cond.endVersion = ver; pTableScanInfo->base.cond.endVersion = ver;
pTableScanInfo->scanTimes = 0; pTableScanInfo->scanTimes = 0;
pTableScanInfo->currentGroupId = -1;
pTableScanInfo->tableEndIndex = -1; pTableScanInfo->tableEndIndex = -1;
pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader); pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader);
pTableScanInfo->base.dataReader = NULL; pTableScanInfo->base.dataReader = NULL;
@ -2167,7 +2154,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
pInfo->pTableScanOp->status = OP_OPENED; pInfo->pTableScanOp->status = OP_OPENED;
pTSInfo->scanTimes = 0; pTSInfo->scanTimes = 0;
pTSInfo->tableEndIndex = -1; pTSInfo->currentGroupId = -1;
} }
if (pStreamInfo->recoverStep == STREAM_RECOVER_STEP__SCAN1) { if (pStreamInfo->recoverStep == STREAM_RECOVER_STEP__SCAN1) {
View File
@ -1,6 +1,17 @@
// /*
// Created by slzhou on 22-4-20. * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
// *
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_FNLOG_H #ifndef TDENGINE_FNLOG_H
#define TDENGINE_FNLOG_H #define TDENGINE_FNLOG_H
View File
@ -952,7 +952,7 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le
} }
} }
pFunc->node.resType = (SDataType){.bytes = 64, .type = TSDB_DATA_TYPE_BINARY}; pFunc->node.resType = (SDataType){.bytes = LEASTSQUARES_BUFF_LENGTH, .type = TSDB_DATA_TYPE_BINARY};
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
View File
@ -1576,9 +1576,19 @@ int32_t leastSQRFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
param12 /= param[1][1]; param12 /= param[1][1];
char buf[512] = {0}; char buf[LEASTSQUARES_BUFF_LENGTH] = {0};
char slopBuf[64] = {0};
char interceptBuf[64] = {0};
int n = snprintf(slopBuf, 64, "%.6lf", param02);
if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) {
snprintf(slopBuf, 64, "%." DOUBLE_PRECISION_DIGITS, param02);
}
n = snprintf(interceptBuf, 64, "%.6lf", param12);
if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) {
snprintf(interceptBuf, 64, "%." DOUBLE_PRECISION_DIGITS, param12);
}
size_t len = size_t len =
snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", param02, param12); snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%s, intercept:%s}", slopBuf, interceptBuf);
varDataSetLen(buf, len); varDataSetLen(buf, len);
colDataSetVal(pCol, currentRow, buf, pResInfo->isNullRes); colDataSetVal(pCol, currentRow, buf, pResInfo->isNullRes);
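The new formatting in leastSQRFinalize first renders each coefficient with %.6lf and, if the fixed-point text exceeds LEASTSQUARES_DOUBLE_ITEM_LENGTH characters, re-renders it with the DOUBLE_PRECISION_DIGITS format so the combined result stays within the output buffer. A hedged sketch of that fallback (the limit and the fallback format string below are placeholders, not the real macro values):

#include <stdio.h>

// Illustrative limit only; the real bound comes from the LEASTSQUARES_* macros.
#define ITEM_LIMIT 24

static void formatCoefficient(char* out, size_t cap, double v) {
  int n = snprintf(out, cap, "%.6lf", v);   // preferred fixed-point form
  if (n > ITEM_LIMIT) {
    snprintf(out, cap, "%.15le", v);        // compact fallback when the value is very large
  }
}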
View File
@ -486,6 +486,7 @@ static int32_t logicProjectCopy(const SProjectLogicNode* pSrc, SProjectLogicNode
CLONE_NODE_LIST_FIELD(pProjections); CLONE_NODE_LIST_FIELD(pProjections);
COPY_CHAR_ARRAY_FIELD(stmtName); COPY_CHAR_ARRAY_FIELD(stmtName);
COPY_SCALAR_FIELD(ignoreGroupId); COPY_SCALAR_FIELD(ignoreGroupId);
COPY_SCALAR_FIELD(inputIgnoreGroup);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
@ -728,6 +729,15 @@ static int32_t physiPartitionCopy(const SPartitionPhysiNode* pSrc, SPartitionPhy
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
static int32_t physiProjectCopy(const SProjectPhysiNode* pSrc, SProjectPhysiNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, physiNodeCopy);
CLONE_NODE_LIST_FIELD(pProjections);
COPY_SCALAR_FIELD(mergeDataBlock);
COPY_SCALAR_FIELD(ignoreGroupId);
COPY_SCALAR_FIELD(inputIgnoreGroup);
return TSDB_CODE_SUCCESS;
}
static int32_t dataBlockDescCopy(const SDataBlockDescNode* pSrc, SDataBlockDescNode* pDst) { static int32_t dataBlockDescCopy(const SDataBlockDescNode* pSrc, SDataBlockDescNode* pDst) {
COPY_SCALAR_FIELD(dataBlockId); COPY_SCALAR_FIELD(dataBlockId);
CLONE_NODE_LIST_FIELD(pSlots); CLONE_NODE_LIST_FIELD(pSlots);
@ -959,6 +969,9 @@ SNode* nodesCloneNode(const SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
code = physiPartitionCopy((const SPartitionPhysiNode*)pNode, (SPartitionPhysiNode*)pDst); code = physiPartitionCopy((const SPartitionPhysiNode*)pNode, (SPartitionPhysiNode*)pDst);
break; break;
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
code = physiProjectCopy((const SProjectPhysiNode*)pNode, (SProjectPhysiNode*)pDst);
break;
default: default:
break; break;
} }
View File
@ -784,6 +784,7 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) {
static const char* jkProjectLogicPlanProjections = "Projections"; static const char* jkProjectLogicPlanProjections = "Projections";
static const char* jkProjectLogicPlanIgnoreGroupId = "IgnoreGroupId"; static const char* jkProjectLogicPlanIgnoreGroupId = "IgnoreGroupId";
static const char* jkProjectLogicPlanInputIgnoreGroup= "InputIgnoreGroup";
static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) { static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) {
const SProjectLogicNode* pNode = (const SProjectLogicNode*)pObj; const SProjectLogicNode* pNode = (const SProjectLogicNode*)pObj;
@ -795,6 +796,9 @@ static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectLogicPlanIgnoreGroupId, pNode->ignoreGroupId); code = tjsonAddBoolToObject(pJson, jkProjectLogicPlanIgnoreGroupId, pNode->ignoreGroupId);
} }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectLogicPlanInputIgnoreGroup, pNode->inputIgnoreGroup);
}
return code; return code;
} }
@ -809,7 +813,9 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectLogicPlanIgnoreGroupId, &pNode->ignoreGroupId); code = tjsonGetBoolValue(pJson, jkProjectLogicPlanIgnoreGroupId, &pNode->ignoreGroupId);
} }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectLogicPlanInputIgnoreGroup, &pNode->inputIgnoreGroup);
}
return code; return code;
} }
@ -2067,6 +2073,7 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) {
static const char* jkProjectPhysiPlanProjections = "Projections"; static const char* jkProjectPhysiPlanProjections = "Projections";
static const char* jkProjectPhysiPlanMergeDataBlock = "MergeDataBlock"; static const char* jkProjectPhysiPlanMergeDataBlock = "MergeDataBlock";
static const char* jkProjectPhysiPlanIgnoreGroupId = "IgnoreGroupId"; static const char* jkProjectPhysiPlanIgnoreGroupId = "IgnoreGroupId";
static const char* jkProjectPhysiPlanInputIgnoreGroup = "InputIgnoreGroup";
static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) { static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj; const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj;
@ -2081,7 +2088,9 @@ static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanIgnoreGroupId, pNode->ignoreGroupId); code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanIgnoreGroupId, pNode->ignoreGroupId);
} }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanInputIgnoreGroup, pNode->inputIgnoreGroup);
}
return code; return code;
} }
@ -2098,7 +2107,9 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanIgnoreGroupId, &pNode->ignoreGroupId); code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanIgnoreGroupId, &pNode->ignoreGroupId);
} }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanInputIgnoreGroup, &pNode->inputIgnoreGroup);
}
return code; return code;
} }
View File
@ -2367,7 +2367,8 @@ enum {
PHY_PROJECT_CODE_BASE_NODE = 1, PHY_PROJECT_CODE_BASE_NODE = 1,
PHY_PROJECT_CODE_PROJECTIONS, PHY_PROJECT_CODE_PROJECTIONS,
PHY_PROJECT_CODE_MERGE_DATA_BLOCK, PHY_PROJECT_CODE_MERGE_DATA_BLOCK,
PHY_PROJECT_CODE_IGNORE_GROUP_ID PHY_PROJECT_CODE_IGNORE_GROUP_ID,
PHY_PROJECT_CODE_INPUT_IGNORE_GROUP
}; };
static int32_t physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { static int32_t physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
@ -2383,6 +2384,9 @@ static int32_t physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_IGNORE_GROUP_ID, pNode->ignoreGroupId); code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_IGNORE_GROUP_ID, pNode->ignoreGroupId);
} }
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_INPUT_IGNORE_GROUP, pNode->inputIgnoreGroup);
}
return code; return code;
} }
@ -2406,6 +2410,9 @@ static int32_t msgToPhysiProjectNode(STlvDecoder* pDecoder, void* pObj) {
case PHY_PROJECT_CODE_IGNORE_GROUP_ID: case PHY_PROJECT_CODE_IGNORE_GROUP_ID:
code = tlvDecodeBool(pTlv, &pNode->ignoreGroupId); code = tlvDecodeBool(pTlv, &pNode->ignoreGroupId);
break; break;
case PHY_PROJECT_CODE_INPUT_IGNORE_GROUP:
code = tlvDecodeBool(pTlv, &pNode->inputIgnoreGroup);
break;
default: default:
break; break;
} }
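Adding inputIgnoreGroup to the serialized project node follows the usual TLV compatibility rule: the new code is appended to the enum so existing tags keep their numeric values, the encoder writes it as one more bool TLV, and a decoder without that case falls into the default branch shown above and skips it. The appended enum, for reference (taken directly from this hunk):

enum {
  PHY_PROJECT_CODE_BASE_NODE = 1,
  PHY_PROJECT_CODE_PROJECTIONS,
  PHY_PROJECT_CODE_MERGE_DATA_BLOCK,
  PHY_PROJECT_CODE_IGNORE_GROUP_ID,
  PHY_PROJECT_CODE_INPUT_IGNORE_GROUP   // new field code goes last
};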
View File
@ -1085,21 +1085,31 @@ static EDealRes translateColumnWithoutPrefix(STranslateContext* pCxt, SColumnNod
static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** pCol, bool* pFound) { static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** pCol, bool* pFound) {
SNodeList* pProjectionList = getProjectListFromCurrStmt(pCxt->pCurrStmt); SNodeList* pProjectionList = getProjectListFromCurrStmt(pCxt->pCurrStmt);
SNode* pNode; SNode* pNode;
SNode* pFoundNode = NULL;
*pFound = false;
FOREACH(pNode, pProjectionList) { FOREACH(pNode, pProjectionList) {
SExprNode* pExpr = (SExprNode*)pNode; SExprNode* pExpr = (SExprNode*)pNode;
if (0 == strcmp((*pCol)->colName, pExpr->userAlias)) { if (0 == strcmp((*pCol)->colName, pExpr->userAlias)) {
SNode* pNew = nodesCloneNode(pNode); if (true == *pFound) {
if (NULL == pNew) { if(nodesEqualNode(pFoundNode, pNode)) {
pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; continue;
return DEAL_RES_ERROR;
} }
nodesDestroyNode(*(SNode**)pCol); pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ORDERBY_AMBIGUOUS, (*pCol)->colName);
*(SNode**)pCol = (SNode*)pNew; return DEAL_RES_ERROR;
*pFound = true; }
return DEAL_RES_CONTINUE; *pFound = true;
pFoundNode = pNode;
} }
} }
*pFound = false; if (*pFound) {
SNode* pNew = nodesCloneNode(pFoundNode);
if (NULL == pNew) {
pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;
return DEAL_RES_ERROR;
}
nodesDestroyNode(*(SNode**)pCol);
*(SNode**)pCol = (SNode*)pNew;
}
return DEAL_RES_CONTINUE; return DEAL_RES_CONTINUE;
} }
@ -2743,6 +2753,14 @@ static EDealRes doCheckAggColCoexist(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE; return DEAL_RES_CONTINUE;
} }
static int32_t checkIsEmptyResult(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (pSelect->timeRange.skey > pSelect->timeRange.ekey
&& !pSelect->hasCountFunc) {
pSelect->isEmptyResult = true;
}
return TSDB_CODE_SUCCESS;
}
static int32_t resetSelectFuncNumWithoutDup(SSelectStmt* pSelect) { static int32_t resetSelectFuncNumWithoutDup(SSelectStmt* pSelect) {
if (pSelect->selectFuncNum <= 1) return TSDB_CODE_SUCCESS; if (pSelect->selectFuncNum <= 1) return TSDB_CODE_SUCCESS;
pSelect->selectFuncNum = 0; pSelect->selectFuncNum = 0;
@ -4416,9 +4434,6 @@ static int32_t translateWhere(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = getQueryTimeRange(pCxt, pSelect->pWhere, &pSelect->timeRange); code = getQueryTimeRange(pCxt, pSelect->pWhere, &pSelect->timeRange);
} }
if (TSDB_CODE_SUCCESS == code && pSelect->timeRange.skey > pSelect->timeRange.ekey) {
pSelect->isEmptyResult = true;
}
if (pSelect->pWhere != NULL) { if (pSelect->pWhere != NULL) {
setTableVgroupsFromEqualTbnameCond(pCxt, pSelect); setTableVgroupsFromEqualTbnameCond(pCxt, pSelect);
} }
@ -4494,13 +4509,12 @@ typedef struct SReplaceOrderByAliasCxt {
static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) { static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) {
SReplaceOrderByAliasCxt* pCxt = pContext; SReplaceOrderByAliasCxt* pCxt = pContext;
SNodeList* pProjectionList = pCxt->pProjectionList; SNodeList* pProjectionList = pCxt->pProjectionList;
SNode* pProject = NULL; SNode* pProject = NULL;
if (QUERY_NODE_COLUMN == nodeType(*pNode)) { if (QUERY_NODE_COLUMN == nodeType(*pNode)) {
FOREACH(pProject, pProjectionList) { FOREACH(pProject, pProjectionList) {
SExprNode* pExpr = (SExprNode*)pProject; SExprNode* pExpr = (SExprNode*)pProject;
if (0 == strcmp(((SColumnRefNode*)*pNode)->colName, pExpr->userAlias) if (0 == strcmp(((SColumnRefNode*)*pNode)->colName, pExpr->userAlias) && nodeType(*pNode) == nodeType(pProject)) {
&& nodeType(*pNode) == nodeType(pProject)) {
SNode* pNew = nodesCloneNode(pProject); SNode* pNew = nodesCloneNode(pProject);
if (NULL == pNew) { if (NULL == pNew) {
pCxt->pTranslateCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; pCxt->pTranslateCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;
@ -4514,14 +4528,14 @@ static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) {
} }
} else if (QUERY_NODE_ORDER_BY_EXPR == nodeType(*pNode)) { } else if (QUERY_NODE_ORDER_BY_EXPR == nodeType(*pNode)) {
STranslateContext* pTransCxt = pCxt->pTranslateCxt; STranslateContext* pTransCxt = pCxt->pTranslateCxt;
SNode* pExpr = ((SOrderByExprNode*)*pNode)->pExpr; SNode* pExpr = ((SOrderByExprNode*)*pNode)->pExpr;
if (QUERY_NODE_VALUE == nodeType(pExpr)) { if (QUERY_NODE_VALUE == nodeType(pExpr)) {
SValueNode* pVal = (SValueNode*)pExpr; SValueNode* pVal = (SValueNode*)pExpr;
if (DEAL_RES_ERROR == translateValue(pTransCxt, pVal)) { if (DEAL_RES_ERROR == translateValue(pTransCxt, pVal)) {
return pTransCxt->errCode; return pTransCxt->errCode;
} }
int32_t pos = getPositionValue(pVal); int32_t pos = getPositionValue(pVal);
if ( 0 < pos && pos <= LIST_LENGTH(pProjectionList)) { if (0 < pos && pos <= LIST_LENGTH(pProjectionList)) {
SNode* pNew = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1)); SNode* pNew = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1));
if (NULL == pNew) { if (NULL == pNew) {
pCxt->pTranslateCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; pCxt->pTranslateCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;
@ -4606,6 +4620,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
code = translateOrderBy(pCxt, pSelect); code = translateOrderBy(pCxt, pSelect);
} }
if (TSDB_CODE_SUCCESS == code) {
code = checkIsEmptyResult(pCxt, pSelect);
}
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
resetSelectFuncNumWithoutDup(pSelect); resetSelectFuncNumWithoutDup(pSelect);
code = checkAggColCoexist(pCxt, pSelect); code = checkAggColCoexist(pCxt, pSelect);
View File
@ -190,6 +190,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "invalid ip range"; return "invalid ip range";
case TSDB_CODE_OUT_OF_MEMORY: case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory"; return "Out of memory";
case TSDB_CODE_PAR_ORDERBY_AMBIGUOUS:
return "ORDER BY \"%s\" is ambiguous";
default: default:
return "Unknown error"; return "Unknown error";
} }
View File
@ -3122,6 +3122,7 @@ static bool mergeProjectsMayBeOptimized(SLogicNode* pNode) {
NULL != pChild->pConditions || NULL != pChild->pLimit || NULL != pChild->pSlimit) { NULL != pChild->pConditions || NULL != pChild->pLimit || NULL != pChild->pSlimit) {
return false; return false;
} }
return true; return true;
} }
@ -3160,7 +3161,9 @@ static EDealRes mergeProjectionsExpr(SNode** pNode, void* pContext) {
static int32_t mergeProjectsOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SLogicNode* pSelfNode) { static int32_t mergeProjectsOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SLogicNode* pSelfNode) {
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSelfNode->pChildren, 0); SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSelfNode->pChildren, 0);
if (((SProjectLogicNode*)pChild)->ignoreGroupId) {
((SProjectLogicNode*)pSelfNode)->inputIgnoreGroup = true;
}
SMergeProjectionsContext cxt = {.pChildProj = (SProjectLogicNode*)pChild, .errCode = TSDB_CODE_SUCCESS}; SMergeProjectionsContext cxt = {.pChildProj = (SProjectLogicNode*)pChild, .errCode = TSDB_CODE_SUCCESS};
nodesRewriteExprs(((SProjectLogicNode*)pSelfNode)->pProjections, mergeProjectionsExpr, &cxt); nodesRewriteExprs(((SProjectLogicNode*)pSelfNode)->pProjections, mergeProjectionsExpr, &cxt);
int32_t code = cxt.errCode; int32_t code = cxt.errCode;
@ -3325,7 +3328,11 @@ static bool pushDownLimitTo(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPu
} }
case QUERY_NODE_LOGIC_PLAN_SCAN: case QUERY_NODE_LOGIC_PLAN_SCAN:
if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) { if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) {
swapLimit(pNodeWithLimit, pNodeLimitPushTo); if (((SProjectLogicNode*)pNodeWithLimit)->inputIgnoreGroup) {
cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT);
} else {
swapLimit(pNodeWithLimit, pNodeLimitPushTo);
}
return true; return true;
} }
default: default:
View File
@ -1459,6 +1459,7 @@ static int32_t createProjectPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChild
pProject->mergeDataBlock = projectCanMergeDataBlock(pProjectLogicNode); pProject->mergeDataBlock = projectCanMergeDataBlock(pProjectLogicNode);
pProject->ignoreGroupId = pProjectLogicNode->ignoreGroupId; pProject->ignoreGroupId = pProjectLogicNode->ignoreGroupId;
pProject->inputIgnoreGroup = pProjectLogicNode->inputIgnoreGroup;
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
if (0 == LIST_LENGTH(pChildren)) { if (0 == LIST_LENGTH(pChildren)) {
View File
@ -1164,8 +1164,10 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
static int32_t stbSplGetSplitNodeForScan(SStableSplitInfo* pInfo, SLogicNode** pSplitNode) { static int32_t stbSplGetSplitNodeForScan(SStableSplitInfo* pInfo, SLogicNode** pSplitNode) {
*pSplitNode = pInfo->pSplitNode; *pSplitNode = pInfo->pSplitNode;
if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) && if (NULL != pInfo->pSplitNode->pParent &&
NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) { QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit &&
!((SProjectLogicNode*)pInfo->pSplitNode->pParent)->inputIgnoreGroup) {
*pSplitNode = pInfo->pSplitNode->pParent; *pSplitNode = pInfo->pSplitNode->pParent;
if (NULL != pInfo->pSplitNode->pLimit) { if (NULL != pInfo->pSplitNode->pLimit) {
(*pSplitNode)->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit); (*pSplitNode)->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit);
View File
@ -2427,9 +2427,19 @@ int32_t leastSQRScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarPa
matrix12 /= matrix[1][1]; matrix12 /= matrix[1][1];
char buf[64] = {0}; char buf[LEASTSQUARES_BUFF_LENGTH] = {0};
size_t len = snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%.6lf, intercept:%.6lf}", matrix02, char slopBuf[64] = {0};
matrix12); char interceptBuf[64] = {0};
int n = snprintf(slopBuf, 64, "%.6lf", matrix02);
if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) {
snprintf(slopBuf, 64, "%." DOUBLE_PRECISION_DIGITS, matrix02);
}
n = snprintf(interceptBuf, 64, "%.6lf", matrix12);
if (n > LEASTSQUARES_DOUBLE_ITEM_LENGTH) {
snprintf(interceptBuf, 64, "%." DOUBLE_PRECISION_DIGITS, matrix12);
}
size_t len =
snprintf(varDataVal(buf), sizeof(buf) - VARSTR_HEADER_SIZE, "{slop:%s, intercept:%s}", slopBuf, interceptBuf);
varDataSetLen(buf, len); varDataSetLen(buf, len);
colDataSetVal(pOutputData, 0, buf, false); colDataSetVal(pOutputData, 0, buf, false);
} }
View File
@ -52,10 +52,10 @@ typedef enum {
SCH_ALL, SCH_ALL,
} SCH_POLICY; } SCH_POLICY;
#define SCHEDULE_DEFAULT_MAX_JOB_NUM 1000 #define SCHEDULE_DEFAULT_MAX_JOB_NUM 1000
#define SCHEDULE_DEFAULT_MAX_TASK_NUM 1000 #define SCHEDULE_DEFAULT_MAX_TASK_NUM 1000
#define SCHEDULE_DEFAULT_POLICY SCH_LOAD_SEQ #define SCHEDULE_DEFAULT_POLICY SCH_LOAD_SEQ
#define SCHEDULE_DEFAULT_MAX_NODE_NUM 20 #define SCHEDULE_DEFAULT_MAX_NODE_NUM 20
#define SCH_DEFAULT_TASK_TIMEOUT_USEC 5000000 #define SCH_DEFAULT_TASK_TIMEOUT_USEC 5000000
#define SCH_MAX_TASK_TIMEOUT_USEC 300000000 #define SCH_MAX_TASK_TIMEOUT_USEC 300000000
@ -68,8 +68,9 @@ typedef struct SSchDebug {
} SSchDebug; } SSchDebug;
typedef struct SSchTrans { typedef struct SSchTrans {
void *pTrans; void *pTrans;
void *pHandle; void *pHandle;
int64_t pHandleId;
} SSchTrans; } SSchTrans;
typedef struct SSchHbTrans { typedef struct SSchHbTrans {
@ -299,7 +300,7 @@ typedef struct SSchJob {
void *fetchRes; // TODO free it or not void *fetchRes; // TODO free it or not
bool fetched; bool fetched;
bool noMoreRetry; bool noMoreRetry;
int64_t resNumOfRows; // from int32_t to int64_t int64_t resNumOfRows; // from int32_t to int64_t
SSchResInfo userRes; SSchResInfo userRes;
char *sql; char *sql;
SQueryProfileSummary summary; SQueryProfileSummary summary;
@ -334,14 +335,15 @@ extern SSchedulerMgmt schMgmt;
(!SCH_IS_DATA_BIND_QRY_TASK(_task))) (!SCH_IS_DATA_BIND_QRY_TASK(_task)))
#define SCH_UPDATE_REDIRECT_CODE(job, _code) atomic_val_compare_exchange_32(&((job)->redirectCode), 0, _code) #define SCH_UPDATE_REDIRECT_CODE(job, _code) atomic_val_compare_exchange_32(&((job)->redirectCode), 0, _code)
#define SCH_GET_REDIRECT_CODE(job, _code) (((!NO_RET_REDIRECT_ERROR(_code)) || (job)->redirectCode == 0) ? (_code) : (job)->redirectCode) #define SCH_GET_REDIRECT_CODE(job, _code) \
(((!NO_RET_REDIRECT_ERROR(_code)) || (job)->redirectCode == 0) ? (_code) : (job)->redirectCode)
#define SCH_SET_TASK_STATUS(task, st) atomic_store_8(&(task)->status, st) #define SCH_SET_TASK_STATUS(task, st) atomic_store_8(&(task)->status, st)
#define SCH_GET_TASK_STATUS(task) atomic_load_8(&(task)->status) #define SCH_GET_TASK_STATUS(task) atomic_load_8(&(task)->status)
#define SCH_GET_TASK_STATUS_STR(task) jobTaskStatusStr(SCH_GET_TASK_STATUS(task)) #define SCH_GET_TASK_STATUS_STR(task) jobTaskStatusStr(SCH_GET_TASK_STATUS(task))
#define SCH_TASK_ALREADY_LAUNCHED(task) (SCH_GET_TASK_STATUS(task) >= JOB_TASK_STATUS_EXEC) #define SCH_TASK_ALREADY_LAUNCHED(task) (SCH_GET_TASK_STATUS(task) >= JOB_TASK_STATUS_EXEC)
#define SCH_TASK_EXEC_DONE(task) (SCH_GET_TASK_STATUS(task) >= JOB_TASK_STATUS_PART_SUCC) #define SCH_TASK_EXEC_DONE(task) (SCH_GET_TASK_STATUS(task) >= JOB_TASK_STATUS_PART_SUCC)
#define SCH_GET_TASK_HANDLE(_task) ((_task) ? (_task)->handle : NULL) #define SCH_GET_TASK_HANDLE(_task) ((_task) ? (_task)->handle : NULL)
#define SCH_SET_TASK_HANDLE(_task, _handle) ((_task)->handle = (_handle)) #define SCH_SET_TASK_HANDLE(_task, _handle) ((_task)->handle = (_handle))
@ -362,8 +364,8 @@ extern SSchedulerMgmt schMgmt;
#define SCH_JOB_NEED_FLOW_CTRL(_job) ((_job)->attr.needFlowCtrl) #define SCH_JOB_NEED_FLOW_CTRL(_job) ((_job)->attr.needFlowCtrl)
#define SCH_TASK_NEED_FLOW_CTRL(_job, _task) \ #define SCH_TASK_NEED_FLOW_CTRL(_job, _task) \
(SCH_IS_DATA_BIND_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEVEL_UNFINISHED((_task)->level)) (SCH_IS_DATA_BIND_QRY_TASK(_task) && SCH_JOB_NEED_FLOW_CTRL(_job) && SCH_IS_LEVEL_UNFINISHED((_task)->level))
#define SCH_FETCH_TYPE(_pSrcTask) (SCH_IS_DATA_BIND_QRY_TASK(_pSrcTask) ? TDMT_SCH_FETCH : TDMT_SCH_MERGE_FETCH) #define SCH_FETCH_TYPE(_pSrcTask) (SCH_IS_DATA_BIND_QRY_TASK(_pSrcTask) ? TDMT_SCH_FETCH : TDMT_SCH_MERGE_FETCH)
#define SCH_TASK_NEED_FETCH(_task) ((_task)->plan->subplanType != SUBPLAN_TYPE_MODIFY) #define SCH_TASK_NEED_FETCH(_task) ((_task)->plan->subplanType != SUBPLAN_TYPE_MODIFY)
#define SCH_MULTI_LEVEL_LAUNCHED(_job) ((_job)->levelIdx != ((_job)->levelNum - 1)) #define SCH_MULTI_LEVEL_LAUNCHED(_job) ((_job)->levelIdx != ((_job)->levelNum - 1))
#define SCH_SET_JOB_TYPE(_job, type) \ #define SCH_SET_JOB_TYPE(_job, type) \
@ -380,25 +382,26 @@ extern SSchedulerMgmt schMgmt;
#define SCH_JOB_NEED_WAIT(_job) (!SCH_IS_QUERY_JOB(_job)) #define SCH_JOB_NEED_WAIT(_job) (!SCH_IS_QUERY_JOB(_job))
#define SCH_JOB_NEED_DROP(_job) (SCH_IS_QUERY_JOB(_job)) #define SCH_JOB_NEED_DROP(_job) (SCH_IS_QUERY_JOB(_job))
#define SCH_IS_EXPLAIN_JOB(_job) (EXPLAIN_MODE_ANALYZE == (_job)->attr.explainMode) #define SCH_IS_EXPLAIN_JOB(_job) (EXPLAIN_MODE_ANALYZE == (_job)->attr.explainMode)
#define SCH_NETWORK_ERR(_code) ((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || (_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED) #define SCH_NETWORK_ERR(_code) \
((_code) == TSDB_CODE_RPC_BROKEN_LINK || (_code) == TSDB_CODE_RPC_NETWORK_UNAVAIL || \
(_code) == TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED)
#define SCH_REDIRECT_MSGTYPE(_msgType) \ #define SCH_REDIRECT_MSGTYPE(_msgType) \
((_msgType) == TDMT_SCH_LINK_BROKEN || (_msgType) == TDMT_SCH_QUERY || (_msgType) == TDMT_SCH_MERGE_QUERY || \ ((_msgType) == TDMT_SCH_LINK_BROKEN || (_msgType) == TDMT_SCH_QUERY || (_msgType) == TDMT_SCH_MERGE_QUERY || \
(_msgType) == TDMT_SCH_FETCH || (_msgType) == TDMT_SCH_MERGE_FETCH) (_msgType) == TDMT_SCH_FETCH || (_msgType) == TDMT_SCH_MERGE_FETCH)
#define SCH_LOW_LEVEL_NETWORK_ERR(_job, _task, _code) \ #define SCH_LOW_LEVEL_NETWORK_ERR(_job, _task, _code) \
(SCH_NETWORK_ERR(_code) && ((_task)->level->level == (_job)->levelIdx)) (SCH_NETWORK_ERR(_code) && ((_task)->level->level == (_job)->levelIdx))
#define SCH_TOP_LEVEL_NETWORK_ERR(_job, _task, _code) \ #define SCH_TOP_LEVEL_NETWORK_ERR(_job, _task, _code) \
(SCH_NETWORK_ERR(_code) && ((_task)->level->level > (_job)->levelIdx)) (SCH_NETWORK_ERR(_code) && ((_task)->level->level > (_job)->levelIdx))
#define SCH_TASK_RETRY_NETWORK_ERR(_task, _code) \ #define SCH_TASK_RETRY_NETWORK_ERR(_task, _code) (SCH_NETWORK_ERR(_code) && (_task)->redirectCtx.inRedirect)
(SCH_NETWORK_ERR(_code) && (_task)->redirectCtx.inRedirect)
#define SCH_JOB_NEED_RETRY(_job, _task, _msgType, _code) \ #define SCH_JOB_NEED_RETRY(_job, _task, _msgType, _code) \
(SCH_REDIRECT_MSGTYPE(_msgType) && SCH_TOP_LEVEL_NETWORK_ERR(_job, _task, _code)) (SCH_REDIRECT_MSGTYPE(_msgType) && SCH_TOP_LEVEL_NETWORK_ERR(_job, _task, _code))
#define SCH_TASKSET_NEED_RETRY(_job, _task, _msgType, _code) \ #define SCH_TASKSET_NEED_RETRY(_job, _task, _msgType, _code) \
(SCH_REDIRECT_MSGTYPE(_msgType) && \ (SCH_REDIRECT_MSGTYPE(_msgType) && \
(NEED_SCHEDULER_REDIRECT_ERROR(_code) || SCH_LOW_LEVEL_NETWORK_ERR((_job), (_task), (_code)) || SCH_TASK_RETRY_NETWORK_ERR((_task), (_code)))) (NEED_SCHEDULER_REDIRECT_ERROR(_code) || SCH_LOW_LEVEL_NETWORK_ERR((_job), (_task), (_code)) || \
SCH_TASK_RETRY_NETWORK_ERR((_task), (_code))))
#define SCH_TASK_NEED_RETRY(_msgType, _code) \ #define SCH_TASK_NEED_RETRY(_msgType, _code) \
((SCH_REDIRECT_MSGTYPE(_msgType) && SCH_NETWORK_ERR(_code)) || (_code) == TSDB_CODE_SCH_TIMEOUT_ERROR) ((SCH_REDIRECT_MSGTYPE(_msgType) && SCH_NETWORK_ERR(_code)) || (_code) == TSDB_CODE_SCH_TIMEOUT_ERROR)
#define SCH_IS_LEVEL_UNFINISHED(_level) ((_level)->taskLaunchedNum < (_level)->taskNum) #define SCH_IS_LEVEL_UNFINISHED(_level) ((_level)->taskLaunchedNum < (_level)->taskNum)
#define SCH_GET_CUR_EP(_addr) (&(_addr)->epSet.eps[(_addr)->epSet.inUse]) #define SCH_GET_CUR_EP(_addr) (&(_addr)->epSet.eps[(_addr)->epSet.inUse])
@ -488,50 +491,51 @@ extern SSchedulerMgmt schMgmt;
#define TD_RWLATCH_WRITE_FLAG_COPY 0x40000000 #define TD_RWLATCH_WRITE_FLAG_COPY 0x40000000
#define SCH_LOCK(type, _lock) \ #define SCH_LOCK(type, _lock) \
do { \ do { \
if (SCH_READ == (type)) { \ if (SCH_READ == (type)) { \
ASSERTS(atomic_load_32(_lock) >= 0, "invalid lock value before read lock"); \ ASSERTS(atomic_load_32(_lock) >= 0, "invalid lock value before read lock"); \
SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
taosRLockLatch(_lock); \ taosRLockLatch(_lock); \
SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ SCH_LOCK_DEBUG("SCH RLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
ASSERTS(atomic_load_32(_lock) > 0, "invalid lock value after read lock"); \ ASSERTS(atomic_load_32(_lock) > 0, "invalid lock value after read lock"); \
} else { \ } else { \
ASSERTS(atomic_load_32(_lock) >= 0, "invalid lock value before write lock"); \ ASSERTS(atomic_load_32(_lock) >= 0, "invalid lock value before write lock"); \
SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
taosWLockLatch(_lock); \ taosWLockLatch(_lock); \
SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ SCH_LOCK_DEBUG("SCH WLOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
ASSERTS(atomic_load_32(_lock) == TD_RWLATCH_WRITE_FLAG_COPY, "invalid lock value after write lock"); \ ASSERTS(atomic_load_32(_lock) == TD_RWLATCH_WRITE_FLAG_COPY, "invalid lock value after write lock"); \
} \ } \
} while (0) } while (0)
#define SCH_UNLOCK(type, _lock) \ #define SCH_UNLOCK(type, _lock) \
do { \ do { \
if (SCH_READ == (type)) { \ if (SCH_READ == (type)) { \
ASSERTS(atomic_load_32((_lock)) > 0, "invalid lock value before read unlock"); \ ASSERTS(atomic_load_32((_lock)) > 0, "invalid lock value before read unlock"); \
SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
taosRUnLockLatch(_lock); \ taosRUnLockLatch(_lock); \
SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ SCH_LOCK_DEBUG("SCH RULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
ASSERTS(atomic_load_32((_lock)) >= 0, "invalid lock value after read unlock"); \ ASSERTS(atomic_load_32((_lock)) >= 0, "invalid lock value after read unlock"); \
} else { \ } else { \
ASSERTS(atomic_load_32((_lock)) & TD_RWLATCH_WRITE_FLAG_COPY, "invalid lock value before write unlock"); \ ASSERTS(atomic_load_32((_lock)) & TD_RWLATCH_WRITE_FLAG_COPY, "invalid lock value before write unlock"); \
SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d B", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
taosWUnLockLatch(_lock); \ taosWUnLockLatch(_lock); \
SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \ SCH_LOCK_DEBUG("SCH WULOCK%p:%d, %s:%d E", (_lock), atomic_load_32(_lock), __FILE__, __LINE__); \
ASSERTS(atomic_load_32((_lock)) >= 0, "invalid lock value after write unlock"); \ ASSERTS(atomic_load_32((_lock)) >= 0, "invalid lock value after write unlock"); \
} \ } \
} while (0) } while (0)
#define SCH_RESET_JOB_LEVEL_IDX(_job) do { \ #define SCH_RESET_JOB_LEVEL_IDX(_job) \
(_job)->levelIdx = (_job)->levelNum - 1; \ do { \
SCH_JOB_DLOG("set job levelIdx to %d", (_job)->levelIdx); \ (_job)->levelIdx = (_job)->levelNum - 1; \
} while (0) SCH_JOB_DLOG("set job levelIdx to %d", (_job)->levelIdx); \
} while (0)
void schDeregisterTaskHb(SSchJob *pJob, SSchTask *pTask); void schDeregisterTaskHb(SSchJob *pJob, SSchTask *pTask);
void schCleanClusterHb(void *pTrans); void schCleanClusterHb(void *pTrans);
int32_t schLaunchTask(SSchJob *job, SSchTask *task); int32_t schLaunchTask(SSchJob *job, SSchTask *task);
int32_t schDelayLaunchTask(SSchJob *pJob, SSchTask *pTask); int32_t schDelayLaunchTask(SSchJob *pJob, SSchTask *pTask);
int32_t schBuildAndSendMsg(SSchJob *job, SSchTask *task, SQueryNodeAddr *addr, int32_t msgType, void* param); int32_t schBuildAndSendMsg(SSchJob *job, SSchTask *task, SQueryNodeAddr *addr, int32_t msgType, void *param);
SSchJob *schAcquireJob(int64_t refId); SSchJob *schAcquireJob(int64_t refId);
int32_t schReleaseJob(int64_t refId); int32_t schReleaseJob(int64_t refId);
void schFreeFlowCtrl(SSchJob *pJob); void schFreeFlowCtrl(SSchJob *pJob);
View File
@ -505,7 +505,7 @@ int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code) {
taosMemoryFree(pMsg->pEpSet); taosMemoryFree(pMsg->pEpSet);
SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param; SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param;
SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL}; SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL, .pHandleId = 0};
SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans)); SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans));
SCH_ERR_RET(schBuildAndSendHbMsg(&hbParam->nodeEpId, NULL)); SCH_ERR_RET(schBuildAndSendHbMsg(&hbParam->nodeEpId, NULL));
@ -537,6 +537,7 @@ int32_t schHandleHbCallback(void *param, SDataBuf *pMsg, int32_t code) {
SSchTrans trans = {0}; SSchTrans trans = {0};
trans.pTrans = pParam->pTrans; trans.pTrans = pParam->pTrans;
trans.pHandle = pMsg->handle; trans.pHandle = pMsg->handle;
trans.pHandleId = pMsg->handleRefId;
SCH_ERR_JRET(schUpdateHbConnection(&rsp.epId, &trans)); SCH_ERR_JRET(schUpdateHbConnection(&rsp.epId, &trans));
SCH_ERR_JRET(schProcessOnTaskStatusRsp(&rsp.epId, rsp.taskStatus)); SCH_ERR_JRET(schProcessOnTaskStatusRsp(&rsp.epId, rsp.taskStatus));
View File
@ -42,7 +42,7 @@ char *schDumpEpSet(SEpSet *pEpSet) {
} }
int32_t maxSize = 1024; int32_t maxSize = 1024;
char *str = taosMemoryMalloc(maxSize); char *str = taosMemoryMalloc(maxSize);
if (NULL == str) { if (NULL == str) {
return NULL; return NULL;
} }
@ -73,7 +73,7 @@ char *schGetOpStr(SCH_OP_TYPE type) {
} }
void schFreeHbTrans(SSchHbTrans *pTrans) { void schFreeHbTrans(SSchHbTrans *pTrans) {
rpcReleaseHandle(pTrans->trans.pHandle, TAOS_CONN_CLIENT); rpcReleaseHandle((void *)pTrans->trans.pHandleId, TAOS_CONN_CLIENT);
schFreeRpcCtx(&pTrans->rpcCtx); schFreeRpcCtx(&pTrans->rpcCtx);
} }
@ -234,7 +234,8 @@ int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans) {
hb = taosHashGet(schMgmt.hbConnections, epId, sizeof(SQueryNodeEpId)); hb = taosHashGet(schMgmt.hbConnections, epId, sizeof(SQueryNodeEpId));
if (NULL == hb) { if (NULL == hb) {
SCH_UNLOCK(SCH_READ, &schMgmt.hbLock); SCH_UNLOCK(SCH_READ, &schMgmt.hbLock);
qInfo("taosHashGet hb connection not exists, nodeId:%d, fqdn:%s, port:%d", epId->nodeId, epId->ep.fqdn, epId->ep.port); qInfo("taosHashGet hb connection not exists, nodeId:%d, fqdn:%s, port:%d", epId->nodeId, epId->ep.fqdn,
epId->ep.port);
SCH_ERR_RET(TSDB_CODE_APP_ERROR); SCH_ERR_RET(TSDB_CODE_APP_ERROR);
} }
View File
@ -48,8 +48,8 @@ extern "C" {
#define stError(...) do { if (stDebugFlag & DEBUG_ERROR) { taosPrintLog("STM ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0) #define stError(...) do { if (stDebugFlag & DEBUG_ERROR) { taosPrintLog("STM ERROR ", DEBUG_ERROR, 255, __VA_ARGS__); }} while(0)
#define stWarn(...) do { if (stDebugFlag & DEBUG_WARN) { taosPrintLog("STM WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0) #define stWarn(...) do { if (stDebugFlag & DEBUG_WARN) { taosPrintLog("STM WARN ", DEBUG_WARN, 255, __VA_ARGS__); }} while(0)
#define stInfo(...) do { if (stDebugFlag & DEBUG_INFO) { taosPrintLog("STM ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0) #define stInfo(...) do { if (stDebugFlag & DEBUG_INFO) { taosPrintLog("STM ", DEBUG_INFO, 255, __VA_ARGS__); }} while(0)
#define stDebug(...) do { if (stDebugFlag & DEBUG_DEBUG) { taosPrintLog("STM ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0) #define stDebug(...) do { if (stDebugFlag & DEBUG_DEBUG) { taosPrintLog("STM ", DEBUG_DEBUG, stDebugFlag, __VA_ARGS__); }} while(0)
#define stTrace(...) do { if (stDebugFlag & DEBUG_TRACE) { taosPrintLog("STM ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0) #define stTrace(...) do { if (stDebugFlag & DEBUG_TRACE) { taosPrintLog("STM ", DEBUG_TRACE, stDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on // clang-format on
typedef struct { typedef struct {

View File

@ -35,6 +35,10 @@ void streamTimerCleanUp() {
streamTimer = NULL; streamTimer = NULL;
} }
tmr_h streamTimerGetInstance() {
return streamTimer;
}
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId) { char* createStreamTaskIdStr(int64_t streamId, int32_t taskId) {
char buf[128] = {0}; char buf[128] = {0};
sprintf(buf, "0x%" PRIx64 "-0x%x", streamId, taskId); sprintf(buf, "0x%" PRIx64 "-0x%x", streamId, taskId);

View File

@ -500,34 +500,27 @@ int32_t backendCopyFiles(char* src, char* dst) {
// return 0; // return 0;
} }
int32_t rebuildFromLocalChkp(char* key, char* chkpPath, int64_t chkpId, char* defaultPath) { int32_t rebuildFromLocalChkp(char* key, char* chkpPath, int64_t chkpId, char* defaultPath) {
int32_t code = -1; int32_t code = 0;
int32_t len = strlen(defaultPath) + 32; if (taosIsDir(defaultPath)) {
char* tmp = taosMemoryCalloc(1, len); taosRemoveDir(defaultPath);
sprintf(tmp, "%s%s", defaultPath, "_tmp");
if (taosIsDir(tmp)) taosRemoveDir(tmp);
if (taosIsDir(defaultPath)) taosRenameFile(defaultPath, tmp);
if (taosIsDir(chkpPath) && isValidCheckpoint(chkpPath)) {
if (taosIsDir(tmp)) {
taosRemoveDir(tmp);
}
taosMkDir(defaultPath); taosMkDir(defaultPath);
stInfo("succ to clear stream backend %s", defaultPath);
}
if (taosIsDir(chkpPath) && isValidCheckpoint(chkpPath)) {
code = backendCopyFiles(chkpPath, defaultPath); code = backendCopyFiles(chkpPath, defaultPath);
if (code != 0) { if (code != 0) {
stError("failed to restart stream backend from %s, reason: %s", chkpPath, tstrerror(TAOS_SYSTEM_ERROR(errno))); taosRemoveDir(defaultPath);
taosMkDir(defaultPath);
stError("failed to restart stream backend from %s, reason: %s, start to restart from empty path: %s", chkpPath,
tstrerror(TAOS_SYSTEM_ERROR(errno)), defaultPath);
code = 0;
} else { } else {
stInfo("start to restart stream backend at checkpoint path: %s", chkpPath); stInfo("start to restart stream backend at checkpoint path: %s", chkpPath);
} }
} }
if (code != 0) {
if (taosIsDir(defaultPath)) taosRemoveDir(defaultPath);
if (taosIsDir(tmp)) taosRenameFile(tmp, defaultPath);
} else {
taosRemoveDir(tmp);
}
taosMemoryFree(tmp);
return code; return code;
} }

View File

@ -360,6 +360,8 @@ int32_t streamSaveTaskCheckpointInfo(SStreamTask* p, int64_t checkpointId) {
void streamTaskSetCheckpointFailedId(SStreamTask* pTask) { void streamTaskSetCheckpointFailedId(SStreamTask* pTask) {
pTask->chkInfo.failedId = pTask->chkInfo.checkpointingId; pTask->chkInfo.failedId = pTask->chkInfo.checkpointingId;
stDebug("s-task:%s mark the checkpointId:%" PRId64 " (transId:%d) failed", pTask->id.idStr,
pTask->chkInfo.checkpointingId, pTask->chkInfo.transId);
} }
int32_t getChkpMeta(char* id, char* path, SArray* list) { int32_t getChkpMeta(char* id, char* path, SArray* list) {

View File

@ -547,25 +547,21 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S
memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName)); memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName));
} }
} else { } else {
char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN); char ctbName[TSDB_TABLE_FNAME_LEN] = {0};
if (ctbName == NULL) {
return -1;
}
if (pDataBlock->info.parTbName[0]) { if (pDataBlock->info.parTbName[0]) {
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db, if(pTask->ver >= SSTREAM_TASK_SUBTABLE_CHANGED_VER &&
pDataBlock->info.parTbName); !isAutoTableName(pDataBlock->info.parTbName) &&
!alreadyAddGroupId(pDataBlock->info.parTbName) &&
groupId != 0){
buildCtbNameAddGruopId(pDataBlock->info.parTbName, groupId);
}
} else { } else {
buildCtbNameByGroupIdImpl(pTask->outputInfo.shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName); buildCtbNameByGroupIdImpl(pTask->outputInfo.shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName);
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db,
pDataBlock->info.parTbName);
} }
snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->outputInfo.shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
/*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/ /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
SUseDbRsp* pDbInfo = &pTask->outputInfo.shuffleDispatcher.dbInfo; SUseDbRsp* pDbInfo = &pTask->outputInfo.shuffleDispatcher.dbInfo;
hashValue = hashValue = taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
taosMemoryFree(ctbName);
SBlockName bln = {0}; SBlockName bln = {0};
bln.hashValue = hashValue; bln.hashValue = hashValue;
memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName)); memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName));

View File

@ -180,10 +180,8 @@ int32_t streamMetaCheckBackendCompatible(SStreamMeta* pMeta) {
} }
if (info.msgVer <= SSTREAM_TASK_INCOMPATIBLE_VER) { if (info.msgVer <= SSTREAM_TASK_INCOMPATIBLE_VER) {
ret = STREAM_STATA_NO_COMPATIBLE; ret = STREAM_STATA_NO_COMPATIBLE;
} else if (info.msgVer == SSTREAM_TASK_NEED_CONVERT_VER) { } else if (info.msgVer >= SSTREAM_TASK_NEED_CONVERT_VER) {
ret = STREAM_STATA_NEED_CONVERT; ret = STREAM_STATA_NEED_CONVERT;
} else if (info.msgVer == SSTREAM_TASK_VER) {
ret = STREAM_STATA_COMPATIBLE;
} }
tDecoderClear(&decoder); tDecoderClear(&decoder);
break; break;
@ -962,7 +960,7 @@ int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pReq) {
if (tEncodeI64(pEncoder, ps->processedVer) < 0) return -1; if (tEncodeI64(pEncoder, ps->processedVer) < 0) return -1;
if (tEncodeI64(pEncoder, ps->verStart) < 0) return -1; if (tEncodeI64(pEncoder, ps->verStart) < 0) return -1;
if (tEncodeI64(pEncoder, ps->verEnd) < 0) return -1; if (tEncodeI64(pEncoder, ps->verEnd) < 0) return -1;
if (tEncodeI64(pEncoder, ps->activeCheckpointId) < 0) return -1; if (tEncodeI64(pEncoder, ps->checkpointId) < 0) return -1;
if (tEncodeI8(pEncoder, ps->checkpointFailed) < 0) return -1; if (tEncodeI8(pEncoder, ps->checkpointFailed) < 0) return -1;
if (tEncodeI32(pEncoder, ps->chkpointTransId) < 0) return -1; if (tEncodeI32(pEncoder, ps->chkpointTransId) < 0) return -1;
} }
@ -1001,8 +999,8 @@ int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pReq) {
if (tDecodeI64(pDecoder, &entry.processedVer) < 0) return -1; if (tDecodeI64(pDecoder, &entry.processedVer) < 0) return -1;
if (tDecodeI64(pDecoder, &entry.verStart) < 0) return -1; if (tDecodeI64(pDecoder, &entry.verStart) < 0) return -1;
if (tDecodeI64(pDecoder, &entry.verEnd) < 0) return -1; if (tDecodeI64(pDecoder, &entry.verEnd) < 0) return -1;
if (tDecodeI64(pDecoder, &entry.activeCheckpointId) < 0) return -1; if (tDecodeI64(pDecoder, &entry.checkpointId) < 0) return -1;
if (tDecodeI8(pDecoder, (int8_t*)&entry.checkpointFailed) < 0) return -1; if (tDecodeI8(pDecoder, &entry.checkpointFailed) < 0) return -1;
if (tDecodeI32(pDecoder, &entry.chkpointTransId) < 0) return -1; if (tDecodeI32(pDecoder, &entry.chkpointTransId) < 0) return -1;
entry.id.taskId = taskId; entry.id.taskId = taskId;
@ -1115,8 +1113,8 @@ static int32_t metaHeartbeatToMnodeImpl(SStreamMeta* pMeta) {
} }
if ((*pTask)->chkInfo.checkpointingId != 0) { if ((*pTask)->chkInfo.checkpointingId != 0) {
entry.checkpointFailed = ((*pTask)->chkInfo.failedId >= (*pTask)->chkInfo.checkpointingId); entry.checkpointFailed = ((*pTask)->chkInfo.failedId >= (*pTask)->chkInfo.checkpointingId)? 1:0;
entry.activeCheckpointId = (*pTask)->chkInfo.checkpointingId; entry.checkpointId = (*pTask)->chkInfo.checkpointingId;
entry.chkpointTransId = (*pTask)->chkInfo.transId; entry.chkpointTransId = (*pTask)->chkInfo.transId;
if (entry.checkpointFailed) { if (entry.checkpointFailed) {
@ -1375,7 +1373,6 @@ SArray* streamMetaSendMsgBeforeCloseTasks(SStreamMeta* pMeta) {
SStreamTaskState* pState = streamTaskGetStatus(pTask); SStreamTaskState* pState = streamTaskGetStatus(pTask);
if (pState->state == TASK_STATUS__CK) { if (pState->state == TASK_STATUS__CK) {
streamTaskSetCheckpointFailedId(pTask); streamTaskSetCheckpointFailedId(pTask);
stDebug("s-task:%s mark the checkpoint:%"PRId64" failed", pTask->id.idStr, pTask->chkInfo.checkpointingId);
} else { } else {
stDebug("s-task:%s status:%s not reset the checkpoint", pTask->id.idStr, pState->name); stDebug("s-task:%s status:%s not reset the checkpoint", pTask->id.idStr, pState->name);
} }
@ -1413,31 +1410,43 @@ void streamMetaUpdateStageRole(SStreamMeta* pMeta, int64_t stage, bool isLeader)
} }
} }
static SArray* prepareBeforeStartTasks(SStreamMeta* pMeta) {
streamMetaWLock(pMeta);
SArray* pTaskList = taosArrayDup(pMeta->pTaskList, NULL);
taosHashClear(pMeta->startInfo.pReadyTaskSet);
taosHashClear(pMeta->startInfo.pFailedTaskSet);
pMeta->startInfo.startTs = taosGetTimestampMs();
streamMetaResetTaskStatus(pMeta);
streamMetaWUnLock(pMeta);
return pTaskList;
}
int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) { int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
int32_t vgId = pMeta->vgId; int32_t vgId = pMeta->vgId;
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList); int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
stInfo("vgId:%d start to check all %d stream task(s) downstream status", vgId, numOfTasks); stInfo("vgId:%d start to check all %d stream task(s) downstream status", vgId, numOfTasks);
if (numOfTasks == 0) { if (numOfTasks == 0) {
stInfo("vgId:%d start tasks completed", pMeta->vgId); stInfo("vgId:%d start tasks completed", pMeta->vgId);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SArray* pTaskList = NULL;
streamMetaWLock(pMeta);
pTaskList = taosArrayDup(pMeta->pTaskList, NULL);
taosHashClear(pMeta->startInfo.pReadyTaskSet);
taosHashClear(pMeta->startInfo.pFailedTaskSet);
pMeta->startInfo.startTs = taosGetTimestampMs();
streamMetaWUnLock(pMeta);
// broadcast the check downstream tasks msg
int64_t now = taosGetTimestampMs(); int64_t now = taosGetTimestampMs();
SArray* pTaskList = prepareBeforeStartTasks(pMeta);
numOfTasks = taosArrayGetSize(pTaskList);
// broadcast the check downstream tasks msg
for (int32_t i = 0; i < numOfTasks; ++i) { for (int32_t i = 0; i < numOfTasks; ++i) {
SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i);
// todo: may be we should find the related fill-history task and set it failed.
// todo: use hashTable instead
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId); SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId);
if (pTask == NULL) { if (pTask == NULL) {
stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId); stError("vgId:%d failed to acquire task:0x%x during start tasks", pMeta->vgId, pTaskId->taskId);
@ -1445,8 +1454,6 @@ int32_t streamMetaStartAllTasks(SStreamMeta* pMeta) {
continue; continue;
} }
// todo: may be we should find the related fill-history task and set it failed.
// fill-history task can only be launched by related stream tasks. // fill-history task can only be launched by related stream tasks.
STaskExecStatisInfo* pInfo = &pTask->execInfo; STaskExecStatisInfo* pInfo = &pTask->execInfo;
if (pTask->info.fillHistory == 1) { if (pTask->info.fillHistory == 1) {
@ -1493,10 +1500,13 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
int32_t num = taosArrayGetSize(pMeta->pTaskList); int32_t num = taosArrayGetSize(pMeta->pTaskList);
stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num); stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num);
if (num == 0) { if (num == 0) {
stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num);
streamMetaRUnLock(pMeta); streamMetaRUnLock(pMeta);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int64_t st = taosGetTimestampMs();
// send hb msg to mnode before closing all tasks. // send hb msg to mnode before closing all tasks.
SArray* pTaskList = streamMetaSendMsgBeforeCloseTasks(pMeta); SArray* pTaskList = streamMetaSendMsgBeforeCloseTasks(pMeta);
int32_t numOfTasks = taosArrayGetSize(pTaskList); int32_t numOfTasks = taosArrayGetSize(pTaskList);
@ -1514,6 +1524,9 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) {
taosArrayDestroy(pTaskList); taosArrayDestroy(pTaskList);
double el = (taosGetTimestampMs() - st) / 1000.0;
stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, num, el);
streamMetaRUnLock(pMeta); streamMetaRUnLock(pMeta);
return 0; return 0;
} }
@ -1641,3 +1654,22 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) {
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
stDebug("vgId:%d reset all %d stream task(s) status to be uninit", pMeta->vgId, numOfTasks);
if (numOfTasks == 0) {
return TSDB_CODE_SUCCESS;
}
for (int32_t i = 0; i < numOfTasks; ++i) {
SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i);
STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId};
SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id));
streamTaskResetStatus(*pTask);
}
return 0;
}

View File

@ -90,6 +90,7 @@ SRowBuffPos* createSessionWinBuff(SStreamFileState* pFileState, SSessionKey* pKe
SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState);
memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey)); memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
pNewPos->needFree = true; pNewPos->needFree = true;
pNewPos->beFlushed = true;
memcpy(pNewPos->pRowBuff, p, *pVLen); memcpy(pNewPos->pRowBuff, p, *pVLen);
taosMemoryFree(p); taosMemoryFree(p);
return pNewPos; return pNewPos;
@ -217,6 +218,7 @@ int32_t getSessionFlushedBuff(SStreamFileState* pFileState, SSessionKey* pKey, v
SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState); SRowBuffPos* pNewPos = getNewRowPosForWrite(pFileState);
memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey)); memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
pNewPos->needFree = true; pNewPos->needFree = true;
pNewPos->beFlushed = true;
void* pBuff = NULL; void* pBuff = NULL;
int32_t code = streamStateSessionGet_rocksdb(getStateFileStore(pFileState), pKey, &pBuff, pVLen); int32_t code = streamStateSessionGet_rocksdb(getStateFileStore(pFileState), pKey, &pBuff, pVLen);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
@ -307,6 +309,7 @@ int32_t allocSessioncWinBuffByNextPosition(SStreamFileState* pFileState, SStream
} }
pNewPos = getNewRowPosForWrite(pFileState); pNewPos = getNewRowPosForWrite(pFileState);
pNewPos->needFree = true; pNewPos->needFree = true;
pNewPos->beFlushed = true;
} }
_end: _end:
@ -482,6 +485,7 @@ int32_t sessionWinStateGetKVByCur(SStreamStateCur* pCur, SSessionKey* pKey, void
SRowBuffPos* pNewPos = getNewRowPosForWrite(pCur->pStreamFileState); SRowBuffPos* pNewPos = getNewRowPosForWrite(pCur->pStreamFileState);
memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey)); memcpy(pNewPos->pKey, pKey, sizeof(SSessionKey));
pNewPos->needFree = true; pNewPos->needFree = true;
pNewPos->beFlushed = true;
memcpy(pNewPos->pRowBuff, pData, *pVLen); memcpy(pNewPos->pRowBuff, pData, *pVLen);
(*pVal) = pNewPos; (*pVal) = pNewPos;
} }

View File

@ -698,6 +698,7 @@ int32_t streamStateSessionPut(SStreamState* pState, const SSessionKey* key, void
stDebug("===stream===save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64 ".code:%d", key->win.skey, stDebug("===stream===save skey:%" PRId64 ", ekey:%" PRId64 ", groupId:%" PRIu64 ".code:%d", key->win.skey,
key->win.ekey, key->groupId, code); key->win.ekey, key->groupId, code);
} else { } else {
pos->beFlushed = false;
code = putSessionWinResultBuff(pState->pFileState, value); code = putSessionWinResultBuff(pState->pFileState, value);
} }
} }

View File

@ -213,7 +213,7 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
if (tStartDecode(pDecoder) < 0) return -1; if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI64(pDecoder, &pTask->ver) < 0) return -1; if (tDecodeI64(pDecoder, &pTask->ver) < 0) return -1;
if (pTask->ver != SSTREAM_TASK_VER) return -1; if (pTask->ver <= SSTREAM_TASK_INCOMPATIBLE_VER) return -1;
if (tDecodeI64(pDecoder, &pTask->id.streamId) < 0) return -1; if (tDecodeI64(pDecoder, &pTask->id.streamId) < 0) return -1;
if (tDecodeI32(pDecoder, &pTask->id.taskId) < 0) return -1; if (tDecodeI32(pDecoder, &pTask->id.taskId) < 0) return -1;
@ -300,7 +300,7 @@ int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo)
if (tStartDecode(pDecoder) < 0) return -1; if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI64(pDecoder, &pChkpInfo->msgVer) < 0) return -1; if (tDecodeI64(pDecoder, &pChkpInfo->msgVer) < 0) return -1;
// if (ver != SSTREAM_TASK_VER) return -1; // if (ver <= SSTREAM_TASK_INCOMPATIBLE_VER) return -1;
if (tDecodeI64(pDecoder, &skip64) < 0) return -1; if (tDecodeI64(pDecoder, &skip64) < 0) return -1;
if (tDecodeI32(pDecoder, &skip32) < 0) return -1; if (tDecodeI32(pDecoder, &skip32) < 0) return -1;
@ -328,7 +328,7 @@ int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId) {
int64_t ver; int64_t ver;
if (tStartDecode(pDecoder) < 0) return -1; if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI64(pDecoder, &ver) < 0) return -1; if (tDecodeI64(pDecoder, &ver) < 0) return -1;
if (ver != SSTREAM_TASK_VER) return -1; if (ver <= SSTREAM_TASK_INCOMPATIBLE_VER) return -1;
if (tDecodeI64(pDecoder, &pTaskId->streamId) < 0) return -1; if (tDecodeI64(pDecoder, &pTaskId->streamId) < 0) return -1;
@ -810,7 +810,7 @@ void streamTaskStatusCopy(STaskStatusEntry* pDst, const STaskStatusEntry* pSrc)
pDst->verEnd = pSrc->verEnd; pDst->verEnd = pSrc->verEnd;
pDst->sinkQuota = pSrc->sinkQuota; pDst->sinkQuota = pSrc->sinkQuota;
pDst->sinkDataSize = pSrc->sinkDataSize; pDst->sinkDataSize = pSrc->sinkDataSize;
pDst->activeCheckpointId = pSrc->activeCheckpointId; pDst->checkpointId = pSrc->checkpointId;
pDst->checkpointFailed = pSrc->checkpointFailed; pDst->checkpointFailed = pSrc->checkpointFailed;
pDst->chkpointTransId = pSrc->chkpointTransId; pDst->chkpointTransId = pSrc->chkpointTransId;
} }

View File

@ -676,7 +676,7 @@ int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak, int64_
// heartbeat timeout // heartbeat timeout
if (syncNodeHeartbeatReplyTimeout(pSyncNode)) { if (syncNodeHeartbeatReplyTimeout(pSyncNode)) {
terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY; terrno = TSDB_CODE_SYN_PROPOSE_NOT_READY;
sNError(pSyncNode, "failed to sync propose since hearbeat timeout, type:%s, last:%" PRId64 ", cmt:%" PRId64, sNError(pSyncNode, "failed to sync propose since heartbeat timeout, type:%s, last:%" PRId64 ", cmt:%" PRId64,
TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex); TMSG_INFO(pMsg->msgType), syncNodeGetLastIndex(pSyncNode), pSyncNode->commitIndex);
return -1; return -1;
} }

View File

@ -345,9 +345,9 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
for (int32_t seq = pSndBuf->cursor + 1; seq < pSndBuf->end; ++seq) { for (int32_t seq = pSndBuf->cursor + 1; seq < pSndBuf->end; ++seq) {
SyncSnapBlock *pBlk = pSndBuf->entries[seq % pSndBuf->size]; SyncSnapBlock *pBlk = pSndBuf->entries[seq % pSndBuf->size];
ASSERT(pBlk && !pBlk->acked); ASSERT(pBlk);
int64_t nowMs = taosGetTimestampMs(); int64_t nowMs = taosGetTimestampMs();
if (nowMs < pBlk->sendTimeMs + SYNC_SNAP_RESEND_MS) { if (pBlk->acked || nowMs < pBlk->sendTimeMs + SYNC_SNAP_RESEND_MS) {
continue; continue;
} }
if (syncSnapSendMsg(pSender, pBlk->seq, pBlk->pBlock, pBlk->blockLen, 0) != 0) { if (syncSnapSendMsg(pSender, pBlk->seq, pBlk->pBlock, pBlk->blockLen, 0) != 0) {

View File

@ -414,6 +414,7 @@ void cliHandleResp(SCliConn* conn) {
// buf's mem alread translated to transMsg.pCont // buf's mem alread translated to transMsg.pCont
if (!CONN_NO_PERSIST_BY_APP(conn)) { if (!CONN_NO_PERSIST_BY_APP(conn)) {
transMsg.info.handle = (void*)conn->refId; transMsg.info.handle = (void*)conn->refId;
transMsg.info.refId = (int64_t)(void*)conn->refId;
tDebug("%s conn %p ref by app", CONN_GET_INST_LABEL(conn), conn); tDebug("%s conn %p ref by app", CONN_GET_INST_LABEL(conn), conn);
} }

View File

@ -101,8 +101,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STARTING, "Database is starting
TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STOPPING, "Database is closing down") TAOS_DEFINE_ERROR(TSDB_CODE_APP_IS_STOPPING, "Database is closing down")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DATA_FMT, "Invalid data format") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_DATA_FMT, "Invalid data format")
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CFG_VALUE, "Invalid configuration value") TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CFG_VALUE, "Invalid configuration value")
TAOS_DEFINE_ERROR(TSDB_CODE_IP_NOT_IN_WHITE_LIST, "Not allowed to connect") TAOS_DEFINE_ERROR(TSDB_CODE_IP_NOT_IN_WHITE_LIST, "Not allowed to connect")
TAOS_DEFINE_ERROR(TSDB_CODE_FAILED_TO_CONNECT_S3, "Failed to connect to s3 server") TAOS_DEFINE_ERROR(TSDB_CODE_FAILED_TO_CONNECT_S3, "Failed to connect to s3 server")
//client //client
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_OPERATION, "Invalid operation")

View File

@ -119,6 +119,8 @@ class TDTestCase(TBase):
# stop taosd test taos as server # stop taosd test taos as server
sc.dnodeStop(idx) sc.dnodeStop(idx)
etool.exeBinFile("taos", f'-n server', wait=False) etool.exeBinFile("taos", f'-n server', wait=False)
time.sleep(3)
eos.exe("pkill -9 taos")
# run # run
def run(self): def run(self):

View File

@ -7,7 +7,7 @@
"password": "taosdata", "password": "taosdata",
"connection_pool_size": 8, "connection_pool_size": 8,
"num_of_records_per_req": 4000, "num_of_records_per_req": 4000,
"prepared_rand": 1000, "prepared_rand": 10000,
"thread_count": 3, "thread_count": 3,
"create_table_thread_count": 1, "create_table_thread_count": 1,
"confirm_parameter_prompt": "no", "confirm_parameter_prompt": "no",

View File

@ -41,6 +41,7 @@ class TDTestCase(TBase):
etool.benchMark(json=jfile) etool.benchMark(json=jfile)
tdSql.execute(f"use {self.db}") tdSql.execute(f"use {self.db}")
tdSql.execute("select database();")
# set insert data information # set insert data information
self.childtable_count = 6 self.childtable_count = 6
self.insert_rows = 100000 self.insert_rows = 100000
@ -50,10 +51,30 @@ class TDTestCase(TBase):
def doQuery(self): def doQuery(self):
tdLog.info(f"do query.") tdLog.info(f"do query.")
# top bottom # __group_key
sql = f"select top(uti, 5) from {self.stb} " sql = f"select count(*),_group_key(uti),uti from {self.stb} partition by uti"
tdSql.execute(sql) tdSql.query(sql)
# column index 1 values are the same as column 2
tdSql.checkSameColumn(1, 2)
sql = f"select count(*),_group_key(usi),usi from {self.stb} group by usi limit 100;"
tdSql.query(sql)
tdSql.checkSameColumn(1, 2)
# tail
sql1 = "select ts,ui from d0 order by ts desc limit 5 offset 2;"
sql2 = "select ts,tail(ui,5,2) from d0;"
self.checkSameResult(sql1, sql2)
# unique
sql1 = "select distinct uti from d0 order by uti;"
sql2 = "select UNIQUE(uti) from d0 order by uti asc;"
self.checkSameResult(sql1, sql2)
# top
sql1 = "select top(bi,10) from stb;"
sql2 = "select bi from stb where bi is not null order by bi desc limit 10;"
self.checkSameResult(sql1, sql2)
# run # run
def run(self): def run(self):

View File

@ -16,6 +16,7 @@ import os
import time import time
import datetime import datetime
import random import random
import copy
from frame.log import * from frame.log import *
from frame.sql import * from frame.sql import *
@ -183,6 +184,46 @@ class TBase:
sql = f"select {col} from {self.stb} order by _c0 asc limit 1" sql = f"select {col} from {self.stb} order by _c0 asc limit 1"
tdSql.checkFirstValue(sql, expect) tdSql.checkFirstValue(sql, expect)
# check that sql1 returns the same result as sql2
def checkSameResult(self, sql1, sql2):
tdLog.info(f"sql1={sql1}")
tdLog.info(f"sql2={sql2}")
tdLog.info("compare sql1 same with sql2 ...")
# sql
rows1 = tdSql.query(sql1,queryTimes=2)
res1 = copy.deepcopy(tdSql.res)
tdSql.query(sql2,queryTimes=2)
res2 = tdSql.res
rowlen1 = len(res1)
rowlen2 = len(res2)
errCnt = 0
if rowlen1 != rowlen2:
tdLog.exit(f"both row count not equal. rowlen1={rowlen1} rowlen2={rowlen2} ")
return False
for i in range(rowlen1):
row1 = res1[i]
row2 = res2[i]
collen1 = len(row1)
collen2 = len(row2)
if collen1 != collen2:
tdLog.exit(f"both col count not equal. collen1={collen1} collen2={collen2}")
return False
for j in range(collen1):
if row1[j] != row2[j]:
tdLog.info(f"error both column value not equal. row={i} col={j} col1={row1[j]} col2={row2[j]} .")
errCnt += 1
if errCnt > 0:
tdLog.exit(f"sql2 column value different with sql1. different count ={errCnt} ")
tdLog.info("sql1 same result with sql2.")
# #
# get db information # get db information
# #
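checkSameResult() added above is intended to be called from cases deriving from TBase; it runs both statements and fails the case on the first mismatching row or column. A minimal sketch of a call site, reusing the d0 table created by the benchmark step (the two statements are illustrative only, taken from the query_basic case):

# inside a method of a TBase-derived case: two queries that should return identical rows
sql1 = "select ts, ui from d0 order by ts desc limit 5 offset 2;"
sql2 = "select ts, tail(ui, 5, 2) from d0;"
self.checkSameResult(sql1, sql2)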

View File

@ -795,7 +795,7 @@ class TDCom:
def getOneRow(self, location, containElm): def getOneRow(self, location, containElm):
res_list = list() res_list = list()
if 0 <= location < tdSql.queryRows: if 0 <= location < tdSql.queryRows:
for row in tdSql.queryResult: for row in tdSql.res:
if row[location] == containElm: if row[location] == containElm:
res_list.append(row) res_list.append(row)
return res_list return res_list
@ -943,7 +943,7 @@ class TDCom:
"""drop all streams """drop all streams
""" """
tdSql.query("show streams") tdSql.query("show streams")
stream_name_list = list(map(lambda x: x[0], tdSql.queryResult)) stream_name_list = list(map(lambda x: x[0], tdSql.res))
for stream_name in stream_name_list: for stream_name in stream_name_list:
tdSql.execute(f'drop stream if exists {stream_name};') tdSql.execute(f'drop stream if exists {stream_name};')
@ -962,7 +962,7 @@ class TDCom:
"""drop all databases """drop all databases
""" """
tdSql.query("show databases;") tdSql.query("show databases;")
db_list = list(map(lambda x: x[0], tdSql.queryResult)) db_list = list(map(lambda x: x[0], tdSql.res))
for dbname in db_list: for dbname in db_list:
if dbname not in self.white_list and "telegraf" not in dbname: if dbname not in self.white_list and "telegraf" not in dbname:
tdSql.execute(f'drop database if exists `{dbname}`') tdSql.execute(f'drop database if exists `{dbname}`')
@ -1412,7 +1412,7 @@ class TDCom:
input_function (str): scalar input_function (str): scalar
""" """
tdSql.query(sql) tdSql.query(sql)
res = tdSql.queryResult res = tdSql.res
if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]: if input_function in ["acos", "asin", "atan", "cos", "log", "pow", "sin", "sqrt", "tan"]:
tdSql.checkEqual(res[1][1], "DOUBLE") tdSql.checkEqual(res[1][1], "DOUBLE")
tdSql.checkEqual(res[2][1], "DOUBLE") tdSql.checkEqual(res[2][1], "DOUBLE")
@ -1490,7 +1490,7 @@ class TDCom:
bigint: bigint-ts bigint: bigint-ts
""" """
tdSql.query(f'select cast({str_ts} as bigint)') tdSql.query(f'select cast({str_ts} as bigint)')
return tdSql.queryResult[0][0] return tdSql.res[0][0]
def cast_query_data(self, query_data): def cast_query_data(self, query_data):
"""cast query-result for existed-stb """cast query-result for existed-stb
@ -1514,7 +1514,7 @@ class TDCom:
tdSql.query(f'select cast("{v}" as binary(6))') tdSql.query(f'select cast("{v}" as binary(6))')
else: else:
tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})') tdSql.query(f'select cast("{v}" as {" ".join(col_tag_type_list[i].strip().split(" ")[1:])})')
query_data_l[i] = tdSql.queryResult[0][0] query_data_l[i] = tdSql.res[0][0]
else: else:
query_data_l[i] = v query_data_l[i] = v
nl.append(tuple(query_data_l)) nl.append(tuple(query_data_l))
@ -1566,9 +1566,9 @@ class TDCom:
if tag_value_list: if tag_value_list:
dvalue = len(self.tag_type_str.split(',')) - defined_tag_count dvalue = len(self.tag_type_str.split(',')) - defined_tag_count
tdSql.query(sql1) tdSql.query(sql1)
res1 = tdSql.queryResult res1 = tdSql.res
tdSql.query(sql2) tdSql.query(sql2)
res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res
tdSql.sql = sql1 tdSql.sql = sql1
new_list = list() new_list = list()
if tag_value_list: if tag_value_list:
@ -1601,10 +1601,10 @@ class TDCom:
tdLog.info("query retrying ...") tdLog.info("query retrying ...")
new_list = list() new_list = list()
tdSql.query(sql1) tdSql.query(sql1)
res1 = tdSql.queryResult res1 = tdSql.res
tdSql.query(sql2) tdSql.query(sql2)
# res2 = tdSql.queryResult # res2 = tdSql.res
res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res
tdSql.sql = sql1 tdSql.sql = sql1
if tag_value_list: if tag_value_list:
@ -1643,10 +1643,10 @@ class TDCom:
tdLog.info("query retrying ...") tdLog.info("query retrying ...")
new_list = list() new_list = list()
tdSql.query(sql1) tdSql.query(sql1)
res1 = tdSql.queryResult res1 = tdSql.res
tdSql.query(sql2) tdSql.query(sql2)
# res2 = tdSql.queryResult # res2 = tdSql.res
res2 = self.cast_query_data(tdSql.queryResult) if tag_value_list or use_exist_stb else tdSql.queryResult res2 = self.cast_query_data(tdSql.res) if tag_value_list or use_exist_stb else tdSql.res
tdSql.sql = sql1 tdSql.sql = sql1
if tag_value_list: if tag_value_list:

View File

@ -87,10 +87,9 @@ class ConfigureyCluster:
count=0 count=0
while count < 5: while count < 5:
tdSql.query("select * from information_schema.ins_dnodes") tdSql.query("select * from information_schema.ins_dnodes")
# tdLog.debug(tdSql.queryResult)
status=0 status=0
for i in range(self.dnodeNums): for i in range(self.dnodeNums):
if tdSql.queryResult[i][4] == "ready": if tdSql.res[i][4] == "ready":
status+=1 status+=1
# tdLog.debug(status) # tdLog.debug(status)

View File

@ -78,6 +78,107 @@ class TDSql:
self.cursor.execute(s) self.cursor.execute(s)
time.sleep(2) time.sleep(2)
#
# do execute
#
def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None):
self.sql = sql
i=1
while i <= queryTimes:
try:
self.cursor.execute(sql)
self.res = self.cursor.fetchall()
self.queryRows = len(self.res)
self.queryCols = len(self.cursor.description)
if count_expected_res is not None:
counter = 0
while count_expected_res != self.res[0][0]:
self.cursor.execute(sql)
self.res = self.cursor.fetchall()
if counter < queryTimes:
counter += 0.5
time.sleep(0.5)
else:
return False
if row_tag:
return self.res
return self.queryRows
except Exception as e:
tdLog.notice("Try to query again, query times: %d "%i)
if i == queryTimes:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
i+=1
time.sleep(1)
pass
def executeTimes(self, sql, times):
for i in range(times):
try:
return self.cursor.execute(sql)
except BaseException:
time.sleep(1)
continue
def execute(self, sql, queryTimes=30, show=False):
self.sql = sql
if show:
tdLog.info(sql)
i=1
while i <= queryTimes:
try:
self.affectedRows = self.cursor.execute(sql)
return self.affectedRows
except Exception as e:
tdLog.notice("Try to execute sql again, query times: %d "%i)
if i == queryTimes:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
i+=1
time.sleep(1)
pass
# execute many sql
def executes(self, sqls, queryTimes=30, show=False):
for sql in sqls:
self.execute(sql, queryTimes, show)
def waitedQuery(self, sql, expectRows, timeout):
tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout))
self.sql = sql
try:
for i in range(timeout):
self.cursor.execute(sql)
self.res = self.cursor.fetchall()
self.queryRows = len(self.res)
self.queryCols = len(self.cursor.description)
tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
if self.queryRows >= expectRows:
return (self.queryRows, i)
time.sleep(1)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
return (self.queryRows, timeout)
def is_err_sql(self, sql):
err_flag = True
try:
self.cursor.execute(sql)
except BaseException:
err_flag = False
return False if err_flag else True
def error(self, sql, expectedErrno = None, expectErrInfo = None): def error(self, sql, expectedErrno = None, expectErrInfo = None):
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
expectErrNotOccured = True expectErrNotOccured = True
@ -95,7 +196,7 @@ class TDSql:
else: else:
self.queryRows = 0 self.queryRows = 0
self.queryCols = 0 self.queryCols = 0
self.queryResult = None self.res = None
if expectedErrno != None: if expectedErrno != None:
if expectedErrno == self.errno: if expectedErrno == self.errno:
@ -115,49 +216,25 @@ class TDSql:
return self.error_info return self.error_info
def query(self, sql, row_tag=None, queryTimes=10, count_expected_res=None): #
# get section
#
def getData(self, row, col):
self.checkRowCol(row, col)
return self.res[row][col]
def getResult(self, sql):
self.sql = sql self.sql = sql
i=1
while i <= queryTimes:
try:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
if count_expected_res is not None:
counter = 0
while count_expected_res != self.queryResult[0][0]:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
if counter < queryTimes:
counter += 0.5
time.sleep(0.5)
else:
return False
if row_tag:
return self.queryResult
return self.queryRows
except Exception as e:
tdLog.notice("Try to query again, query times: %d "%i)
if i == queryTimes:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
i+=1
time.sleep(1)
pass
def is_err_sql(self, sql):
err_flag = True
try: try:
self.cursor.execute(sql) self.cursor.execute(sql)
except BaseException: self.res = self.cursor.fetchall()
err_flag = False except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
return False if err_flag else True args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
return self.res
def getVariable(self, search_attr): def getVariable(self, search_attr):
''' '''
@ -193,29 +270,19 @@ class TDSql:
return col_name_list, col_type_list return col_name_list, col_type_list
return col_name_list return col_name_list
def waitedQuery(self, sql, expectRows, timeout):
tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout))
self.sql = sql
try:
for i in range(timeout):
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
self.queryRows = len(self.queryResult)
self.queryCols = len(self.cursor.description)
tdLog.info("sql: %s, try to retrieve %d rows,get %d rows" % (sql, expectRows, self.queryRows))
if self.queryRows >= expectRows:
return (self.queryRows, i)
time.sleep(1)
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
return (self.queryRows, timeout)
def getRows(self): def getRows(self):
return self.queryRows return self.queryRows
# get first value
def getFirstValue(self, sql) :
self.query(sql)
return self.getData(0, 0)
#
# check session
#
def checkRows(self, expectRows): def checkRows(self, expectRows):
if self.queryRows == expectRows: if self.queryRows == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows)) tdLog.info("sql:%s, queryRows:%d == expect:%d" % (self.sql, self.queryRows, expectRows))
@ -273,26 +340,26 @@ class TDSql:
self.checkRowCol(row, col) self.checkRowCol(row, col)
if self.queryResult[row][col] != data: if self.res[row][col] != data:
if self.cursor.istype(col, "TIMESTAMP"): if self.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed`` # suppose user want to check nanosecond timestamp if a longer data passed``
if isinstance(data,str) : if isinstance(data,str) :
if (len(data) >= 28): if (len(data) >= 28):
if self.queryResult[row][col] == _parse_ns_timestamp(data): if self.res[row][col] == _parse_ns_timestamp(data):
if(show): if(show):
tdLog.info("check successfully") tdLog.info("check successfully")
else: else:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
else: else:
if self.queryResult[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc): if self.res[row][col].astimezone(datetime.timezone.utc) == _parse_datetime(data).astimezone(datetime.timezone.utc):
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
if(show): if(show):
tdLog.info("check successfully") tdLog.info("check successfully")
else: else:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
return return
elif isinstance(data,int): elif isinstance(data,int):
@ -304,72 +371,72 @@ class TDSql:
precision = 'ns' precision = 'ns'
else: else:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
return return
success = False success = False
if precision == 'ms': if precision == 'ms':
dt_obj = self.queryResult[row][col] dt_obj = self.res[row][col]
ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1000) + int(dt_obj.microsecond/1000) ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1000) + int(dt_obj.microsecond/1000)
if ts == data: if ts == data:
success = True success = True
elif precision == 'us': elif precision == 'us':
dt_obj = self.queryResult[row][col] dt_obj = self.res[row][col]
ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e6) + int(dt_obj.microsecond) ts = int(int((dt_obj-datetime.datetime.fromtimestamp(0,dt_obj.tzinfo)).total_seconds())*1e6) + int(dt_obj.microsecond)
if ts == data: if ts == data:
success = True success = True
elif precision == 'ns': elif precision == 'ns':
if data == self.queryResult[row][col]: if data == self.res[row][col]:
success = True success = True
if success: if success:
if(show): if(show):
tdLog.info("check successfully") tdLog.info("check successfully")
else: else:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
return return
elif isinstance(data,datetime.datetime): elif isinstance(data,datetime.datetime):
dt_obj = self.queryResult[row][col] dt_obj = self.res[row][col]
delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo) delt_data = data-datetime.datetime.fromtimestamp(0,data.tzinfo)
delt_result = self.queryResult[row][col] - datetime.datetime.fromtimestamp(0,self.queryResult[row][col].tzinfo) delt_result = self.res[row][col] - datetime.datetime.fromtimestamp(0,self.res[row][col].tzinfo)
if delt_data == delt_result: if delt_data == delt_result:
if(show): if(show):
tdLog.info("check successfully") tdLog.info("check successfully")
else: else:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
return return
else: else:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
if str(self.queryResult[row][col]) == str(data): if str(self.res[row][col]) == str(data):
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
if(show): if(show):
tdLog.info("check successfully") tdLog.info("check successfully")
return return
elif isinstance(data, float): elif isinstance(data, float):
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001: if abs(data) >= 1 and abs((self.res[row][col] - data) / data) <= 0.000001:
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
if(show): if(show):
tdLog.info("check successfully") tdLog.info("check successfully")
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: elif abs(data) < 1 and abs(self.res[row][col] - data) <= 0.000001:
# tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}") # tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.res[row][col]} == expect:{data}")
if(show): if(show):
tdLog.info("check successfully") tdLog.info("check successfully")
else: else:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
return return
else: else:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) args = (caller.filename, caller.lineno, self.sql, row, col, self.res[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
if(show): if(show):
tdLog.info("check successfully") tdLog.info("check successfully")
@ -397,23 +464,23 @@ class TDSql:
def checkDataNoExit(self, row, col, data): def checkDataNoExit(self, row, col, data):
if self.checkRowColNoExit(row, col) == False: if self.checkRowColNoExit(row, col) == False:
return False return False
if self.queryResult[row][col] != data: if self.res[row][col] != data:
if self.cursor.istype(col, "TIMESTAMP"): if self.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed # suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28): if (len(data) >= 28):
if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data): if pd.to_datetime(self.res[row][col]) == pd.to_datetime(data):
return True return True
else: else:
if self.queryResult[row][col] == _parse_datetime(data): if self.res[row][col] == _parse_datetime(data):
return True return True
return False return False
if str(self.queryResult[row][col]) == str(data): if str(self.res[row][col]) == str(data):
return True return True
elif isinstance(data, float): elif isinstance(data, float):
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001: if abs(data) >= 1 and abs((self.res[row][col] - data) / data) <= 0.000001:
return True return True
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001: elif abs(data) < 1 and abs(self.res[row][col] - data) <= 0.000001:
return True return True
else: else:
return False return False
@ -437,56 +504,6 @@ class TDSql:
self.query(sql) self.query(sql)
self.checkData(row, col, data) self.checkData(row, col, data)
def getData(self, row, col):
self.checkRowCol(row, col)
return self.queryResult[row][col]
def getResult(self, sql):
self.sql = sql
try:
self.cursor.execute(sql)
self.queryResult = self.cursor.fetchall()
except Exception as e:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
return self.queryResult
def executeTimes(self, sql, times):
for i in range(times):
try:
return self.cursor.execute(sql)
except BaseException:
time.sleep(1)
continue
def execute(self, sql, queryTimes=30, show=False):
self.sql = sql
if show:
tdLog.info(sql)
i=1
while i <= queryTimes:
try:
self.affectedRows = self.cursor.execute(sql)
return self.affectedRows
except Exception as e:
tdLog.notice("Try to execute sql again, query times: %d "%i)
if i == queryTimes:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
raise Exception(repr(e))
i+=1
time.sleep(1)
pass
# execute many sql
def executes(self, sqls, queryTimes=30, show=False):
for sql in sqls:
self.execute(sql, queryTimes, show)
def checkAffectedRows(self, expectAffectedRows): def checkAffectedRows(self, expectAffectedRows):
if self.affectedRows != expectAffectedRows: if self.affectedRows != expectAffectedRows:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
@ -545,16 +562,21 @@ class TDSql:
self.query(sql) self.query(sql)
self.checkData(0, 0, expectCnt) self.checkData(0, 0, expectCnt)
# get first value
def getFirstValue(self, sql) :
self.query(sql)
return self.getData(0, 0)
# expect first value # expect first value
def checkFirstValue(self, sql, expect): def checkFirstValue(self, sql, expect):
self.query(sql) self.query(sql)
self.checkData(0, 0, expect) self.checkData(0, 0, expect)
# check that column colIdx1 has the same values as column colIdx2
def checkSameColumn(self, c1, c2):
for i in range(self.queryRows):
if self.res[i][c1] != self.res[i][c2]:
tdLog.exit(f"Not same. row={i} col1={c1} col2={c2}. {self.res[i][c1]}!={self.res[i][c2]}")
tdLog.info(f"check {self.queryRows} rows two column value same. column index [{c1},{c2}]")
#
# others section
#
def get_times(self, time_str, precision="ms"): def get_times(self, time_str, precision="ms"):
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])

View File

@ -560,17 +560,6 @@ if __name__ == "__main__":
conn = taosws.connect(f"taosws://root:taosdata@{host}:6041") conn = taosws.connect(f"taosws://root:taosdata@{host}:6041")
else: else:
conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath()) conn = taos.connect(host=f"{host}", config=tdDnodes.getSimCfgPath())
# tdSql.init(conn.cursor())
# tdSql.execute("create qnode on dnode 1")
# tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy)
# tdSql.query("show local variables;")
# for i in range(tdSql.queryRows):
# if tdSql.queryResult[i][0] == "queryPolicy" :
# if int(tdSql.queryResult[i][1]) == int(queryPolicy):
# tdLog.info('alter queryPolicy to %d successfully'%queryPolicy)
# else :
# tdLog.debug(tdSql.queryResult)
# tdLog.exit("alter queryPolicy to %d failed"%queryPolicy)
cursor = conn.cursor() cursor = conn.cursor()
cursor.execute("create qnode on dnode 1") cursor.execute("create qnode on dnode 1")

View File

@ -3,7 +3,13 @@
#NA,NA,y or n,script,./test.sh -f tsim/user/basic.sim #NA,NA,y or n,script,./test.sh -f tsim/user/basic.sim
#unit-test #unit-test
archOs=$(arch)
if [[ $archOs =~ "aarch64" ]]; then
,,n,unit-test,bash test.sh
else
,,y,unit-test,bash test.sh ,,y,unit-test,bash test.sh
fi
# #
# army-test # army-test
@ -12,6 +18,7 @@
,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3_basic.py -L 3 -D 1 ,,y,army,./pytest.sh python3 ./test.py -f enterprise/s3/s3_basic.py -L 3 -D 1
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f community/cluster/snapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/cluster/incSnapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f community/cluster/incSnapshot.py -N 3 -L 3 -D 2
,,y,army,./pytest.sh python3 ./test.py -f community/query/query_basic.py -N 3
,,n,army,python3 ./test.py -f community/cmdline/fullopt.py ,,n,army,python3 ./test.py -f community/cmdline/fullopt.py
@ -37,6 +44,7 @@
#,,n,system-test,python3 ./test.py -f 8-stream/snode_restart.py -N 4 #,,n,system-test,python3 ./test.py -f 8-stream/snode_restart.py -N 4
,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4 ,,n,system-test,python3 ./test.py -f 8-stream/snode_restart_with_checkpoint.py -N 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/project_group.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tbname_vgroup.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tbname_vgroup.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_interval.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count_interval.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/compact-col.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/compact-col.py
@ -147,6 +155,10 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/like.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-28068.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-28068.py -Q 2
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-28068.py -Q 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/td-28068.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreDnode.py -N 5 -M 3 -i False
,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False ,,y,system-test,./pytest.sh python3 ./test.py -f 3-enterprise/restore/restoreVnode.py -N 5 -M 3 -i False
@ -1241,7 +1253,9 @@ e
,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim ,,y,script,./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim ,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim ,,y,script,./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim
,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
### refactor stream backend, reopen this case after rsma is refactored
#,,y,script,./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
,,y,script,./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim ,,y,script,./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim
,,n,script,./test.sh -f tsim/valgrind/checkError1.sim ,,n,script,./test.sh -f tsim/valgrind/checkError1.sim
,,n,script,./test.sh -f tsim/valgrind/checkError2.sim ,,n,script,./test.sh -f tsim/valgrind/checkError2.sim

View File

@ -1845,6 +1845,23 @@ class TDCom:
if i == 1: if i == 1:
self.record_history_ts = ts_value self.record_history_ts = ts_value
def get_subtable(self, tbname_pre):
tdSql.query(f'show tables')
tbname_list = list(map(lambda x:x[0], tdSql.queryResult))
for tbname in tbname_list:
if tbname_pre in tbname:
return tbname
def get_subtable_wait(self, tbname_pre):
tbname = self.get_subtable(tbname_pre)
latency = 0
while tbname is None:
tbname = self.get_subtable(tbname_pre)
if latency < self.stream_timeout:
latency += 1
time.sleep(1)
return tbname
def is_json(msg): def is_json(msg):
if isinstance(msg, str): if isinstance(msg, str):
try: try:

View File

@ -88,7 +88,7 @@ class TDSql:
self.affectedRows = self.cursor.execute(sql) self.affectedRows = self.cursor.execute(sql)
return self.affectedRows return self.affectedRows
except Exception as e: except Exception as e:
tdLog.notice("Try to execute sql again, query times: %d "%i) tdLog.notice("Try to execute sql again, execute times: %d "%i)
if i == queryTimes: if i == queryTimes:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e)) args = (caller.filename, caller.lineno, sql, repr(e))
@ -98,6 +98,23 @@ class TDSql:
time.sleep(1) time.sleep(1)
pass pass
def no_error(self, sql):
caller = inspect.getframeinfo(inspect.stack()[1][0])
expectErrOccurred = False
try:
self.cursor.execute(sql)
except BaseException as e:
expectErrOccurred = True
self.errno = e.errno
error_info = repr(e)
self.error_info = ','.join(error_info[error_info.index('(') + 1:-1].split(",")[:-1]).replace("'", "")
if expectErrOccurred:
tdLog.exit("%s(%d) failed: sql:%s, unexpect error '%s' occurred" % (caller.filename, caller.lineno, sql, self.error_info))
else:
tdLog.info("sql:%s, check passed, no ErrInfo occurred" % (sql))
def error(self, sql, expectedErrno = None, expectErrInfo = None, fullMatched = True): def error(self, sql, expectedErrno = None, expectErrInfo = None, fullMatched = True):
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
expectErrNotOccured = True expectErrNotOccured = True
@ -126,9 +143,9 @@ class TDSql:
if expectErrInfo != None: if expectErrInfo != None:
if expectErrInfo == self.error_info: if expectErrInfo == self.error_info:
tdLog.info("sql:%s, expected expectErrInfo '%s' occured" % (sql, expectErrInfo)) tdLog.info("sql:%s, expected ErrInfo '%s' occured" % (sql, expectErrInfo))
else: else:
tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo '%s' occured, but not expected expectErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo)) tdLog.exit("%s(%d) failed: sql:%s, ErrInfo '%s' occured, but not expected ErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
else: else:
if expectedErrno != None: if expectedErrno != None:
if expectedErrno in self.errno: if expectedErrno in self.errno:
@ -138,9 +155,9 @@ class TDSql:
if expectErrInfo != None: if expectErrInfo != None:
if expectErrInfo in self.error_info: if expectErrInfo in self.error_info:
tdLog.info("sql:%s, expected expectErrInfo '%s' occured" % (sql, expectErrInfo)) tdLog.info("sql:%s, expected ErrInfo '%s' occured" % (sql, expectErrInfo))
else: else:
tdLog.exit("%s(%d) failed: sql:%s, expectErrInfo %s occured, but not expected expectErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo)) tdLog.exit("%s(%d) failed: sql:%s, ErrInfo %s occured, but not expected ErrInfo '%s'" % (caller.filename, caller.lineno, sql, self.error_info, expectErrInfo))
return self.error_info return self.error_info
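A minimal sketch of how the two assertion helpers are meant to be paired (table and column names are illustrative, taken from the cases further below): no_error fails the case if the statement raises, while error fails it if the statement succeeds or the raised error does not match the expectation.
tdSql.no_error("SELECT COUNT(*) FROM t1 order by COUNT(*)")
tdSql.error("select c1 as name, c2 as name, c3 from t1 order by name",
            expectErrInfo='ambiguous', fullMatched=False)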

View File

@ -1505,7 +1505,7 @@ if $data10 != @21-05-05 18:19:21.000@ then
return -1 return -1
endi endi
sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts; sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by a.ts;
if $rows != 4 then if $rows != 4 then
return -1 return -1
endi endi
@ -1521,8 +1521,9 @@ endi
if $data30 != @21-05-05 18:19:14.000@ then if $data30 != @21-05-05 18:19:14.000@ then
return -1 return -1
endi endi
sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts;
sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts; sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by a.ts;
if $rows != 3 then if $rows != 3 then
return -1 return -1
endi endi
@ -1535,6 +1536,8 @@ endi
if $data20 != @21-05-05 18:19:10.000@ then if $data20 != @21-05-05 18:19:10.000@ then
return -1 return -1
endi endi
sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts;
sql select a.ts,a.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts;
sql select * from stb1 where c1 is null and c1 is not null; sql select * from stb1 where c1 is null and c1 is not null;
if $rows != 0 then if $rows != 0 then
@ -2469,7 +2472,7 @@ endi
if $data10 != @21-05-05 18:19:05.000@ then if $data10 != @21-05-05 18:19:05.000@ then
return -1 return -1
endi endi
sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts > '2021-05-05 18:19:03.000' and tb2_1.u1 < 5 order by ts; sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts > '2021-05-05 18:19:03.000' and tb2_1.u1 < 5 order by tb1.ts;
if $rows != 2 then if $rows != 2 then
return -1 return -1
endi endi
@ -2480,7 +2483,7 @@ if $data10 != @21-05-05 18:19:06.000@ then
return -1 return -1
endi endi
sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by ts; sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by tb1.ts;
if $rows != 2 then if $rows != 2 then
return -1 return -1
endi endi
@ -2491,7 +2494,7 @@ if $data10 != @21-05-05 18:19:07.000@ then
return -1 return -1
endi endi
sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by ts; sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by stb1.ts;
if $rows != 9 then if $rows != 9 then
return -1 return -1
endi endi
@ -2523,7 +2526,7 @@ if $data80 != @21-05-05 18:19:11.000@ then
return -1 return -1
endi endi
sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 and stb1.c1 > 2 and stb2.u1 <=4 order by ts; sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 and stb1.c1 > 2 and stb2.u1 <=4 order by stb1.ts;
if $rows != 3 then if $rows != 3 then
return -1 return -1
endi endi

View File

@ -13,6 +13,8 @@ sql create table ts3 using st tags(3,2,2);
sql create table ts4 using st tags(4,2,2); sql create table ts4 using st tags(4,2,2);
sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by tbname interval(10s); sql create stream stream_t1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by tbname interval(10s);
sleep 1000
sql insert into ts1 values(1648791213001,1,12,3,1.0); sql insert into ts1 values(1648791213001,1,12,3,1.0);
sql insert into ts2 values(1648791213001,1,12,3,1.0); sql insert into ts2 values(1648791213001,1,12,3,1.0);

View File

@ -48,16 +48,6 @@ if $rows != 2 then
goto loop0 goto loop0
endi endi
if $data00 != aaa-t1 then
print =====data00=$data00
goto loop0
endi
if $data10 != aaa-t2 then
print =====data10=$data10
goto loop0
endi
$loop_count = 0 $loop_count = 0
loop1: loop1:
@ -264,17 +254,6 @@ if $rows != 2 then
goto loop6 goto loop6
endi endi
if $data00 != tbn-t1 then
print =====data00=$data00
goto loop6
endi
if $data10 != tbn-t2 then
print =====data10=$data10
goto loop6
endi
print ===== step5 print ===== step5
print ===== tag name + table name print ===== tag name + table name
@ -312,21 +291,6 @@ if $rows != 3 then
goto loop7 goto loop7
endi endi
if $data00 != tbn-t1 then
print =====data00=$data00
goto loop7
endi
if $data10 != tbn-t2 then
print =====data10=$data10
goto loop7
endi
if $data20 != tbn-t3 then
print =====data20=$data20
goto loop7
endi
$loop_count = 0 $loop_count = 0
loop8: loop8:

View File

@ -47,16 +47,6 @@ if $rows != 2 then
goto loop0 goto loop0
endi endi
if $data00 != aaa-1 then
print =====data00=$data00
goto loop0
endi
if $data10 != aaa-2 then
print =====data10=$data10
goto loop0
endi
$loop_count = 0 $loop_count = 0
loop1: loop1:
@ -264,16 +254,6 @@ if $rows != 2 then
goto loop6 goto loop6
endi endi
if $data00 != tbn-1 then
print =====data00=$data00
goto loop6
endi
if $data10 != tbn-2 then
print =====data10=$data10
goto loop6
endi
print ===== step5 print ===== step5
print ===== tag name + table name print ===== tag name + table name
@ -311,21 +291,6 @@ if $rows != 3 then
goto loop7 goto loop7
endi endi
if $data00 != tbn-t1 then
print =====data00=$data00
goto loop7
endi
if $data10 != tbn-t2 then
print =====data10=$data10
goto loop7
endi
if $data20 != tbn-t3 then
print =====data20=$data20
goto loop7
endi
$loop_count = 0 $loop_count = 0
loop8: loop8:

View File

@ -415,11 +415,6 @@ if $data00 != aaa then
goto loop9 goto loop9
endi endi
if $data10 != aaa-1 then
print =====data00=$data00
goto loop9
endi
$loop_count = 0 $loop_count = 0
loop10: loop10:

View File

@ -129,10 +129,10 @@ sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * f
sql select * from stb1 where (c6 > 3.0 or c6 < 60) and c6 > 50 and (c6 != 53 or c6 != 63);; sql select * from stb1 where (c6 > 3.0 or c6 < 60) and c6 > 50 and (c6 != 53 or c6 != 63);;
sql select ts,c1 from stb1 where (c1 > 60 or c1 < 10 or (c1 > 20 and c1 < 30)) and ts > '2021-05-05 18:19:00.000' and ts < '2021-05-05 18:19:25.000' and c1 != 21 and c1 != 22 order by ts; sql select ts,c1 from stb1 where (c1 > 60 or c1 < 10 or (c1 > 20 and c1 < 30)) and ts > '2021-05-05 18:19:00.000' and ts < '2021-05-05 18:19:25.000' and c1 != 21 and c1 != 22 order by ts;
sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts;; sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts;;
sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts;; sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by a.ts;;
sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts;; sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by a.ts;;
sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by ts;; sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by tb1.ts;;
sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by ts;; sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by stb1.ts;;
sql select count(*) from stb1 where tbname like 'tb%' or c1 > 0;; sql select count(*) from stb1 where tbname like 'tb%' or c1 > 0;;
sql select * from stb1 where tbname like 'tb%' and (t1=1 or t2=2 or t3=3) and t1 > 2 order by ts;; sql select * from stb1 where tbname like 'tb%' and (t1=1 or t2=2 or t3=3) and t1 > 2 order by ts;;

View File

@ -280,7 +280,9 @@
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim ./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim ./test.sh -f tsim/sma/rsmaCreateInsertQueryDelete.sim
./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
### refactor stream backend; reopen this case after rsma is refactored
#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim ./test.sh -f tsim/sync/vnodesnapshot-rsma-test.sim
./test.sh -f tsim/valgrind/checkError1.sim ./test.sh -f tsim/valgrind/checkError1.sim
./test.sh -f tsim/valgrind/checkError2.sim ./test.sh -f tsim/valgrind/checkError2.sim

View File

@ -162,6 +162,16 @@ class TDTestCase:
case6 = {"col": "c9"} case6 = {"col": "c9"}
self.checkcsum(**case6) self.checkcsum(**case6)
# unsigned check
case61 = {"col": "c11"}
self.checkcsum(**case61)
case62 = {"col": "c12"}
self.checkcsum(**case62)
case63 = {"col": "c13"}
self.checkcsum(**case63)
case64 = {"col": "c14"}
self.checkcsum(**case64)
# case7~8: nested query # case7~8: nested query
case7 = {"table_expr": "(select ts,c1 from db.stb1 order by ts, tbname )"} case7 = {"table_expr": "(select ts,c1 from db.stb1 order by ts, tbname )"}
self.checkcsum(**case7) self.checkcsum(**case7)
@ -317,14 +327,14 @@ class TDTestCase:
f"insert into t{i} values (" f"insert into t{i} values ("
f"{basetime + (j+1)*10 + i * msec_per_min}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " f"{basetime + (j+1)*10 + i * msec_per_min}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}', {j},{j},{j},{j} )"
) )
tdSql.execute( tdSql.execute(
f"insert into t{i} values (" f"insert into t{i} values ("
f"{basetime - (j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " f"{basetime - (j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1', {j*2},{j*2},{j*2},{j*2} )"
) )
tdSql.execute( tdSql.execute(
f"insert into tt{i} values ( {basetime-(j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)} )" f"insert into tt{i} values ( {basetime-(j+1) * 10 + i * msec_per_min}, {random.randint(1, 200)} )"
@ -340,8 +350,8 @@ class TDTestCase:
tdSql.execute( tdSql.execute(
"create stable db.stb1 (\ "create stable db.stb1 (\
ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16),\
) \ c11 int unsigned, c12 smallint unsigned, c13 tinyint unsigned, c14 bigint unsigned) \
tags(st1 int)" tags(st1 int)"
) )
tdSql.execute( tdSql.execute(
@ -373,10 +383,10 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):") tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
self.csum_test_table(tbnum) self.csum_test_table(tbnum)
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values " tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7,c11) values "
f"({nowtime - (per_table_rows + 1) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") f"({nowtime - (per_table_rows + 1) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1}, 128)")
tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7) values " tdSql.execute(f"insert into db.t1(ts, c1,c2,c5,c7,c11) values "
f"({nowtime - (per_table_rows + 2) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})") f"({nowtime - (per_table_rows + 2) * 10 + i * msec_per_min}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1}, 129)")
self.csum_current_query() self.csum_current_query()
self.csum_error_query() self.csum_error_query()
@ -460,7 +470,7 @@ class TDTestCase:
tdSql.checkRows(40) tdSql.checkRows(40)
# bug need fix # bug need fix
tdSql.query("select tbname , csum(c1) from db.stb1 partition by tbname") tdSql.query("select tbname , csum(c1), csum(c12) from db.stb1 partition by tbname")
tdSql.checkRows(40) tdSql.checkRows(40)
tdSql.query("select tbname , csum(st1) from db.stb1 partition by tbname") tdSql.query("select tbname , csum(st1) from db.stb1 partition by tbname")
tdSql.checkRows(70) tdSql.checkRows(70)
@ -468,7 +478,7 @@ class TDTestCase:
tdSql.checkRows(7) tdSql.checkRows(7)
# partition by tags # partition by tags
tdSql.query("select st1 , csum(c1) from db.stb1 partition by st1") tdSql.query("select st1 , csum(c1), csum(c13) from db.stb1 partition by st1")
tdSql.checkRows(40) tdSql.checkRows(40)
tdSql.query("select csum(c1) from db.stb1 partition by st1") tdSql.query("select csum(c1) from db.stb1 partition by st1")
tdSql.checkRows(40) tdSql.checkRows(40)

View File

@ -861,11 +861,56 @@ class TDTestCase:
self.support_super_table_test() self.support_super_table_test()
def initLastRowDelayTest(self, dbname="db"):
tdSql.execute(f"drop database if exists {dbname} ")
create_db_sql = f"create database if not exists {dbname} keep 3650 duration 1000 cachemodel 'NONE' REPLICA 1"
tdSql.execute(create_db_sql)
time.sleep(3)
tdSql.execute(f"use {dbname}")
tdSql.execute(f'create stable {dbname}.st(ts timestamp, v_int int, v_float float) TAGS (ctname varchar(32))')
tdSql.execute(f"create table {dbname}.ct1 using {dbname}.st tags('ct1')")
tdSql.execute(f"create table {dbname}.ct2 using {dbname}.st tags('ct2')")
tdSql.execute(f"insert into {dbname}.st(tbname,ts,v_float, v_int) values('ct1',1630000000000,86,86)")
tdSql.execute(f"insert into {dbname}.st(tbname,ts,v_float, v_int) values('ct1',1630000021255,59,59)")
tdSql.execute(f'flush database {dbname}')
tdSql.execute(f'select last(*) from {dbname}.st')
tdSql.execute(f'select last_row(*) from {dbname}.st')
tdSql.execute(f"insert into {dbname}.st(tbname,ts) values('ct1',1630000091255)")
tdSql.execute(f'flush database {dbname}')
tdSql.execute(f'select last(*) from {dbname}.st')
tdSql.execute(f'select last_row(*) from {dbname}.st')
tdSql.execute(f'alter database {dbname} cachemodel "both"')
tdSql.query(f'select last(*) from {dbname}.st')
tdSql.checkData(0 , 1 , 59)
tdSql.query(f'select last_row(*) from {dbname}.st')
tdSql.checkData(0 , 1 , None)
tdSql.checkData(0 , 2 , None)
tdLog.printNoPrefix("========== delay test init success ==============")
def lastRowDelayTest(self, dbname="db"):
tdLog.printNoPrefix("========== delay test start ==============")
tdSql.execute(f"use {dbname}")
tdSql.query(f'select last(*) from {dbname}.st')
tdSql.checkData(0 , 1 , 59)
tdSql.query(f'select last_row(*) from {dbname}.st')
tdSql.checkData(0 , 1 , None)
tdSql.checkData(0 , 2 , None)
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
# tdSql.prepare() # tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============") tdLog.printNoPrefix("==========step1:create table ==============")
self.initLastRowDelayTest("DELAYTEST")
# cache_last 0 # cache_last 0
self.prepare_datas("'NONE' ") self.prepare_datas("'NONE' ")
self.prepare_tag_datas("'NONE'") self.prepare_tag_datas("'NONE'")
@ -890,6 +935,8 @@ class TDTestCase:
self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step,"'BOTH'") self.insert_datas_and_check_abs(self.tb_nums,self.row_nums,self.time_step,"'BOTH'")
self.basic_query() self.basic_query()
self.lastRowDelayTest("DELAYTEST")
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -1310,7 +1310,7 @@ class TDTestCase:
ts = ts + 20 ts = ts + 20
tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;")
tdSql.checkRows(1) tdSql.checkRows(1)
tdSql.checkData(0, 1, 52); tdSql.checkData(0, 1, 52)
#stables #stables
@ -1347,16 +1347,16 @@ class TDTestCase:
tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;")
tdSql.checkRows(6) tdSql.checkRows(6)
tdSql.checkData(0, 1, 162); tdSql.checkData(0, 1, 162)
tdSql.checkData(1, 1, 200); tdSql.checkData(1, 1, 200)
tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;")
tdSql.checkRows(1) tdSql.checkRows(1)
tdSql.checkData(0, 1, 62); tdSql.checkData(0, 1, 62)
tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;")
tdSql.checkRows(1) tdSql.checkRows(1)
tdSql.checkData(0, 1, 62); tdSql.checkData(0, 1, 62)
#test special character #test special character
@ -1366,25 +1366,25 @@ class TDTestCase:
tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;")
tdSql.checkRows(7) tdSql.checkRows(7)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 162); tdSql.checkData(1, 1, 162)
tdSql.checkData(2, 1, 200); tdSql.checkData(2, 1, 200)
tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;") tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 62); tdSql.checkData(1, 1, 62)
tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;") tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 62); tdSql.checkData(1, 1, 62)
tdSql.query(f"select count(*) from nested.stable_null_childtable where tbname ='$^%$%^&' ;") tdSql.query(f"select count(*) from nested.stable_null_childtable where tbname ='$^%$%^&' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
#test csv #test csv
sql1 = "select tbname,ts,q_int,q_binary from nested.stable_1 >>'%s/stable_1.csv';" %self.testcasePath sql1 = "select tbname,ts,q_int,q_binary from nested.stable_1 >>'%s/stable_1.csv';" %self.testcasePath
@ -1404,31 +1404,31 @@ class TDTestCase:
tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;")
tdSql.checkRows(7) tdSql.checkRows(7)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 162); tdSql.checkData(1, 1, 162)
tdSql.checkData(2, 1, 200); tdSql.checkData(2, 1, 200)
tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;") tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
tdSql.query(f"select count(q_bool) from nested.stable_1;") tdSql.query(f"select count(q_bool) from nested.stable_1;")
tdSql.checkData(0, 0, 0); tdSql.checkData(0, 0, 0)
tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 62); tdSql.checkData(1, 1, 62)
tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;") tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
tdSql.query(f"select count(q_bool) from nested.stable_null_data;") tdSql.query(f"select count(q_bool) from nested.stable_null_data;")
tdSql.checkData(0, 0, 0); tdSql.checkData(0, 0, 0)
tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 62); tdSql.checkData(1, 1, 62)
tdSql.query(f"select count(*) from nested.stable_null_childtable where tbname ='$^%$%^&' ;") tdSql.query(f"select count(*) from nested.stable_null_childtable where tbname ='$^%$%^&' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
tdSql.query(f"select count(q_bool) from nested.stable_null_childtable;") tdSql.query(f"select count(q_bool) from nested.stable_null_childtable;")
tdSql.checkData(0, 0, 0); tdSql.checkData(0, 0, 0)
tdSql.query(f"delete from nested.stable_1;") tdSql.query(f"delete from nested.stable_1;")
@ -1440,31 +1440,40 @@ class TDTestCase:
tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_1 group by tbname order by tbname;")
tdSql.checkRows(7) tdSql.checkRows(7)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 162); tdSql.checkData(1, 1, 162)
tdSql.checkData(2, 1, 200); tdSql.checkData(2, 1, 200)
tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;") tdSql.query(f"select count(*) from nested.stable_1 where tbname ='!@!@$$^$' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
tdSql.query(f"select count(q_bool) from nested.stable_1;") tdSql.query(f"select count(q_bool) from nested.stable_1;")
tdSql.checkData(0, 0, 0); tdSql.checkData(0, 0, 0)
tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_data group by tbname order by tbname;")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 62); tdSql.checkData(1, 1, 62)
tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;") tdSql.query(f"select count(*) from nested.stable_null_data where tbname ='%^$^&^&' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
tdSql.query(f"select count(q_bool) from nested.stable_null_data;") tdSql.query(f"select count(q_bool) from nested.stable_null_data;")
tdSql.checkData(0, 0, 0); tdSql.checkData(0, 0, 0)
tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;") tdSql.query(f"select tbname,count(*) from nested.stable_null_childtable group by tbname order by tbname;")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkData(0, 1, 1); tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 62); tdSql.checkData(1, 1, 62)
tdSql.query(f"select count(*) from nested.stable_null_childtable where tbname ='$^%$%^&' ;") tdSql.query(f"select count(*) from nested.stable_null_childtable where tbname ='$^%$%^&' ;")
tdSql.checkData(0, 0, 1); tdSql.checkData(0, 0, 1)
tdSql.query(f"select count(q_bool) from nested.stable_null_childtable;") tdSql.query(f"select count(q_bool) from nested.stable_null_childtable;")
tdSql.checkData(0, 0, 0); tdSql.checkData(0, 0, 0)
tdSql.query(f"select tbname,count(*) from nested.stable_1 where ts is not null group by tbname order by tbname;")
tdSql.checkRows(7)
tdSql.query(f"select tbname,count(*) from nested.stable_1 where ts is null group by tbname order by tbname;")
tdSql.checkRows(7)
tdSql.query(f"select tbname,last(*) from nested.stable_1 where ts is not null group by tbname order by tbname;")
tdSql.checkRows(3)
tdSql.query(f"select tbname,last(*) from nested.stable_1 where ts is null group by tbname order by tbname;")
tdSql.checkRows(0)
#test stable #test stable

View File

@ -278,19 +278,19 @@ class TDTestCase:
def queryOrderByAgg(self): def queryOrderByAgg(self):
tdSql.query("SELECT COUNT(*) FROM t1 order by COUNT(*)") tdSql.no_error("SELECT COUNT(*) FROM t1 order by COUNT(*)")
tdSql.query("SELECT COUNT(*) FROM t1 order by last(c2)") tdSql.no_error("SELECT COUNT(*) FROM t1 order by last(c2)")
tdSql.query("SELECT c1 FROM t1 order by last(ts)") tdSql.no_error("SELECT c1 FROM t1 order by last(ts)")
tdSql.query("SELECT ts FROM t1 order by last(ts)") tdSql.no_error("SELECT ts FROM t1 order by last(ts)")
tdSql.query("SELECT last(ts), ts, c1 FROM t1 order by 2") tdSql.no_error("SELECT last(ts), ts, c1 FROM t1 order by 2")
tdSql.query("SELECT ts, last(ts) FROM t1 order by last(ts)") tdSql.no_error("SELECT ts, last(ts) FROM t1 order by last(ts)")
tdSql.query(f"SELECT * FROM t1 order by last(ts)") tdSql.no_error(f"SELECT * FROM t1 order by last(ts)")
tdSql.query(f"SELECT last(ts) as t2, ts FROM t1 order by 1") tdSql.query(f"SELECT last(ts) as t2, ts FROM t1 order by 1")
tdSql.checkRows(1) tdSql.checkRows(1)
@ -302,6 +302,18 @@ class TDTestCase:
tdSql.error(f"SELECT last(ts) as t2, ts FROM t1 order by last(t2)") tdSql.error(f"SELECT last(ts) as t2, ts FROM t1 order by last(t2)")
def queryOrderByAmbiguousName(self):
tdSql.error(sql="select c1 as name, c2 as name, c3 from t1 order by name", expectErrInfo='ambiguous',
fullMatched=False)
tdSql.error(sql="select c1, c2 as c1, c3 from t1 order by c1", expectErrInfo='ambiguous', fullMatched=False)
tdSql.error(sql='select last(ts), last(c1) as name ,last(c2) as name,last(c3) from t1 order by name',
expectErrInfo='ambiguous', fullMatched=False)
tdSql.no_error("select c1 as name, c2 as c1, c3 from t1 order by c1")
tdSql.no_error('select c1 as name from (select c1, c2 as name from st) order by name')
# run # run
def run(self): def run(self):
@ -317,6 +329,8 @@ class TDTestCase:
# agg # agg
self.queryOrderByAgg() self.queryOrderByAgg()
# td-28332
self.queryOrderByAmbiguousName()
# stop # stop
def stop(self): def stop(self):

View File

@ -0,0 +1,65 @@
from wsgiref.headers import tspecials
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.rowNum = 10
self.batchNum = 5
self.ts = 1537146000000
def run(self):
dbname = "db"
tdSql.prepare()
tdSql.execute(f'''create table sta(ts timestamp, col1 int, col2 bigint) tags(tg1 int, tg2 binary(20))''')
tdSql.execute(f"create table sta1 using sta tags(1, 'a')")
tdSql.execute(f"create table sta2 using sta tags(2, 'b')")
tdSql.execute(f"create table sta3 using sta tags(3, 'c')")
tdSql.execute(f"create table sta4 using sta tags(4, 'a')")
tdSql.execute(f"insert into sta1 values(1537146000001, 11, 110)")
tdSql.execute(f"insert into sta1 values(1537146000002, 12, 120)")
tdSql.execute(f"insert into sta1 values(1537146000003, 13, 130)")
tdSql.execute(f"insert into sta2 values(1537146000001, 21, 210)")
tdSql.execute(f"insert into sta2 values(1537146000002, 22, 220)")
tdSql.execute(f"insert into sta2 values(1537146000003, 23, 230)")
tdSql.execute(f"insert into sta3 values(1537146000001, 31, 310)")
tdSql.execute(f"insert into sta3 values(1537146000002, 32, 320)")
tdSql.execute(f"insert into sta3 values(1537146000003, 33, 330)")
tdSql.execute(f"insert into sta4 values(1537146000001, 41, 410)")
tdSql.execute(f"insert into sta4 values(1537146000002, 42, 420)")
tdSql.execute(f"insert into sta4 values(1537146000003, 43, 430)")
tdSql.execute(f'''create table stb(ts timestamp, col1 int, col2 bigint) tags(tg1 int, tg2 binary(20))''')
tdSql.execute(f"create table stb1 using stb tags(1, 'a')")
tdSql.execute(f"create table stb2 using stb tags(2, 'b')")
tdSql.execute(f"create table stb3 using stb tags(3, 'c')")
tdSql.execute(f"create table stb4 using stb tags(4, 'a')")
tdSql.execute(f"insert into stb1 values(1537146000001, 911, 9110)")
tdSql.execute(f"insert into stb1 values(1537146000002, 912, 9120)")
tdSql.execute(f"insert into stb1 values(1537146000003, 913, 9130)")
tdSql.execute(f"insert into stb2 values(1537146000001, 921, 9210)")
tdSql.execute(f"insert into stb2 values(1537146000002, 922, 9220)")
tdSql.execute(f"insert into stb2 values(1537146000003, 923, 9230)")
tdSql.execute(f"insert into stb3 values(1537146000001, 931, 9310)")
tdSql.execute(f"insert into stb3 values(1537146000002, 932, 9320)")
tdSql.execute(f"insert into stb3 values(1537146000003, 933, 9330)")
tdSql.execute(f"insert into stb4 values(1537146000001, 941, 9410)")
tdSql.execute(f"insert into stb4 values(1537146000002, 942, 9420)")
tdSql.execute(f"insert into stb4 values(1537146000003, 943, 9430)")
tdSql.query("select * from (select ts, col1 from sta partition by tbname) limit 2");
tdSql.checkRows(2)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -245,6 +245,10 @@ class TDTestCase:
# stddev # stddev
tdSql.checkData(0, 5, 0, True) tdSql.checkData(0, 5, 0, True)
sql = f"select twa({col}) from {dbname}.d0"
tdSql.query(sql)
tdSql.checkData(0, 0, 1, True)
i += 1 i += 1
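For context, twa() here is TDengine's time-weighted average; below is a hedged sketch of the arithmetic behind the checked value, assuming the queried column holds the constant value 1 across the window, in which case any time weighting yields 1 (a simple piecewise-constant model with illustrative timestamps):
# time-weighted average over irregular samples: sum(v_i * dt_i) / sum(dt_i)
ts = [0, 10, 30, 60]                       # sample timestamps (ms), illustrative
vals = [1, 1, 1]                           # value assumed over each interval
dts = [b - a for a, b in zip(ts, ts[1:])]  # interval lengths: [10, 20, 30]
twa = sum(v * dt for v, dt in zip(vals, dts)) / sum(dts)
print(twa)                                 # -> 1.0, matching checkData(0, 0, 1, True)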

View File

@ -0,0 +1,68 @@
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), True)
tdSql.execute("drop database if exists td_28068;")
tdSql.execute("create database td_28068;")
tdSql.execute("create database if not exists td_28068;")
tdSql.execute("create stable td_28068.st (ts timestamp, test_case nchar(10), time_cost float, num float) tags (branch nchar(10), scenario nchar(10));")
tdSql.execute("insert into td_28068.ct1 using td_28068.st (branch, scenario) tags ('3.0', 'scenario1') values (now(), 'query1', 1,2);")
tdSql.execute("insert into td_28068.ct1 using td_28068.st (branch, scenario) tags ('3.0', 'scenario1') values (now(), 'query1', 2,3);")
tdSql.execute("insert into td_28068.ct2 using td_28068.st (branch, scenario) tags ('3.0', 'scenario2') values (now(), 'query1', 10,1);")
tdSql.execute("insert into td_28068.ct2 using td_28068.st (branch, scenario) tags ('3.0', 'scenario2') values (now(), 'query1', 11,5);")
tdSql.execute("insert into td_28068.ct3 using td_28068.st (branch, scenario) tags ('3.1', 'scenario1') values (now(), 'query1', 20,4);")
tdSql.execute("insert into td_28068.ct3 using td_28068.st (branch, scenario) tags ('3.1', 'scenario1') values (now(), 'query1', 30,1);")
tdSql.execute("insert into td_28068.ct4 using td_28068.st (branch, scenario) tags ('3.1', 'scenario2') values (now(), 'query1', 8,8);")
tdSql.execute("insert into td_28068.ct4 using td_28068.st (branch, scenario) tags ('3.1', 'scenario2') values (now(), 'query1', 9,10);")
def run(self):
tdSql.error('select last(ts) as ts, last(branch) as branch, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch);')
tdSql.error('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch), last(scenario); ')
tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch); ')
tdSql.checkRows(4)
tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario, last(test_case) from td_28068.st group by branch, scenario order by last(branch), last(test_case);')
tdSql.checkRows(4)
tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case from td_28068.st group by branch, scenario order by last(branch), last(scenario);')
tdSql.checkRows(4)
tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case from td_28068.st group by branch, scenario order by branch1, scenario1;')
tdSql.checkRows(4)
tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case from td_28068.st group by tbname; ')
tdSql.checkRows(4)
tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case from td_28068.st group by branch, scenario order by test_case;')
tdSql.checkRows(4)
tdSql.query('select last(ts) as ts, last(branch) as branch1, last(scenario) as scenario1, last(test_case) as test_case1 from td_28068.st group by branch, scenario order by last(test_case);')
tdSql.checkRows(4)
tdSql.query('select time_cost, num, time_cost + num as final_cost from td_28068.st partition by branch; ')
tdSql.checkRows(8)
tdSql.query('select count(*) from td_28068.st partition by branch order by branch; ')
tdSql.checkRows(2)
tdSql.query('select time_cost, num, time_cost + num as final_cost from td_28068.st order by time_cost;')
tdSql.checkRows(8)
tdSql.query('select time_cost, num, time_cost + num as final_cost from td_28068.st order by final_cost;')
tdSql.checkRows(8)
tdSql.execute("drop database if exists td_28068;")
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,218 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
from util.cluster import *
sys.path.append("./7-tmq")
from tmqCommon import *
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 10
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
def getDataPath(self):
selfPath = tdCom.getBuildPath()
return selfPath + '/../sim/dnode%d/data/vnode/vnode%d/wal/*';
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 1000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 60,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tdCom.drop_all_db()
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], wal_retention_period=36000,vgroups=paraDict["vgroups"],replica=self.replicaVar)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
return
def restartAndRemoveWal(self, deleteWal):
tdDnodes = cluster.dnodes
tdSql.query("select * from information_schema.ins_vnodes")
for result in tdSql.queryResult:
if result[2] == 'dbt':
tdLog.debug("dnode is %d"%(result[0]))
dnodeId = result[0]
vnodeId = result[1]
tdDnodes[dnodeId - 1].stoptaosd()
time.sleep(1)
dataPath = self.getDataPath()
dataPath = dataPath%(dnodeId,vnodeId)
tdLog.debug("dataPath:%s"%dataPath)
if deleteWal:
if os.system('rm -rf ' + dataPath) != 0:
tdLog.exit("rm error")
tdDnodes[dnodeId - 1].starttaosd()
time.sleep(1)
break
tdLog.debug("restart dnode ok")
def splitVgroups(self):
tdSql.query("select * from information_schema.ins_vnodes")
vnodeId = 0
for result in tdSql.queryResult:
if result[2] == 'dbt':
vnodeId = result[1]
tdLog.debug("vnode is %d"%(vnodeId))
break
splitSql = "split vgroup %d" %(vnodeId)
tdLog.debug("splitSql:%s"%(splitSql))
tdSql.query(splitSql)
tdLog.debug("splitSql ok")
def tmqCase1(self, deleteWal=False):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb1',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 1000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 120,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
# expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb ")
queryString = "stable %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
tdLog.info("create ctb1")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create ctb2")
paraDict2 = paraDict.copy()
paraDict2['ctbPrefix'] = "ctb2"
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict2['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert ctb1 data")
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
tmqCom.getStartConsumeNotifyFromTmqsim()
tmqCom.getStartCommitNotifyFromTmqsim()
#restart dnode & remove wal
self.restartAndRemoveWal(deleteWal)
# split vgroup
self.splitVgroups()
tdLog.info("insert ctb2 data")
pInsertThread1 = tmqCom.asyncInsertDataByInterlace(paraDict2)
pInsertThread.join()
pInsertThread1.join()
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectrowcnt / 2 > resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectrowcnt / 2, resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
time.sleep(2)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
if deleteWal == True:
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def run(self):
self.prepareTestEnv()
self.tmqCase1(False)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,222 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
from util.cluster import *
sys.path.append("./7-tmq")
from tmqCommon import *
from util.cluster import *
sys.path.append("./6-cluster")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 10
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
def getDataPath(self):
selfPath = tdCom.getBuildPath()
return selfPath + '/../sim/dnode%d/data/vnode/vnode%d/wal/*';
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 1000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 60,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tdCom.drop_all_db()
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], wal_retention_period=36000,vgroups=paraDict["vgroups"],replica=self.replicaVar)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
return
def restartAndRemoveWal(self, deleteWal):
tdDnodes = cluster.dnodes
tdSql.query("select * from information_schema.ins_vnodes")
for result in tdSql.queryResult:
if result[2] == 'dbt':
tdLog.debug("dnode is %d"%(result[0]))
dnodeId = result[0]
vnodeId = result[1]
tdDnodes[dnodeId - 1].stoptaosd()
time.sleep(1)
dataPath = self.getDataPath()
dataPath = dataPath%(dnodeId,vnodeId)
tdLog.debug("dataPath:%s"%dataPath)
if deleteWal:
if os.system('rm -rf ' + dataPath) != 0:
tdLog.exit("rm error")
tdDnodes[dnodeId - 1].starttaosd()
time.sleep(1)
break
tdLog.debug("restart dnode ok")
def splitVgroups(self):
tdSql.query("select * from information_schema.ins_vnodes")
vnodeId = 0
for result in tdSql.queryResult:
if result[2] == 'dbt':
vnodeId = result[1]
tdLog.debug("vnode is %d"%(vnodeId))
break
splitSql = "split vgroup %d" %(vnodeId)
tdLog.debug("splitSql:%s"%(splitSql))
tdSql.query(splitSql)
tdLog.debug("splitSql ok")
def tmqCase1(self, deleteWal=False):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb1',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 1000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 180,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
# expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:200, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
tdLog.info("create ctb1")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create ctb2")
paraDict2 = paraDict.copy()
paraDict2['ctbPrefix'] = "ctb2"
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict2['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert ctb1 data")
pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
tmqCom.getStartConsumeNotifyFromTmqsim()
tmqCom.getStartCommitNotifyFromTmqsim()
#restart dnode & remove wal
self.restartAndRemoveWal(deleteWal)
# split vgroup
self.splitVgroups()
tdLog.info("insert ctb2 data")
pInsertThread1 = tmqCom.asyncInsertDataByInterlace(paraDict2)
pInsertThread.join()
pInsertThread1.join()
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectrowcnt / 2 > resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectrowcnt / 2, resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
time.sleep(2)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
if deleteWal == True:
clusterComCheck.check_vgroups_status(vgroup_numbers=2,db_replica=self.replicaVar,db_name="dbt",count_number=240)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def run(self):
self.prepareTestEnv()
self.tmqCase1(False)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -190,6 +190,8 @@ class TDTestCase:
pInsertThread1.join() pInsertThread1.join()
expectRows = 1 expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows) resultList = tmqCom.selectConsumeResult(expectRows)
if expectrowcnt / 2 > resultList[0]: if expectrowcnt / 2 > resultList[0]:
@ -209,8 +211,6 @@ class TDTestCase:
def run(self): def run(self):
self.prepareTestEnv() self.prepareTestEnv()
self.tmqCase1(True) self.tmqCase1(True)
self.prepareTestEnv()
self.tmqCase1(False)
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -207,8 +207,6 @@ class TDTestCase:
def run(self): def run(self):
self.prepareTestEnv() self.prepareTestEnv()
self.tmqCase1(True) self.tmqCase1(True)
self.prepareTestEnv()
self.tmqCase1(False)
def stop(self): def stop(self):
tdSql.close() tdSql.close()

View File

@ -106,14 +106,18 @@ class TDTestCase:
ptn_counter = 0 ptn_counter = 0
for c1_value in tdSql.queryResult: for c1_value in tdSql.queryResult:
if partition == "c1": if partition == "c1":
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}')
tdSql.query(f'select count(*) from `{tbname}`')
elif partition is None: elif partition is None:
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}')
tdSql.query(f'select count(*) from `{tbname}`')
elif partition == "abs(c1)": elif partition == "abs(c1)":
abs_c1_value = abs(c1_value[1]) abs_c1_value = abs(c1_value[1])
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
tdSql.query(f'select count(*) from `{tbname}`')
elif partition == "tbname" and ptn_counter == 0: elif partition == "tbname" and ptn_counter == 0:
tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;') tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}')
tdSql.query(f'select count(*) from `{tbname}`')
ptn_counter += 1 ptn_counter += 1
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
@ -121,14 +125,18 @@ class TDTestCase:
ptn_counter = 0 ptn_counter = 0
for c1_value in tdSql.queryResult: for c1_value in tdSql.queryResult:
if partition == "c1": if partition == "c1":
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;') tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}')
tdSql.query(f'select count(*) from `{tbname}`')
elif partition is None: elif partition is None:
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;') tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}')
tdSql.query(f'select count(*) from `{tbname}`')
elif partition == "abs(c1)": elif partition == "abs(c1)":
abs_c1_value = abs(c1_value[1]) abs_c1_value = abs(c1_value[1])
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;') tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
tdSql.query(f'select count(*) from `{tbname}`')
elif partition == "tbname" and ptn_counter == 0: elif partition == "tbname" and ptn_counter == 0:
tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;') tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}')
tdSql.query(f'select count(*) from `{tbname}`')
ptn_counter += 1 ptn_counter += 1
tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
@ -208,8 +216,8 @@ class TDTestCase:
self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True) self.at_once_interval(interval=random.randint(10, 15), partition=None, delete=True)
self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end') self.at_once_interval(interval=random.randint(10, 15), partition=self.tdCom.stream_case_when_tbname, case_when=f'case when {self.tdCom.stream_case_when_tbname} = tbname then {self.tdCom.partition_tbname_alias} else tbname end')
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL") self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_history_value=1, fill_value="NULL")
# for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: for fill_value in ["NULL", "PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]: # for fill_value in ["PREV", "NEXT", "LINEAR", "VALUE,1,2,3,4,5,6,7,8,9,10,11,1,2,3,4,5,6,7,8,9,10,11"]:
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value) self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value)
self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True) self.at_once_interval(interval=random.randint(10, 15), partition="tbname", fill_value=fill_value, delete=True)

View File

@@ -153,12 +153,15 @@ class TDTestCase:
             ptn_counter = 0
             for c1_value in tdSql.queryResult:
                 if partition == "c1":
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "abs(c1)":
                     abs_c1_value = abs(c1_value[1])
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "tbname" and ptn_counter == 0:
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                     ptn_counter += 1
                 else:
                     tdSql.query(f'select cast(cast(cast({c1_value[1]} as int unsigned) as bigint) as varchar(100))')
@@ -166,7 +169,8 @@ class TDTestCase:
             if subtable == "constant":
                 return
             else:
-                tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}`;')
+                tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{subtable_value}{self.tdCom.subtable_suffix}')
+                tdSql.query(f'select count(*) from `{tbname}`')
                 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
     def run(self):

View File

@@ -171,35 +171,43 @@ class TDTestCase:
             for c1_value in tdSql.queryResult:
                 if c1_value[1] is not None:
                     if partition == "c1":
-                        tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
+                        tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}')
+                        tdSql.query(f'select count(*) from `{tbname}`')
                     elif partition == "abs(c1)":
                         if subtable:
                             abs_c1_value = abs(c1_value[1])
-                            tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
+                            tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                            tdSql.query(f'select count(*) from `{tbname}`')
                         else:
-                            tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
+                            tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}')
+                            tdSql.query(f'select count(*) from `{tbname}`')
                     elif partition == "tbname" and ptn_counter == 0:
-                        tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
+                        tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}')
+                        tdSql.query(f'select count(*) from `{tbname}`')
                         ptn_counter += 1
-                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
+                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) if subtable is not None else tdSql.checkEqual(tdSql.queryResult[0][0] >= 0, True)
             tdSql.query(f'select * from {self.tb_name}')
             ptn_counter = 0
             for c1_value in tdSql.queryResult:
                 if c1_value[1] is not None:
                     if partition == "c1":
-                        tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
+                        tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}')
+                        tdSql.query(f'select count(*) from `{tbname}`')
                     elif partition == "abs(c1)":
                         if subtable:
                             abs_c1_value = abs(c1_value[1])
-                            tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
+                            tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                            tdSql.query(f'select count(*) from `{tbname}`')
                         else:
-                            tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
+                            tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}')
+                            tdSql.query(f'select count(*) from `{tbname}`')
                     elif partition == "tbname" and ptn_counter == 0:
-                        tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
+                        tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}')
+                        tdSql.query(f'select count(*) from `{tbname}`')
                         ptn_counter += 1
-                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
+                    tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True) if subtable is not None else tdSql.checkEqual(tdSql.queryResult[0][0] >= 0, True)

View File

@@ -94,15 +94,19 @@ class TDTestCase:
             for c1_value in tdSql.queryResult:
                 if partition == "c1":
                     if subtable:
-                        tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
+                        tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}')
+                        tdSql.query(f'select count(*) from `{tbname}`')
                     else:
-                        tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
+                        tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}')
+                        tdSql.query(f'select count(*) from `{tbname}`')
                     return
                 elif partition == "abs(c1)":
                     abs_c1_value = abs(c1_value[1])
-                    tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "tbname" and ptn_counter == 0:
-                    tdSql.query(f'select count(*) from `{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.ctb_name}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                     ptn_counter += 1
                 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
@@ -111,17 +115,20 @@ class TDTestCase:
             for c1_value in tdSql.queryResult:
                 if partition == "c1":
                     if subtable:
-                        tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
+                        tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}')
+                        tdSql.query(f'select count(*) from `{tbname}`')
                     else:
-                        tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}`;')
+                        tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{partition_elm_alias}{self.tdCom.subtable_suffix}')
+                        tdSql.query(f'select count(*) from `{tbname}`')
                     return
                 elif partition == "abs(c1)":
                     abs_c1_value = abs(c1_value[1])
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "tbname" and ptn_counter == 0:
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                     ptn_counter += 1
                 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

View File

@@ -117,14 +117,18 @@ class TDTestCase:
             ptn_counter = 0
             for c1_value in tdSql.queryResult:
                 if partition == "c1":
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition is None:
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "abs(c1)":
                     abs_c1_value = abs(c1_value[1])
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "tbname" and ptn_counter == 0:
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                     ptn_counter += 1
                 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
@@ -132,14 +136,18 @@ class TDTestCase:
             ptn_counter = 0
             for c1_value in tdSql.queryResult:
                 if partition == "c1":
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs(c1_value[1])}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition is None:
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}no_partition{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "abs(c1)":
                     abs_c1_value = abs(c1_value[1])
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "tbname" and ptn_counter == 0:
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                     ptn_counter += 1
                 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)

View File

@@ -146,12 +146,15 @@ class TDTestCase:
             ptn_counter = 0
             for c1_value in tdSql.queryResult:
                 if partition == "c1":
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count)
                 elif partition == "abs(c1)":
                     abs_c1_value = abs(c1_value[1])
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count)
                 elif partition == "tbname" and ptn_counter == 0:
-                    tdSql.query(f'select count(*) from `{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}`;', count_expected_res=self.tdCom.range_count)
+                    tbname = self.tdCom.get_subtable_wait(f'{tname}_{self.tdCom.subtable_prefix}{self.ctb_name}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`', count_expected_res=self.tdCom.range_count)
                     ptn_counter += 1
                 tdSql.checkEqual(tdSql.queryResult[0][0] , self.tdCom.range_count)
@@ -161,12 +164,15 @@ class TDTestCase:
             ptn_counter = 0
             for c1_value in tdSql.queryResult:
                 if partition == "c1":
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{c1_value[1]}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "abs(c1)":
                     abs_c1_value = abs(c1_value[1])
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{abs_c1_value}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                 elif partition == "tbname" and ptn_counter == 0:
-                    tdSql.query(f'select count(*) from `{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}`;')
+                    tbname = self.tdCom.get_subtable_wait(f'{self.tb_name}_{self.tdCom.subtable_prefix}{self.tb_name}{self.tdCom.subtable_suffix}')
+                    tdSql.query(f'select count(*) from `{tbname}`')
                     ptn_counter += 1
                 tdSql.checkEqual(tdSql.queryResult[0][0] > 0, True)
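The first hunk of this last file also passes count_expected_res=self.tdCom.range_count to tdSql.query. The sketch below illustrates the retry-until-expected-count idea that such a parameter presumably implements; the function name and its parameters are hypothetical, and the real behaviour of tdSql.query in the test framework may differ.

    import time

    def query_until_count(tdSql, sql, count_expected_res, retries=60, interval=0.5):
        # Hypothetical sketch: re-run the count query until its first cell reaches
        # the expected value, leaving the last result in tdSql.queryResult so that
        # the following tdSql.checkEqual(...) sees a settled count.
        for _ in range(retries):
            tdSql.query(sql)
            if tdSql.queryResult and tdSql.queryResult[0][0] == count_expected_res:
                return
            time.sleep(interval)
        # Fall through with the last result; the caller's checkEqual reports any mismatch.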