Merge remote-tracking branch 'origin/fix/dnode' into fix/dnode_wxy
This commit is contained in:
commit
ae0e956837
|
@ -222,6 +222,7 @@ void blockDataCleanup(SSDataBlock* pDataBlock);
|
|||
size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
|
||||
|
||||
int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
|
||||
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
|
||||
|
||||
int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
|
||||
int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
|
||||
|
|
|
@ -78,6 +78,7 @@ int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
|
|||
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
|
||||
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
|
||||
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
|
||||
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, void* pMsgBuf);
|
||||
|
||||
// STRUCT =================
|
||||
struct STColumn {
|
||||
|
|
|
@ -196,6 +196,7 @@ bool fmIsIntervalInterpoFunc(int32_t funcId);
|
|||
bool fmIsInterpFunc(int32_t funcId);
|
||||
bool fmIsLastRowFunc(int32_t funcId);
|
||||
bool fmIsSystemInfoFunc(int32_t funcId);
|
||||
bool fmIsImplicitTsFunc(int32_t funcId);
|
||||
|
||||
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
|
||||
|
||||
|
|
|
@ -104,6 +104,7 @@ typedef struct SIndefRowsFuncLogicNode {
|
|||
SNodeList* pFuncs;
|
||||
bool isTailFunc;
|
||||
bool isUniqueFunc;
|
||||
bool isTimeLineFunc;
|
||||
} SIndefRowsFuncLogicNode;
|
||||
|
||||
typedef struct SInterpFuncLogicNode {
|
||||
|
|
|
@ -251,7 +251,7 @@ typedef struct SSelectStmt {
|
|||
char stmtName[TSDB_TABLE_NAME_LEN];
|
||||
uint8_t precision;
|
||||
bool isEmptyResult;
|
||||
bool isTimeOrderQuery;
|
||||
bool isTimeLineResult;
|
||||
bool hasAggFuncs;
|
||||
bool hasRepeatScanFuncs;
|
||||
bool hasIndefiniteRowsFunc;
|
||||
|
@ -261,6 +261,7 @@ typedef struct SSelectStmt {
|
|||
bool hasTailFunc;
|
||||
bool hasInterpFunc;
|
||||
bool hasLastRowFunc;
|
||||
bool hasTimeLineFunc;
|
||||
bool groupSort;
|
||||
} SSelectStmt;
|
||||
|
||||
|
|
|
@ -362,6 +362,11 @@ typedef struct SOffsetAndContLen {
|
|||
} SOffsetAndContLen;
|
||||
*/
|
||||
|
||||
// block1: SOffsetAndContLen
|
||||
// block2: SOffsetAndContLen Array
|
||||
// block3: SRpcMsg Array
|
||||
// block4: SRpcMsg pCont Array
|
||||
|
||||
typedef struct SyncAppendEntriesBatch {
|
||||
uint32_t bytes;
|
||||
int32_t vgId;
|
||||
|
|
|
@ -388,6 +388,10 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_QRY_TASK_MSG_ERROR TAOS_DEF_ERROR_CODE(0, 0x0719)
|
||||
#define TSDB_CODE_QRY_JOB_FREED TAOS_DEF_ERROR_CODE(0, 0x071A)
|
||||
#define TSDB_CODE_QRY_TASK_STATUS_ERROR TAOS_DEF_ERROR_CODE(0, 0x071B)
|
||||
//json
|
||||
#define TSDB_CODE_QRY_JSON_IN_ERROR TAOS_DEF_ERROR_CODE(0, 0x071C)
|
||||
#define TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x071D)
|
||||
#define TSDB_CODE_QRY_JSON_IN_GROUP_ERROR TAOS_DEF_ERROR_CODE(0, 0x071E)
|
||||
|
||||
// grant
|
||||
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800)
|
||||
|
@ -573,6 +577,7 @@ int32_t* taosGetErrno();
|
|||
#define TSDB_CODE_PAR_GROUP_BY_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x265B)
|
||||
#define TSDB_CODE_PAR_INVALID_TABLE_OPTION TAOS_DEF_ERROR_CODE(0, 0x265C)
|
||||
#define TSDB_CODE_PAR_INVALID_INTERP_CLAUSE TAOS_DEF_ERROR_CODE(0, 0x265D)
|
||||
#define TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN TAOS_DEF_ERROR_CODE(0, 0x265E)
|
||||
|
||||
//planner
|
||||
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
|
||||
|
|
|
@ -108,13 +108,10 @@ static const SSysDbTableSchema userFuncSchema[] = {
|
|||
};
|
||||
|
||||
static const SSysDbTableSchema userIdxSchema[] = {
|
||||
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "index_database", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "column_name", .bytes = SYSTABLE_SCH_COL_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "index_type", .bytes = 10, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "index_extensions", .bytes = 256, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
|
||||
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
|
||||
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
|
||||
};
|
||||
|
||||
static const SSysDbTableSchema userStbsSchema[] = {
|
||||
|
|
|
@ -716,7 +716,12 @@ int32_t dataBlockCompar(const void* p1, const void* p2, const void* param) {
|
|||
|
||||
void* left1 = colDataGetData(pColInfoData, left);
|
||||
void* right1 = colDataGetData(pColInfoData, right);
|
||||
|
||||
if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) {
|
||||
if (tTagIsJson(left1) || tTagIsJson(right1)) {
|
||||
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
__compar_fn_t fn = getKeyComparFunc(pColInfoData->info.type, pOrder->order);
|
||||
|
||||
int ret = fn(left1, right1);
|
||||
|
@ -890,7 +895,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
|
|||
SBlockOrderInfo* pOrder = taosArrayGet(pOrderInfo, 0);
|
||||
|
||||
int64_t p0 = taosGetTimestampUs();
|
||||
|
||||
|
||||
__compar_fn_t fn = getKeyComparFunc(pColInfoData->info.type, pOrder->order);
|
||||
taosSort(pColInfoData->pData, pDataBlock->info.rows, pColInfoData->info.bytes, fn);
|
||||
|
||||
|
@ -919,6 +924,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
|
|||
}
|
||||
|
||||
taosqsort(index, rows, sizeof(int32_t), &helper, dataBlockCompar);
|
||||
if(terrno) return terrno;
|
||||
|
||||
int64_t p1 = taosGetTimestampUs();
|
||||
|
||||
|
@ -1431,9 +1437,39 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) {
|
|||
}
|
||||
}
|
||||
|
||||
static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, size_t end){
|
||||
int32_t dataOffset = -1;
|
||||
int32_t dataLen = 0;
|
||||
int32_t beigin = start;
|
||||
while(beigin < end){
|
||||
int32_t offset = pColInfoData->varmeta.offset[beigin];
|
||||
if(offset == -1) {
|
||||
beigin++;
|
||||
continue;
|
||||
}
|
||||
if(start != 0) {
|
||||
pColInfoData->varmeta.offset[beigin] = dataLen;
|
||||
}
|
||||
char *data = pColInfoData->pData + offset;
|
||||
if(dataOffset == -1) dataOffset = offset; // mark the begin of data
|
||||
int32_t type = pColInfoData->info.type;
|
||||
if (type == TSDB_DATA_TYPE_JSON) {
|
||||
dataLen += getJsonValueLen(data);
|
||||
} else {
|
||||
dataLen += varDataTLen(data);
|
||||
}
|
||||
beigin++;
|
||||
}
|
||||
if(dataOffset > 0){
|
||||
memmove(pColInfoData->pData, pColInfoData->pData + dataOffset, dataLen);
|
||||
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[start], (end - start) * sizeof(int32_t));
|
||||
}
|
||||
return dataLen;
|
||||
}
|
||||
|
||||
static void colDataTrimFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
|
||||
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
|
||||
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[n], (total - n) * sizeof(int32_t));
|
||||
pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, n, total);
|
||||
memset(&pColInfoData->varmeta.offset[total - n], 0, n);
|
||||
} else {
|
||||
int32_t bytes = pColInfoData->info.bytes;
|
||||
|
@ -1461,6 +1497,33 @@ int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static void colDataKeepFirstNRows(SColumnInfoData* pColInfoData, size_t n, size_t total) {
|
||||
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
|
||||
pColInfoData->varmeta.length = colDataMoveVarData(pColInfoData, 0, n);
|
||||
memset(&pColInfoData->varmeta.offset[n], 0, total - n);
|
||||
}
|
||||
}
|
||||
|
||||
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n) {
|
||||
if (n == 0) {
|
||||
blockDataCleanup(pBlock);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
if (pBlock->info.rows <= n) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
} else {
|
||||
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
|
||||
colDataKeepFirstNRows(pColInfoData, n, pBlock->info.rows);
|
||||
}
|
||||
|
||||
pBlock->info.rows = n;
|
||||
}
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
|
||||
int64_t tbUid = pBlock->info.uid;
|
||||
int16_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
|
||||
|
|
|
@ -155,7 +155,8 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
|
|||
void taosVariantDestroy(SVariant *pVar) {
|
||||
if (pVar == NULL) return;
|
||||
|
||||
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR) {
|
||||
if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR
|
||||
|| pVar->nType == TSDB_DATA_TYPE_JSON) {
|
||||
taosMemoryFreeClear(pVar->pz);
|
||||
pVar->nLen = 0;
|
||||
}
|
||||
|
@ -184,7 +185,8 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
|
|||
if (pSrc == NULL || pDst == NULL) return;
|
||||
|
||||
pDst->nType = pSrc->nType;
|
||||
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR) {
|
||||
if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR
|
||||
|| pSrc->nType == TSDB_DATA_TYPE_JSON) {
|
||||
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
|
||||
char *p = taosMemoryRealloc(pDst->pz, len);
|
||||
assert(p);
|
||||
|
@ -976,6 +978,7 @@ char *taosVariantGet(SVariant *pVar, int32_t type) {
|
|||
case TSDB_DATA_TYPE_FLOAT:
|
||||
return (char *)&pVar->d;
|
||||
case TSDB_DATA_TYPE_BINARY:
|
||||
case TSDB_DATA_TYPE_JSON:
|
||||
return (char *)pVar->pz;
|
||||
case TSDB_DATA_TYPE_NCHAR:
|
||||
return (char *)pVar->ucs4;
|
||||
|
|
|
@ -21,7 +21,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId) {
|
|||
|
||||
taosThreadRwlockRdlock(&pMgmt->lock);
|
||||
taosHashGetDup(pMgmt->hash, &vgId, sizeof(int32_t), (void *)&pVnode);
|
||||
if (pVnode == NULL) {
|
||||
if (pVnode == NULL || pVnode->dropped) {
|
||||
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
|
||||
} else {
|
||||
int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
|
||||
|
@ -81,16 +81,18 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
|
|||
|
||||
vmReleaseVnode(pMgmt, pVnode);
|
||||
while (pVnode->refCount > 0) taosMsleep(10);
|
||||
dTrace("vgId:%d, wait for vnode queue is empty", pVnode->vgId);
|
||||
|
||||
while (!taosQueueEmpty(pVnode->pWriteQ)) taosMsleep(10);
|
||||
while (!taosQueueEmpty(pVnode->pSyncQ)) taosMsleep(10);
|
||||
while (!taosQueueEmpty(pVnode->pApplyQ)) taosMsleep(10);
|
||||
while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10);
|
||||
while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10);
|
||||
dTrace("vgId:%d, vnode-fetch queue is empty", pVnode->vgId);
|
||||
|
||||
vmFreeQueue(pMgmt, pVnode);
|
||||
vnodeClose(pVnode->pImpl);
|
||||
pVnode->pImpl = NULL;
|
||||
|
||||
dDebug("vgId:%d, vnode is closed", pVnode->vgId);
|
||||
|
||||
if (pVnode->dropped) {
|
||||
|
|
|
@ -107,7 +107,7 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf
|
|||
const STraceId *trace = &pMsg->info.traceId;
|
||||
dGTrace("vgId:%d, msg:%p get from vnode-sync queue", pVnode->vgId, pMsg);
|
||||
|
||||
int32_t code = vnodeProcessSyncReq(pVnode->pImpl, pMsg, NULL); // no response here
|
||||
int32_t code = vnodeProcessSyncReq(pVnode->pImpl, pMsg, NULL); // no response here
|
||||
dGTrace("vgId:%d, msg:%p is freed, code:0x%x", pVnode->vgId, pMsg, code);
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
taosFreeQitem(pMsg);
|
||||
|
@ -146,8 +146,8 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
|
|||
|
||||
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, pHead->vgId);
|
||||
if (pVnode == NULL) {
|
||||
dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, type:%s", pHead->vgId, pMsg, terrstr(),
|
||||
TMSG_INFO(pMsg->msgType));
|
||||
dGError("vgId:%d, msg:%p failed to put into vnode queue since %s, msgtype:%s qtype:%d", pHead->vgId, pMsg,
|
||||
terrstr(), TMSG_INFO(pMsg->msgType), qtype);
|
||||
return terrno != 0 ? terrno : -1;
|
||||
}
|
||||
|
||||
|
|
|
@ -38,6 +38,8 @@ int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
|
|||
int32_t mndDropStreamTasks(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
|
||||
int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
|
||||
|
||||
int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -47,6 +47,7 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, S
|
|||
void *mndBuildCreateVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *cntlen, bool standby);
|
||||
void *mndBuildDropVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
|
||||
void *mndBuildAlterVnodeReq(SMnode *, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
|
||||
bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include "mndShow.h"
|
||||
#include "mndSma.h"
|
||||
#include "mndStb.h"
|
||||
#include "mndStream.h"
|
||||
#include "mndSubscribe.h"
|
||||
#include "mndTopic.h"
|
||||
#include "mndTrans.h"
|
||||
|
@ -645,7 +646,7 @@ static int32_t mndSetAlterDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
|
|||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
if (pVgroup->dbUid == pNew->uid) {
|
||||
if (mndVgroupInDb(pVgroup, pNew->uid)) {
|
||||
if (mndBuildAlterVgroupAction(pMnode, pTrans, pNew, pVgroup, pArray) != 0) {
|
||||
sdbCancelFetch(pSdb, pIter);
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
|
@ -927,6 +928,7 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
|
|||
if (mndDropOffsetByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropSubByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropTopicByDB(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropStreamByDb(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndDropSmasByDb(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
if (mndSetDropDbRedoActions(pMnode, pTrans, pDb) != 0) goto _OVER;
|
||||
|
||||
|
@ -947,7 +949,6 @@ static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
|
|||
mndTransSetRpcRsp(pTrans, pRsp, rspLen);
|
||||
|
||||
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
||||
|
||||
code = 0;
|
||||
|
||||
_OVER:
|
||||
|
@ -1006,7 +1007,7 @@ static int32_t mndGetDBTableNum(SDbObj *pDb, SMnode *pMnode) {
|
|||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
if (pVgroup->dbUid == pDb->uid) {
|
||||
if (mndVgroupInDb(pVgroup, pDb->uid)) {
|
||||
numOfTables += pVgroup->numOfTables / TSDB_TABLE_NUM_UNIT;
|
||||
vindex++;
|
||||
}
|
||||
|
|
|
@ -225,10 +225,11 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {
|
|||
SVgObj* pVgroup;
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (strcmp(pVgroup->dbName, pStream->targetDb) != 0) {
|
||||
if (!mndVgroupInDb(pVgroup, pStream->targetDbUid)) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
|
||||
if (pTask == NULL) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
|
@ -420,10 +421,11 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
|
|||
SVgObj* pVgroup;
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (pVgroup->dbUid != pStream->sourceDbUid) {
|
||||
if (!mndVgroupInDb(pVgroup, pStream->sourceDbUid)) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
|
||||
if (pInnerTask == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
@ -483,10 +485,11 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
|
|||
SVgObj* pVgroup;
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (pVgroup->dbUid != pStream->sourceDbUid) {
|
||||
if (!mndVgroupInDb(pVgroup, pStream->sourceDbUid)) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
|
||||
if (pTask == NULL) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
|
@ -559,7 +562,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
|
|||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (pVgroup->dbUid != pTopic->dbUid) {
|
||||
if (!mndVgroupInDb(pVgroup, pTopic->dbUid)) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -523,6 +523,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
|
|||
streamObj.updateTime = streamObj.createTime;
|
||||
streamObj.uid = mndGenerateUid(pCreate->name, strlen(pCreate->name));
|
||||
streamObj.sourceDbUid = pDb->uid;
|
||||
streamObj.targetDbUid = pDb->uid;
|
||||
streamObj.version = 1;
|
||||
streamObj.sql = pCreate->sql;
|
||||
streamObj.smaId = smaObj.uid;
|
||||
|
@ -822,6 +823,17 @@ int32_t mndDropSmasByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
|
|||
if (pSma->stbUid == pStb->uid) {
|
||||
pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId);
|
||||
if (pVgroup == NULL) goto _OVER;
|
||||
|
||||
SStreamObj *pStream = mndAcquireStream(pMnode, pSma->name);
|
||||
if (pStream != NULL && pStream->smaId == pSma->uid) {
|
||||
if (mndDropStreamTasks(pMnode, pTrans, pStream) < 0) {
|
||||
mError("stream:%s, failed to drop task since %s", pStream->name, terrstr());
|
||||
goto _OVER;
|
||||
}
|
||||
if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) {
|
||||
goto _OVER;
|
||||
}
|
||||
}
|
||||
if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
|
||||
if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER;
|
||||
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
|
||||
|
@ -842,36 +854,26 @@ _OVER:
|
|||
}
|
||||
|
||||
int32_t mndDropSmasByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
SSmaObj *pSma = NULL;
|
||||
void *pIter = NULL;
|
||||
SVgObj *pVgroup = NULL;
|
||||
int32_t code = -1;
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
void *pIter = NULL;
|
||||
|
||||
while (1) {
|
||||
SSmaObj *pSma = NULL;
|
||||
pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
if (pSma->dbUid == pDb->uid) {
|
||||
pVgroup = mndAcquireVgroup(pMnode, pSma->dstVgId);
|
||||
if (pVgroup == NULL) goto _OVER;
|
||||
if (mndSetDropSmaVgroupCommitLogs(pMnode, pTrans, pVgroup) != 0) goto _OVER;
|
||||
if (mndSetDropSmaVgroupRedoActions(pMnode, pTrans, pDb, pVgroup) != 0) goto _OVER;
|
||||
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) goto _OVER;
|
||||
mndReleaseVgroup(pMnode, pVgroup);
|
||||
pVgroup = NULL;
|
||||
if (mndSetDropSmaCommitLogs(pMnode, pTrans, pSma) != 0) {
|
||||
sdbRelease(pSdb, pSma);
|
||||
sdbCancelFetch(pSdb, pSma);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
sdbRelease(pSdb, pSma);
|
||||
}
|
||||
|
||||
code = 0;
|
||||
|
||||
_OVER:
|
||||
sdbCancelFetch(pSdb, pIter);
|
||||
sdbRelease(pSdb, pSma);
|
||||
mndReleaseVgroup(pMnode, pVgroup);
|
||||
return code;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t mndProcessDropSmaReq(SRpcMsg *pReq) {
|
||||
|
@ -1129,14 +1131,14 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
|
|||
SSmaObj *pSma = NULL;
|
||||
int32_t cols = 0;
|
||||
|
||||
SDbObj *pDb = mndAcquireDb(pMnode, pShow->db);
|
||||
if (pDb == NULL) return 0;
|
||||
SStbObj *pStb = mndAcquireStb(pMnode, pShow->db);
|
||||
if (pStb == NULL) return 0;
|
||||
|
||||
while (numOfRows < rows) {
|
||||
pShow->pIter = sdbFetch(pSdb, SDB_SMA, pShow->pIter, (void **)&pSma);
|
||||
if (pShow->pIter == NULL) break;
|
||||
|
||||
if (pSma->dbUid != pDb->uid) {
|
||||
if (pSma->stbUid != pStb->uid) {
|
||||
sdbRelease(pSdb, pSma);
|
||||
continue;
|
||||
}
|
||||
|
@ -1145,22 +1147,16 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
|
|||
|
||||
SName smaName = {0};
|
||||
tNameFromString(&smaName, pSma->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||
|
||||
char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
STR_TO_VARSTR(n, (char *)tNameGetTableName(&smaName));
|
||||
cols++;
|
||||
|
||||
char n0[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
STR_TO_VARSTR(n0, (char *)tNameGetTableName(&smaName));
|
||||
|
||||
SName stbName = {0};
|
||||
tNameFromString(&stbName, pSma->stb, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
|
||||
|
||||
char n1[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
|
||||
STR_TO_VARSTR(n1, (char *)tNameGetTableName(&stbName));
|
||||
|
||||
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)n, false);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pSma->createdTime, false);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)n0, false);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)n1, false);
|
||||
|
@ -1168,11 +1164,14 @@ static int32_t mndRetrieveSma(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
|
|||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pSma->dstVgId, false);
|
||||
|
||||
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
|
||||
colDataAppend(pColInfo, numOfRows, (const char *)&pSma->createdTime, false);
|
||||
|
||||
numOfRows++;
|
||||
sdbRelease(pSdb, pSma);
|
||||
}
|
||||
|
||||
mndReleaseDb(pMnode, pDb);
|
||||
mndReleaseStb(pMnode, pStb);
|
||||
pShow->numOfRows += numOfRows;
|
||||
return numOfRows;
|
||||
}
|
||||
|
|
|
@ -621,12 +621,7 @@ static int32_t mndSetCreateStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
|
|||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (pVgroup->dbUid != pDb->uid) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pVgroup->isTsma) {
|
||||
if (!mndVgroupInDb(pVgroup, pDb->uid)) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
@ -664,12 +659,7 @@ static int32_t mndSetCreateStbUndoActions(SMnode *pMnode, STrans *pTrans, SDbObj
|
|||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (pVgroup->dbUid != pDb->uid) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pVgroup->isTsma) {
|
||||
if (!mndVgroupInDb(pVgroup, pDb->uid)) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
@ -1297,12 +1287,7 @@ static int32_t mndSetAlterStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj
|
|||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (pVgroup->dbUid != pDb->uid) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pVgroup->isTsma) {
|
||||
if (!mndVgroupInDb(pVgroup, pDb->uid)) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
@ -1688,12 +1673,7 @@ static int32_t mndSetDropStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
|
|||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
|
||||
if (pIter == NULL) break;
|
||||
if (pVgroup->dbUid != pDb->uid) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (pVgroup->isTsma) {
|
||||
if (!mndVgroupInDb(pVgroup, pDb->uid)) {
|
||||
sdbRelease(pSdb, pVgroup);
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -673,27 +673,29 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
|
|||
|
||||
int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
void *pIter = NULL;
|
||||
|
||||
void *pIter = NULL;
|
||||
SStreamObj *pStream = NULL;
|
||||
while (1) {
|
||||
SStreamObj *pStream = NULL;
|
||||
pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pStream);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
if (pStream->sourceDbUid == pDb->uid || pStream->targetDbUid == pDb->uid) {
|
||||
if (pStream->sourceDbUid != pStream->targetDbUid) {
|
||||
sdbRelease(pSdb, pStream);
|
||||
sdbCancelFetch(pSdb, pIter);
|
||||
mError("db:%s, failed to drop stream:%s since sourceDbUid:%" PRId64 " not match with targetDbUid:%" PRId64,
|
||||
pDb->name, pStream->name, pStream->sourceDbUid, pStream->targetDbUid);
|
||||
terrno = TSDB_CODE_MND_STREAM_ALREADY_EXIST;
|
||||
return -1;
|
||||
} else {
|
||||
// TODO drop all task on snode
|
||||
if (mndPersistDropStreamLog(pMnode, pTrans, pStream) < 0) {
|
||||
sdbRelease(pSdb, pStream);
|
||||
sdbCancelFetch(pSdb, pIter);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
sdbRelease(pSdb, pStream);
|
||||
continue;
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
|
|
@ -90,7 +90,7 @@ static int32_t mndTransGetActionsSize(SArray *pArray) {
|
|||
for (int32_t i = 0; i < actionNum; ++i) {
|
||||
STransAction *pAction = taosArrayGet(pArray, i);
|
||||
if (pAction->actionType == TRANS_ACTION_RAW) {
|
||||
rawDataLen += (sdbGetRawTotalSize(pAction->pRaw) + sizeof(int32_t));
|
||||
rawDataLen += (sizeof(STransAction) + sdbGetRawTotalSize(pAction->pRaw));
|
||||
} else if (pAction->actionType == TRANS_ACTION_MSG) {
|
||||
rawDataLen += (sizeof(STransAction) + pAction->contLen);
|
||||
} else {
|
||||
|
@ -105,7 +105,7 @@ static int32_t mndTransGetActionsSize(SArray *pArray) {
|
|||
static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
|
||||
int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE;
|
||||
int32_t rawDataLen = sizeof(STrans) + TRANS_RESERVE_SIZE + pTrans->paramLen;
|
||||
rawDataLen += mndTransGetActionsSize(pTrans->redoActions);
|
||||
rawDataLen += mndTransGetActionsSize(pTrans->undoActions);
|
||||
rawDataLen += mndTransGetActionsSize(pTrans->commitActions);
|
||||
|
@ -226,7 +226,8 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
|
|||
|
||||
_OVER:
|
||||
if (terrno != 0) {
|
||||
mError("trans:%d, failed to encode to raw:%p len:%d since %s", pTrans->id, pRaw, dataPos, terrstr());
|
||||
mError("trans:%d, failed to encode to raw:%p maxlen:%d len:%d since %s", pTrans->id, pRaw, sdbGetRawTotalSize(pRaw),
|
||||
dataPos, terrstr());
|
||||
sdbFreeRaw(pRaw);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -1146,13 +1147,13 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
|
|||
} else {
|
||||
code = TSDB_CODE_ACTION_IN_PROGRESS;
|
||||
}
|
||||
}
|
||||
if (pAction->rawWritten) {
|
||||
} else if (pAction->rawWritten) {
|
||||
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
|
||||
code = pAction->errCode;
|
||||
} else {
|
||||
mDebug("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action);
|
||||
}
|
||||
} else {
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1759,4 +1759,6 @@ _OVER:
|
|||
|
||||
taosArrayDestroy(pArray);
|
||||
return code;
|
||||
}
|
||||
}
|
||||
|
||||
bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; }
|
|
@ -185,5 +185,7 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
|
|||
.contLen = ntohl(pReq->length),
|
||||
};
|
||||
|
||||
ASSERT(tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) == 0);
|
||||
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
|
||||
tqDebug("failed to put into write-queue since %s", terrstr());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -297,7 +297,7 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) {
|
|||
SArray* pTagVals = NULL;
|
||||
STag* pTag = (STag*)pCfg->pTags;
|
||||
|
||||
if (pCfg->pTags && pTag->flags & TD_TAG_JSON) {
|
||||
if (pCfg->pTags && tTagIsJson(pTag)) {
|
||||
char* pJson = parseTagDatatoJson(pTag);
|
||||
if (pJson) {
|
||||
*len += sprintf(buf + VARSTR_HEADER_SIZE + *len, "%s", pJson);
|
||||
|
|
|
@ -63,6 +63,7 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
|
|||
rowSize += pCtx[i].resDataInfo.interBufSize;
|
||||
}
|
||||
|
||||
rowSize += (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(saveTupleData)
|
||||
return rowSize;
|
||||
}
|
||||
|
||||
|
@ -112,7 +113,9 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
|
|||
p->groupId = *(uint64_t*)key;
|
||||
p->pos = *(SResultRowPosition*)pData;
|
||||
memcpy(p->key, (char*)key + sizeof(uint64_t), keyLen - sizeof(uint64_t));
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_groupRes, groupId:%"PRIu64",pageId:%d,offset:%d\n", p->groupId, p->pos.pageId, p->pos.offset);
|
||||
#endif
|
||||
taosArrayPush(pGroupResInfo->pRows, &p);
|
||||
}
|
||||
|
||||
|
@ -271,6 +274,7 @@ static bool isTableOk(STableKeyInfo* info, SNode* pTagCond, SMeta* metaHandle) {
|
|||
SNode* pNew = NULL;
|
||||
int32_t code = scalarCalculateConstants(pTagCondTmp, &pNew);
|
||||
if (TSDB_CODE_SUCCESS != code) {
|
||||
terrno = code;
|
||||
nodesDestroyNode(pTagCondTmp);
|
||||
return false;
|
||||
}
|
||||
|
@ -323,12 +327,19 @@ int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo
|
|||
code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
|
||||
}
|
||||
|
||||
if (pTagCond) {
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
|
||||
terrno = code;
|
||||
return code;
|
||||
}
|
||||
|
||||
if(pTagCond){
|
||||
int32_t i = 0;
|
||||
while (i < taosArrayGetSize(pListInfo->pTableList)) {
|
||||
STableKeyInfo* info = taosArrayGet(pListInfo->pTableList, i);
|
||||
bool isOk = isTableOk(info, pTagCond, metaHandle);
|
||||
if (!isOk) {
|
||||
bool isOk = isTableOk(info, pTagCond, metaHandle);
|
||||
if(terrno) return terrno;
|
||||
if(!isOk){
|
||||
taosArrayRemove(pListInfo->pTableList, i);
|
||||
continue;
|
||||
}
|
||||
|
@ -586,13 +597,16 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
|
|||
}
|
||||
|
||||
for (int32_t i = 0; i < numOfOutput; ++i) {
|
||||
if (strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
|
||||
if (strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_select_value") == 0 ||
|
||||
strcmp(pCtx[i].pExpr->pExpr->_function.functionName, "_group_key") == 0) {
|
||||
pValCtx[num++] = &pCtx[i];
|
||||
} else if (fmIsSelectFunc(pCtx[i].functionId)) {
|
||||
p = &pCtx[i];
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_setSelect num:%d", num);
|
||||
#endif
|
||||
if (p != NULL) {
|
||||
p->subsidiaries.pCtx = pValCtx;
|
||||
p->subsidiaries.num = num;
|
||||
|
|
|
@ -274,6 +274,9 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
|
|||
|
||||
// 1. close current opened time window
|
||||
if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId))) {
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_1");
|
||||
#endif
|
||||
SResultRowPosition pos = pResultRowInfo->cur;
|
||||
SFilePage* pPage = getBufPage(pResultBuf, pos.pageId);
|
||||
releaseBufPage(pResultBuf, pPage);
|
||||
|
@ -281,6 +284,9 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
|
|||
|
||||
// allocate a new buffer page
|
||||
if (pResult == NULL) {
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_2");
|
||||
#endif
|
||||
ASSERT(pSup->resultRowSize > 0);
|
||||
pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
|
||||
|
||||
|
@ -538,7 +544,9 @@ static int32_t doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunct
|
|||
if (pCtx[k].fpSet.process == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_process");
|
||||
#endif
|
||||
int32_t code = pCtx[k].fpSet.process(&pCtx[k]);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
|
||||
|
@ -1413,7 +1421,9 @@ void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t
|
|||
if (pAggInfo->groupId != INT32_MIN && pAggInfo->groupId == groupId) {
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_setbuf, groupId:%"PRIu64, groupId);
|
||||
#endif
|
||||
doSetTableGroupOutputBuf(pOperator, pAggInfo, numOfOutput, groupId);
|
||||
|
||||
// record the current active group id
|
||||
|
@ -1489,11 +1499,15 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
int32_t numOfExprs) {
|
||||
int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
|
||||
int32_t start = pGroupResInfo->index;
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("\npage_copytoblock rows:%d", numOfRows);
|
||||
#endif
|
||||
for (int32_t i = start; i < numOfRows; i += 1) {
|
||||
SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
|
||||
SFilePage* page = getBufPage(pBuf, pPos->pos.pageId);
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("page_copytoblock pos pageId:%d, offset:%d", pPos->pos.pageId, pPos->pos.offset);
|
||||
#endif
|
||||
SResultRow* pRow = (SResultRow*)((char*)page + pPos->pos.offset);
|
||||
|
||||
doUpdateNumOfRows(pRow, numOfExprs, rowCellOffset);
|
||||
|
@ -1525,6 +1539,9 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
|
||||
pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
|
||||
if (pCtx[j].fpSet.finalize) {
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
qDebug("\npage_finalize %d", numOfExprs);
|
||||
#endif
|
||||
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
|
||||
if (TAOS_FAILED(code)) {
|
||||
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
|
||||
|
@ -1553,9 +1570,9 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprI
|
|||
|
||||
releaseBufPage(pBuf, page);
|
||||
pBlock->info.rows += pRow->numOfRows;
|
||||
if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full
|
||||
break;
|
||||
}
|
||||
// if (pBlock->info.rows >= pBlock->info.capacity) { // output buffer is full
|
||||
// break;
|
||||
// }
|
||||
}
|
||||
|
||||
qDebug("%s result generated, rows:%d, groupId:%" PRIu64, GET_TASKID(pTaskInfo), pBlock->info.rows,
|
||||
|
@ -2373,8 +2390,7 @@ static int32_t initExchangeOperator(SExchangePhysiNode* pExNode, SExchangeInfo*
|
|||
}
|
||||
|
||||
pInfo->pSources = taosArrayInit(numOfSources, sizeof(SDownstreamSourceNode));
|
||||
pInfo->pSourceDataInfo = taosArrayInit(numOfSources, sizeof(SSourceDataInfo));
|
||||
if (pInfo->pSourceDataInfo == NULL || pInfo->pSources == NULL) {
|
||||
if (pInfo->pSources == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
||||
|
@ -3165,8 +3181,9 @@ static int32_t handleLimitOffset(SOperatorInfo* pOperator, SSDataBlock* pBlock)
|
|||
}
|
||||
|
||||
// check for the limitation in each group
|
||||
if (pProjectInfo->limit.limit > 0 && pProjectInfo->curOutput + pRes->info.rows >= pProjectInfo->limit.limit) {
|
||||
pRes->info.rows = (int32_t)(pProjectInfo->limit.limit - pProjectInfo->curOutput);
|
||||
if (pProjectInfo->limit.limit >= 0 && pProjectInfo->curOutput + pRes->info.rows >= pProjectInfo->limit.limit) {
|
||||
int32_t keepRows = (int32_t)(pProjectInfo->limit.limit - pProjectInfo->curOutput);
|
||||
blockDataKeepFirstNRows(pRes, keepRows);
|
||||
if (pProjectInfo->slimit.limit > 0 && pProjectInfo->slimit.limit <= pProjectInfo->curGroupOutput) {
|
||||
pOperator->status = OP_EXEC_DONE;
|
||||
}
|
||||
|
@ -3480,11 +3497,12 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
|
|||
}
|
||||
|
||||
void initResultSizeInfo(SOperatorInfo* pOperator, int32_t numOfRows) {
|
||||
ASSERT(numOfRows != 0);
|
||||
pOperator->resultInfo.capacity = numOfRows;
|
||||
pOperator->resultInfo.threshold = numOfRows * 0.75;
|
||||
|
||||
if (pOperator->resultInfo.threshold == 0) {
|
||||
pOperator->resultInfo.capacity = numOfRows;
|
||||
pOperator->resultInfo.threshold = numOfRows;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3984,12 +4002,12 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo, int32_t groupNum)
|
|||
return TSDB_CODE_QRY_APP_ERROR;
|
||||
}
|
||||
if (p == NULL) {
|
||||
if (taosArrayPush(sortSupport, groupId) != NULL) {
|
||||
if (taosArrayPush(sortSupport, groupId) == NULL) {
|
||||
qError("taos push support array error");
|
||||
taosArrayDestroy(sortSupport);
|
||||
return TSDB_CODE_QRY_APP_ERROR;
|
||||
}
|
||||
if (taosArrayPush(pTableListInfo->pGroupList, &tGroup) != NULL) {
|
||||
if (taosArrayPush(pTableListInfo->pGroupList, &tGroup) == NULL) {
|
||||
qError("taos push group array error");
|
||||
taosArrayDestroy(sortSupport);
|
||||
return TSDB_CODE_QRY_APP_ERROR;
|
||||
|
@ -4069,6 +4087,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
|
|||
} else {
|
||||
taosMemoryFree(keyBuf);
|
||||
nodesClearList(groupNew);
|
||||
metaReaderClear(&mr);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -4081,7 +4100,14 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
|
|||
} else {
|
||||
isNull[index++] = 0;
|
||||
char* data = nodesGetValueFromNode(pValue);
|
||||
if (pValue->node.resType.type == TSDB_DATA_TYPE_JSON) {
|
||||
if (pValue->node.resType.type == TSDB_DATA_TYPE_JSON){
|
||||
if(tTagIsJson(data)){
|
||||
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
|
||||
taosMemoryFree(keyBuf);
|
||||
nodesClearList(groupNew);
|
||||
metaReaderClear(&mr);
|
||||
return terrno;
|
||||
}
|
||||
int32_t len = getJsonValueLen(data);
|
||||
memcpy(pStart, data, len);
|
||||
pStart += len;
|
||||
|
@ -4140,7 +4166,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
|
|||
} else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) {
|
||||
STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode;
|
||||
int32_t code = createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId);
|
||||
if (code) {
|
||||
if(code){
|
||||
pTaskInfo->code = code;
|
||||
return NULL;
|
||||
}
|
||||
code = extractTableSchemaVersion(pHandle, pTableScanNode->scan.uid, pTaskInfo);
|
||||
|
@ -4167,7 +4194,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
|
|||
.maxTs = INT64_MIN,
|
||||
};
|
||||
if (pHandle) {
|
||||
createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId);
|
||||
int32_t code = createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId);
|
||||
if(code){
|
||||
pTaskInfo->code = code;
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
SOperatorInfo* pOperator =
|
||||
|
|
|
@ -141,6 +141,10 @@ static void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSData
|
|||
pkey->isNull = false;
|
||||
char* val = colDataGetData(pColInfoData, rowIndex);
|
||||
if (pkey->type == TSDB_DATA_TYPE_JSON) {
|
||||
if(tTagIsJson(val)){
|
||||
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
|
||||
return;
|
||||
}
|
||||
int32_t dataLen = getJsonValueLen(val);
|
||||
memcpy(pkey->pData, val, dataLen);
|
||||
} else if (IS_VAR_DATA_TYPE(pkey->type)) {
|
||||
|
@ -227,11 +231,15 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
|
|||
int32_t len = 0;
|
||||
STimeWindow w = TSWINDOW_INITIALIZER;
|
||||
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
int32_t num = 0;
|
||||
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
|
||||
// Compare with the previous row of this column, and do not set the output buffer again if they are identical.
|
||||
if (!pInfo->isInit) {
|
||||
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
|
||||
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
|
||||
longjmp(pTaskInfo->env, terrno);
|
||||
}
|
||||
pInfo->isInit = true;
|
||||
num++;
|
||||
continue;
|
||||
|
@ -247,6 +255,9 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
|
|||
if (j == 0) {
|
||||
num++;
|
||||
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
|
||||
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
|
||||
longjmp(pTaskInfo->env, terrno);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -661,7 +672,11 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
|
|||
}
|
||||
}
|
||||
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
doHashPartition(pOperator, pBlock);
|
||||
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
|
||||
longjmp(pTaskInfo->env, terrno);
|
||||
}
|
||||
}
|
||||
|
||||
SArray* groupArray = taosArrayInit(taosHashGetSize(pInfo->pGroupSet), sizeof(SDataGroupInfo));
|
||||
|
|
|
@ -2097,6 +2097,7 @@ int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle
|
|||
qDebug("no table qualified for query, TID:0x%" PRIx64 ", QID:0x%" PRIx64, taskId, queryId);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
pTableListInfo->needSortTableByGroupId = pTableScanNode->groupSort;
|
||||
code = generateGroupIdMap(pTableListInfo, pHandle, pTableScanNode->pGroupTags);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
return code;
|
||||
|
|
|
@ -593,7 +593,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
if (size > sortBufSize) {
|
||||
// Perform the in-memory sort and then flush data in the buffer into disk.
|
||||
int64_t p = taosGetTimestampUs();
|
||||
blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
|
||||
code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
|
||||
int64_t el = taosGetTimestampUs() - p;
|
||||
pHandle->sortElapsed += el;
|
||||
|
@ -608,7 +611,10 @@ static int32_t createInitialSources(SSortHandle* pHandle) {
|
|||
// Perform the in-memory sort and then flush data in the buffer into disk.
|
||||
int64_t p = taosGetTimestampUs();
|
||||
|
||||
blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
|
||||
int32_t code = blockDataSort(pHandle->pDataBlock, pHandle->pSortInfo);
|
||||
if (code != 0) {
|
||||
return code;
|
||||
}
|
||||
|
||||
int64_t el = taosGetTimestampUs() - p;
|
||||
pHandle->sortElapsed += el;
|
||||
|
|
|
@ -32,7 +32,7 @@ extern "C" {
|
|||
#define FUNC_MGT_STRING_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(3)
|
||||
#define FUNC_MGT_DATETIME_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(4)
|
||||
#define FUNC_MGT_TIMELINE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(5)
|
||||
#define FUNC_MGT_TIMEORDER_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(6)
|
||||
#define FUNC_MGT_IMPLICIT_TS_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(6)
|
||||
#define FUNC_MGT_PSEUDO_COLUMN_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(7)
|
||||
#define FUNC_MGT_WINDOW_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(8)
|
||||
#define FUNC_MGT_SPECIAL_DATA_REQUIRED FUNC_MGT_FUNC_CLASSIFICATION_MASK(9)
|
||||
|
|
|
@ -238,7 +238,7 @@ static int32_t translateWduration(SFunctionNode* pFunc, char* pErrBuf, int32_t l
|
|||
static int32_t translateNowToday(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
|
||||
// pseudo column do not need to check parameters
|
||||
|
||||
//add database precision as param
|
||||
// add database precision as param
|
||||
uint8_t dbPrec = pFunc->node.resType.precision;
|
||||
addDbPrecisonParam(&pFunc->pParameterList, dbPrec);
|
||||
|
||||
|
@ -634,6 +634,12 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l
|
|||
}
|
||||
|
||||
// param1 ~ param3
|
||||
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
|
||||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
|
||||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
|
||||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
for (int32_t i = 1; i < numOfParams; ++i) {
|
||||
SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
|
||||
if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
|
||||
|
@ -643,12 +649,11 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l
|
|||
SValueNode* pValue = (SValueNode*)pParamNode;
|
||||
|
||||
pValue->notReserved = true;
|
||||
}
|
||||
|
||||
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
|
||||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
|
||||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
|
||||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) {
|
||||
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
|
||||
"HISTOGRAM function normalized parameter should be 0/1");
|
||||
}
|
||||
}
|
||||
|
||||
pFunc->node.resType = (SDataType){.bytes = 512, .type = TSDB_DATA_TYPE_BINARY};
|
||||
|
@ -668,6 +673,12 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32
|
|||
}
|
||||
|
||||
// param1 ~ param3
|
||||
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
|
||||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
|
||||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
|
||||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
for (int32_t i = 1; i < numOfParams; ++i) {
|
||||
SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
|
||||
if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
|
||||
|
@ -677,12 +688,11 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32
|
|||
SValueNode* pValue = (SValueNode*)pParamNode;
|
||||
|
||||
pValue->notReserved = true;
|
||||
}
|
||||
|
||||
if (((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type != TSDB_DATA_TYPE_BINARY ||
|
||||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type != TSDB_DATA_TYPE_BINARY ||
|
||||
((SExprNode*)nodesListGetNode(pFunc->pParameterList, 3))->resType.type != TSDB_DATA_TYPE_BIGINT) {
|
||||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) {
|
||||
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
|
||||
"HISTOGRAM function normalized parameter should be 0/1");
|
||||
}
|
||||
}
|
||||
|
||||
pFunc->node.resType =
|
||||
|
@ -1017,7 +1027,7 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
|
|||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
//add database precision as param
|
||||
// add database precision as param
|
||||
uint8_t dbPrec = pFunc->node.resType.precision;
|
||||
addDbPrecisonParam(&pFunc->pParameterList, dbPrec);
|
||||
|
||||
|
@ -1031,13 +1041,7 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
|
||||
if (QUERY_NODE_COLUMN != nodeType(pPara)) {
|
||||
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
|
||||
"The parameters of first/last can only be columns");
|
||||
}
|
||||
|
||||
pFunc->node.resType = ((SExprNode*)pPara)->resType;
|
||||
pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType;
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1051,11 +1055,6 @@ static int32_t translateFirstLastImpl(SFunctionNode* pFunc, char* pErrBuf, int32
|
|||
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
|
||||
int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes;
|
||||
if (isPartial) {
|
||||
if (QUERY_NODE_COLUMN != nodeType(pPara)) {
|
||||
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
|
||||
"The parameters of first/last can only be columns");
|
||||
}
|
||||
|
||||
pFunc->node.resType =
|
||||
(SDataType){.bytes = getFirstLastInfoSize(paraBytes) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY};
|
||||
} else {
|
||||
|
@ -1477,7 +1476,7 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int
|
|||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
//add database precision as param
|
||||
// add database precision as param
|
||||
uint8_t dbPrec = pFunc->node.resType.precision;
|
||||
addDbPrecisonParam(&pFunc->pParameterList, dbPrec);
|
||||
|
||||
|
@ -1497,7 +1496,7 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_
|
|||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
//add database precision as param
|
||||
// add database precision as param
|
||||
uint8_t dbPrec = pFunc->node.resType.precision;
|
||||
addDbPrecisonParam(&pFunc->pParameterList, dbPrec);
|
||||
|
||||
|
@ -1525,7 +1524,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le
|
|||
}
|
||||
}
|
||||
|
||||
//add database precision as param
|
||||
// add database precision as param
|
||||
uint8_t dbPrec = pFunc->node.resType.precision;
|
||||
addDbPrecisonParam(&pFunc->pParameterList, dbPrec);
|
||||
|
||||
|
@ -1543,7 +1542,7 @@ static int32_t translateToJson(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
|
|||
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
|
||||
}
|
||||
|
||||
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BINARY].bytes, .type = TSDB_DATA_TYPE_BINARY};
|
||||
pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_JSON].bytes, .type = TSDB_DATA_TYPE_JSON};
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -1924,7 +1923,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "interp",
|
||||
.type = FUNCTION_TYPE_INTERP,
|
||||
.classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC,
|
||||
.classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
|
||||
.translateFunc = translateFirstLast,
|
||||
.getEnvFunc = getSelectivityFuncEnv,
|
||||
.initFunc = functionSetup,
|
||||
|
@ -1934,7 +1933,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "derivative",
|
||||
.type = FUNCTION_TYPE_DERIVATIVE,
|
||||
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC,
|
||||
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
|
||||
.translateFunc = translateDerivative,
|
||||
.getEnvFunc = getDerivativeFuncEnv,
|
||||
.initFunc = derivativeFuncSetup,
|
||||
|
@ -1944,7 +1943,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "irate",
|
||||
.type = FUNCTION_TYPE_IRATE,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
|
||||
.translateFunc = translateIrate,
|
||||
.getEnvFunc = getIrateFuncEnv,
|
||||
.initFunc = irateFuncSetup,
|
||||
|
@ -1953,8 +1952,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
},
|
||||
{
|
||||
.name = "last_row",
|
||||
.type = FUNCTION_TYPE_LAST_ROWT,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC,
|
||||
.type = FUNCTION_TYPE_LAST_ROW,
|
||||
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
|
||||
.translateFunc = translateFirstLast,
|
||||
.getEnvFunc = getFirstLastFuncEnv,
|
||||
.initFunc = functionSetup,
|
||||
|
@ -1964,7 +1963,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
|
|||
{
|
||||
.name = "_cache_last_row",
.type = FUNCTION_TYPE_CACHE_LAST_ROW,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateLastRow,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,

@ -1974,7 +1973,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "first",
.type = FUNCTION_TYPE_FIRST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,

@ -1987,7 +1986,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_first_partial",
.type = FUNCTION_TYPE_FIRST_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastPartial,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,

@ -1998,7 +1997,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_first_merge",
.type = FUNCTION_TYPE_FIRST_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastMerge,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,

@ -2009,7 +2008,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "last",
.type = FUNCTION_TYPE_LAST,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLast,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,

@ -2022,7 +2021,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_last_partial",
.type = FUNCTION_TYPE_LAST_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastPartial,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,

@ -2033,7 +2032,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_last_merge",
.type = FUNCTION_TYPE_LAST_MERGE,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateFirstLastMerge,
.getEnvFunc = getFirstLastFuncEnv,
.initFunc = functionSetup,

@ -2044,7 +2043,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "twa",
.type = FUNCTION_TYPE_TWA,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_STREAM_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateInNumOutDou,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getTwaFuncEnv,

@ -2141,7 +2140,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "statecount",
.type = FUNCTION_TYPE_STATE_COUNT,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
.translateFunc = translateStateCount,
.getEnvFunc = getStateFuncEnv,
.initFunc = functionSetup,

@ -2151,7 +2150,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "stateduration",
.type = FUNCTION_TYPE_STATE_DURATION,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC,
.translateFunc = translateStateDuration,
.getEnvFunc = getStateFuncEnv,
.initFunc = functionSetup,

@ -2191,7 +2190,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "tail",
.type = FUNCTION_TYPE_TAIL,
.classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC | FUNC_MGT_FORBID_GROUP_BY_FUNC,
.classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC |
FUNC_MGT_FORBID_WINDOW_FUNC | FUNC_MGT_FORBID_GROUP_BY_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateTail,
.getEnvFunc = getTailFuncEnv,
.initFunc = tailFunctionSetup,

@ -2202,7 +2202,7 @@
.name = "unique",
.type = FUNCTION_TYPE_UNIQUE,
.classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC |
FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC | FUNC_MGT_FORBID_GROUP_BY_FUNC,
FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_FORBID_WINDOW_FUNC | FUNC_MGT_FORBID_GROUP_BY_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC,
.translateFunc = translateUnique,
.getEnvFunc = getUniqueFuncEnv,
.initFunc = uniqueFunctionSetup,

@ -2620,7 +2620,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "_group_key",
.type = FUNCTION_TYPE_GROUP_KEY,
.classification = FUNC_MGT_AGG_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SELECT_FUNC,
.translateFunc = translateGroupKey,
.getEnvFunc = getGroupKeyFuncEnv,
.initFunc = functionSetup,
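Aside on the hunks above: they add the FUNC_MGT_IMPLICIT_TS_FUNC bit to the classification masks of first/last, twa, stateduration, tail and unique. Since the classification field is a bitmask, a category test is a single AND, which is the shape of fmIsImplicitTsFunc()/isSpecificClassifyFunc() seen in a later hunk. The sketch below is illustrative only; the flag values and the sample table are made up and are not part of this commit.

#include <stdbool.h>
#include <stdint.h>

#define FUNC_MGT_AGG_FUNC         (1u << 0)  /* illustrative bit values, not the project's real ones */
#define FUNC_MGT_TIMELINE_FUNC    (1u << 1)
#define FUNC_MGT_IMPLICIT_TS_FUNC (1u << 2)

typedef struct { const char* name; uint64_t classification; } SFuncDefSketch;

static const SFuncDefSketch kFuncs[] = {
    {"first", FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC},
    {"twa",   FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC},
};

/* same shape as isSpecificClassifyFunc(funcId, FUNC_MGT_IMPLICIT_TS_FUNC) in the diff */
static bool isImplicitTsFuncSketch(int32_t funcId) {
    return (kFuncs[funcId].classification & FUNC_MGT_IMPLICIT_TS_FUNC) != 0;
}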
@ -16,6 +16,7 @@
#include "builtinsimpl.h"
#include "cJSON.h"
#include "function.h"
#include "query.h"
#include "querynodes.h"
#include "taggfunction.h"
#include "tcompare.h"

@ -80,7 +81,7 @@ typedef struct STopBotRes {
typedef struct SFirstLastRes {
bool hasResult;
bool isNull; //used for last_row function only
bool isNull; // used for last_row function only
int32_t bytes;
char buf[];
} SFirstLastRes;

@ -209,13 +210,13 @@ typedef struct SMavgInfo {
} SMavgInfo;
typedef struct SSampleInfo {
int32_t samples;
int32_t totalPoints;
int32_t numSampled;
uint8_t colType;
int16_t colBytes;
char* data;
STuplePos* tuplePos;
int32_t samples;
int32_t totalPoints;
int32_t numSampled;
uint8_t colType;
int16_t colBytes;
char* data;
STuplePos* tuplePos;
} SSampleInfo;
typedef struct STailItem {

@ -270,20 +271,19 @@ typedef struct SDerivInfo {
} SDerivInfo;
typedef struct SRateInfo {
double firstValue;
TSKEY firstKey;
double lastValue;
TSKEY lastKey;
int8_t hasResult; // flag to denote has value
double firstValue;
TSKEY firstKey;
double lastValue;
TSKEY lastKey;
int8_t hasResult; // flag to denote has value
} SRateInfo;
typedef struct SGroupKeyInfo{
bool hasResult;
bool isNull;
char data[];
typedef struct SGroupKeyInfo {
bool hasResult;
bool isNull;
char data[];
} SGroupKeyInfo;

#define SET_VAL(_info, numOfElem, res) \
do { \
if ((numOfElem) <= 0) { \
@ -1472,8 +1472,8 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
int32_t pageId = pTuplePos->pageId;
int32_t offset = pTuplePos->offset;
if (pTuplePos->pageId != -1) {
int32_t numOfCols = taosArrayGetSize(pCtx->pSrcBlock->pDataBlock);
if (pTuplePos->pageId != -1 && pCtx->subsidiaries.num > 0) {
int32_t numOfCols = pCtx->subsidiaries.num;
SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
bool* nullList = (bool*)((char*)pPage + offset);

@ -1484,22 +1484,21 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
int32_t ps = 0;
for (int32_t k = 0; k < srcSlotId; ++k) {
SColumnInfoData* pSrcCol = taosArrayGet(pCtx->pSrcBlock->pDataBlock, k);
ps += pSrcCol->info.bytes;
}
SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
if (nullList[srcSlotId]) {
ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
if (nullList[j]) {
colDataAppendNULL(pDstCol, rowIndex);
} else {
colDataAppend(pDstCol, rowIndex, (pStart + ps), false);
colDataAppend(pDstCol, rowIndex, pStart, false);
}
pStart += pDstCol->info.bytes;
}
releaseBufPage(pCtx->pBuf, pPage);
}
}
@ -2410,7 +2409,9 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
return TSDB_CODE_SUCCESS;
}
int32_t getFirstLastInfoSize(int32_t resBytes) { return sizeof(SFirstLastRes) + resBytes + sizeof(int64_t) + sizeof(STuplePos); }
int32_t getFirstLastInfoSize(int32_t resBytes) {
return sizeof(SFirstLastRes) + resBytes + sizeof(int64_t) + sizeof(STuplePos);
}
bool getFirstLastFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pNode = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0);

@ -2492,7 +2493,7 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
}
memcpy(pInfo->buf, data, bytes);
*(TSKEY*)(pInfo->buf + bytes) = cts;
//handle selectivity
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
if (!pInfo->hasResult) {

@ -2502,7 +2503,7 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
}
}
pInfo->hasResult = true;
//DO_UPDATE_TAG_COLUMNS(pCtx, ts);
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
pResInfo->numOfRes = 1;
break;
}

@ -2534,7 +2535,7 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
}
memcpy(pInfo->buf, data, bytes);
*(TSKEY*)(pInfo->buf + bytes) = cts;
//handle selectivity
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
if (!pInfo->hasResult) {

@ -2544,7 +2545,7 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
}
}
pInfo->hasResult = true;
//DO_UPDATE_TAG_COLUMNS(pCtx, ts);
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
pResInfo->numOfRes = 1;
break;
}

@ -2598,7 +2599,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
}
memcpy(pInfo->buf, data, bytes);
*(TSKEY*)(pInfo->buf + bytes) = cts;
//handle selectivity
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
if (!pInfo->hasResult) {

@ -2608,7 +2609,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
}
}
pInfo->hasResult = true;
//DO_UPDATE_TAG_COLUMNS(pCtx, ts);
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
pResInfo->numOfRes = 1;
}
break;

@ -2630,7 +2631,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
}
memcpy(pInfo->buf, data, bytes);
*(TSKEY*)(pInfo->buf + bytes) = cts;
//handle selectivity
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
if (!pInfo->hasResult) {

@ -2641,7 +2642,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
}
pInfo->hasResult = true;
pResInfo->numOfRes = 1;
//DO_UPDATE_TAG_COLUMNS(pCtx, ts);
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
}
break;
}
@ -2653,7 +2654,7 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst) {
SInputColumnInfoData* pColInfo = &pCtx->input;
int32_t start = pColInfo->startRowIndex;
int32_t start = pColInfo->startRowIndex;
pOutput->bytes = pInput->bytes;
TSKEY* tsIn = (TSKEY*)(pInput->buf + pInput->bytes);

@ -2671,7 +2672,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
}
*tsOut = *tsIn;
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
//handle selectivity
// handle selectivity
STuplePos* pTuplePos = (STuplePos*)(pOutput->buf + pOutput->bytes + sizeof(TSKEY));
if (pCtx->subsidiaries.num > 0) {
if (!pOutput->hasResult) {

@ -2718,7 +2719,7 @@ int32_t firstLastFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pResInfo);
colDataAppend(pCol, pBlock->info.rows, pRes->buf, pResInfo->isNullRes);
//handle selectivity
// handle selectivity
STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY));
setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows);

@ -2729,8 +2730,8 @@ int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
int32_t resultBytes = getFirstLastInfoSize(pRes->bytes);
char* res = taosMemoryCalloc(resultBytes + VARSTR_HEADER_SIZE, sizeof(char));
int32_t resultBytes = getFirstLastInfoSize(pRes->bytes);
char* res = taosMemoryCalloc(resultBytes + VARSTR_HEADER_SIZE, sizeof(char));
memcpy(varDataVal(res), pRes, resultBytes);
varDataSetLen(res, resultBytes);

@ -2739,7 +2740,7 @@ int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
colDataAppend(pCol, pBlock->info.rows, res, false);
//handle selectivity
// handle selectivity
STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY));
setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows);

@ -2801,7 +2802,7 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
}
*(TSKEY*)(pInfo->buf) = cts;
numOfElems++;
//handle selectivity
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
if (!pInfo->hasResult) {

@ -2811,7 +2812,7 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
}
}
pInfo->hasResult = true;
//DO_UPDATE_TAG_COLUMNS(pCtx, ts);
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
pResInfo->numOfRes = 1;
}
break;

@ -2833,7 +2834,7 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
}
*(TSKEY*)(pInfo->buf) = cts;
numOfElems++;
//handle selectivity
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
STuplePos* pTuplePos = (STuplePos*)(pInfo->buf + bytes + sizeof(TSKEY));
if (!pInfo->hasResult) {

@ -2844,7 +2845,7 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
}
pInfo->hasResult = true;
pResInfo->numOfRes = 1;
//DO_UPDATE_TAG_COLUMNS(pCtx, ts);
// DO_UPDATE_TAG_COLUMNS(pCtx, ts);
}
break;
}

@ -2854,7 +2855,6 @@ int32_t lastRowFunction(SqlFunctionCtx* pCtx) {
return TSDB_CODE_SUCCESS;
}

int32_t lastRowFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);

@ -2863,14 +2863,13 @@ int32_t lastRowFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SFirstLastRes* pRes = GET_ROWCELL_INTERBUF(pResInfo);
colDataAppend(pCol, pBlock->info.rows, pRes->buf + sizeof(TSKEY), pRes->isNull);
//handle selectivity
// handle selectivity
STuplePos* pTuplePos = (STuplePos*)(pRes->buf + pRes->bytes + sizeof(TSKEY));
setSelectivityValue(pCtx, pBlock, pTuplePos, pBlock->info.rows);
return pResInfo->numOfRes;
}

bool getDiffFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(SDiffInfo);
return true;
@ -2884,7 +2883,11 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
SDiffInfo* pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo);
pDiffInfo->hasPrev = false;
pDiffInfo->prev.i64 = 0;
pDiffInfo->ignoreNegative = pCtx->param[1].param.i; // TODO set correct param
if (pCtx->numOfParams > 1) {
pDiffInfo->ignoreNegative = pCtx->param[1].param.i; // TODO set correct param
} else {
pDiffInfo->ignoreNegative = false;
}
pDiffInfo->includeNull = false;
pDiffInfo->firstOutput = false;
return true;

@ -2999,10 +3002,8 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;
int32_t numOfElems = 0;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
int32_t startOffset = pCtx->offset;
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;

@ -3014,9 +3015,6 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
if (pDiffInfo->includeNull) {
colDataSetNull_f(pOutput->nullbitmap, pos);
if (tsList != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems += 1;
}

@ -3027,9 +3025,6 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
if (pDiffInfo->hasPrev) {
doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order);
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems++;
} else {

@ -3045,9 +3040,6 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
if (pDiffInfo->includeNull) {
colDataSetNull_f(pOutput->nullbitmap, pos);
if (tsList != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems += 1;
}

@ -3059,9 +3051,6 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
// there is a row of previous data block to be handled in the first place.
if (pDiffInfo->hasPrev) {
doHandleDiff(pDiffInfo, pInputCol->info.type, pv, pOutput, pos, pCtx->order);
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &pDiffInfo->prevTs);
}
numOfElems++;
} else {

@ -3069,9 +3058,6 @@ int32_t diffFunction(SqlFunctionCtx* pCtx) {
}
pDiffInfo->hasPrev = true;
if (pTsOutput != NULL) {
pDiffInfo->prevTs = tsList[i];
}
}
}
@ -3208,7 +3194,10 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
if (pCtx->subsidiaries.num > 0) {
saveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
pItem->tuplePos.offset);
#endif
// allocate the buffer and keep the data of this row into the new allocated buffer
pEntryInfo->numOfRes++;
taosheapsort((void*)pItems, sizeof(STopBotResItem), pEntryInfo->numOfRes, (const void*)&type, topBotResComparFn,

@ -3229,7 +3218,9 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
if (pCtx->subsidiaries.num > 0) {
copyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
#endif
taosheapadjust((void*)pItems, sizeof(STopBotResItem), 0, pEntryInfo->numOfRes - 1, (const void*)&type,
topBotResComparFn, NULL, !isTopQuery);
}

@ -3239,7 +3230,11 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
SFilePage* pPage = NULL;
int32_t completeRowSize = pSrcBlock->info.rowSize + (int32_t) taosArrayGetSize(pSrcBlock->pDataBlock) * sizeof(bool);
int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool);
for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
completeRowSize += pc->pExpr->base.resSchema.bytes;
}
if (pCtx->curBufPage == -1) {
pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);

@ -3257,19 +3252,22 @@ void saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
// keep the current row data, extract method
int32_t offset = 0;
bool* nullList = (bool*)((char*)pPage + pPage->num);
char* pStart = (char*)(nullList + sizeof(bool) * (int32_t) taosArrayGetSize(pSrcBlock->pDataBlock));
for (int32_t i = 0; i < (int32_t) taosArrayGetSize(pSrcBlock->pDataBlock); ++i) {
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, i);
bool isNull = colDataIsNull_s(pCol, rowIndex);
if (isNull) {
nullList[i] = true;
char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num);
for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) {
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
offset += pCol->info.bytes;
continue;
}
char* p = colDataGetData(pCol, rowIndex);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
memcpy(pStart + offset, p, varDataTLen(p));
memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
} else {
memcpy(pStart + offset, p, pCol->info.bytes);
}

@ -3287,14 +3285,18 @@ void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);
int32_t numOfCols = taosArrayGetSize(pSrcBlock->pDataBlock);
int32_t numOfCols = pCtx->subsidiaries.num;
bool* nullList = (bool*)((char*)pPage + pPos->offset);
char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
int32_t offset = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, i);
SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
offset += pCol->info.bytes;
continue;

@ -3302,7 +3304,7 @@ void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
char* p = colDataGetData(pCol, rowIndex);
if (IS_VAR_DATA_TYPE(pCol->info.type)) {
memcpy(pStart + offset, p, varDataTLen(p));
memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
} else {
memcpy(pStart + offset, p, pCol->info.bytes);
}

@ -3316,7 +3318,7 @@ void copyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pS
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
STopBotRes* pRes = GET_ROWCELL_INTERBUF(pEntryInfo);
STopBotRes* pRes = getTopBotOutputInfo(pCtx);
int16_t type = pCtx->input.pData[0]->info.type;
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;

@ -3333,7 +3335,10 @@ int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
} else {
colDataAppend(pCol, currentRow, (const char*)&pItem->v.i, false);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_finalize i:%d,item:%p,pageId:%d, offset:%d\n", i, pItem, pItem->tuplePos.pageId,
pItem->tuplePos.offset);
#endif
setSelectivityValue(pCtx, pBlock, &pRes->pItems[i].tuplePos, currentRow);
currentRow += 1;
}
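Aside on the saveTupleData()/copyTupleData() hunks above: the rewritten tuple row keeps one bool null flag per subsidiary output column, followed by the packed column values, and the offset advances by the column width even for null cells. The sketch below only illustrates that layout; its types and helpers are simplified placeholders, not the engine's real structs.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct SubsidiaryColSketch {
  int32_t     bytes;   /* resSchema.bytes of the subsidiary expression */
  const void* value;   /* pointer to the row's value, NULL if the cell is null */
} SubsidiaryColSketch;

/* Pack one row: returns the number of bytes written into buf. */
static int32_t packTupleRowSketch(char* buf, const SubsidiaryColSketch* cols, int32_t numOfCols) {
  bool* nullList = (bool*)buf;
  char* pStart = (char*)(nullList + numOfCols);   /* values start right after the null flags */
  int32_t offset = 0;
  for (int32_t i = 0; i < numOfCols; ++i) {
    nullList[i] = (cols[i].value == NULL);
    if (!nullList[i]) {
      memcpy(pStart + offset, cols[i].value, cols[i].bytes);
    }
    offset += cols[i].bytes;   /* offset advances even for null cells, as in the diff */
  }
  return (int32_t)(numOfCols * sizeof(bool)) + offset;
}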
@ -3572,7 +3577,7 @@ bool elapsedFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo
pInfo->min = MAX_TS_KEY;
pInfo->max = 0;
if (pCtx->numOfParams > 2) {
if (pCtx->numOfParams > 1) {
pInfo->timeUnit = pCtx->param[1].param.i;
} else {
pInfo->timeUnit = 1;

@ -4513,7 +4518,6 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {
SMavgInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pTsOutput = pCtx->pTsOutput;

@ -4553,10 +4557,6 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) {
colDataAppend(pOutput, pos, (char*)&result, false);
}
// TODO: remove this after pTsOutput is handled
if (pTsOutput != NULL) {
colDataAppendInt64(pTsOutput, pos, &tsList[i]);
}
numOfElems++;
}

@ -4903,11 +4903,11 @@ static void doModeAdd(SModeInfo* pInfo, char* data, bool isNull) {
return;
}
int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes;
SModeItem** pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes);
int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes;
SModeItem** pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes);
if (pHashItem == NULL) {
int32_t size = sizeof(SModeItem) + pInfo->colBytes;
SModeItem* pItem = (SModeItem*)(pInfo->pItems + pInfo->numOfPoints * size);
int32_t size = sizeof(SModeItem) + pInfo->colBytes;
SModeItem* pItem = (SModeItem*)(pInfo->pItems + pInfo->numOfPoints * size);
memcpy(pItem->data, data, pInfo->colBytes);
pItem->count += 1;

@ -4920,7 +4920,7 @@ static void doModeAdd(SModeInfo* pInfo, char* data, bool isNull) {
int32_t modeFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SModeInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SModeInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;

@ -4968,7 +4968,6 @@ int32_t modeFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return pResInfo->numOfRes;
}

bool getTwaFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(STwaInfo);
return true;

@ -5220,7 +5219,7 @@ int32_t twaFinalize(struct SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return functionFinalize(pCtx, pBlock);
}
bool blockDistSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo) {
bool blockDistSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
if (!functionSetup(pCtx, pResultInfo)) {
return false;
}

@ -5252,7 +5251,7 @@ int32_t blockDistFunction(SqlFunctionCtx* pCtx) {
pDistInfo->defMinRows = p1.defMinRows;
pDistInfo->defMaxRows = p1.defMaxRows;
pDistInfo->rowSize = p1.rowSize;
pDistInfo->rowSize = p1.rowSize;
pDistInfo->numOfSmallBlocks = p1.numOfSmallBlocks;
if (pDistInfo->minRows > p1.minRows) {

@ -5338,16 +5337,18 @@ int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t row = 0;
char st[256] = {0};
double totalRawSize = pData->totalRows * pData->rowSize;
int32_t len =
sprintf(st + VARSTR_HEADER_SIZE, "Total_Blocks=[%d] Total_Size=[%.2f Kb] Average_size=[%.2f Kb] Compression_Ratio=[%.2f %c]",
pData->numOfBlocks, pData->totalSize / 1024.0, ((double)pData->totalSize) / pData->numOfBlocks,
pData->totalSize * 100 / totalRawSize, '%');
int32_t len = sprintf(st + VARSTR_HEADER_SIZE,
"Total_Blocks=[%d] Total_Size=[%.2f Kb] Average_size=[%.2f Kb] Compression_Ratio=[%.2f %c]",
pData->numOfBlocks, pData->totalSize / 1024.0, ((double)pData->totalSize) / pData->numOfBlocks,
pData->totalSize * 100 / totalRawSize, '%');
varDataSetLen(st, len);
colDataAppend(pColInfo, row++, st, false);
len = sprintf(st + VARSTR_HEADER_SIZE, "Total_Rows=[%"PRId64"] Inmem_Rows=[%d] MinRows=[%d] MaxRows=[%d] Average_Rows=[%"PRId64"]",
pData->totalRows, pData->numOfInmemRows, pData->minRows, pData->maxRows, pData->totalRows / pData->numOfBlocks);
len = sprintf(st + VARSTR_HEADER_SIZE,
"Total_Rows=[%" PRId64 "] Inmem_Rows=[%d] MinRows=[%d] MaxRows=[%d] Average_Rows=[%" PRId64 "]",
pData->totalRows, pData->numOfInmemRows, pData->minRows, pData->maxRows,
pData->totalRows / pData->numOfBlocks);
varDataSetLen(st, len);
colDataAppend(pColInfo, row++, st, false);
@ -5523,25 +5524,25 @@ bool irateFuncSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
SRateInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
pInfo->firstKey = INT64_MIN;
pInfo->lastKey = INT64_MIN;
pInfo->firstKey = INT64_MIN;
pInfo->lastKey = INT64_MIN;
pInfo->firstValue = (double)INT64_MIN;
pInfo->lastValue = (double)INT64_MIN;
pInfo->lastValue = (double)INT64_MIN;
pInfo->hasResult = 0;
pInfo->hasResult = 0;
return true;
}
int32_t irateFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SRateInfo* pRateInfo = GET_ROWCELL_INTERBUF(pResInfo);
SRateInfo* pRateInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
TSKEY* tsList = (int64_t*)pInput->pPTS->pData;
int32_t numOfElems = 0;
int32_t type = pInputCol->info.type;

@ -5553,13 +5554,13 @@ int32_t irateFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
char* data = colDataGetData(pInputCol, i);
double v = 0;
char* data = colDataGetData(pInputCol, i);
double v = 0;
GET_TYPED_DATA(v, double, type, data);
if (INT64_MIN == pRateInfo->lastKey) {
pRateInfo->lastValue = v;
pRateInfo->lastKey = tsList[i];
pRateInfo->lastKey = tsList[i];
continue;
}

@ -5570,7 +5571,7 @@ int32_t irateFunction(SqlFunctionCtx* pCtx) {
}
pRateInfo->lastValue = v;
pRateInfo->lastKey = tsList[i];
pRateInfo->lastKey = tsList[i];
continue;
}

@ -5579,7 +5580,6 @@ int32_t irateFunction(SqlFunctionCtx* pCtx) {
pRateInfo->firstValue = v;
pRateInfo->firstKey = tsList[i];
}
}
SET_VAL(pResInfo, numOfElems, 1);

@ -5605,7 +5605,7 @@ static double doCalcRate(const SRateInfo* pRateInfo, double tickPerSec) {
return 0;
}
return (duration > 0)? ((double)diff) / (duration/tickPerSec):0.0;
return (duration > 0) ? ((double)diff) / (duration / tickPerSec) : 0.0;
}
int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {

@ -5616,7 +5616,7 @@ int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
pResInfo->isNullRes = (pResInfo->numOfRes == 0) ? 1 : 0;
SRateInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
double result = doCalcRate(pInfo, (double)TSDB_TICK_PER_SECOND(pCtx->param[1].param.i));
double result = doCalcRate(pInfo, (double)TSDB_TICK_PER_SECOND(pCtx->param[1].param.i));
colDataAppend(pCol, pBlock->info.rows, (const char*)&result, pResInfo->isNullRes);
return pResInfo->numOfRes;

@ -5624,16 +5624,14 @@ int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t groupKeyFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SGroupKeyInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SGroupKeyInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
int32_t bytes = pInputCol->info.bytes;
SColumnInfoData* pInputCol = pInput->pData[0];
int32_t startIndex = pInput->startRowIndex;
//escape rest of data blocks to avoid first entry to be overwritten.
// escape rest of data blocks to avoid first entry to be overwritten.
if (pInfo->hasResult) {
goto _group_key_over;
}

@ -5645,7 +5643,12 @@ int32_t groupKeyFunction(SqlFunctionCtx* pCtx) {
}
char* data = colDataGetData(pInputCol, startIndex);
memcpy(pInfo->data, data, bytes);
if (IS_VAR_DATA_TYPE(pInputCol->info.type)) {
memcpy(pInfo->data, data,
(pInputCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(data) : varDataTLen(data));
} else {
memcpy(pInfo->data, data, pInputCol->info.bytes);
}
pInfo->hasResult = true;
_group_key_over:
@ -181,6 +181,8 @@ bool fmIsForbidGroupByFunc(int32_t funcId) { return isSpecificClassifyFunc(funcI
bool fmIsSystemInfoFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_SYSTEM_INFO_FUNC); }
bool fmIsImplicitTsFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_IMPLICIT_TS_FUNC); }
bool fmIsInterpFunc(int32_t funcId) {
if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return false;
@ -35,7 +35,7 @@ if (${BUILD_WITH_INVERTEDINDEX})
endif(${BUILD_WITH_INVERTEDINDEX})
if (${BUILD_TEST})
add_subdirectory(test)
endif(${BUILD_TEST})
# if (${BUILD_TEST})
# add_subdirectory(test)
# endif(${BUILD_TEST})
@ -18,6 +18,7 @@
#include "querynodes.h"
#include "taos.h"
#include "taoserror.h"
#include "tdatablock.h"
#define COPY_SCALAR_FIELD(fldname) \
do { \

@ -164,7 +165,15 @@ static int32_t valueNodeCopy(const SValueNode* pSrc, SValueNode* pDst) {
memcpy(pDst->datum.p, pSrc->datum.p, len);
break;
}
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_JSON:{
int32_t len = getJsonValueLen(pSrc->datum.p);
pDst->datum.p = taosMemoryCalloc(1, len);
if (NULL == pDst->datum.p) {
return TSDB_CODE_OUT_OF_MEMORY;
}
memcpy(pDst->datum.p, pSrc->datum.p, len);
break;
}
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
case TSDB_DATA_TYPE_MEDIUMBLOB:

@ -600,7 +609,7 @@ static int32_t selectStmtCopy(const SSelectStmt* pSrc, SSelectStmt* pDst) {
COPY_CHAR_ARRAY_FIELD(stmtName);
COPY_SCALAR_FIELD(precision);
COPY_SCALAR_FIELD(isEmptyResult);
COPY_SCALAR_FIELD(isTimeOrderQuery);
COPY_SCALAR_FIELD(isTimeLineResult);
COPY_SCALAR_FIELD(hasAggFuncs);
COPY_SCALAR_FIELD(hasRepeatScanFuncs);
return TSDB_CODE_SUCCESS;
@ -20,6 +20,7 @@
#include "querynodes.h"
#include "taoserror.h"
#include "tjson.h"
#include "tdatablock.h"
static int32_t nodeToJson(const void* pObj, SJson* pJson);
static int32_t jsonToNode(const SJson* pJson, void* pObj);

@ -2629,7 +2630,18 @@ static int32_t datumToJson(const void* pObj, SJson* pJson) {
case TSDB_DATA_TYPE_VARBINARY:
code = tjsonAddStringToObject(pJson, jkValueDatum, varDataVal(pNode->datum.p));
break;
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_JSON:{
int32_t len = getJsonValueLen(pNode->datum.p);
char* buf = taosMemoryCalloc( len * 2 + 1, sizeof(char));
code = taosHexEncode(pNode->datum.p, buf, len);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(buf);
return TSDB_CODE_TSC_INVALID_VALUE;
}
code = tjsonAddStringToObject(pJson, jkValueDatum, buf);
taosMemoryFree(buf);
break;
}
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
// todo

@ -2752,7 +2764,30 @@ static int32_t jsonToDatum(const SJson* pJson, void* pObj) {
}
break;
}
case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_JSON:{
pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes);
if (NULL == pNode->datum.p) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;
}
char* buf = taosMemoryCalloc(1, pNode->node.resType.bytes * 2 + 1);
if (NULL == buf) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;
}
code = tjsonGetStringValue(pJson, jkValueDatum, buf);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(buf);
break;
}
code = taosHexDecode(buf, pNode->datum.p, pNode->node.resType.bytes);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(buf);
break;
}
taosMemoryFree(buf);
break;
}
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
// todo
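Aside on the datumToJson()/jsonToDatum() hunks above: the binary JSON datum is hex-encoded into a text buffer of len * 2 + 1 bytes for the JSON form and decoded back on load, using the project's taosHexEncode/taosHexDecode helpers. The stand-alone sketch below only demonstrates that round trip; the tiny encode/decode helpers here are simplified stand-ins, not the project's implementations.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void hexEncodeSketch(const unsigned char* in, char* out, int len) {
  for (int i = 0; i < len; ++i) sprintf(out + i * 2, "%02x", in[i]);
  out[len * 2] = '\0';
}

static void hexDecodeSketch(const char* in, unsigned char* out, int len) {
  for (int i = 0; i < len; ++i) {
    unsigned int byte = 0;
    sscanf(in + i * 2, "%2x", &byte);
    out[i] = (unsigned char)byte;
  }
}

int main(void) {
  const unsigned char datum[] = {0x01, 0xab, 0x00, 0x7f};  /* pretend serialized JSON tag value */
  int len = (int)sizeof(datum);
  char* text = calloc((size_t)len * 2 + 1, 1);             /* len * 2 + 1, as in the diff */
  unsigned char decoded[sizeof(datum)];
  hexEncodeSketch(datum, text, len);
  hexDecodeSketch(text, decoded, len);
  printf("round trip ok: %d\n", memcmp(datum, decoded, len) == 0);
  free(text);
  return 0;
}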
@ -20,6 +20,7 @@
#include "taos.h"
#include "taoserror.h"
#include "thash.h"
#include "tdatablock.h"
static SNode* makeNode(ENodeType type, size_t size) {
SNode* p = taosMemoryCalloc(1, size);

@ -1675,6 +1676,10 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal) {
pVal->pz[pVal->nLen + VARSTR_HEADER_SIZE] = 0;
break;
case TSDB_DATA_TYPE_JSON:
pVal->nLen = getJsonValueLen(pNode->datum.p);
pVal->pz = taosMemoryMalloc(pVal->nLen);
memcpy(pVal->pz, pNode->datum.p, pVal->nLen);
break;
case TSDB_DATA_TYPE_DECIMAL:
case TSDB_DATA_TYPE_BLOB:
// todo
@ -62,7 +62,6 @@ int32_t getNumOfColumns(const STableMeta* pTableMeta);
int32_t getNumOfTags(const STableMeta* pTableMeta);
STableComInfo getTableInfo(const STableMeta* pTableMeta);
STableMeta* tableMetaDup(const STableMeta* pTableMeta);
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, SMsgBuf* pMsgBuf);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
@ -717,7 +717,7 @@ SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pPr
select->pProjectionList = pProjectionList;
select->pFromTable = pTable;
sprintf(select->stmtName, "%p", select);
select->isTimeOrderQuery = true;
select->isTimeLineResult = true;
return (SNode*)select;
}
@ -440,6 +440,10 @@ static bool isTimelineFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsTimelineFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isImplicitTsFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsImplicitTsFunc(((SFunctionNode*)pNode)->funcId));
}
static bool isScanPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsScanPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}

@ -479,6 +483,35 @@ static SNodeList* getProjectList(const SNode* pNode) {
return NULL;
}
static bool isTimeLineQuery(SNode* pStmt) {
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
return ((SSelectStmt*)pStmt)->isTimeLineResult;
} else {
return false;
}
}
static bool isPrimaryKeyImpl(SNode* pExpr) {
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
return (PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)pExpr)->colId);
} else if (QUERY_NODE_FUNCTION == nodeType(pExpr)) {
SFunctionNode* pFunc = (SFunctionNode*)pExpr;
if (FUNCTION_TYPE_SELECT_VALUE == pFunc->funcType) {
return isPrimaryKeyImpl(nodesListGetNode(pFunc->pParameterList, 0));
} else if (FUNCTION_TYPE_WSTARTTS == pFunc->funcType || FUNCTION_TYPE_WENDTS == pFunc->funcType) {
return true;
}
}
return false;
}
static bool isPrimaryKey(STempTableNode* pTable, SNode* pExpr) {
if (!isTimeLineQuery(pTable->pSubquery)) {
return false;
}
return isPrimaryKeyImpl(pExpr);
}
static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* pColSchema, int32_t tagFlag,
SColumnNode* pCol) {
strcpy(pCol->dbName, pTable->table.dbName);

@ -500,7 +533,7 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p
}
}
static void setColumnInfoByExpr(const STableNode* pTable, SExprNode* pExpr, SColumnNode** pColRef) {
static void setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColumnNode** pColRef) {
SColumnNode* pCol = *pColRef;
// pCol->pProjectRef = (SNode*)pExpr;

@ -508,15 +541,8 @@ static void setColumnInfoByExpr(const STableNode* pTable, SExprNode* pExpr, SCol
pExpr->pAssociation = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES);
}
taosArrayPush(pExpr->pAssociation, &pColRef);
if (NULL != pTable) {
strcpy(pCol->tableAlias, pTable->tableAlias);
} else if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
SColumnNode* pProjCol = (SColumnNode*)pExpr;
strcpy(pCol->tableAlias, pProjCol->tableAlias);
pCol->tableId = pProjCol->tableId;
pCol->colId = pProjCol->colId;
pCol->colType = pProjCol->colType;
}
strcpy(pCol->tableAlias, pTable->table.tableAlias);
pCol->colId = isPrimaryKey(pTable, (SNode*)pExpr) ? PRIMARYKEY_TIMESTAMP_COL_ID : 0;
strcpy(pCol->colName, pExpr->aliasName);
if ('\0' == pCol->node.aliasName[0]) {
strcpy(pCol->node.aliasName, pCol->colName);

@ -538,8 +564,9 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p
nodesListAppend(pList, (SNode*)pCol);
}
} else {
SNodeList* pProjectList = getProjectList(((STempTableNode*)pTable)->pSubquery);
SNode* pNode;
STempTableNode* pTempTable = (STempTableNode*)pTable;
SNodeList* pProjectList = getProjectList(pTempTable->pSubquery);
SNode* pNode;
FOREACH(pNode, pProjectList) {
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {

@ -547,7 +574,7 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p
}
nodesListAppend(pList, (SNode*)pCol);
SListCell* pCell = nodesListGetCell(pList, LIST_LENGTH(pList) - 1);
setColumnInfoByExpr(pTable, (SExprNode*)pNode, (SColumnNode**)&pCell->pNode);
setColumnInfoByExpr(pTempTable, (SExprNode*)pNode, (SColumnNode**)&pCell->pNode);
}
}
return TSDB_CODE_SUCCESS;

@ -557,35 +584,6 @@ static bool isInternalPrimaryKey(const SColumnNode* pCol) {
return PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && 0 == strcmp(pCol->colName, PK_TS_COL_INTERNAL_NAME);
}
static bool isTimeOrderQuery(SNode* pStmt) {
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
return ((SSelectStmt*)pStmt)->isTimeOrderQuery;
} else {
return false;
}
}
static bool isPrimaryKeyImpl(STempTableNode* pTable, SNode* pExpr) {
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
return (PRIMARYKEY_TIMESTAMP_COL_ID == ((SColumnNode*)pExpr)->colId);
} else if (QUERY_NODE_FUNCTION == nodeType(pExpr)) {
SFunctionNode* pFunc = (SFunctionNode*)pExpr;
if (FUNCTION_TYPE_SELECT_VALUE == pFunc->funcType) {
return isPrimaryKeyImpl(pTable, nodesListGetNode(pFunc->pParameterList, 0));
} else if (FUNCTION_TYPE_WSTARTTS == pFunc->funcType || FUNCTION_TYPE_WENDTS == pFunc->funcType) {
return true;
}
}
return false;
}
static bool isPrimaryKey(STempTableNode* pTable, SNode* pExpr) {
if (!isTimeOrderQuery(pTable->pSubquery)) {
return false;
}
return isPrimaryKeyImpl(pTable, pExpr);
}
static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef, const STableNode* pTable,
bool* pFound) {
SColumnNode* pCol = *pColRef;

@ -606,18 +604,19 @@ static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef,
}
}
} else {
SNodeList* pProjectList = getProjectList(((STempTableNode*)pTable)->pSubquery);
SNode* pNode;
STempTableNode* pTempTable = (STempTableNode*)pTable;
SNodeList* pProjectList = getProjectList(pTempTable->pSubquery);
SNode* pNode;
FOREACH(pNode, pProjectList) {
SExprNode* pExpr = (SExprNode*)pNode;
if (0 == strcmp(pCol->colName, pExpr->aliasName)) {
if (*pFound) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pCol->colName);
}
setColumnInfoByExpr(pTable, pExpr, pColRef);
setColumnInfoByExpr(pTempTable, pExpr, pColRef);
*pFound = true;
} else if (isPrimaryKey((STempTableNode*)pTable, pNode) && isInternalPrimaryKey(pCol)) {
setColumnInfoByExpr(pTable, pExpr, pColRef);
} else if (isPrimaryKey(pTempTable, pNode) && isInternalPrimaryKey(pCol)) {
setColumnInfoByExpr(pTempTable, pExpr, pColRef);
*pFound = true;
}
}
@ -1259,6 +1258,7 @@ static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
pSelect->hasTailFunc = pSelect->hasTailFunc ? true : (FUNCTION_TYPE_TAIL == pFunc->funcType);
pSelect->hasInterpFunc = pSelect->hasInterpFunc ? true : (FUNCTION_TYPE_INTERP == pFunc->funcType);
pSelect->hasLastRowFunc = pSelect->hasLastRowFunc ? true : (FUNCTION_TYPE_LAST_ROW == pFunc->funcType);
pSelect->hasTimeLineFunc = pSelect->hasLastRowFunc ? true : fmIsTimelineFunc(pFunc->funcId);
}
}

@ -1477,7 +1477,7 @@ static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode
strcpy(pFunc->node.aliasName, ((SExprNode*)*pNode)->aliasName);
pCxt->errCode = nodesListMakeAppend(&pFunc->pParameterList, *pNode);
if (TSDB_CODE_SUCCESS == pCxt->errCode) {
pCxt->errCode == getFuncInfo(pCxt, pFunc);
pCxt->errCode = getFuncInfo(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == pCxt->errCode) {
*pNode = (SNode*)pFunc;

@ -1633,6 +1633,16 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect)
return TSDB_CODE_SUCCESS;
}
static int32_t checkWindowFuncCoexist(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (NULL == pSelect->pWindow) {
return TSDB_CODE_SUCCESS;
}
if (NULL != pSelect->pWindow && !pSelect->hasAggFuncs) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN);
}
return TSDB_CODE_SUCCESS;
}
static int32_t toVgroupsInfo(SArray* pVgs, SVgroupsInfo** pVgsInfo) {
size_t vgroupNum = taosArrayGetSize(pVgs);
*pVgsInfo = taosMemoryCalloc(1, sizeof(SVgroupsInfo) + sizeof(SVgroupInfo) * vgroupNum);

@ -2137,7 +2147,7 @@ static int32_t translateGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
}
if (NULL != pSelect->pGroupByList) {
pCxt->currClause = SQL_CLAUSE_GROUP_BY;
pSelect->isTimeOrderQuery = false;
pSelect->isTimeLineResult = false;
return translateExprList(pCxt, pSelect->pGroupByList);
}
return TSDB_CODE_SUCCESS;

@ -2471,9 +2481,9 @@ static int32_t createPrimaryKeyCol(STranslateContext* pCxt, SNode** pPrimaryKey)
return code;
}
static EDealRes rewriteTimelineFuncImpl(SNode* pNode, void* pContext) {
static EDealRes appendTsForImplicitTsFuncImpl(SNode* pNode, void* pContext) {
STranslateContext* pCxt = pContext;
if (isTimelineFunc(pNode)) {
if (isImplicitTsFunc(pNode)) {
SFunctionNode* pFunc = (SFunctionNode*)pNode;
SNode* pPrimaryKey = NULL;
pCxt->errCode = createPrimaryKeyCol(pCxt, &pPrimaryKey);

@ -2485,8 +2495,8 @@ static EDealRes rewriteTimelineFuncImpl(SNode* pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
static int32_t rewriteTimelineFunc(STranslateContext* pCxt, SSelectStmt* pSelect) {
nodesWalkSelectStmt(pSelect, SQL_CLAUSE_FROM, rewriteTimelineFuncImpl, pCxt);
static int32_t appendTsForImplicitTsFunc(STranslateContext* pCxt, SSelectStmt* pSelect) {
nodesWalkSelectStmt(pSelect, SQL_CLAUSE_FROM, appendTsForImplicitTsFuncImpl, pCxt);
return pCxt->errCode;
}

@ -2579,6 +2589,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = checkAggColCoexist(pCxt, pSelect);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkWindowFuncCoexist(pCxt, pSelect);
}
if (TSDB_CODE_SUCCESS == code) {
code = checkLimit(pCxt, pSelect);
}

@ -2586,7 +2599,7 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
code = translateInterp(pCxt, pSelect);
}
if (TSDB_CODE_SUCCESS == code) {
code = rewriteTimelineFunc(pCxt, pSelect);
code = appendTsForImplicitTsFunc(pCxt, pSelect);
}
if (TSDB_CODE_SUCCESS == code) {
code = replaceOrderByAlias(pCxt, pSelect->pProjectionList, pSelect->pOrderByList);

@ -3208,7 +3221,7 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN
if (TSDB_CODE_SUCCESS == code) {
if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_BINARY_LEN) ||
(TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_NCHAR_LEN)) {
code = code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
}
}
if (TSDB_CODE_SUCCESS == code) {
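Aside on the translator hunks above: the rewrite formerly called rewriteTimelineFunc() is renamed to appendTsForImplicitTsFunc(), and it now keys off the new FUNC_MGT_IMPLICIT_TS_FUNC classification instead of the timeline classification: while walking the SELECT, any function flagged as implicit-ts gets the primary timestamp column appended to its parameter list. The sketch below mirrors only the shape of that decision; the node types and stub helpers are simplified placeholders, not the project's API.

#include <stdbool.h>
#include <stdint.h>

typedef struct NodeSketch NodeSketch;
struct NodeSketch {
  int         type;        /* FUNCTION vs. other node kinds (placeholder) */
  int         funcId;
  NodeSketch* params[8];
  int         numOfParams;
};

enum { NODE_FUNCTION_SKETCH = 1 };

static bool isImplicitTsFuncIdSketch(int funcId) { return funcId == 42; }      /* stand-in classify check */
static NodeSketch* makePrimaryKeyColSketch(void) { static NodeSketch ts; return &ts; }  /* stand-in for createPrimaryKeyCol() */

static void appendTsIfImplicitSketch(NodeSketch* pNode) {
  if (pNode->type == NODE_FUNCTION_SKETCH && isImplicitTsFuncIdSketch(pNode->funcId)) {
    /* same effect as nodesListMakeAppend(&pFunc->pParameterList, pPrimaryKey) in the diff */
    pNode->params[pNode->numOfParams++] = makePrimaryKeyColSketch();
  }
}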
@ -185,19 +185,21 @@ static char* getSyntaxErrFormat(int32_t errCode) {
case TSDB_CODE_PAR_INVALID_REDISTRIBUTE_VG:
return "The REDISTRIBUTE VGROUP statement only support 1 to 3 dnodes";
case TSDB_CODE_PAR_FILL_NOT_ALLOWED_FUNC:
return "%s function does not supportted in fill query";
return "%s function is not supported in fill query";
case TSDB_CODE_PAR_INVALID_WINDOW_PC:
return "_WSTARTTS, _WENDTS and _WDURATION can only be used in window query";
case TSDB_CODE_PAR_WINDOW_NOT_ALLOWED_FUNC:
return "%s function does not supportted in time window query";
return "%s function is not supported in time window query";
case TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC:
return "%s function does not supportted in stream query";
return "%s function is not supported in stream query";
case TSDB_CODE_PAR_GROUP_BY_NOT_ALLOWED_FUNC:
return "%s function does not supportted in group query";
return "%s function is not supported in group query";
case TSDB_CODE_PAR_INVALID_TABLE_OPTION:
return "Invalid option %s";
case TSDB_CODE_PAR_INVALID_INTERP_CLAUSE:
return "Invalid usage of RANGE clause, EVERY clause or FILL clause";
case TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN:
return "No valid function in window query";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
default:

@ -219,6 +221,7 @@ int32_t buildInvalidOperationMsg(SMsgBuf* pBuf, const char* msg) {
}
int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr) {
if(pBuf == NULL) return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
const char* msgFormat1 = "syntax error near \'%s\'";
const char* msgFormat2 = "syntax error near \'%s\' (%s)";
const char* msgFormat3 = "%s";

@ -337,16 +340,16 @@ int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen) {
static bool isValidateTag(char* input) {
if (!input) return false;
for (size_t i = 0; i < strlen(input); ++i) {
#ifdef WINDOWS
#ifdef WINDOWS
if (input[i] < 0x20 || input[i] > 0x7E) return false;
#else
#else
if (isprint(input[i]) == 0) return false;
#endif
#endif
}
return true;
}
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, SMsgBuf* pMsgBuf) {
int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, void* pMsgBuf) {
int32_t retCode = TSDB_CODE_SUCCESS;
cJSON* root = NULL;
SHashObj* keyHash = NULL;

@ -381,7 +384,8 @@ int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, SMs
char* jsonKey = item->string;
if (!isValidateTag(jsonKey)) {
fprintf(stdout,"%s(%d) %s %08" PRId64 "\n", __FILE__, __LINE__,__func__,taosGetSelfPthreadId());fflush(stdout);
fprintf(stdout, "%s(%d) %s %08" PRId64 "\n", __FILE__, __LINE__, __func__, taosGetSelfPthreadId());
fflush(stdout);
retCode = buildSyntaxErrMsg(pMsgBuf, "json key not validate", jsonKey);
goto end;
}
@ -542,6 +542,7 @@ static int32_t createIndefRowsFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt
pIdfRowsFunc->isTailFunc = pSelect->hasTailFunc;
pIdfRowsFunc->isUniqueFunc = pSelect->hasUniqueFunc;
pIdfRowsFunc->isTimeLineFunc = pSelect->hasTimeLineFunc;
// indefinite rows functions and _select_values functions
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, fmIsVectorFunc, &pIdfRowsFunc->pFuncs);

@ -727,6 +728,9 @@ static int32_t createFillLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
}
SFillNode* pFillNode = (SFillNode*)(((SIntervalWindowNode*)pSelect->pWindow)->pFill);
if (FILL_MODE_NONE == pFillNode->mode) {
return TSDB_CODE_SUCCESS;
}
SFillLogicNode* pFill = (SFillLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_FILL);
if (NULL == pFill) {
@ -594,7 +594,7 @@ static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* p
|
|||
|
||||
typedef struct SPartAggCondContext {
|
||||
SAggLogicNode* pAgg;
|
||||
bool hasAggFunc;
|
||||
bool hasAggFunc;
|
||||
} SPartAggCondContext;
|
||||
|
||||
static EDealRes partAggCondHasAggFuncImpl(SNode* pNode, void* pContext) {
|
||||
|
@ -619,11 +619,11 @@ static int32_t partitionAggCondHasAggFunc(SAggLogicNode* pAgg, SNode* pCond) {
|
|||
|
||||
static int32_t partitionAggCondConj(SAggLogicNode* pAgg, SNode** ppAggFuncCond, SNode** ppGroupKeyCond) {
|
||||
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)pAgg->node.pConditions;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
int32_t code = TSDB_CODE_SUCCESS;
|
||||
|
||||
SNodeList* pAggFuncConds = NULL;
|
||||
SNodeList* pGroupKeyConds = NULL;
|
||||
SNode* pCond = NULL;
|
||||
SNode* pCond = NULL;
|
||||
FOREACH(pCond, pLogicCond->pParameterList) {
|
||||
if (partitionAggCondHasAggFunc(pAgg, pCond)) {
|
||||
code = nodesListMakeAppend(&pAggFuncConds, nodesCloneNode(pCond));
|
||||
|
@ -677,14 +677,14 @@ static int32_t pushCondToAggCond(SOptimizeContext* pCxt, SAggLogicNode* pAgg, SN
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
typedef struct SRewriteAggGroupKeyCondContext{
|
||||
SAggLogicNode *pAgg;
|
||||
int32_t errCode;
|
||||
typedef struct SRewriteAggGroupKeyCondContext {
|
||||
SAggLogicNode* pAgg;
|
||||
int32_t errCode;
|
||||
} SRewriteAggGroupKeyCondContext;
|
||||
|
||||
static EDealRes rewriteAggGroupKeyCondForPushDownImpl(SNode** pNode, void* pContext) {
|
||||
SRewriteAggGroupKeyCondContext* pCxt = pContext;
|
||||
SAggLogicNode* pAgg = pCxt->pAgg;
|
||||
SAggLogicNode* pAgg = pCxt->pAgg;
|
||||
if (QUERY_NODE_COLUMN == nodeType(*pNode)) {
|
||||
SNode* pGroupKey = NULL;
|
||||
FOREACH(pGroupKey, pAgg->pGroupKeys) {
|
||||
|
@ -717,15 +717,15 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg
      OPTIMIZE_FLAG_TEST_MASK(pAgg->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) {
    return TSDB_CODE_SUCCESS;
  }
  //TODO: remove it after full implementation of pushing down to child
  // TODO: remove it after full implementation of pushing down to child
  if (1 != LIST_LENGTH(pAgg->node.pChildren) ||
      QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) &&
          QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
    return TSDB_CODE_SUCCESS;
  }

  SNode* pAggFuncCond = NULL;
  SNode* pGroupKeyCond = NULL;
  int32_t code = partitionAggCond(pAgg, &pAggFuncCond, &pGroupKeyCond);
  if (TSDB_CODE_SUCCESS == code && NULL != pAggFuncCond) {
    code = pushCondToAggCond(pCxt, pAgg, &pAggFuncCond);

@ -1462,9 +1462,17 @@ static int32_t rewriteTailOptCreateSort(SIndefRowsFuncLogicNode* pIndef, SLogicN
  TSWAP(pSort->node.pChildren, pIndef->node.pChildren);
  pSort->node.precision = pIndef->node.precision;

  SFunctionNode* pTail = NULL;
  SNode* pFunc = NULL;
  FOREACH(pFunc, pIndef->pFuncs) {
    if (FUNCTION_TYPE_TAIL == ((SFunctionNode*)pFunc)->funcType) {
      pTail = (SFunctionNode*)pFunc;
      break;
    }
  }

  // tail(expr, [limit, offset,] _rowts)
  SFunctionNode* pTail = (SFunctionNode*)nodesListGetNode(pIndef->pFuncs, 0);
  int32_t rowtsIndex = LIST_LENGTH(pTail->pParameterList) - 1;

  int32_t code = nodesListMakeStrictAppend(
      &pSort->pSortKeys, rewriteTailOptCreateOrderByExpr(nodesListGetNode(pTail->pParameterList, rowtsIndex)));

@ -1484,12 +1492,12 @@ static int32_t rewriteTailOptCreateSort(SIndefRowsFuncLogicNode* pIndef, SLogicN
  return code;
}

static SNode* rewriteTailOptCreateProjectExpr(SFunctionNode* pTail) {
  SNode* pExpr = nodesCloneNode(nodesListGetNode(pTail->pParameterList, 0));
static SNode* rewriteTailOptCreateProjectExpr(SFunctionNode* pFunc) {
  SNode* pExpr = nodesCloneNode(nodesListGetNode(pFunc->pParameterList, 0));
  if (NULL == pExpr) {
    return NULL;
  }
  strcpy(((SExprNode*)pExpr)->aliasName, pTail->node.aliasName);
  strcpy(((SExprNode*)pExpr)->aliasName, pFunc->node.aliasName);
  return pExpr;
}

@ -1502,12 +1510,22 @@ static int32_t rewriteTailOptCreateProject(SIndefRowsFuncLogicNode* pIndef, SLog
  TSWAP(pProject->node.pTargets, pIndef->node.pTargets);
  pProject->node.precision = pIndef->node.precision;

  // tail(expr, [limit, offset,] _rowts)
  SFunctionNode* pTail = (SFunctionNode*)nodesListGetNode(pIndef->pFuncs, 0);
  int32_t limitIndex = LIST_LENGTH(pTail->pParameterList) > 2 ? 1 : -1;
  int32_t offsetIndex = LIST_LENGTH(pTail->pParameterList) > 3 ? 2 : -1;
  int32_t code = TSDB_CODE_SUCCESS;
  SFunctionNode* pTail = NULL;
  SNode* pFunc = NULL;
  FOREACH(pFunc, pIndef->pFuncs) {
    code = nodesListMakeStrictAppend(&pProject->pProjections, rewriteTailOptCreateProjectExpr((SFunctionNode*)pFunc));
    if (TSDB_CODE_SUCCESS != code) {
      break;
    }
    if (FUNCTION_TYPE_TAIL == ((SFunctionNode*)pFunc)->funcType) {
      pTail = (SFunctionNode*)pFunc;
    }
  }

  int32_t code = nodesListMakeStrictAppend(&pProject->pProjections, rewriteTailOptCreateProjectExpr(pTail));
  // tail(expr, [limit, offset,] _rowts)
  int32_t limitIndex = LIST_LENGTH(pTail->pParameterList) > 2 ? 1 : -1;
  int32_t offsetIndex = LIST_LENGTH(pTail->pParameterList) > 3 ? 2 : -1;
  if (TSDB_CODE_SUCCESS == code) {
    code = rewriteTailOptCreateLimit(limitIndex < 0 ? NULL : nodesListGetNode(pTail->pParameterList, limitIndex),
                                     offsetIndex < 0 ? NULL : nodesListGetNode(pTail->pParameterList, offsetIndex),

|
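The two tail() hunks above both rely on the parameter layout noted in the comment: the planner appends _rowts as the last argument of tail(), so the optional limit and offset sit at fixed positions that can be derived from the parameter count alone. A minimal standalone sketch of that index arithmetic (the tailParamIndexes helper and its printed values are illustrative, not part of the patch):

#include <stdio.h>

// Mirrors the index rules used above for tail(expr, [limit, offset,] _rowts):
//   2 params -> expr, _rowts                 (no limit/offset)
//   3 params -> expr, limit, _rowts          (limit at index 1)
//   4 params -> expr, limit, offset, _rowts  (limit at 1, offset at 2)
static void tailParamIndexes(int nParams, int* limitIdx, int* offsetIdx, int* rowtsIdx) {
  *limitIdx = nParams > 2 ? 1 : -1;
  *offsetIdx = nParams > 3 ? 2 : -1;
  *rowtsIdx = nParams - 1;  // _rowts is always appended last
}

int main(void) {
  for (int n = 2; n <= 4; ++n) {
    int l, o, r;
    tailParamIndexes(n, &l, &o, &r);
    printf("params=%d limitIdx=%d offsetIdx=%d rowtsIdx=%d\n", n, l, o, r);
  }
  return 0;
}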
@ -1862,7 +1880,7 @@ static EDealRes mergeProjectionsExpr(SNode** pNode, void* pContext) {
}

static int32_t mergeProjectsOptimizeImpl(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan, SLogicNode* pSelfNode) {
  SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pSelfNode->pChildren, 0);

  SMergeProjectionsContext cxt = {.pChildProj = (SProjectLogicNode*)pChild, .errCode = TSDB_CODE_SUCCESS};
  nodesRewriteExprs(((SProjectLogicNode*)pSelfNode)->pProjections, mergeProjectionsExpr, &cxt);

@ -35,6 +35,8 @@ TEST_F(PlanBasicTest, whereClause) {
|
|||
run("SELECT * FROM t1 WHERE c1 > 10");
|
||||
|
||||
run("SELECT * FROM t1 WHERE ts > TIMESTAMP '2022-04-01 00:00:00' and ts < TIMESTAMP '2022-04-30 23:59:59'");
|
||||
|
||||
run("SELECT ts, c1 FROM t1 WHERE ts > NOW AND ts IS NULL AND (c1 > 0 OR c3 < 20)");
|
||||
}
|
||||
|
||||
TEST_F(PlanBasicTest, func) {
|
||||
|
@ -103,6 +105,22 @@ TEST_F(PlanBasicTest, lastRowFunc) {
|
|||
run("SELECT LAST_ROW(c1), SUM(c3) FROM t1");
|
||||
}
|
||||
|
||||
TEST_F(PlanBasicTest, timeLineFunc) {
|
||||
useDb("root", "test");
|
||||
|
||||
run("SELECT CSUM(c1) FROM t1");
|
||||
|
||||
run("SELECT CSUM(c1) FROM st1");
|
||||
}
|
||||
|
||||
TEST_F(PlanBasicTest, multiResFunc) {
|
||||
useDb("root", "test");
|
||||
|
||||
run("SELECT LAST(*) FROM t1");
|
||||
|
||||
run("SELECT LAST(c1 + 10, c2) FROM st1");
|
||||
}
|
||||
|
||||
TEST_F(PlanBasicTest, sampleFunc) {
|
||||
useDb("root", "test");
|
||||
|
||||
|
|
|
@ -305,18 +305,21 @@ int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t
char* parseTagDatatoJson(void* p) {
  char* string = NULL;
  cJSON* json = cJSON_CreateObject();
  if (json == NULL) {
    goto end;
  }

  SArray* pTagVals = NULL;
  cJSON* json = NULL;
  if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
    goto end;
  }

  int16_t nCols = taosArrayGetSize(pTagVals);
  if (nCols == 0) {
    goto end;
  }
  char tagJsonKey[256] = {0};
  json = cJSON_CreateObject();
  if (json == NULL) {
    goto end;
  }
  for (int j = 0; j < nCols; ++j) {
    STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j);
    // json key encode by binary

@ -374,6 +377,10 @@ char* parseTagDatatoJson(void* p) {
  string = cJSON_PrintUnformatted(json);
end:
  cJSON_Delete(json);
  taosArrayDestroy(pTagVals);
  if(string == NULL){
    string = strdup(TSDB_DATA_NULL_STR_L);
  }
  return string;
}

|
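The reworked parseTagDatatoJson above follows a single-exit cleanup pattern: the cJSON object is only created once the tag array is known to be non-empty, every failure path jumps to the end: label, and a literal NULL string is returned when nothing was produced. A self-contained sketch of that pattern using only the public cJSON API (the key/value pair and the "null" fallback string are illustrative stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cJSON.h"   // header path may differ depending on how cJSON is vendored

// Build a JSON object, print it unformatted, and fall back to "null" on any failure.
static char* buildTagJson(void) {
  char*  result = NULL;
  cJSON* json = cJSON_CreateObject();
  if (json == NULL) {
    goto end;
  }
  if (cJSON_AddStringToObject(json, "k1", "v1") == NULL) {   // illustrative key/value
    goto end;
  }
  if (cJSON_AddNumberToObject(json, "k2", 3.14) == NULL) {
    goto end;
  }
  result = cJSON_PrintUnformatted(json);
end:
  cJSON_Delete(json);          // safe to call with NULL
  if (result == NULL) {
    result = strdup("null");   // analogous to the TSDB_DATA_NULL_STR_L fallback above
  }
  return result;
}

int main(void) {
  char* s = buildTagJson();
  printf("%s\n", s);
  free(s);                     // cJSON_PrintUnformatted returns heap-allocated memory
  return 0;
}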
@ -192,6 +192,9 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
|
|||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
return 18;
|
||||
case TSDB_DATA_TYPE_JSON:
|
||||
terrno = TSDB_CODE_QRY_JSON_IN_ERROR;
|
||||
return 0;
|
||||
default:
|
||||
assert(0);
|
||||
}
|
||||
|
@ -215,6 +218,9 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
|
|||
case TSDB_DATA_TYPE_DOUBLE:
|
||||
case TSDB_DATA_TYPE_TIMESTAMP:
|
||||
return 24;
|
||||
case TSDB_DATA_TYPE_JSON:
|
||||
terrno = TSDB_CODE_QRY_JSON_IN_ERROR;
|
||||
return 0;
|
||||
default:
|
||||
assert(0);
|
||||
}
|
||||
|
|
|
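Both filterGetCompFuncIdx hunks above signal an unsupported JSON comparison by setting terrno and returning a default index, so callers have to clear and then re-check the error variable around the call, which is what the sclExecOperator and vectorCompareImpl changes further down do. A small self-contained sketch of that convention (the lookup function, type constant, and error variable here are stand-ins, not the real filter code):

#include <stdio.h>

#define DEMO_SUCCESS     0
#define DEMO_ERR_JSON_IN 0x071C   // mirrors the spirit of TSDB_CODE_QRY_JSON_IN_ERROR

static int demo_errno = DEMO_SUCCESS;   // stand-in for terrno

// Returns a comparator index; on unsupported input it returns a harmless
// default (0) and records the reason, like filterGetCompFuncIdx above.
static int demoGetCompFuncIdx(int type) {
  if (type == 15 /* pretend this is the JSON type */) {
    demo_errno = DEMO_ERR_JSON_IN;
    return 0;
  }
  return 18;
}

int main(void) {
  demo_errno = DEMO_SUCCESS;            // clear before the call
  int idx = demoGetCompFuncIdx(15);
  if (demo_errno != DEMO_SUCCESS) {     // check the error variable, not the return value
    printf("unsupported type, error 0x%04x\n", demo_errno);
  } else {
    printf("comparator index %d\n", idx);
  }
  return 0;
}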
@ -551,7 +551,9 @@ int32_t sclExecOperator(SOperatorNode *node, SScalarCtx *ctx, SScalarParam *outp
|
|||
SScalarParam* pLeft = ¶ms[0];
|
||||
SScalarParam* pRight = paramNum > 1 ? ¶ms[1] : NULL;
|
||||
|
||||
terrno = TSDB_CODE_SUCCESS;
|
||||
OperatorFn(pLeft, pRight, output, TSDB_ORDER_ASC);
|
||||
code = terrno;
|
||||
|
||||
_return:
|
||||
for (int32_t i = 0; i < paramNum; ++i) {
|
||||
|
@ -693,7 +695,11 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) {
|
|||
res->node.resType.scale = output.columnData->info.scale;
|
||||
res->node.resType.precision = output.columnData->info.precision;
|
||||
int32_t type = output.columnData->info.type;
|
||||
if (IS_VAR_DATA_TYPE(type)) {
|
||||
if (type == TSDB_DATA_TYPE_JSON){
|
||||
int32_t len = getJsonValueLen(output.columnData->pData);
|
||||
res->datum.p = taosMemoryCalloc(len, 1);
|
||||
memcpy(res->datum.p, output.columnData->pData, len);
|
||||
} else if (IS_VAR_DATA_TYPE(type)) {
|
||||
res->datum.p = taosMemoryCalloc(res->node.resType.bytes + VARSTR_HEADER_SIZE + 1, 1);
|
||||
memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData));
|
||||
} else {
|
||||
|
|
|
@ -1152,42 +1152,30 @@ int32_t toJsonFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOu
|
|||
|
||||
char tmp[TSDB_MAX_JSON_TAG_LEN] = {0};
|
||||
for (int32_t i = 0; i < pInput[0].numOfRows; ++i) {
|
||||
if (colDataIsNull_s(pInput[0].columnData, i)) {
|
||||
colDataAppendNULL(pOutput->columnData, i);
|
||||
continue;
|
||||
}
|
||||
char *input = pInput[0].columnData->pData + pInput[0].columnData->varmeta.offset[i];
|
||||
SArray* pTagVals = taosArrayInit(8, sizeof(STagVal));
|
||||
STag* pTag = NULL;
|
||||
|
||||
if(type == TSDB_DATA_TYPE_NCHAR){
|
||||
if (varDataTLen(input) > TSDB_MAX_JSON_TAG_LEN){
|
||||
colDataAppendNULL(pOutput->columnData, i);
|
||||
continue;
|
||||
}
|
||||
int32_t len = taosUcs4ToMbs((TdUcs4 *)varDataVal(input), varDataLen(input), tmp);
|
||||
if (len < 0) {
|
||||
colDataAppendNULL(pOutput->columnData, i);
|
||||
continue;
|
||||
}
|
||||
tmp[len] = 0;
|
||||
if (colDataIsNull_s(pInput[0].columnData, i)) {
|
||||
tTagNew(pTagVals, 1, true, &pTag);
|
||||
}else{
|
||||
char *input = pInput[0].columnData->pData + pInput[0].columnData->varmeta.offset[i];
|
||||
if (varDataLen(input) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE){
|
||||
colDataAppendNULL(pOutput->columnData, i);
|
||||
continue;
|
||||
taosArrayDestroy(pTagVals);
|
||||
return TSDB_CODE_FAILED;
|
||||
}
|
||||
memcpy(tmp, varDataVal(input), varDataLen(input));
|
||||
tmp[varDataLen(input)] = 0;
|
||||
if(parseJsontoTagData(tmp, pTagVals, &pTag, NULL)){
|
||||
tTagNew(pTagVals, 1, true, &pTag);
|
||||
}
|
||||
}
|
||||
|
||||
if(!tjsonValidateJson(tmp)){
|
||||
colDataAppendNULL(pOutput->columnData, i);
|
||||
continue;
|
||||
}
|
||||
|
||||
colDataAppend(pOutput->columnData, i, input, false);
|
||||
colDataAppend(pOutput->columnData, i, (const char*)pTag, false);
|
||||
tTagFree(pTag);
|
||||
taosArrayDestroy(pTagVals);
|
||||
}
|
||||
|
||||
pOutput->numOfRows = pInput->numOfRows;
|
||||
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
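The rewritten toJsonFunction loop above rejects an NCHAR input before converting it when varDataLen(input) exceeds (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE: each UCS-4 character occupies TSDB_NCHAR_SIZE bytes, and the converted text still has to fit the tag buffer after its var-header. A tiny sketch of that budget check with assumed constant values (the real macro values live elsewhere in the tree and may differ):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Assumed values for illustration only; not taken from this patch.
#define DEMO_MAX_JSON_TAG_LEN   16384
#define DEMO_VARSTR_HEADER_SIZE 2
#define DEMO_NCHAR_SIZE         4

// Returns true when an NCHAR payload of `varLen` bytes can still be converted
// into the JSON tag buffer, mirroring the guard in toJsonFunction above.
static bool ncharFitsJsonTag(int32_t varLen) {
  return varLen <= (DEMO_MAX_JSON_TAG_LEN - DEMO_VARSTR_HEADER_SIZE) / DEMO_NCHAR_SIZE;
}

int main(void) {
  printf("4095 bytes fits: %d\n", ncharFitsJsonTag(4095));  // (16384-2)/4 = 4095 -> 1
  printf("4096 bytes fits: %d\n", ncharFitsJsonTag(4096));  // over budget -> 0
  return 0;
}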
|
@ -160,6 +160,9 @@ int64_t getVectorBigintValue_JSON(void *src, int32_t index){
|
|||
return 0;
|
||||
} else if(*data == TSDB_DATA_TYPE_NCHAR) { // json inner type can not be BINARY
|
||||
convertNcharToDouble(data+CHAR_BYTES, &out);
|
||||
} else if(tTagIsJson(data)){
|
||||
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
|
||||
return 0;
|
||||
} else {
|
||||
convertNumberToNumber(data+CHAR_BYTES, &out, *data, TSDB_DATA_TYPE_DOUBLE);
|
||||
}
|
||||
|
@ -416,6 +419,9 @@ int32_t vectorConvertFromVarData(const SScalarParam* pIn, SScalarParam* pOut, in
|
|||
else if(*data == TSDB_DATA_TYPE_NCHAR) {
|
||||
data += CHAR_BYTES;
|
||||
convertType = TSDB_DATA_TYPE_NCHAR;
|
||||
} else if(tTagIsJson(data)){
|
||||
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
|
||||
return terrno;
|
||||
} else {
|
||||
convertNumberToNumber(data+CHAR_BYTES, colDataGetNumData(pOut->columnData, i), *data, outType);
|
||||
continue;
|
||||
|
@ -461,7 +467,10 @@ double getVectorDoubleValue_JSON(void *src, int32_t index){
|
|||
return out;
|
||||
} else if(*data == TSDB_DATA_TYPE_NCHAR) { // json inner type can not be BINARY
|
||||
convertNcharToDouble(data+CHAR_BYTES, &out);
|
||||
} else {
|
||||
} else if(tTagIsJson(data)){
|
||||
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
|
||||
return 0;
|
||||
} else{
|
||||
convertNumberToNumber(data+CHAR_BYTES, &out, *data, TSDB_DATA_TYPE_DOUBLE);
|
||||
}
|
||||
return out;
|
||||
|
@ -493,10 +502,18 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t
|
  }

  if(typeLeft == TSDB_DATA_TYPE_JSON){
    if(tTagIsJson(*pLeftData)){
      terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
      return false;
    }
    typeLeft = **pLeftData;
    (*pLeftData) ++;
  }
  if(typeRight == TSDB_DATA_TYPE_JSON){
    if(tTagIsJson(*pRightData)){
      terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
      return false;
    }
    typeRight = **pRightData;
    (*pRightData) ++;
  }

|
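convertJsonValue above treats a JSON-typed operand as a one-byte inner-type tag followed by the raw value: if the buffer still holds a whole JSON document (tTagIsJson) the comparison is rejected, otherwise the first byte becomes the effective type and the data pointer is advanced past it. A self-contained sketch of that unwrapping step with a made-up type constant (this is not the TDengine tag layout, just the pointer handling):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_TYPE_DOUBLE 7   // made-up inner-type constant

// Unwrap a "typed scalar" buffer: the first byte is the type, the payload follows.
// Mirrors how convertJsonValue rewrites typeLeft/typeRight and advances *pData.
static int8_t unwrapTypedScalar(const uint8_t** pData) {
  int8_t innerType = (int8_t)(*pData)[0];
  (*pData) += 1;                 // step over the type byte, like (*pLeftData)++ above
  return innerType;
}

int main(void) {
  uint8_t buf[1 + sizeof(double)];
  double  v = 12.5;
  buf[0] = DEMO_TYPE_DOUBLE;
  memcpy(buf + 1, &v, sizeof(v));

  const uint8_t* p = buf;
  int8_t t = unwrapTypedScalar(&p);
  double out;
  memcpy(&out, p, sizeof(out));
  printf("inner type %d value %g\n", t, out);
  return 0;
}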
@ -1576,7 +1593,11 @@ void vectorBitOr(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
|
|||
void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
|
||||
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : TMAX(pLeft->numOfRows, pRight->numOfRows) - 1;
|
||||
int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1;
|
||||
|
||||
__compar_fn_t fp = filterGetCompFunc(GET_PARAM_TYPE(pLeft), optr);
|
||||
if(terrno != TSDB_CODE_SUCCESS){
|
||||
return;
|
||||
}
|
||||
|
||||
pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows);
|
||||
|
||||
|
@ -1709,6 +1730,7 @@ void vectorIsTrue(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
|
|||
STagVal getJsonValue(char *json, char *key, bool *isExist) {
|
||||
STagVal val = {.pKey = key};
|
||||
if (tTagIsJson((const STag *)json) == false){
|
||||
terrno = TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR;
|
||||
if(isExist){
|
||||
*isExist = false;
|
||||
}
|
||||
|
|
|
@ -40,8 +40,8 @@ typedef struct SSyncSnapshotSender {
|
|||
bool start;
|
||||
int32_t seq;
|
||||
int32_t ack;
|
||||
void * pReader;
|
||||
void * pCurrentBlock;
|
||||
void *pReader;
|
||||
void *pCurrentBlock;
|
||||
int32_t blockLen;
|
||||
SSnapshot snapshot;
|
||||
SSyncCfg lastConfig;
|
||||
|
@ -56,20 +56,20 @@ typedef struct SSyncSnapshotSender {
|
|||
SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaIndex);
|
||||
void snapshotSenderDestroy(SSyncSnapshotSender *pSender);
|
||||
bool snapshotSenderIsStart(SSyncSnapshotSender *pSender);
|
||||
void snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void *pReader);
|
||||
void snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish);
|
||||
int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void *pReader);
|
||||
int32_t snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish);
|
||||
int32_t snapshotSend(SSyncSnapshotSender *pSender);
|
||||
int32_t snapshotReSend(SSyncSnapshotSender *pSender);
|
||||
|
||||
cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender);
|
||||
char * snapshotSender2Str(SSyncSnapshotSender *pSender);
|
||||
char * snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event);
|
||||
char *snapshotSender2Str(SSyncSnapshotSender *pSender);
|
||||
char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event);
|
||||
|
||||
//---------------------------------------------------
|
||||
typedef struct SSyncSnapshotReceiver {
|
||||
bool start;
|
||||
int32_t ack;
|
||||
void * pWriter;
|
||||
void *pWriter;
|
||||
SyncTerm term;
|
||||
SyncTerm privateTerm;
|
||||
SSnapshot snapshot;
|
||||
|
@ -80,13 +80,13 @@ typedef struct SSyncSnapshotReceiver {
|
|||
|
||||
SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId fromId);
|
||||
void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver);
|
||||
void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg);
|
||||
bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver);
|
||||
void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver);
|
||||
int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg);
|
||||
int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver);
|
||||
bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver);
|
||||
|
||||
cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver);
|
||||
char * snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver);
|
||||
char * snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event);
|
||||
char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver);
|
||||
char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event);
|
||||
|
||||
//---------------------------------------------------
|
||||
// on message
|
||||
|
|
|
@ -628,6 +628,8 @@ int32_t syncNodeOnAppendEntriesCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
|||
|
||||
#endif
|
||||
|
||||
static int32_t syncNodeMakeLogSame2(SSyncNode* ths, SyncAppendEntriesBatch* pMsg) { return 0; }
|
||||
|
||||
static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
||||
int32_t code;
|
||||
|
||||
|
@ -719,7 +721,282 @@ static bool syncNodeOnAppendEntriesLogOK(SSyncNode* pSyncNode, SyncAppendEntries
|
|||
return false;
|
||||
}
|
||||
|
||||
int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatch* pMsg) { return 0; }
|
||||
int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatch* pMsg) {
|
||||
int32_t ret = 0;
|
||||
int32_t code = 0;
|
||||
|
||||
// if already drop replica, do not process
|
||||
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
|
||||
syncNodeEventLog(ths, "recv sync-append-entries-batch, maybe replica already dropped");
|
||||
return ret;
|
||||
}
|
||||
|
||||
// maybe update term
|
||||
if (pMsg->term > ths->pRaftStore->currentTerm) {
|
||||
syncNodeUpdateTerm(ths, pMsg->term);
|
||||
}
|
||||
ASSERT(pMsg->term <= ths->pRaftStore->currentTerm);
|
||||
|
||||
// reset elect timer
|
||||
if (pMsg->term == ths->pRaftStore->currentTerm) {
|
||||
ths->leaderCache = pMsg->srcId;
|
||||
syncNodeResetElectTimer(ths);
|
||||
}
|
||||
ASSERT(pMsg->dataLen >= 0);
|
||||
|
||||
// candidate to follower
|
||||
//
|
||||
// operation:
|
||||
// to follower
|
||||
do {
|
||||
bool condition = pMsg->term == ths->pRaftStore->currentTerm && ths->state == TAOS_SYNC_STATE_CANDIDATE;
|
||||
if (condition) {
|
||||
syncNodeEventLog(ths, "recv sync-append-entries-batch, candidate to follower");
|
||||
|
||||
syncNodeBecomeFollower(ths, "from candidate by append entries");
|
||||
// do not reply?
|
||||
return ret;
|
||||
}
|
||||
} while (0);
|
||||
|
||||
// fake match2
|
||||
//
|
||||
// condition1:
|
||||
// preIndex <= my commit index
|
||||
//
|
||||
// operation:
|
||||
// if hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex, append entry
|
||||
// match my-commit-index or my-commit-index + 1
|
||||
// no operation on log
|
||||
do {
|
||||
bool condition = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) &&
|
||||
(pMsg->prevLogIndex <= ths->commitIndex);
|
||||
if (condition) {
|
||||
do {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf),
|
||||
"recv sync-append-entries-batch, fake match2, pre-index:%ld, pre-term:%lu, datalen:%d",
|
||||
pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
|
||||
syncNodeEventLog(ths, logBuf);
|
||||
} while (0);
|
||||
|
||||
SyncIndex matchIndex = ths->commitIndex;
|
||||
bool hasAppendEntries = pMsg->dataLen > 0;
|
||||
if (hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex) {
|
||||
SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE];
|
||||
memset(rpcMsgArr, 0, sizeof(rpcMsgArr));
|
||||
int32_t retArrSize = 0;
|
||||
syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, SYNC_MAX_BATCH_SIZE, &retArrSize);
|
||||
|
||||
// make log same
|
||||
do {
|
||||
SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore);
|
||||
bool hasExtraEntries = logLastIndex > pMsg->prevLogIndex;
|
||||
|
||||
if (hasExtraEntries) {
|
||||
// make log same, rollback deleted entries
|
||||
code = syncNodeMakeLogSame2(ths, pMsg);
|
||||
ASSERT(code == 0);
|
||||
}
|
||||
|
||||
} while (0);
|
||||
|
||||
// append entry batch
|
||||
for (int32_t i = 0; i < retArrSize; ++i) {
|
||||
SSyncRaftEntry* pAppendEntry = syncEntryBuild(1234);
|
||||
code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry);
|
||||
if (code != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
code = syncNodePreCommit(ths, pAppendEntry);
|
||||
ASSERT(code == 0);
|
||||
|
||||
syncEntryDestory(pAppendEntry);
|
||||
}
|
||||
|
||||
// fsync once
|
||||
SSyncLogStoreData* pData = ths->pLogStore->data;
|
||||
SWal* pWal = pData->pWal;
|
||||
walFsync(pWal, true);
|
||||
|
||||
// update match index
|
||||
matchIndex = pMsg->prevLogIndex + retArrSize;
|
||||
}
|
||||
|
||||
// prepare response msg
|
||||
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
|
||||
pReply->srcId = ths->myRaftId;
|
||||
pReply->destId = pMsg->srcId;
|
||||
pReply->term = ths->pRaftStore->currentTerm;
|
||||
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
|
||||
pReply->success = true;
|
||||
pReply->matchIndex = matchIndex;
|
||||
|
||||
// send response
|
||||
SRpcMsg rpcMsg;
|
||||
syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg);
|
||||
syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
|
||||
syncAppendEntriesReplyDestroy(pReply);
|
||||
|
||||
return ret;
|
||||
}
|
||||
} while (0);
|
||||
|
||||
// calculate logOK here, before will coredump, due to fake match
|
||||
// bool logOK = syncNodeOnAppendEntriesLogOK(ths, pMsg);
|
||||
bool logOK = true;
|
||||
|
||||
// not match
|
||||
//
|
||||
// condition1:
|
||||
// term < myTerm
|
||||
//
|
||||
// condition2:
|
||||
// !logOK
|
||||
//
|
||||
// operation:
|
||||
// not match
|
||||
// no operation on log
|
||||
do {
|
||||
bool condition1 = pMsg->term < ths->pRaftStore->currentTerm;
|
||||
bool condition2 =
|
||||
(pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) && !logOK;
|
||||
bool condition = condition1 || condition2;
|
||||
|
||||
if (condition) {
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, not match, pre-index:%ld, pre-term:%lu, datalen:%d",
|
||||
pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
|
||||
syncNodeEventLog(ths, logBuf);
|
||||
|
||||
// prepare response msg
|
||||
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
|
||||
pReply->srcId = ths->myRaftId;
|
||||
pReply->destId = pMsg->srcId;
|
||||
pReply->term = ths->pRaftStore->currentTerm;
|
||||
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
|
||||
pReply->success = false;
|
||||
pReply->matchIndex = SYNC_INDEX_INVALID;
|
||||
|
||||
// send response
|
||||
SRpcMsg rpcMsg;
|
||||
syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg);
|
||||
syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
|
||||
syncAppendEntriesReplyDestroy(pReply);
|
||||
|
||||
return ret;
|
||||
}
|
||||
} while (0);
|
||||
|
||||
// really match
|
||||
//
|
||||
// condition:
|
||||
// logOK
|
||||
//
|
||||
// operation:
|
||||
// match
|
||||
// make log same
|
||||
do {
|
||||
bool condition = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) && logOK;
|
||||
if (condition) {
|
||||
// has extra entries (> preIndex) in local log
|
||||
SyncIndex myLastIndex = syncNodeGetLastIndex(ths);
|
||||
bool hasExtraEntries = myLastIndex > pMsg->prevLogIndex;
|
||||
|
||||
// has entries in SyncAppendEntries msg
|
||||
bool hasAppendEntries = pMsg->dataLen > 0;
|
||||
|
||||
char logBuf[128];
|
||||
snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, match, pre-index:%ld, pre-term:%lu, datalen:%d",
|
||||
pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->dataLen);
|
||||
syncNodeEventLog(ths, logBuf);
|
||||
|
||||
if (hasExtraEntries) {
|
||||
// make log same, rollback deleted entries
|
||||
// code = syncNodeMakeLogSame(ths, pMsg);
|
||||
ASSERT(code == 0);
|
||||
}
|
||||
|
||||
int32_t retArrSize = 0;
|
||||
if (hasAppendEntries) {
|
||||
SRpcMsg rpcMsgArr[SYNC_MAX_BATCH_SIZE];
|
||||
memset(rpcMsgArr, 0, sizeof(rpcMsgArr));
|
||||
syncAppendEntriesBatch2RpcMsgArray(pMsg, rpcMsgArr, SYNC_MAX_BATCH_SIZE, &retArrSize);
|
||||
|
||||
// append entry batch
|
||||
for (int32_t i = 0; i < retArrSize; ++i) {
|
||||
SSyncRaftEntry* pAppendEntry = syncEntryBuild(1234);
|
||||
code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry);
|
||||
if (code != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
code = syncNodePreCommit(ths, pAppendEntry);
|
||||
ASSERT(code == 0);
|
||||
|
||||
syncEntryDestory(pAppendEntry);
|
||||
}
|
||||
|
||||
// fsync once
|
||||
SSyncLogStoreData* pData = ths->pLogStore->data;
|
||||
SWal* pWal = pData->pWal;
|
||||
walFsync(pWal, true);
|
||||
}
|
||||
|
||||
// prepare response msg
|
||||
SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId);
|
||||
pReply->srcId = ths->myRaftId;
|
||||
pReply->destId = pMsg->srcId;
|
||||
pReply->term = ths->pRaftStore->currentTerm;
|
||||
pReply->privateTerm = ths->pNewNodeReceiver->privateTerm;
|
||||
pReply->success = true;
|
||||
pReply->matchIndex = hasAppendEntries ? pMsg->prevLogIndex + retArrSize : pMsg->prevLogIndex;
|
||||
|
||||
// send response
|
||||
SRpcMsg rpcMsg;
|
||||
syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg);
|
||||
syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
|
||||
syncAppendEntriesReplyDestroy(pReply);
|
||||
|
||||
// maybe update commit index, leader notice me
|
||||
if (pMsg->commitIndex > ths->commitIndex) {
|
||||
// has commit entry in local
|
||||
if (pMsg->commitIndex <= ths->pLogStore->syncLogLastIndex(ths->pLogStore)) {
|
||||
// advance commit index to snapshot first
|
||||
SSnapshot snapshot;
|
||||
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
|
||||
if (snapshot.lastApplyIndex >= 0 && snapshot.lastApplyIndex > ths->commitIndex) {
|
||||
SyncIndex commitBegin = ths->commitIndex;
|
||||
SyncIndex commitEnd = snapshot.lastApplyIndex;
|
||||
ths->commitIndex = snapshot.lastApplyIndex;
|
||||
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "commit by snapshot from index:%ld to index:%ld", commitBegin,
|
||||
commitEnd);
|
||||
syncNodeEventLog(ths, eventLog);
|
||||
}
|
||||
|
||||
SyncIndex beginIndex = ths->commitIndex + 1;
|
||||
SyncIndex endIndex = pMsg->commitIndex;
|
||||
|
||||
// update commit index
|
||||
ths->commitIndex = pMsg->commitIndex;
|
||||
|
||||
// call back Wal
|
||||
code = ths->pLogStore->updateCommitIndex(ths->pLogStore, ths->commitIndex);
|
||||
ASSERT(code == 0);
|
||||
|
||||
code = syncNodeCommit(ths, beginIndex, endIndex, ths->state);
|
||||
ASSERT(code == 0);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
} while (0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
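The follower path above advances its commit index in two moves: it first catches up to the snapshot's lastApplyIndex if that is ahead of the local commit index, and only then adopts the leader's commitIndex, provided the corresponding entry already exists in the local log. A minimal standalone sketch of that rule (plain integers standing in for SyncIndex, no real log store or FSM):

#include <stdint.h>
#include <stdio.h>

typedef int64_t DemoIndex;

// Work out which local entries a follower should apply when the leader reports
// a new commit index, mirroring the order of checks above: bail out if the
// entries are not in the local log yet, skip everything the snapshot already
// covers, then apply (begin..end) and adopt the leader's commit index.
static void followerCommitRange(DemoIndex myCommit, DemoIndex snapshotLastApply,
                                DemoIndex leaderCommit, DemoIndex myLastLogIndex,
                                DemoIndex* begin, DemoIndex* end) {
  *begin = -1;
  *end = -1;
  if (leaderCommit <= myCommit || leaderCommit > myLastLogIndex) {
    return;  // nothing new to commit, or the entries have not been replicated locally
  }
  if (snapshotLastApply >= 0 && snapshotLastApply > myCommit) {
    myCommit = snapshotLastApply;  // "commit by snapshot"
  }
  *begin = myCommit + 1;
  *end = leaderCommit;
}

int main(void) {
  DemoIndex b, e;
  followerCommitRange(90, 100, 120, 125, &b, &e);
  printf("apply %lld..%lld\n", (long long)b, (long long)e);  // apply 101..120
  return 0;
}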
int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMsg) {
|
||||
int32_t ret = 0;
|
||||
|
|
|
@ -80,7 +80,7 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
|
|||
bool snapshotSenderIsStart(SSyncSnapshotSender *pSender) { return pSender->start; }
|
||||
|
||||
// begin send snapshot by snapshot, pReader
|
||||
void snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void *pReader) {
|
||||
int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void *pReader) {
|
||||
ASSERT(!snapshotSenderIsStart(pSender));
|
||||
|
||||
// init snapshot and reader
|
||||
|
@ -181,9 +181,11 @@ void snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshot snapshot, void
|
|||
syncNodeEventLog(pSender->pSyncNode, eventLog);
|
||||
taosMemoryFree(eventLog);
|
||||
} while (0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) {
|
||||
int32_t snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) {
|
||||
// close reader
|
||||
if (pSender->pReader != NULL) {
|
||||
int32_t ret = pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader);
|
||||
|
@ -208,6 +210,8 @@ void snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) {
|
|||
syncNodeEventLog(pSender->pSyncNode, eventLog);
|
||||
taosMemoryFree(eventLog);
|
||||
} while (0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// when sender receive ack, call this function to send msg from seq
|
||||
|
@ -349,14 +353,14 @@ cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) {
|
|||
|
||||
char *snapshotSender2Str(SSyncSnapshotSender *pSender) {
|
||||
cJSON *pJson = snapshotSender2Json(pSender);
|
||||
char * serialized = cJSON_Print(pJson);
|
||||
char *serialized = cJSON_Print(pJson);
|
||||
cJSON_Delete(pJson);
|
||||
return serialized;
|
||||
}
|
||||
|
||||
char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) {
|
||||
int32_t len = 256;
|
||||
char * s = taosMemoryMalloc(len);
|
||||
char *s = taosMemoryMalloc(len);
|
||||
|
||||
SRaftId destId = pSender->pSyncNode->replicasId[pSender->replicaIndex];
|
||||
char host[64];
|
||||
|
@ -471,7 +475,7 @@ static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) {
|
|||
|
||||
// if receiver receive msg from seq = SYNC_SNAPSHOT_SEQ_BEGIN, start receiver
|
||||
// if already start, force close, start again
|
||||
void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg) {
|
||||
int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg) {
|
||||
if (!snapshotReceiverIsStart(pReceiver)) {
|
||||
// first start
|
||||
snapshotReceiverDoStart(pReceiver, privateTerm, pBeginMsg);
|
||||
|
@ -486,9 +490,11 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTer
|
|||
// start again
|
||||
snapshotReceiverDoStart(pReceiver, privateTerm, pBeginMsg);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
|
||||
int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
|
||||
if (pReceiver->pWriter != NULL) {
|
||||
int32_t ret =
|
||||
pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, false);
|
||||
|
@ -506,6 +512,8 @@ void snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
|
|||
syncNodeEventLog(pReceiver->pSyncNode, eventLog);
|
||||
taosMemoryFree(eventLog);
|
||||
} while (0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) {
|
||||
|
@ -604,7 +612,7 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
|
|||
cJSON_AddStringToObject(pFromId, "addr", u64buf);
|
||||
{
|
||||
uint64_t u64 = pReceiver->fromId.addr;
|
||||
cJSON * pTmp = pFromId;
|
||||
cJSON *pTmp = pFromId;
|
||||
char host[128] = {0};
|
||||
uint16_t port;
|
||||
syncUtilU642Addr(u64, host, sizeof(host), &port);
|
||||
|
@ -637,14 +645,14 @@ cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver) {
|
|||
|
||||
char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver) {
|
||||
cJSON *pJson = snapshotReceiver2Json(pReceiver);
|
||||
char * serialized = cJSON_Print(pJson);
|
||||
char *serialized = cJSON_Print(pJson);
|
||||
cJSON_Delete(pJson);
|
||||
return serialized;
|
||||
}
|
||||
|
||||
char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event) {
|
||||
int32_t len = 256;
|
||||
char * s = taosMemoryMalloc(len);
|
||||
char *s = taosMemoryMalloc(len);
|
||||
|
||||
SRaftId fromId = pReceiver->fromId;
|
||||
char host[128];
|
||||
|
|
|
@ -202,7 +202,7 @@ int32_t taosHexEncode(const char *src, char *dst, int32_t len) {
  }

  for (int32_t i = 0; i < len; ++i) {
    sprintf(dst + i * 2, "%02x", src[i] & 0xff);
    sprintf(dst + i * 2, "%02x", src[i]);
  }

  return 0;

@ -213,10 +213,10 @@ int32_t taosHexDecode(const char *src, char *dst, int32_t len) {
    return -1;
  }

  uint16_t hn, ln, out;
  uint8_t hn, ln, out;
  for (int i = 0, j = 0; i < len * 2; i += 2, ++j ) {
    hn = src[i] > '9' ? src[i] - 'A' + 10 : src[i] - '0';
    ln = src[i + 1] > '9' ? src[i + 1] - 'A' + 10 : src[i + 1] - '0';
    hn = src[i] > '9' ? src[i] - 'a' + 10 : src[i] - '0';
    ln = src[i + 1] > '9' ? src[i + 1] - 'a' + 10 : src[i + 1] - '0';

    out = (hn << 4) | ln;
    memcpy(dst + j, &out, 1);

|
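The two hunks above tie taosHexEncode and taosHexDecode together: %02x always emits lowercase digits, so the decoder now subtracts 'a' rather than 'A' when it sees a non-digit. A standalone sketch of the same lowercase round trip (independent demo code, not the util implementation; the mask to unsigned char is kept here so negative char values still encode correctly):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void hexEncode(const char* src, char* dst, int len) {
  for (int i = 0; i < len; ++i) {
    sprintf(dst + i * 2, "%02x", (unsigned char)src[i]);  // lowercase, masked to 0..255
  }
  dst[len * 2] = '\0';
}

static void hexDecode(const char* src, char* dst, int len) {
  for (int i = 0, j = 0; i < len * 2; i += 2, ++j) {
    uint8_t hn = src[i] > '9' ? src[i] - 'a' + 10 : src[i] - '0';
    uint8_t ln = src[i + 1] > '9' ? src[i + 1] - 'a' + 10 : src[i + 1] - '0';
    dst[j] = (char)((hn << 4) | ln);
  }
}

int main(void) {
  const char in[] = {(char)0xde, (char)0xad, 0x12};
  char hex[2 * sizeof(in) + 1];
  char out[sizeof(in)];
  hexEncode(in, hex, (int)sizeof(in));
  hexDecode(hex, out, (int)sizeof(in));
  printf("%s round-trips: %d\n", hex, memcmp(in, out, sizeof(in)) == 0);  // dead12 round-trips: 1
  return 0;
}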
@ -394,6 +394,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_DUPLICATTED_OPERATION, "Duplicatted operation
|
|||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_MSG_ERROR, "Task message error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JOB_FREED, "Job already freed")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TASK_STATUS_ERROR, "Task status error")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_IN_ERROR, "Json not support in in/notin operator")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_NOT_SUPPORT_ERROR, "Json not support in this place")
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_JSON_IN_GROUP_ERROR, "Json not support in group/partition by")
|
||||
|
||||
// grant
|
||||
TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_EXPIRED, "License expired")
|
||||
|
|
|
@ -193,7 +193,9 @@ static char* doFlushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) {
|
|||
|
||||
char* pDataBuf = pg->pData;
|
||||
memset(pDataBuf, 0, getAllocPageSize(pBuf->pageSize));
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
uDebug("page_flush %p, pageId:%d, offset:%d", pDataBuf, pg->pageId, pg->offset);
|
||||
#endif
|
||||
pg->length = size; // on disk size
|
||||
return pDataBuf;
|
||||
}
|
||||
|
@ -440,6 +442,9 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
|
|||
}
|
||||
|
||||
((void**)pi->pData)[0] = pi;
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
uDebug("page_getNewBufPage , pi->pData:%p, pageId:%d, offset:%"PRId64, pi->pData, pi->pageId, pi->offset);
|
||||
#endif
|
||||
return (void*)(GET_DATA_PAYLOAD(pi));
|
||||
}
|
||||
|
||||
|
@ -462,7 +467,9 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
|
|||
|
||||
lruListMoveToFront(pBuf->lruList, (*pi));
|
||||
(*pi)->used = true;
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
uDebug("page_getBufPage1 pageId:%d, offset:%"PRId64, (*pi)->pageId, (*pi)->offset);
|
||||
#endif
|
||||
return (void*)(GET_DATA_PAYLOAD(*pi));
|
||||
} else { // not in memory
|
||||
assert((*pi)->pData == NULL && (*pi)->pn == NULL && (((*pi)->length >= 0 && (*pi)->offset >= 0) || ((*pi)->length == -1 && (*pi)->offset == -1)));
|
||||
|
@ -494,7 +501,9 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
|
|||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
uDebug("page_getBufPage2 pageId:%d, offset:%"PRId64, (*pi)->pageId, (*pi)->offset);
|
||||
#endif
|
||||
return (void*)(GET_DATA_PAYLOAD(*pi));
|
||||
}
|
||||
}
|
||||
|
@ -506,8 +515,11 @@ void releaseBufPage(SDiskbasedBuf* pBuf, void* page) {
|
|||
}
|
||||
|
||||
void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
|
||||
#ifdef BUF_PAGE_DEBUG
|
||||
uDebug("page_releaseBufPageInfo pageId:%d, used:%d, offset:%"PRId64, pi->pageId, pi->used, pi->offset);
|
||||
#endif
|
||||
assert(pi->pData != NULL && pi->used == true);
|
||||
|
||||
// assert(pi->pData != NULL);
|
||||
pi->used = false;
|
||||
pBuf->statis.releasePages += 1;
|
||||
}
|
||||
|
|
|
@ -93,6 +93,7 @@
|
|||
./test.sh -f tsim/stream/basic0.sim
|
||||
./test.sh -f tsim/stream/basic1.sim
|
||||
./test.sh -f tsim/stream/basic2.sim
|
||||
./test.sh -f tsim/stream/drop_stream.sim
|
||||
./test.sh -f tsim/stream/distributeInterval0.sim
|
||||
# ./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
|
||||
# ./test.sh -f tsim/stream/distributesession0.sim
|
||||
|
@ -159,6 +160,7 @@
|
|||
#./test.sh -f tsim/mnode/basic1.sim -m
|
||||
|
||||
# --- sma
|
||||
./test.sh -f tsim/sma/drop_sma.sim
|
||||
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
|
||||
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
|
||||
|
||||
|
|
|
@ -135,7 +135,7 @@ echo "jniDebugFlag 143" >> $TAOS_CFG
|
|||
echo "qDebugFlag 143" >> $TAOS_CFG
|
||||
echo "rpcDebugFlag 143" >> $TAOS_CFG
|
||||
echo "tmrDebugFlag 131" >> $TAOS_CFG
|
||||
echo "uDebugFlag 143" >> $TAOS_CFG
|
||||
echo "uDebugFlag 131" >> $TAOS_CFG
|
||||
echo "sDebugFlag 143" >> $TAOS_CFG
|
||||
echo "wDebugFlag 143" >> $TAOS_CFG
|
||||
echo "numOfLogLines 20000000" >> $TAOS_CFG
|
||||
|
|
|
@ -64,12 +64,59 @@ sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) in
|
|||
print --> drop stb
|
||||
sql drop table stb;
|
||||
|
||||
print ========== step5
|
||||
sql drop database if exists db;
|
||||
sql create database db duration 300;
|
||||
sql use db;
|
||||
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint, c_float float, c_double double, c_bool bool, c_binary binary(16), c_nchar nchar(32), c_ts timestamp, c_tint_un tinyint unsigned, c_sint_un smallint unsigned, c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
|
||||
sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
|
||||
|
||||
print ========== step6 repeat
|
||||
sql drop database if exists db;
|
||||
sql create database db duration 300;
|
||||
sql use db;
|
||||
sql create table stb1(ts timestamp, c_int int, c_bint bigint ) tags (t_int int);
|
||||
sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
|
||||
|
||||
print ========== step7
|
||||
sql drop database if exists db;
|
||||
sql create database db duration 300;
|
||||
sql use db;
|
||||
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
|
||||
|
||||
sql create table ct1 using stb1 tags ( 1 );
|
||||
sql create table ct2 using stb1 tags ( 2 );
|
||||
sql create table ct3 using stb1 tags ( 3 );
|
||||
sql create table ct4 using stb1 tags ( 4 );
|
||||
|
||||
sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
|
||||
sql CREATE SMA INDEX sma_index_2 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) max_delay 6m;
|
||||
sql CREATE SMA INDEX sma_index_3 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) watermark 5s max_delay 6m;
|
||||
|
||||
sql DROP INDEX sma_index_1 ;
|
||||
sql DROP INDEX sma_index_2 ;
|
||||
sql DROP INDEX sma_index_3 ;
|
||||
|
||||
print ========== step8
|
||||
sql drop database if exists db;
|
||||
sql create database db duration 300;
|
||||
sql use db;
|
||||
sql create table stb1(ts timestamp, c_int int, c_bint bigint, c_sint smallint, c_tint tinyint,c_float float, c_double double, c_bool bool,c_binary binary(16), c_nchar nchar(32), c_ts timestamp,c_tint_un tinyint unsigned, c_sint_un smallint unsigned,c_int_un int unsigned, c_bint_un bigint unsigned) tags (t_int int);
|
||||
|
||||
sql create table ct1 using stb1 tags ( 1 );
|
||||
sql create table ct2 using stb1 tags ( 2 );
|
||||
sql create table ct3 using stb1 tags ( 3 );
|
||||
sql create table ct4 using stb1 tags ( 4 );
|
||||
|
||||
sql CREATE SMA INDEX sma_index_1 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) watermark 5s;
|
||||
sql CREATE SMA INDEX sma_index_2 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) sliding(6m) max_delay 6m;
|
||||
sql CREATE SMA INDEX sma_index_3 ON stb1 function(min(c_int), max(c_int)) interval(6m, 10s) watermark 5s max_delay 6m;
|
||||
|
||||
sql DROP INDEX sma_index_1 ;
|
||||
sql DROP INDEX sma_index_2 ;
|
||||
sql DROP INDEX sma_index_3 ;
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode5 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode6 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode7 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode8 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
|
@ -0,0 +1,222 @@
|
|||
system sh/stop_dnodes.sh
|
||||
system sh/deploy.sh -n dnode1 -i 1
|
||||
system sh/deploy.sh -n dnode2 -i 2
|
||||
system sh/deploy.sh -n dnode3 -i 3
|
||||
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
|
||||
system sh/cfg.sh -n dnode2 -c supportVnodes -v 4
|
||||
system sh/cfg.sh -n dnode3 -c supportVnodes -v 4
|
||||
|
||||
print ========== step1
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
sql connect
|
||||
|
||||
print ========== step2
|
||||
sql create dnode $hostname port 7200
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
|
||||
$x = 0
|
||||
step2:
|
||||
$x = $x + 1
|
||||
sleep 1000
|
||||
if $x == 10 then
|
||||
print ====> dnode not ready!
|
||||
return -1
|
||||
endi
|
||||
sql show dnodes
|
||||
print ===> $data00 $data01 $data02 $data03 $data04 $data05
|
||||
print ===> $data10 $data11 $data12 $data13 $data14 $data15
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
if $data(1)[4] != ready then
|
||||
goto step2
|
||||
endi
|
||||
if $data(2)[4] != ready then
|
||||
goto step2
|
||||
endi
|
||||
|
||||
print ========== step3
|
||||
sql drop database if exists test;
|
||||
sql create database if not exists test vgroups 1 precision "ms" ;
|
||||
sql use test;
|
||||
sql create table test.scalar_function_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 binary(256), c12 nchar(256), c13 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 tinyint unsigned, t6 smallint unsigned, t7 int unsigned, t8 bigint unsigned, t9 float, t10 double, t11 binary(256), t12 nchar(256), t13 bool) ;
|
||||
sql create table scalar_function_ct1 using scalar_function_stb tags (-38, -32456, 509722288, -1404014954778348330, 87, 8879, 3351927345, 1840080781675115605, 3.002364316200592e+38, 6.698140580387119e+37, "bktezshfyvmrmgzwrwerytfwudlblkyyxismpommiqpqsptpiucptwqutzhajxbiitqxkrpobqhgqvjlvgsudewmelpunjspurbpbbwypvgbwjfrwidrchnojtxyhrwfjwgdiabzfoujxkwcjjxjqsrnhmryjhrykldmdfiwircdfahldtrtuafzvybkihyjatiqivbtpydjtmbfddcgyzjuqidwcchtsamnwyqwvajftayyvfrmqcqygbxmxgjx", "ddlxkxhrvviwnjeqhewbercnlontwbsyevcjsocrwyupautsjkdzqbwuzsuetptgsdfyjzfkqyobkysikpaxtqqonxtocfowaehgovshwyciyzfmdmcmwaolkhdunfhwhcanetepxyppuullxnclockmadyaaufywllwburgsfxizcjgzvboydpqymlwgktslclidbcwiyyubyuvhjgwldkgxswigjkpbpslvlsbigdlmuldmtbqencbntbaohxr", False) ;
|
||||
sql create table test.scalar_function_tb1 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 binary(256), c12 nchar(256), c13 bool) ;
|
||||
sql create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int);
|
||||
sql create table scalar_ct1 using scalar_stb tags(10);
|
||||
sql create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20));
|
||||
sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_abs_stream trigger at_once into output_abs_ctb as select ts, abs(c1), abs(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_abs_stream trigger at_once into output_abs_tb as select ts, abs(c1), abs(c2), c3 from scalar_tb;
|
||||
sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_acos_stream trigger at_once into output_acos_ctb as select ts, acos(c1), acos(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_acos_stream trigger at_once into output_acos_tb as select ts, acos(c1), acos(c2), c3 from scalar_tb;
|
||||
sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb;
|
||||
sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
|
||||
sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
|
||||
sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
|
||||
sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
|
||||
sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb;
|
||||
sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
|
||||
sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
|
||||
sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb;
|
||||
sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
|
||||
sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
|
||||
sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
|
||||
sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
|
||||
sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
|
||||
sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
|
||||
sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb;
|
||||
sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
|
||||
sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
|
||||
sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb;
|
||||
sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
|
||||
sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
|
||||
sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb;
|
||||
sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
|
||||
sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
|
||||
sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb;
|
||||
sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
|
||||
sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
|
||||
sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb;
|
||||
sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
|
||||
sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
|
||||
sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb;
|
||||
sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
|
||||
sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
|
||||
sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb;
|
||||
sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
|
||||
sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
|
||||
sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb;
|
||||
sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
|
||||
sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
|
||||
sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb;
|
||||
sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
|
||||
sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
|
||||
sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos");
|
||||
sql insert into scalar_ct1 values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");
|
||||
sql insert into scalar_ct1 values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null);
|
||||
sql insert into scalar_tb values (1656668180503, 100, 100.1, "beijing", "taos", "Taos");
|
||||
sql insert into scalar_tb values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");
|
||||
sql insert into scalar_tb values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null);
|
||||
sql insert into scalar_ct1 values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos");
|
||||
sql insert into scalar_tb values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos");
|
||||
sql insert into scalar_ct1 values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos");
|
||||
sql insert into scalar_tb values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos");
|
||||
|
||||
print ========== step4
|
||||
sql drop database test;
|
||||
|
||||
|
||||
print ========== step5 repeat
|
||||
sql drop database if exists test;
|
||||
sql create database if not exists test vgroups 1 precision "ms" ;
|
||||
sql use test;
|
||||
sql create table test.scalar_function_stb (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 binary(256), c12 nchar(256), c13 bool) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 tinyint unsigned, t6 smallint unsigned, t7 int unsigned, t8 bigint unsigned, t9 float, t10 double, t11 binary(256), t12 nchar(256), t13 bool) ;
|
||||
sql create table scalar_function_ct1 using scalar_function_stb tags (-38, -32456, 509722288, -1404014954778348330, 87, 8879, 3351927345, 1840080781675115605, 3.002364316200592e+38, 6.698140580387119e+37, "bktezshfyvmrmgzwrwerytfwudlblkyyxismpommiqpqsptpiucptwqutzhajxbiitqxkrpobqhgqvjlvgsudewmelpunjspurbpbbwypvgbwjfrwidrchnojtxyhrwfjwgdiabzfoujxkwcjjxjqsrnhmryjhrykldmdfiwircdfahldtrtuafzvybkihyjatiqivbtpydjtmbfddcgyzjuqidwcchtsamnwyqwvajftayyvfrmqcqygbxmxgjx", "ddlxkxhrvviwnjeqhewbercnlontwbsyevcjsocrwyupautsjkdzqbwuzsuetptgsdfyjzfkqyobkysikpaxtqqonxtocfowaehgovshwyciyzfmdmcmwaolkhdunfhwhcanetepxyppuullxnclockmadyaaufywllwburgsfxizcjgzvboydpqymlwgktslclidbcwiyyubyuvhjgwldkgxswigjkpbpslvlsbigdlmuldmtbqencbntbaohxr", False) ;
|
||||
sql create table test.scalar_function_tb1 (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 tinyint unsigned, c6 smallint unsigned, c7 int unsigned, c8 bigint unsigned, c9 float, c10 double, c11 binary(256), c12 nchar(256), c13 bool) ;
|
||||
sql create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int);
|
||||
sql create table scalar_ct1 using scalar_stb tags(10);
|
||||
sql create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20));
|
||||
sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_abs_stream trigger at_once into output_abs_ctb as select ts, abs(c1), abs(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_abs_stream trigger at_once into output_abs_tb as select ts, abs(c1), abs(c2), c3 from scalar_tb;
|
||||
sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_acos_stream trigger at_once into output_acos_ctb as select ts, acos(c1), acos(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_acos_stream trigger at_once into output_acos_tb as select ts, acos(c1), acos(c2), c3 from scalar_tb;
|
||||
sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb;
|
||||
sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb;
|
||||
sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb;
|
||||
sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb;
|
||||
sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb;
|
||||
sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb;
|
||||
sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1;
|
||||
sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb;
|
||||
sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb;
|
||||
sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1;
|
||||
sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb;
|
||||
sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb;
|
||||
sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb;
|
||||
sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb;
|
||||
sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb;
|
||||
sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1;
|
||||
sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb;
|
||||
sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb;
|
||||
sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1;
|
||||
sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb;
|
||||
sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb;
|
||||
sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1;
|
||||
sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb;
|
||||
sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb;
|
||||
sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1;
|
||||
sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb;
|
||||
sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb;
|
||||
sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1;
|
||||
sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb;
|
||||
sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb;
|
||||
sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1;
|
||||
sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb;
|
||||
sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb;
|
||||
sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1;
|
||||
sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb;
|
||||
sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb;
|
||||
sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1;
|
||||
sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb;
|
||||
sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb;
|
||||
sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1;
|
||||
sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb;
|
||||
sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb;
|
||||
sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1;
|
||||
sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb;
|
||||
sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos");
|
||||
sql insert into scalar_ct1 values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");
|
||||
sql insert into scalar_ct1 values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null);
|
||||
sql insert into scalar_tb values (1656668180503, 100, 100.1, "beijing", "taos", "Taos");
|
||||
sql insert into scalar_tb values (1656668180503+1s, -50, -50.1, "tianjin", "taosdata", "Taosdata");
|
||||
sql insert into scalar_tb values (1656668180503+2s, 0, Null, "hebei", "TDengine", Null);
|
||||
sql insert into scalar_ct1 values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos");
|
||||
sql insert into scalar_tb values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos");
|
||||
sql insert into scalar_ct1 values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos");
|
||||
sql insert into scalar_tb values (1656668180503+1s, -50, 50.1, "beiJing", "TDengine", "taos");
|
||||
|
||||
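The streams above only apply per-row scalar functions to the rows just inserted. A throwaway Python cross-check of a few expected outputs for scalar_tb (abs and log base 2 on c1/c2), using the same sample values, might look like the sketch below; it is illustrative only and not part of the sim test.

# Illustrative only: expected abs()/log(x, 2) stream outputs for scalar_tb,
# computed from the rows inserted above (last write wins per timestamp).
import math

rows = [
    (1656668180503, 100, 100.1),
    (1656668181503, -50, 50.1),
    (1656668182503, 0, None),      # NULL double column
]

for ts, c1, c2 in rows:
    abs_c1 = abs(c1)
    abs_c2 = None if c2 is None else abs(c2)
    log_c1 = None if c1 <= 0 else math.log(c1, 2)
    log_c2 = None if c2 is None or c2 <= 0 else math.log(c2, 2)
    print(ts, abs_c1, abs_c2, log_c1, log_c2)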
print ========== step6 repeat
|
||||
sql drop database test;
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode3 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode4 -s stop -x SIGINT
|
|
@@ -1397,9 +1397,9 @@ class TDTestCase:
|
|||
tdSql.error(sql2)
|
||||
tdSql.error(sql3)
|
||||
|
||||
tdSql.error("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);")
|
||||
tdSql.query("select elapsed(ts,10s) from (select ts,tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);")
|
||||
|
||||
tdSql.error("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);")
|
||||
tdSql.query("select elapsed(ts,10s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) where ts>=\"2015-01-01 00:00:00.000\" and ts < \"2015-01-01 00:10:00.000\" interval(1s) fill(prev);")
|
||||
|
||||
# ===============================================inner nest============================================
|
||||
|
||||
|
@@ -1486,9 +1486,9 @@ class TDTestCase:
|
|||
tdSql.checkRows(1)
|
||||
tdSql.checkData(0,0,9)
|
||||
|
||||
tdSql.error('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ')
|
||||
tdSql.query('select elapsed(ts,10s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ')
|
||||
|
||||
tdSql.error('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ')
|
||||
tdSql.query('select elapsed(ts,10s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ')
|
||||
|
||||
tdSql.error('select elapsed(ts,10s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) session(ts,1w) ; ')
|
||||
|
||||
|
|
|
@@ -58,8 +58,8 @@ class TDTestCase:
|
|||
for coltype in coltypes:
|
||||
colname = coltype[0]
|
||||
if coltype[1] in support_types and coltype[-1] != "TAG" :
|
||||
irate_sql = "select irate({}) from (select * from {} order by tbname ) ".format(colname, tbname)
|
||||
origin_sql = "select ts , {} , cast(ts as bigint) from (select ts , {} from {} order by ts desc limit 2 offset 0 ) order by ts".format(colname,colname, tbname)
|
||||
irate_sql = "select irate({}) from {}".format(colname, tbname)
|
||||
origin_sql = "select tail({}, 2), cast(ts as bigint) from {} order by ts".format(colname, tbname)
|
||||
|
||||
tdSql.query(irate_sql)
|
||||
irate_result = tdSql.queryResult
|
||||
|
@@ -68,10 +68,10 @@ class TDTestCase:
|
|||
irate_value = irate_result[0][0]
|
||||
if origin_result[1][-1] - origin_result[0][-1] == 0:
|
||||
comput_irate_value = 0
|
||||
elif (origin_result[1][1] - origin_result[0][1])<0:
|
||||
comput_irate_value = origin_result[1][1]*1000/( origin_result[1][-1] - origin_result[0][-1])
|
||||
elif (origin_result[1][0] - origin_result[0][0])<0:
|
||||
comput_irate_value = origin_result[1][0]*1000/( origin_result[1][-1] - origin_result[0][-1])
|
||||
else:
|
||||
comput_irate_value = (origin_result[1][1] - origin_result[0][1])*1000/( origin_result[1][-1] - origin_result[0][-1])
|
||||
comput_irate_value = (origin_result[1][0] - origin_result[0][0])*1000/( origin_result[1][-1] - origin_result[0][-1])
|
||||
if comput_irate_value ==irate_value:
|
||||
tdLog.info(" irate work as expected , sql is %s "% irate_sql)
|
||||
else:
|
||||
|
|
|
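The hunk above rewrites how the test derives the expected irate value from the last two samples. As a minimal stand-alone sketch (not part of the test framework), assuming millisecond timestamps and the counter-reset fallback used above, the expectation can be written as:

# Illustrative sketch only: expected irate from the last two (ts_ms, value)
# samples, mirroring the computation in the hunk above.
def expected_irate(samples):
    (t0, v0), (t1, v1) = samples[-2], samples[-1]   # samples ordered by ts ascending
    if t1 - t0 == 0:
        return 0
    if v1 - v0 < 0:                                  # counter reset: use the last value alone
        return v1 * 1000 / (t1 - t0)
    return (v1 - v0) * 1000 / (t1 - t0)

print(expected_irate([(1656668180503, 10), (1656668181503, 25)]))  # 15.0 per second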
@@ -38,7 +38,10 @@ class TDTestCase:
|
|||
tdSql.init(conn.cursor(), logSql)
|
||||
|
||||
def run(self):
|
||||
tdSql.prepare()
|
||||
# tdSql.prepare()
|
||||
tdSql.execute('drop database if exists db')
|
||||
tdSql.execute('create database db vgroups 1')
|
||||
tdSql.execute('use db')
|
||||
print("============== STEP 1 ===== prepare data & validate json string")
|
||||
tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json, tagint int)")
|
||||
tdSql.error("create table if not exists jsons1(ts timestamp, data json) tags(tagint int)")
|
||||
|
@@ -56,6 +59,22 @@ class TDTestCase:
|
|||
tdSql.query("select jtag from jsons1_8")
|
||||
tdSql.checkData(0, 0, '{" ":90,"1tag$":2,"tag1":null}')
|
||||
|
||||
tdSql.query("select ts,jtag from jsons1 order by ts limit 2,3")
|
||||
tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
|
||||
tdSql.checkData(0, 1, '{"tag1":5,"tag2":"beijing"}')
|
||||
tdSql.checkData(1, 0, '2020-06-02 09:17:48.000')
|
||||
tdSql.checkData(1, 1, '{"tag1":false,"tag2":"beijing"}')
|
||||
tdSql.checkData(2, 0, '2020-06-02 09:18:48.000')
|
||||
tdSql.checkData(2, 1, '{"tag1":null,"tag2":"shanghai","tag3":"hello"}')
|
||||
|
||||
tdSql.query("select ts,jtag->'tag1' from jsons1 order by ts limit 2,3")
|
||||
tdSql.checkData(0, 0, '2020-06-02 09:17:08.000')
|
||||
tdSql.checkData(0, 1, '5.000000000')
|
||||
tdSql.checkData(1, 0, '2020-06-02 09:17:48.000')
|
||||
tdSql.checkData(1, 1, 'false')
|
||||
tdSql.checkData(2, 0, '2020-06-02 09:18:48.000')
|
||||
tdSql.checkData(2, 1, 'null')
|
||||
|
||||
# test empty json string, stored as a NULL jtag
|
||||
tdSql.execute("insert into jsons1_9 using jsons1 tags('\t') values (1591062328000, 24, NULL, '你就会', '2sdw')")
|
||||
tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('')")
|
||||
|
@@ -218,9 +237,19 @@ class TDTestCase:
|
|||
|
||||
# test where with json tag
|
||||
tdSql.query("select * from jsons1_1 where jtag is not null")
|
||||
# tdSql.query("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'")
|
||||
tdSql.error("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'")
|
||||
tdSql.error("select * from jsons1 where jtag->'tag1'={}")
|
||||
|
||||
# test json error
|
||||
tdSql.error("select jtag + 1 from jsons1")
|
||||
tdSql.error("select jtag > 1 from jsons1")
|
||||
tdSql.error("select jtag like \"1\" from jsons1")
|
||||
tdSql.error("select jtag in (\"1\") from jsons1")
|
||||
tdSql.error("select jtag from jsons1 where jtag > 1")
|
||||
tdSql.error("select jtag from jsons1 where jtag like 'fsss'")
|
||||
tdSql.error("select jtag from jsons1 where jtag in (1)")
|
||||
|
||||
|
||||
# where json value is string
|
||||
tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'")
|
||||
tdSql.checkRows(2)
|
||||
|
@@ -369,7 +398,7 @@ class TDTestCase:
|
|||
tdSql.checkRows(2)
|
||||
|
||||
# test where condition with 'in' (not supported)
|
||||
# tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')")
|
||||
tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')")
|
||||
|
||||
# test where condition match/nmatch
|
||||
tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'")
|
||||
|
@@ -387,8 +416,8 @@ class TDTestCase:
|
|||
tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')")
|
||||
tdSql.query("select distinct jtag->'tag1' from jsons1")
|
||||
tdSql.checkRows(8)
|
||||
tdSql.query("select distinct jtag from jsons1")
|
||||
tdSql.checkRows(9)
|
||||
# tdSql.query("select distinct jtag from jsons1")
|
||||
# tdSql.checkRows(9)
|
||||
|
||||
#test duplicate key with normal column
|
||||
tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")")
|
||||
|
@@ -424,62 +453,56 @@ class TDTestCase:
|
|||
tdSql.checkData(7, 1, "false")
|
||||
|
||||
|
||||
# tdSql.error("select count(*) from jsons1 group by jtag")
|
||||
# tdSql.error("select count(*) from jsons1 partition by jtag")
|
||||
# tdSql.error("select count(*) from jsons1 group by jtag order by jtag")
|
||||
tdSql.error("select count(*) from jsons1 group by jtag")
|
||||
tdSql.error("select count(*) from jsons1 partition by jtag")
|
||||
tdSql.error("select count(*) from jsons1 group by jtag order by jtag")
|
||||
tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag->'tag2'")
|
||||
tdSql.error("select count(*) from jsons1 group by jtag->'tag1' order by jtag")
|
||||
# tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc")
|
||||
# tdSql.checkRows(8)
|
||||
# tdSql.checkData(0, 0, 2)
|
||||
# tdSql.checkData(0, 1, '"femail"')
|
||||
# tdSql.checkData(1, 0, 2)
|
||||
# tdSql.checkData(1, 1, '"收到货"')
|
||||
# tdSql.checkData(2, 0, 1)
|
||||
# tdSql.checkData(2, 1, "11.000000000")
|
||||
# tdSql.checkData(5, 0, 1)
|
||||
# tdSql.checkData(5, 1, "false")
|
||||
# tdSql.checkData(6, 0, 1)
|
||||
# tdSql.checkData(6, 1, "null")
|
||||
# tdSql.checkData(7, 0, 2)
|
||||
# tdSql.checkData(7, 1, None)
|
||||
tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' desc")
|
||||
tdSql.checkRows(8)
|
||||
tdSql.checkData(0, 0, 2)
|
||||
tdSql.checkData(0, 1, '"femail"')
|
||||
tdSql.checkData(1, 0, 2)
|
||||
tdSql.checkData(1, 1, '"收到货"')
|
||||
tdSql.checkData(2, 0, 1)
|
||||
tdSql.checkData(2, 1, "11.000000000")
|
||||
tdSql.checkData(5, 0, 1)
|
||||
tdSql.checkData(5, 1, "false")
|
||||
|
||||
tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc")
|
||||
tdSql.checkRows(8)
|
||||
tdSql.checkData(0, 1, None)
|
||||
tdSql.checkData(2, 0, 1)
|
||||
tdSql.checkData(2, 1, "false")
|
||||
tdSql.checkData(5, 0, 1)
|
||||
tdSql.checkData(5, 1, "11.000000000")
|
||||
tdSql.checkData(7, 0, 2)
|
||||
tdSql.checkData(7, 1, '"femail"')
|
||||
|
||||
# tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1' asc")
|
||||
# tdSql.checkRows(8)
|
||||
# tdSql.checkData(0, 0, 2)
|
||||
# tdSql.checkData(0, 1, None)
|
||||
# tdSql.checkData(2, 0, 1)
|
||||
# tdSql.checkData(2, 1, "false")
|
||||
# tdSql.checkData(5, 0, 1)
|
||||
# tdSql.checkData(5, 1, "11.000000000")
|
||||
# tdSql.checkData(7, 0, 2)
|
||||
# tdSql.checkData(7, 1, '"femail"')
|
||||
#
|
||||
# test stddev with group by json tag
|
||||
# tdSql.query("select stddev(dataint),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
|
||||
# tdSql.checkRows(8)
|
||||
# tdSql.checkData(0, 0, 10)
|
||||
# tdSql.checkData(0, 1, None)
|
||||
# tdSql.checkData(4, 0, 0)
|
||||
# tdSql.checkData(4, 1, "5.000000000")
|
||||
# tdSql.checkData(7, 0, 11)
|
||||
# tdSql.checkData(7, 1, '"femail"')
|
||||
#
|
||||
# res = tdSql.getColNameList("select stddev(dataint),jsons1.jtag->'tag1' from jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'")
|
||||
# cname_list = []
|
||||
# cname_list.append("stddev(dataint)")
|
||||
# cname_list.append("jsons1.jtag->'tag1'")
|
||||
# tdSql.checkColNameList(res, cname_list)
|
||||
tdSql.query("select stddev(dataint),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
|
||||
tdSql.checkRows(8)
|
||||
tdSql.checkData(0, 1, None)
|
||||
tdSql.checkData(4, 0, 0)
|
||||
tdSql.checkData(4, 1, "5.000000000")
|
||||
tdSql.checkData(7, 0, 11)
|
||||
tdSql.checkData(7, 1, '"femail"')
|
||||
|
||||
res = tdSql.getColNameList("select stddev(dataint),jsons1.jtag->'tag1' from jsons1 group by jsons1.jtag->'tag1' order by jtag->'tag1'")
|
||||
cname_list = []
|
||||
cname_list.append("stddev(dataint)")
|
||||
cname_list.append("jsons1.jtag->'tag1'")
|
||||
tdSql.checkColNameList(res, cname_list)
|
||||
|
||||
# test top/bottom with group by json tag
|
||||
# tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
|
||||
# tdSql.checkRows(11)
|
||||
# tdSql.checkData(0, 1, None)
|
||||
# tdSql.checkData(2, 0, 4)
|
||||
# tdSql.checkData(3, 0, 3)
|
||||
# tdSql.checkData(3, 1, "false")
|
||||
# tdSql.checkData(8, 0, 2)
|
||||
# tdSql.checkData(10, 1, '"femail"')
|
||||
tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
|
||||
tdSql.checkRows(11)
|
||||
tdSql.checkData(0, 1, None)
|
||||
tdSql.checkData(2, 0, 4)
|
||||
tdSql.checkData(3, 0, 3)
|
||||
tdSql.checkData(3, 1, "false")
|
||||
tdSql.checkData(8, 0, 2)
|
||||
tdSql.checkData(10, 1, '"femail"')
|
||||
|
||||
# test having
|
||||
# tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1")
|
||||
|
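For orientation, the group-by-json-tag checks above amount to counting rows per extracted tag value. A rough client-side equivalent in plain Python, with made-up tag strings, is sketched below; it is not the server-side implementation.

# Illustrative client-side equivalent of
#   select count(*), jtag->'tag1' from jsons1 group by jtag->'tag1'
import json
from collections import Counter

rows = [
    '{"tag1": 5, "tag2": "beijing"}',
    '{"tag1": false, "tag2": "beijing"}',
    '{"tag1": 5, "tag2": "shanghai"}',
    None,                                # NULL json tag forms its own group
]

def tag1(jtag):
    return None if jtag is None else json.loads(jtag).get("tag1")

print(Counter(tag1(r) for r in rows))    # e.g. Counter({5: 2, False: 1, None: 1})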
@@ -492,6 +515,7 @@ class TDTestCase:
|
|||
tdSql.checkData(5, 0, '{"tag1":false,"tag2":"beijing"}')
|
||||
|
||||
tdSql.error("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)")
|
||||
tdSql.error("select t->'tag1' from (select jtag->'tag1' as t, dataint from jsons1)")
|
||||
# tdSql.query("select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)")
|
||||
# tdSql.checkRows(11)
|
||||
# tdSql.checkData(1, 1, "jsons1_1")
|
||||
|
@@ -519,9 +543,10 @@ class TDTestCase:
|
|||
tdSql.checkData(0, 0, 10)
|
||||
tdSql.query("select avg(dataint) from jsons1 where jtag is not null")
|
||||
tdSql.checkData(0, 0, 5.3)
|
||||
# tdSql.query("select twa(dataint) from jsons1 where jtag is not null")
|
||||
# tdSql.checkData(0, 0, 36)
|
||||
# tdSql.error("select irate(dataint) from jsons1 where jtag is not null")
|
||||
tdSql.query("select twa(dataint) from jsons1 where jtag is not null")
|
||||
tdSql.checkData(0, 0, 28.386363636363637)
|
||||
tdSql.query("select irate(dataint) from jsons1 where jtag is not null")
|
||||
|
||||
tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null")
|
||||
tdSql.checkData(0, 0, 45)
|
||||
tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1")
|
||||
|
@@ -549,9 +574,9 @@ class TDTestCase:
|
|||
|
||||
#test calculation functions: diff/derivative/spread/ceil/floor/round
|
||||
tdSql.query("select diff(dataint) from jsons1 where jtag->'tag1'>1")
|
||||
# tdSql.checkRows(2)
|
||||
# tdSql.checkData(0, 0, -1)
|
||||
# tdSql.checkData(1, 0, 10)
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, -1)
|
||||
tdSql.checkData(1, 0, 10)
|
||||
tdSql.query("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1")
|
||||
tdSql.checkData(0, 0, -2)
|
||||
tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1")
|
||||
|
@@ -608,14 +633,14 @@ class TDTestCase:
|
|||
tdSql.checkRows(1)
|
||||
|
||||
# function not ready
|
||||
# tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;")
|
||||
# tdSql.checkRows(3)
|
||||
# tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
# tdSql.checkRows(3)
|
||||
# tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
# tdSql.checkRows(3)
|
||||
# tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
# tdSql.checkRows(1)
|
||||
tdSql.query("select tail(dataint,1) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select unique(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(3)
|
||||
tdSql.query("select mode(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(1)
|
||||
tdSql.query("select irate(dataint) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
#str function
|
||||
tdSql.query("select upper(dataStr) from jsons1 where jtag->'tag1'>1;")
|
||||
|
@@ -659,13 +684,26 @@ class TDTestCase:
|
|||
tdSql.query("select ELAPSED(ts,1h) from jsons1 where jtag->'tag1'>1;")
|
||||
tdSql.checkRows(1)
|
||||
|
||||
#
|
||||
# #test TD-12077
|
||||
# to_json()
|
||||
tdSql.query("select to_json('{\"abc\":123}') from jsons1_1")
|
||||
tdSql.checkRows(2)
|
||||
# tdSql.checkData(0, 0, '{"abc":123}')
|
||||
# tdSql.checkData(1, 0, '{"abc":123}')
|
||||
tdSql.query("select to_json('null') from jsons1_1")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, 'null')
|
||||
tdSql.checkData(1, 0, 'null')
|
||||
tdSql.query("select to_json('{\"key\"}') from jsons1_1")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 0, 'null')
|
||||
tdSql.checkData(1, 0, 'null')
|
||||
|
||||
#test TD-12077
|
||||
tdSql.execute("insert into jsons1_16 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":-2.111}') values(1591062628000, 2, NULL, '你就会', 'dws')")
|
||||
tdSql.query("select jtag->'tag3' from jsons1_16")
|
||||
tdSql.checkData(0, 0, '-2.111000000')
|
||||
|
||||
# # test TD-12452
|
||||
# test TD-12452
|
||||
tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag=NULL")
|
||||
tdSql.query("select jtag from jsons1_1")
|
||||
tdSql.checkData(0, 0, None)
|
||||
|
|
|
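The to_json() checks above expect a parsable JSON object to be kept and anything else to collapse to null. A stand-alone Python sketch of that acceptance rule (not the server implementation) follows.

# Illustrative sketch of the to_json() acceptance rule exercised above.
import json

def to_json_like(text):
    try:
        value = json.loads(text)
    except (TypeError, ValueError):      # invalid JSON -> null
        return None
    return value if isinstance(value, dict) else None

print(to_json_like('{"abc":123}'))       # {'abc': 123}
print(to_json_like('null'))              # None (the test expects 'null')
print(to_json_like('{"key"}'))           # None (invalid JSON -> 'null')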
@@ -92,8 +92,6 @@ class TDTestCase:
|
|||
"select tail(c1,1) , min(c1) from t1",
|
||||
"select tail(c1,1) , spread(c1) from t1",
|
||||
"select tail(c1,1) , diff(c1) from t1",
|
||||
"select tail(c1,1) , abs(c1) from t1",
|
||||
"select tail(c1,1) , c1 from t1",
|
||||
"select tail from stb1 partition by tbname",
|
||||
"select tail(123--123)==1 from stb1 partition by tbname",
|
||||
"select tail(123,123) from stb1 partition by tbname",
|
||||
|
@@ -115,10 +113,7 @@ class TDTestCase:
|
|||
"select tail(c1,1) , avg(c1) from stb1 partition by tbname",
|
||||
"select tail(c1,1) , min(c1) from stb1 partition by tbname",
|
||||
"select tail(c1,1) , spread(c1) from stb1 partition by tbname",
|
||||
"select tail(c1,1) , diff(c1) from stb1 partition by tbname",
|
||||
"select tail(c1,1) , abs(c1) from stb1 partition by tbname",
|
||||
"select tail(c1,1) , c1 from stb1 partition by tbname"
|
||||
|
||||
"select tail(c1,1) , diff(c1) from stb1 partition by tbname",
|
||||
]
|
||||
for error_sql in error_sql_lists:
|
||||
tdSql.error(error_sql)
|
||||
|
@@ -266,17 +261,17 @@ class TDTestCase:
|
|||
tdSql.query("select tail(c1,10,10) from ct1")
|
||||
tdSql.checkRows(3)
|
||||
|
||||
tdSql.error("select tail(c1,10,10),tbname from ct1")
|
||||
tdSql.error("select tail(c1,10,10),t1 from ct1")
|
||||
tdSql.query("select tail(c1,10,10),tbname from ct1")
|
||||
tdSql.query("select tail(c1,10,10),t1 from ct1")
|
||||
|
||||
# tail with common col
|
||||
tdSql.error("select tail(c1,10,10) ,ts from ct1")
|
||||
tdSql.error("select tail(c1,10,10) ,c1 from ct1")
|
||||
tdSql.query("select tail(c1,10,10) ,ts from ct1")
|
||||
tdSql.query("select tail(c1,10,10) ,c1 from ct1")
|
||||
|
||||
# tail with scalar function
|
||||
tdSql.error("select tail(c1,10,10) ,abs(c1) from ct1")
|
||||
tdSql.query("select tail(c1,10,10) ,abs(c1) from ct1")
|
||||
tdSql.error("select tail(c1,10,10) , tail(c2,10,10) from ct1")
|
||||
tdSql.error("select tail(c1,10,10) , abs(c2)+2 from ct1")
|
||||
tdSql.query("select tail(c1,10,10) , abs(c2)+2 from ct1")
|
||||
|
||||
# bug needs fix for scalar value or compute again
|
||||
# tdSql.error(" select tail(c1,10,10) , 123 from ct1")
|
||||
|
|
|
@@ -63,7 +63,7 @@ python3 ./test.py -f 2-query/To_unixtimestamp.py
|
|||
python3 ./test.py -f 2-query/timetruncate.py
|
||||
python3 ./test.py -f 2-query/diff.py
|
||||
python3 ./test.py -f 2-query/Timediff.py
|
||||
#python3 ./test.py -f 2-query/json_tag.py
|
||||
python3 ./test.py -f 2-query/json_tag.py
|
||||
|
||||
python3 ./test.py -f 2-query/top.py
|
||||
python3 ./test.py -f 2-query/bottom.py
|
||||
|
|
|
@@ -482,6 +482,7 @@ int32_t shellReadCommand(char *command) {
|
|||
#endif
|
||||
break;
|
||||
case 4: // EOF or Ctrl+D
|
||||
taosResetTerminalMode();
|
||||
printf("\n");
|
||||
return -1;
|
||||
case 5: // ctrl E
|
||||
|
|
|
@@ -1 +1 @@
|
|||
Subproject commit 7105027650b51e701cfa1dac11b8fb42d447dd01
|
||||
Subproject commit 5fdd694621fbb7bd2d6102ff4feaec92a7001038
|