Merge remote-tracking branch 'origin/3.0' into fix/tsim

Shengliang Guan 2022-07-04 17:54:25 +08:00
commit c58a84750e
57 changed files with 2838 additions and 1269 deletions

View File

@ -127,6 +127,25 @@ def pre_test(){
'''
return 1
}
def pre_test_build_mac() {
sh '''
hostname
date
'''
sh '''
cd ${WK}
rm -rf debug
mkdir debug
'''
sh '''
cd ${WK}/debug
cmake ..
make -j8
'''
sh '''
date
'''
}
def pre_test_win(){
bat '''
hostname
@ -334,6 +353,17 @@ pipeline {
}
}
}
stage('mac test') {
agent{label " Mac_catalina "}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 20, unit: 'MINUTES'){
pre_test()
pre_test_build_mac()
}
}
}
}
stage('linux test') {
agent{label " worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }

View File

@ -22,8 +22,8 @@ extern "C" {
#include "tdef.h"
#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type)
#define setNodeType(nodeptr, type) (((SNode*)(nodeptr))->type = (type))
#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type)
#define setNodeType(nodeptr, nodetype) (((SNode*)(nodeptr))->type = (nodetype))
#define LIST_LENGTH(l) (NULL != (l) ? (l)->length : 0)
@ -118,6 +118,7 @@ typedef enum ENodeType {
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
QUERY_NODE_ALTER_TABLE_STMT,
QUERY_NODE_ALTER_SUPER_TABLE_STMT,
QUERY_NODE_CREATE_USER_STMT,
QUERY_NODE_ALTER_USER_STMT,
QUERY_NODE_DROP_USER_STMT,

View File

@ -25,6 +25,8 @@ extern "C" {
typedef struct SFilterInfo SFilterInfo;
int32_t scalarGetOperatorResultType(SDataType left, SDataType right, EOperatorType op, SDataType* pRes);
/*
pNode will be freed inside the API;
*pRes must be freed by the caller

View File

@ -579,6 +579,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_TABLE_OPTION TAOS_DEF_ERROR_CODE(0, 0x265C)
#define TSDB_CODE_PAR_INVALID_INTERP_CLAUSE TAOS_DEF_ERROR_CODE(0, 0x265D)
#define TSDB_CODE_PAR_NO_VALID_FUNC_IN_WIN TAOS_DEF_ERROR_CODE(0, 0x265E)
#define TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE TAOS_DEF_ERROR_CODE(0, 0x265F)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
@ -628,6 +629,8 @@ int32_t* taosGetErrno();
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
#define TSDB_CODE_INDEX_INVALID_FILE TAOS_DEF_ERROR_CODE(0, 0x3201)
//tmq
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)

View File

@ -895,7 +895,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
SBlockOrderInfo* pOrder = taosArrayGet(pOrderInfo, 0);
int64_t p0 = taosGetTimestampUs();
__compar_fn_t fn = getKeyComparFunc(pColInfoData->info.type, pOrder->order);
taosSort(pColInfoData->pData, pDataBlock->info.rows, pColInfoData->info.bytes, fn);
@ -923,8 +923,9 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo) {
pInfo->pColData = taosArrayGet(pDataBlock->pDataBlock, pInfo->slotId);
}
terrno = 0;
taosqsort(index, rows, sizeof(int32_t), &helper, dataBlockCompar);
if(terrno) return terrno;
if (terrno) return terrno;
int64_t p1 = taosGetTimestampUs();
@ -1438,21 +1439,21 @@ static void doShiftBitmap(char* nullBitmap, size_t n, size_t total) {
}
}
static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, size_t end){
static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, size_t end) {
int32_t dataOffset = -1;
int32_t dataLen = 0;
int32_t beigin = start;
while(beigin < end){
while (beigin < end) {
int32_t offset = pColInfoData->varmeta.offset[beigin];
if(offset == -1) {
if (offset == -1) {
beigin++;
continue;
}
if(start != 0) {
if (start != 0) {
pColInfoData->varmeta.offset[beigin] = dataLen;
}
char *data = pColInfoData->pData + offset;
if(dataOffset == -1) dataOffset = offset; // mark the begin of data
char* data = pColInfoData->pData + offset;
if (dataOffset == -1) dataOffset = offset; // mark the begin of data
int32_t type = pColInfoData->info.type;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen += getJsonValueLen(data);
@ -1461,7 +1462,7 @@ static int32_t colDataMoveVarData(SColumnInfoData* pColInfoData, size_t start, s
}
beigin++;
}
if(dataOffset > 0){
if (dataOffset > 0) {
memmove(pColInfoData->pData, pColInfoData->pData + dataOffset, dataLen);
memmove(pColInfoData->varmeta.offset, &pColInfoData->varmeta.offset[start], (end - start) * sizeof(int32_t));
}
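A worked trace of this compaction, with hypothetical values, shows the intent: the loop rebases surviving offsets while recording dataOffset, the payload position of the first surviving non-NULL row, and the two memmoves then shift payload and offsets into place.

/* Hypothetical trace: offset = {0, 8, -1, 16}, start = 1, end = 4, and
 * row 1's payload occupies 8 bytes.
 * Loop:  dataOffset = 8, offset[1] = 0, offset[3] = 8 (row 2 stays NULL).
 * After: payload memmoved left by 8 bytes; offset[0..2] = {0, -1, 8}. */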

View File

@ -87,8 +87,8 @@ static SProcQueue *dmInitProcQueue(SProc *proc, char *ptr, int32_t size) {
static void dmCleanupProcQueue(SProcQueue *queue) {}
static inline int32_t dmPushToProcQueue(SProc *proc, SProcQueue *queue, SRpcMsg *pMsg, EProcFuncType ftype) {
const void *pHead = pMsg;
const void *pBody = pMsg->pCont;
const void * pHead = pMsg;
const void * pBody = pMsg->pCont;
const int16_t rawHeadLen = sizeof(SRpcMsg);
const int32_t rawBodyLen = pMsg->contLen;
const int16_t headLen = CEIL8(rawHeadLen);
@ -257,7 +257,7 @@ int32_t dmInitProc(struct SMgmtWrapper *pWrapper) {
proc->wrapper = pWrapper;
proc->name = pWrapper->name;
SShm *shm = &proc->shm;
SShm * shm = &proc->shm;
int32_t cstart = 0;
int32_t csize = CEIL8(shm->size / 2);
int32_t pstart = csize;
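CEIL8 itself is not shown in this diff; a minimal sketch under the assumption that it rounds a length up to the next multiple of 8 so queue head and body stay aligned:

/* Hypothetical definition, assuming round-up-to-8 semantics: */
#define CEIL8(n) (((n) + 7) & ~7) /* CEIL8(13) == 16, CEIL8(16) == 16 */
/* With shm->size == 1000: csize = CEIL8(500) == 504, so the child queue
 * spans [0, 504) and the parent queue begins at pstart = 504. */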
@ -281,13 +281,13 @@ int32_t dmInitProc(struct SMgmtWrapper *pWrapper) {
}
static void *dmConsumChildQueue(void *param) {
SProc *proc = param;
SProc * proc = param;
SMgmtWrapper *pWrapper = proc->wrapper;
SProcQueue *queue = proc->cqueue;
SProcQueue * queue = proc->cqueue;
int32_t numOfMsgs = 0;
int32_t code = 0;
EProcFuncType ftype = DND_FUNC_REQ;
SRpcMsg *pMsg = NULL;
SRpcMsg * pMsg = NULL;
dDebug("node:%s, start to consume from cqueue", proc->name);
do {
@ -324,13 +324,13 @@ static void *dmConsumChildQueue(void *param) {
}
static void *dmConsumParentQueue(void *param) {
SProc *proc = param;
SProc * proc = param;
SMgmtWrapper *pWrapper = proc->wrapper;
SProcQueue *queue = proc->pqueue;
SProcQueue * queue = proc->pqueue;
int32_t numOfMsgs = 0;
int32_t code = 0;
EProcFuncType ftype = DND_FUNC_REQ;
SRpcMsg *pMsg = NULL;
SRpcMsg * pMsg = NULL;
dDebug("node:%s, start to consume from pqueue", proc->name);
do {
@ -353,7 +353,7 @@ static void *dmConsumParentQueue(void *param) {
rpcRegisterBrokenLinkArg(pMsg);
} else if (ftype == DND_FUNC_RELEASE) {
dmRemoveProcRpcHandle(proc, pMsg->info.handle);
rpcReleaseHandle(pMsg->info.handle, (int8_t)pMsg->code);
rpcReleaseHandle(&pMsg->info, TAOS_CONN_SERVER);
} else {
dError("node:%s, invalid ftype:%d from pqueue", proc->name, ftype);
rpcFreeCont(pMsg->pCont);

View File

@ -245,7 +245,7 @@ static inline void dmReleaseHandle(SRpcHandleInfo *pHandle, int8_t type) {
SRpcMsg msg = {.code = type, .info = *pHandle};
dmPutToProcPQueue(&pWrapper->proc, &msg, DND_FUNC_RELEASE);
} else {
rpcReleaseHandle(pHandle->handle, type);
rpcReleaseHandle(pHandle, type);
}
}

View File

@ -915,9 +915,9 @@ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *
return TSDB_CODE_SUCCESS;
}
if (pItem->type == 1) {
if (pItem->type == TSDB_RETENTION_L1) {
qTaskInfo = pRSmaInfo->items[0].taskInfo;
} else if (pItem->type == 2) {
} else if (pItem->type == TSDB_RETENTION_L2) {
qTaskInfo = pRSmaInfo->items[1].taskInfo;
} else {
ASSERT(0);
@ -1233,7 +1233,6 @@ static void tdRSmaPersistTask(SRSmaStat *pRSmaStat) {
} else {
smaWarn("vgId:%d, persist task in abnormal stat %" PRIi8, SMA_VID(pRSmaStat->pSma),
atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)));
ASSERT(0);
}
atomic_store_8(RSMA_RUNNING_STAT(pRSmaStat), 0);
taosReleaseRef(smaMgmt.smaRef, pRSmaStat->refId);

View File

@ -18,6 +18,7 @@
#include "querynodes.h"
#include "scalar.h"
#include "taoserror.h"
#include "cJSON.h"
static int32_t buildFuncErrMsg(char* pErrBuf, int32_t len, int32_t errCode, const char* pFormat, ...) {
va_list vArgList;
@ -796,6 +797,165 @@ static int32_t translateLeastSQR(SFunctionNode* pFunc, char* pErrBuf, int32_t le
return TSDB_CODE_SUCCESS;
}
typedef enum { UNKNOWN_BIN = 0, USER_INPUT_BIN, LINEAR_BIN, LOG_BIN } EHistoBinType;
static int8_t validateHistogramBinType(char* binTypeStr) {
int8_t binType;
if (strcasecmp(binTypeStr, "user_input") == 0) {
binType = USER_INPUT_BIN;
} else if (strcasecmp(binTypeStr, "linear_bin") == 0) {
binType = LINEAR_BIN;
} else if (strcasecmp(binTypeStr, "log_bin") == 0) {
binType = LOG_BIN;
} else {
binType = UNKNOWN_BIN;
}
return binType;
}
static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* errMsg, int32_t msgLen) {
const char *msg1 = "HISTOGRAM function requires four parameters";
const char *msg3 = "HISTOGRAM function invalid format for binDesc parameter";
const char *msg4 = "HISTOGRAM function binDesc parameter \"count\" should be in range [1, 1000]";
const char *msg5 = "HISTOGRAM function bin/parameter should be in range [-DBL_MAX, DBL_MAX]";
const char *msg6 = "HISTOGRAM function binDesc parameter \"width\" cannot be 0";
const char *msg7 = "HISTOGRAM function binDesc parameter \"start\" cannot be 0 with \"log_bin\" type";
const char *msg8 = "HISTOGRAM function binDesc parameter \"factor\" cannot be negative or equal to 0/1";
cJSON* binDesc = cJSON_Parse(binDescStr);
int32_t numOfBins;
double* intervals;
if (cJSON_IsObject(binDesc)) { /* linear/log bins */
int32_t numOfParams = cJSON_GetArraySize(binDesc);
int32_t startIndex;
if (numOfParams != 4) {
snprintf(errMsg, msgLen, "%s", msg1);
return false;
}
cJSON* start = cJSON_GetObjectItem(binDesc, "start");
cJSON* factor = cJSON_GetObjectItem(binDesc, "factor");
cJSON* width = cJSON_GetObjectItem(binDesc, "width");
cJSON* count = cJSON_GetObjectItem(binDesc, "count");
cJSON* infinity = cJSON_GetObjectItem(binDesc, "infinity");
if (!cJSON_IsNumber(start) || !cJSON_IsNumber(count) || !cJSON_IsBool(infinity)) {
snprintf(errMsg, msgLen, "%s", msg3);
return false;
}
if (count->valueint <= 0 || count->valueint > 1000) { // limit count to 1000
snprintf(errMsg, msgLen, "%s", msg4);
return false;
}
if (isinf(start->valuedouble) || (width != NULL && isinf(width->valuedouble)) ||
(factor != NULL && isinf(factor->valuedouble)) || (count != NULL && isinf(count->valuedouble))) {
snprintf(errMsg, msgLen, "%s", msg5);
return false;
}
int32_t counter = (int32_t)count->valueint;
if (infinity->valueint == false) {
startIndex = 0;
numOfBins = counter + 1;
} else {
startIndex = 1;
numOfBins = counter + 3;
}
intervals = taosMemoryCalloc(numOfBins, sizeof(double));
if (cJSON_IsNumber(width) && factor == NULL && binType == LINEAR_BIN) {
// linear bin process
if (width->valuedouble == 0) {
snprintf(errMsg, msgLen, "%s", msg6);
taosMemoryFree(intervals);
return false;
}
for (int i = 0; i < counter + 1; ++i) {
intervals[startIndex] = start->valuedouble + i * width->valuedouble;
if (isinf(intervals[startIndex])) {
snprintf(errMsg, msgLen, "%s", msg5);
taosMemoryFree(intervals);
return false;
}
startIndex++;
}
} else if (cJSON_IsNumber(factor) && width == NULL && binType == LOG_BIN) {
// log bin process
if (start->valuedouble == 0) {
snprintf(errMsg, msgLen, "%s", msg7);
taosMemoryFree(intervals);
return false;
}
if (factor->valuedouble < 0 || factor->valuedouble == 0 || factor->valuedouble == 1) {
snprintf(errMsg, msgLen, "%s", msg8);
taosMemoryFree(intervals);
return false;
}
for (int i = 0; i < counter + 1; ++i) {
intervals[startIndex] = start->valuedouble * pow(factor->valuedouble, i * 1.0);
if (isinf(intervals[startIndex])) {
snprintf(errMsg, msgLen, "%s", msg5);
taosMemoryFree(intervals);
return false;
}
startIndex++;
}
} else {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
return false;
}
if (infinity->valueint == true) {
intervals[0] = -INFINITY;
intervals[numOfBins - 1] = INFINITY;
// in case of desc bin orders, -inf/inf should be swapped
ASSERT(numOfBins >= 4);
if (intervals[1] > intervals[numOfBins - 2]) {
TSWAP(intervals[0], intervals[numOfBins - 1]);
}
}
} else if (cJSON_IsArray(binDesc)) { /* user input bins */
if (binType != USER_INPUT_BIN) {
snprintf(errMsg, msgLen, "%s", msg3);
return false;
}
numOfBins = cJSON_GetArraySize(binDesc);
intervals = taosMemoryCalloc(numOfBins, sizeof(double));
cJSON* bin = binDesc->child;
if (bin == NULL) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
return false;
}
int i = 0;
while (bin) {
if (!cJSON_IsNumber(bin)) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
return false;
}
intervals[i] = bin->valuedouble;
if (i != 0 && intervals[i] <= intervals[i - 1]) {
snprintf(errMsg, msgLen, "%s", msg3);
taosMemoryFree(intervals);
return false;
}
bin = bin->next;
i++;
}
} else {
snprintf(errMsg, msgLen, "%s", msg3);
return false;
}
taosMemoryFree(intervals);
return true;
}
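For reference, a sketch of descriptors this validator is written to accept; the values are illustrative and, since the function is file-static, the calls are for exposition only:

char errMsg[128] = {0};
char linearDesc[] = "{\"start\":0.0, \"width\":5.0, \"count\":4, \"infinity\":true}";
validateHistogramBinDesc(linearDesc, LINEAR_BIN, errMsg, (int32_t)sizeof(errMsg));
/* bin edges {-inf, 0, 5, 10, 15, 20, +inf}: numOfBins = count + 3 with infinity */
char logDesc[] = "{\"start\":1.0, \"factor\":2.0, \"count\":4, \"infinity\":false}";
validateHistogramBinDesc(logDesc, LOG_BIN, errMsg, (int32_t)sizeof(errMsg));
/* bin edges {1, 2, 4, 8, 16} */
char userDesc[] = "[0, 10, 50, 100]"; /* user_input: strictly ascending array */
validateHistogramBinDesc(userDesc, USER_INPUT_BIN, errMsg, (int32_t)sizeof(errMsg));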
static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
if (4 != numOfParams) {
@ -814,6 +974,8 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
int8_t binType;
char* binDesc;
for (int32_t i = 1; i < numOfParams; ++i) {
SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
@ -824,6 +986,23 @@ static int32_t translateHistogram(SFunctionNode* pFunc, char* pErrBuf, int32_t l
pValue->notReserved = true;
if (i == 1) {
binType = validateHistogramBinType(varDataVal(pValue->datum.p));
if (binType == UNKNOWN_BIN) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"HISTOGRAM function binType parameter should be "
"\"user_input\", \"log_bin\" or \"linear_bin\"");
}
}
if (i == 2) {
char errMsg[128] = {0};
binDesc = varDataVal(pValue->datum.p);
if (!validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, errMsg);
}
}
if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"HISTOGRAM function normalized parameter should be 0/1");
@ -853,6 +1032,8 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
int8_t binType;
char* binDesc;
for (int32_t i = 1; i < numOfParams; ++i) {
SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
if (QUERY_NODE_VALUE != nodeType(pParamNode)) {
@ -863,6 +1044,23 @@ static int32_t translateHistogramImpl(SFunctionNode* pFunc, char* pErrBuf, int32
pValue->notReserved = true;
if (i == 1) {
binType = validateHistogramBinType(varDataVal(pValue->datum.p));
if (binType == UNKNOWN_BIN) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"HISTOGRAM function binType parameter should be "
"\"user_input\", \"log_bin\" or \"linear_bin\"");
}
}
if (i == 2) {
char errMsg[128] = {0};
binDesc = varDataVal(pValue->datum.p);
if (!validateHistogramBinDesc(binDesc, binType, errMsg, (int32_t)sizeof(errMsg))) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR, errMsg);
}
}
if (i == 3 && pValue->datum.i != 1 && pValue->datum.i != 0) {
return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
"HISTOGRAM function normalized parameter should be 0/1");
@ -2112,7 +2310,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "histogram",
.type = FUNCTION_TYPE_HISTOGRAM,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC,
.translateFunc = translateHistogram,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,

View File

@ -260,7 +260,7 @@ bool fmIsSameInOutType(int32_t funcId) {
}
static int32_t getFuncInfo(SFunctionNode* pFunc) {
char msg[64] = {0};
char msg[128] = {0};
return fmGetFuncInfo(pFunc, msg, sizeof(msg));
}

View File

@ -21,7 +21,7 @@ extern "C" {
#endif
#include "indexFstAutomation.h"
#include "indexFstCountingWriter.h"
#include "indexFstFile.h"
#include "indexFstNode.h"
#include "indexFstRegistry.h"
#include "indexFstUtil.h"
@ -90,8 +90,8 @@ FstBuilderNode* fstUnFinishedNodesPopEmpty(FstUnFinishedNodes* nodes);
uint64_t fstUnFinishedNodesFindCommPrefixAndSetOutput(FstUnFinishedNodes* node, FstSlice bs, Output in, Output* out);
typedef struct FstBuilder {
FstCountingWriter* wrt; // The FST raw data is written directly to `wtr`.
FstUnFinishedNodes* unfinished; // The stack of unfinished nodes
IdxFstFile* wrt; // The FST raw data is written directly to `wrt`.
FstUnFinishedNodes* unfinished; // The stack of unfinished nodes
FstRegistry* registry; // A map of finished nodes.
FstSlice last; // The last word added
CompiledAddr lastAddr; // The address of the last compiled node
@ -125,9 +125,9 @@ FstState fstStateCreateFrom(FstSlice* data, CompiledAddr addr);
FstState fstStateCreate(State state);
// compile
void fstStateCompileForOneTransNext(FstCountingWriter* w, CompiledAddr addr, uint8_t inp);
void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTransition* trn);
void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuilderNode* node);
void fstStateCompileForOneTransNext(IdxFstFile* w, CompiledAddr addr, uint8_t inp);
void fstStateCompileForOneTrans(IdxFstFile* w, CompiledAddr addr, FstTransition* trn);
void fstStateCompileForAnyTrans(IdxFstFile* w, CompiledAddr addr, FstBuilderNode* node);
// set_comm_input
void fstStateSetCommInput(FstState* state, uint8_t inp);
@ -282,7 +282,7 @@ FStmSt* stmBuilderIntoStm(FStmBuilder* sb);
bool fstVerify(Fst* fst);
// refactor this function
bool fstBuilderNodeCompileTo(FstBuilderNode* b, FstCountingWriter* wrt, CompiledAddr lastAddr, CompiledAddr startAddr);
bool fstBuilderNodeCompileTo(FstBuilderNode* b, IdxFstFile* wrt, CompiledAddr lastAddr, CompiledAddr startAddr);
typedef struct StreamState {
FstNode* node;

View File

@ -1,96 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __INDEX_FST_COUNTING_WRITER_H__
#define __INDEX_FST_COUNTING_WRITER_H__
#include "indexInt.h"
#ifdef __cplusplus
extern "C" {
#endif
//#define USE_MMAP 1
#define DefaultMem 1024 * 1024
static char tmpFile[] = "./index";
typedef enum WriterType { TMemory, TFile } WriterType;
typedef struct WriterCtx {
int (*write)(struct WriterCtx* ctx, uint8_t* buf, int len);
int (*read)(struct WriterCtx* ctx, uint8_t* buf, int len);
int (*flush)(struct WriterCtx* ctx);
int (*readFrom)(struct WriterCtx* ctx, uint8_t* buf, int len, int32_t offset);
int (*size)(struct WriterCtx* ctx);
WriterType type;
union {
struct {
TdFilePtr pFile;
bool readOnly;
char buf[256];
int size;
#ifdef USE_MMAP
char* ptr;
#endif
} file;
struct {
int32_t capa;
char* buf;
} mem;
};
int32_t offset;
int32_t limit;
} WriterCtx;
static int writeCtxDoWrite(WriterCtx* ctx, uint8_t* buf, int len);
static int writeCtxDoRead(WriterCtx* ctx, uint8_t* buf, int len);
static int writeCtxDoReadFrom(WriterCtx* ctx, uint8_t* buf, int len, int32_t offset);
static int writeCtxDoFlush(WriterCtx* ctx);
WriterCtx* writerCtxCreate(WriterType type, const char* path, bool readOnly, int32_t capacity);
void writerCtxDestroy(WriterCtx* w, bool remove);
typedef uint32_t CheckSummer;
typedef struct FstCountingWriter {
void* wrt; // wrap any writer that counts and checksum bytes written
uint64_t count;
CheckSummer summer;
} FstCountingWriter;
int fstCountingWriterWrite(FstCountingWriter* write, uint8_t* buf, uint32_t len);
int fstCountingWriterRead(FstCountingWriter* write, uint8_t* buf, uint32_t len);
int fstCountingWriterFlush(FstCountingWriter* write);
uint32_t fstCountingWriterMaskedCheckSum(FstCountingWriter* write);
FstCountingWriter* fstCountingWriterCreate(void* wtr);
void fstCountingWriterDestroy(FstCountingWriter* w);
void fstCountingWriterPackUintIn(FstCountingWriter* writer, uint64_t n, uint8_t nBytes);
uint8_t fstCountingWriterPackUint(FstCountingWriter* writer, uint64_t n);
#define FST_WRITER_COUNT(writer) (writer->count)
#define FST_WRITER_INTER_WRITER(writer) (writer->wtr)
#define FST_WRITE_CHECK_SUMMER(writer) (writer->summer)
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,96 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __INDEX_FST_FILE_H__
#define __INDEX_FST_FILE_H__
#include "indexInt.h"
#ifdef __cplusplus
extern "C" {
#endif
//#define USE_MMAP 1
#define DefaultMem 1024 * 1024
static char tmpFile[] = "./index";
typedef enum WriterType { TMemory, TFile } WriterType;
typedef struct IFileCtx {
int (*write)(struct IFileCtx* ctx, uint8_t* buf, int len);
int (*read)(struct IFileCtx* ctx, uint8_t* buf, int len);
int (*flush)(struct IFileCtx* ctx);
int (*readFrom)(struct IFileCtx* ctx, uint8_t* buf, int len, int32_t offset);
int (*size)(struct IFileCtx* ctx);
WriterType type;
union {
struct {
TdFilePtr pFile;
bool readOnly;
char buf[256];
int64_t size;
#ifdef USE_MMAP
char* ptr;
#endif
} file;
struct {
int32_t cap;
char* buf;
} mem;
};
int32_t offset;
int32_t limit;
} IFileCtx;
static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len);
static int idxFileCtxDoRead(IFileCtx* ctx, uint8_t* buf, int len);
static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t offset);
static int idxFileCtxDoFlush(IFileCtx* ctx);
IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int32_t capacity);
void idxFileCtxDestroy(IFileCtx* w, bool remove);
typedef uint32_t CheckSummer;
typedef struct IdxFstFile {
void* wrt; // wraps any writer, counting and checksumming bytes written
uint64_t count;
CheckSummer summer;
} IdxFstFile;
int idxFileWrite(IdxFstFile* write, uint8_t* buf, uint32_t len);
int idxFileRead(IdxFstFile* write, uint8_t* buf, uint32_t len);
int idxFileFlush(IdxFstFile* write);
uint32_t idxFileMaskedCheckSum(IdxFstFile* write);
IdxFstFile* idxFileCreate(void* wtr);
void idxFileDestroy(IdxFstFile* w);
void idxFilePackUintIn(IdxFstFile* writer, uint64_t n, uint8_t nBytes);
uint8_t idxFilePackUint(IdxFstFile* writer, uint64_t n);
#define FST_WRITER_COUNT(writer) (writer->count)
#define FST_WRITER_INTER_WRITER(writer) (writer->wtr)
#define FST_WRITE_CHECK_SUMMER(writer) (writer->summer)
#ifdef __cplusplus
}
#endif
#endif

View File

@ -20,12 +20,12 @@
extern "C" {
#endif
#include "indexFstCountingWriter.h"
#include "indexFstFile.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#define FST_BUILDER_NODE_IS_FINAL(bn) (bn->isFinal)
#define FST_BUILDER_NODE_TRANS_ISEMPTY(bn) (taosArrayGetSize(bn->trans) == 0)
#define FST_BUILDER_NODE_IS_FINAL(bn) (bn->isFinal)
#define FST_BUILDER_NODE_TRANS_ISEMPTY(bn) (taosArrayGetSize(bn->trans) == 0)
#define FST_BUILDER_NODE_FINALOUTPUT_ISZERO(bn) (bn->finalOutput == 0)
typedef struct FstTransition {
@ -46,7 +46,7 @@ FstBuilderNode* fstBuilderNodeClone(FstBuilderNode* src);
void fstBuilderNodeCloneFrom(FstBuilderNode* dst, FstBuilderNode* src);
// bool fstBuilderNodeCompileTo(FstBuilderNode *b, FstCountingWriter *wrt,
// bool fstBuilderNodeCompileTo(FstBuilderNode *b, IdxFstFile *wrt,
// CompiledAddr lastAddr, CompiledAddr startAddr);
bool fstBuilderNodeEqual(FstBuilderNode* n1, FstBuilderNode* n2);

View File

@ -16,7 +16,7 @@
#define __INDEX_TFILE_H__
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstFile.h"
#include "indexInt.h"
#include "indexTfile.h"
#include "indexUtil.h"
@ -59,7 +59,7 @@ typedef struct TFileCache {
typedef struct TFileWriter {
FstBuilder* fb;
WriterCtx* ctx;
IFileCtx* ctx;
TFileHeader header;
uint32_t offset;
} TFileWriter;
@ -68,7 +68,7 @@ typedef struct TFileWriter {
typedef struct TFileReader {
T_REF_DECLARE()
Fst* fst;
WriterCtx* ctx;
IFileCtx* ctx;
TFileHeader header;
bool remove;
} TFileReader;
@ -103,7 +103,7 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* read
TFileReader* tfileGetReaderByCol(IndexTFile* tf, uint64_t suid, char* colName);
TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const char* colName);
TFileReader* tfileReaderCreate(WriterCtx* ctx);
TFileReader* tfileReaderCreate(IFileCtx* ctx);
void tfileReaderDestroy(TFileReader* reader);
int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr);
void tfileReaderRef(TFileReader* reader);
@ -111,7 +111,7 @@ void tfileReaderUnRef(TFileReader* reader);
TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t type);
void tfileWriterClose(TFileWriter* tw);
TFileWriter* tfileWriterCreate(WriterCtx* ctx, TFileHeader* header);
TFileWriter* tfileWriterCreate(IFileCtx* ctx, TFileHeader* header);
void tfileWriterDestroy(TFileWriter* tw);
int tfileWriterPut(TFileWriter* tw, void* data, bool order);
int tfileWriterFinish(TFileWriter* tw);

View File

@ -39,7 +39,7 @@
#define INDEX_DATA_BIGINT_NULL 0x8000000000000000LL
#define INDEX_DATA_TIMESTAMP_NULL TSDB_DATA_BIGINT_NULL
#define INDEX_DATA_FLOAT_NULL 0x7FF00000 // it is an NAN
#define INDEX_DATA_FLOAT_NULL 0x7FF00000 // it is a NaN
#define INDEX_DATA_DOUBLE_NULL 0x7FFFFF0000000000LL // a NaN
#define INDEX_DATA_NCHAR_NULL 0xFFFFFFFF
#define INDEX_DATA_BINARY_NULL 0xFF
@ -614,7 +614,7 @@ static int idxGenTFile(SIndex* sIdx, IndexCache* cache, SArray* batch) {
return ret;
END:
if (tw != NULL) {
writerCtxDestroy(tw->ctx, true);
idxFileCtxDestroy(tw->ctx, true);
taosMemoryFree(tw);
}
return -1;

View File

@ -19,11 +19,11 @@
#include "tchecksum.h"
#include "tcoding.h"
static void fstPackDeltaIn(FstCountingWriter* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr, uint8_t nBytes) {
static void fstPackDeltaIn(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr, uint8_t nBytes) {
CompiledAddr deltaAddr = (transAddr == EMPTY_ADDRESS) ? EMPTY_ADDRESS : nodeAddr - transAddr;
fstCountingWriterPackUintIn(wrt, deltaAddr, nBytes);
idxFilePackUintIn(wrt, deltaAddr, nBytes);
}
static uint8_t fstPackDetla(FstCountingWriter* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr) {
static uint8_t fstPackDetla(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr) {
uint8_t nBytes = packDeltaSize(nodeAddr, transAddr);
fstPackDeltaIn(wrt, nodeAddr, transAddr, nBytes);
return nBytes;
@ -208,7 +208,7 @@ FstState fstStateCreate(State state) {
return fstStateDict[idx];
}
// compile
void fstStateCompileForOneTransNext(FstCountingWriter* w, CompiledAddr addr, uint8_t inp) {
void fstStateCompileForOneTransNext(IdxFstFile* w, CompiledAddr addr, uint8_t inp) {
FstState s = fstStateCreate(OneTransNext);
fstStateSetCommInput(&s, inp);
@ -216,21 +216,21 @@ void fstStateCompileForOneTransNext(FstCountingWriter* w, CompiledAddr addr, uin
uint8_t v = fstStateCommInput(&s, &null);
if (null) {
// w->write_all(&[inp])
fstCountingWriterWrite(w, &inp, 1);
idxFileWrite(w, &inp, 1);
}
fstCountingWriterWrite(w, &(s.val), 1);
idxFileWrite(w, &(s.val), 1);
// w->write_all(&[s.val])
return;
}
void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTransition* trn) {
void fstStateCompileForOneTrans(IdxFstFile* w, CompiledAddr addr, FstTransition* trn) {
Output out = trn->out;
uint8_t outPackSize = (out == 0 ? 0 : fstCountingWriterPackUint(w, out));
uint8_t outPackSize = (out == 0 ? 0 : idxFilePackUint(w, out));
uint8_t transPackSize = fstPackDetla(w, addr, trn->addr);
PackSizes packSizes = 0;
FST_SET_OUTPUT_PACK_SIZE(packSizes, outPackSize);
FST_SET_TRANSITION_PACK_SIZE(packSizes, transPackSize);
fstCountingWriterWrite(w, (char*)&packSizes, sizeof(packSizes));
idxFileWrite(w, (char*)&packSizes, sizeof(packSizes));
FstState st = fstStateCreate(OneTrans);
@ -239,12 +239,12 @@ void fstStateCompileForOneTrans(FstCountingWriter* w, CompiledAddr addr, FstTran
bool null = false;
uint8_t inp = fstStateCommInput(&st, &null);
if (null == true) {
fstCountingWriterWrite(w, (char*)&trn->inp, sizeof(trn->inp));
idxFileWrite(w, (char*)&trn->inp, sizeof(trn->inp));
}
fstCountingWriterWrite(w, (char*)(&(st.val)), sizeof(st.val));
idxFileWrite(w, (char*)(&(st.val)), sizeof(st.val));
return;
}
void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuilderNode* node) {
void fstStateCompileForAnyTrans(IdxFstFile* w, CompiledAddr addr, FstBuilderNode* node) {
int32_t sz = taosArrayGetSize(node->trans);
assert(sz <= 256);
@ -275,11 +275,11 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
if (anyOuts) {
if (FST_BUILDER_NODE_IS_FINAL(node)) {
fstCountingWriterPackUintIn(w, node->finalOutput, oSize);
idxFilePackUintIn(w, node->finalOutput, oSize);
}
for (int32_t i = sz - 1; i >= 0; i--) {
FstTransition* t = taosArrayGet(node->trans, i);
fstCountingWriterPackUintIn(w, t->out, oSize);
idxFilePackUintIn(w, t->out, oSize);
}
}
for (int32_t i = sz - 1; i >= 0; i--) {
@ -288,7 +288,7 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
}
for (int32_t i = sz - 1; i >= 0; i--) {
FstTransition* t = taosArrayGet(node->trans, i);
fstCountingWriterWrite(w, (char*)&t->inp, 1);
idxFileWrite(w, (char*)&t->inp, 1);
// fstPackDeltaIn(w, addr, t->addr, tSize);
}
if (sz > TRANS_INDEX_THRESHOLD) {
@ -306,10 +306,10 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
index[t->inp] = i;
// fstPackDeltaIn(w, addr, t->addr, tSize);
}
fstCountingWriterWrite(w, (char*)index, 256);
idxFileWrite(w, (char*)index, 256);
taosMemoryFree(index);
}
fstCountingWriterWrite(w, (char*)&packSizes, 1);
idxFileWrite(w, (char*)&packSizes, 1);
bool null = false;
fstStateStateNtrans(&st, &null);
if (null == true) {
@ -318,12 +318,12 @@ void fstStateCompileForAnyTrans(FstCountingWriter* w, CompiledAddr addr, FstBuil
// encoded in the state byte.
uint8_t v = 1;
if (sz == 256) {
fstCountingWriterWrite(w, (char*)&v, 1);
idxFileWrite(w, (char*)&v, 1);
} else {
fstCountingWriterWrite(w, (char*)&sz, 1);
idxFileWrite(w, (char*)&sz, 1);
}
}
fstCountingWriterWrite(w, (char*)(&(st.val)), 1);
idxFileWrite(w, (char*)(&(st.val)), 1);
return;
}
@ -753,7 +753,7 @@ bool fstNodeCompile(FstNode* node, void* w, CompiledAddr lastAddr, CompiledAddr
return true;
}
bool fstBuilderNodeCompileTo(FstBuilderNode* b, FstCountingWriter* wrt, CompiledAddr lastAddr, CompiledAddr startAddr) {
bool fstBuilderNodeCompileTo(FstBuilderNode* b, IdxFstFile* wrt, CompiledAddr lastAddr, CompiledAddr startAddr) {
return fstNodeCompile(NULL, wrt, lastAddr, startAddr, b);
}
@ -763,7 +763,7 @@ FstBuilder* fstBuilderCreate(void* w, FstType ty) {
return b;
}
b->wrt = fstCountingWriterCreate(w);
b->wrt = idxFileCreate(w);
b->unfinished = fstUnFinishedNodesCreate();
b->registry = fstRegistryCreate(10000, 2);
b->last = fstSliceCreate(NULL, 0);
@ -773,12 +773,12 @@ FstBuilder* fstBuilderCreate(void* w, FstType ty) {
char buf64[8] = {0};
void* pBuf64 = buf64;
taosEncodeFixedU64(&pBuf64, VERSION);
fstCountingWriterWrite(b->wrt, buf64, sizeof(buf64));
idxFileWrite(b->wrt, buf64, sizeof(buf64));
pBuf64 = buf64;
memset(buf64, 0, sizeof(buf64));
taosEncodeFixedU64(&pBuf64, ty);
fstCountingWriterWrite(b->wrt, buf64, sizeof(buf64));
idxFileWrite(b->wrt, buf64, sizeof(buf64));
return b;
}
@ -787,7 +787,7 @@ void fstBuilderDestroy(FstBuilder* b) {
return;
}
fstCountingWriterDestroy(b->wrt);
idxFileDestroy(b->wrt);
fstUnFinishedNodesDestroy(b->unfinished);
fstRegistryDestroy(b->registry);
fstSliceDestroy(&b->last);
@ -905,21 +905,19 @@ void* fstBuilderInsertInner(FstBuilder* b) {
void* pBuf64 = buf64;
taosEncodeFixedU64(&pBuf64, b->len);
fstCountingWriterWrite(b->wrt, buf64, sizeof(buf64));
idxFileWrite(b->wrt, buf64, sizeof(buf64));
pBuf64 = buf64;
taosEncodeFixedU64(&pBuf64, rootAddr);
fstCountingWriterWrite(b->wrt, buf64, sizeof(buf64));
idxFileWrite(b->wrt, buf64, sizeof(buf64));
char buf32[4] = {0};
void* pBuf32 = buf32;
uint32_t sum = fstCountingWriterMaskedCheckSum(b->wrt);
uint32_t sum = idxFileMaskedCheckSum(b->wrt);
taosEncodeFixedU32(&pBuf32, sum);
fstCountingWriterWrite(b->wrt, buf32, sizeof(buf32));
idxFileWrite(b->wrt, buf32, sizeof(buf32));
fstCountingWriterFlush(b->wrt);
// fstCountingWriterDestroy(b->wrt);
// b->wrt = NULL;
idxFileFlush(b->wrt);
return b->wrt;
}
void fstBuilderFinish(FstBuilder* b) { fstBuilderInsertInner(b); }

View File

@ -61,9 +61,10 @@ void dfaBuilderDestroy(FstDfaBuilder *builder) {
pIter = taosHashIterate(builder->cache, pIter);
}
taosHashCleanup(builder->cache);
taosMemoryFree(builder);
}
FstDfa *dfaBuilderBuild(FstDfaBuilder *builder) {
FstDfa *dfaBuilder(FstDfaBuilder *builder) {
uint32_t sz = taosArrayGetSize(builder->dfa->insts);
FstSparseSet *cur = sparSetCreate(sz);
FstSparseSet *nxt = sparSetCreate(sz);

View File

@ -13,13 +13,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "indexFstCountingWriter.h"
#include "indexFstFile.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "os.h"
#include "tutil.h"
static int writeCtxDoWrite(WriterCtx* ctx, uint8_t* buf, int len) {
static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) {
if (ctx->type == TFile) {
assert(len == taosWriteFile(ctx->file.pFile, buf, len));
} else {
@ -28,7 +28,7 @@ static int writeCtxDoWrite(WriterCtx* ctx, uint8_t* buf, int len) {
ctx->offset += len;
return len;
}
static int writeCtxDoRead(WriterCtx* ctx, uint8_t* buf, int len) {
static int idxFileCtxDoRead(IFileCtx* ctx, uint8_t* buf, int len) {
int nRead = 0;
if (ctx->type == TFile) {
#ifdef USE_MMAP
@ -44,7 +44,7 @@ static int writeCtxDoRead(WriterCtx* ctx, uint8_t* buf, int len) {
return nRead;
}
static int writeCtxDoReadFrom(WriterCtx* ctx, uint8_t* buf, int len, int32_t offset) {
static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t offset) {
int nRead = 0;
if (ctx->type == TFile) {
// tfLseek(ctx->file.pFile, offset, 0);
@ -61,7 +61,7 @@ static int writeCtxDoReadFrom(WriterCtx* ctx, uint8_t* buf, int len, int32_t off
}
return nRead;
}
static int writeCtxGetSize(WriterCtx* ctx) {
static int idxFileCtxGetSize(IFileCtx* ctx) {
if (ctx->type == TFile) {
int64_t file_size = 0;
taosStatFile(ctx->file.buf, &file_size, NULL);
@ -69,7 +69,7 @@ static int writeCtxGetSize(WriterCtx* ctx) {
}
return 0;
}
static int writeCtxDoFlush(WriterCtx* ctx) {
static int idxFileCtxDoFlush(IFileCtx* ctx) {
if (ctx->type == TFile) {
// taosFsyncFile(ctx->file.pFile);
taosFsyncFile(ctx->file.pFile);
@ -80,8 +80,8 @@ static int writeCtxDoFlush(WriterCtx* ctx) {
return 1;
}
WriterCtx* writerCtxCreate(WriterType type, const char* path, bool readOnly, int32_t capacity) {
WriterCtx* ctx = taosMemoryCalloc(1, sizeof(WriterCtx));
IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int32_t capacity) {
IFileCtx* ctx = taosMemoryCalloc(1, sizeof(IFileCtx));
if (ctx == NULL) {
return NULL;
}
@ -90,39 +90,36 @@ WriterCtx* writerCtxCreate(WriterType type, const char* path, bool readOnly, int
if (ctx->type == TFile) {
// ugly code, refactor later
ctx->file.readOnly = readOnly;
memcpy(ctx->file.buf, path, strlen(path));
if (readOnly == false) {
// ctx->file.pFile = open(path, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO);
ctx->file.pFile = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND);
taosFtruncateFile(ctx->file.pFile, 0);
int64_t file_size;
taosStatFile(path, &file_size, NULL);
ctx->file.size = (int)file_size;
taosStatFile(path, &ctx->file.size, NULL);
// ctx->file.size = (int)size;
} else {
// ctx->file.pFile = open(path, O_RDONLY, S_IRWXU | S_IRWXG | S_IRWXO);
ctx->file.pFile = taosOpenFile(path, TD_FILE_READ);
int64_t file_size = 0;
taosFStatFile(ctx->file.pFile, &file_size, NULL);
ctx->file.size = (int)file_size;
taosFStatFile(ctx->file.pFile, &ctx->file.size, NULL);
#ifdef USE_MMAP
ctx->file.ptr = (char*)tfMmapReadOnly(ctx->file.pFile, ctx->file.size);
#endif
}
memcpy(ctx->file.buf, path, strlen(path));
if (ctx->file.pFile == NULL) {
indexError("failed to open file, error %d", errno);
goto END;
}
} else if (ctx->type == TMemory) {
ctx->mem.buf = taosMemoryCalloc(1, sizeof(char) * capacity);
ctx->mem.capa = capacity;
ctx->mem.cap = capacity;
}
ctx->write = writeCtxDoWrite;
ctx->read = writeCtxDoRead;
ctx->flush = writeCtxDoFlush;
ctx->readFrom = writeCtxDoReadFrom;
ctx->size = writeCtxGetSize;
ctx->write = idxFileCtxDoWrite;
ctx->read = idxFileCtxDoRead;
ctx->flush = idxFileCtxDoFlush;
ctx->readFrom = idxFileCtxDoReadFrom;
ctx->size = idxFileCtxGetSize;
ctx->offset = 0;
ctx->limit = capacity;
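The assignments above give IFileCtx a small vtable, letting FST code target a file or an in-memory buffer uniformly; a usage sketch, assuming the TMemory branch ignores path/readOnly and that write() appends into ctx->mem.buf:

IFileCtx* ctx = idxFileCtxCreate(TMemory, NULL, false, 1024);
uint8_t payload[4] = {1, 2, 3, 4};
ctx->write(ctx, payload, (int)sizeof(payload)); /* assumed memcpy, offset += 4 */
ctx->flush(ctx); /* fsync for TFile; assumed a no-op for TMemory */
idxFileCtxDestroy(ctx, false);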
@ -135,7 +132,7 @@ END:
taosMemoryFree(ctx);
return NULL;
}
void writerCtxDestroy(WriterCtx* ctx, bool remove) {
void idxFileCtxDestroy(IFileCtx* ctx, bool remove) {
if (ctx->type == TMemory) {
taosMemoryFree(ctx->mem.buf);
} else {
@ -149,9 +146,6 @@ void writerCtxDestroy(WriterCtx* ctx, bool remove) {
if (ctx->file.readOnly == false) {
int64_t file_size = 0;
taosStatFile(ctx->file.buf, &file_size, NULL);
// struct stat fstat;
// stat(ctx->file.buf, &fstat);
// indexError("write file size: %d", (int)(fstat.st_size));
}
if (remove) {
unlink(ctx->file.buf);
@ -160,30 +154,29 @@ void writerCtxDestroy(WriterCtx* ctx, bool remove) {
taosMemoryFree(ctx);
}
FstCountingWriter* fstCountingWriterCreate(void* wrt) {
FstCountingWriter* cw = taosMemoryCalloc(1, sizeof(FstCountingWriter));
IdxFstFile* idxFileCreate(void* wrt) {
IdxFstFile* cw = taosMemoryCalloc(1, sizeof(IdxFstFile));
if (cw == NULL) {
return NULL;
}
cw->wrt = wrt;
//(void *)(idxFileCtxCreate(TFile, readOnly));
return cw;
}
void fstCountingWriterDestroy(FstCountingWriter* cw) {
void idxFileDestroy(IdxFstFile* cw) {
// free wrt object: close fd or free mem
fstCountingWriterFlush(cw);
// writerCtxDestroy((WriterCtx *)(cw->wrt));
idxFileFlush(cw);
// idxFileCtxDestroy((IFileCtx *)(cw->wrt));
taosMemoryFree(cw);
}
int fstCountingWriterWrite(FstCountingWriter* write, uint8_t* buf, uint32_t len) {
int idxFileWrite(IdxFstFile* write, uint8_t* buf, uint32_t len) {
if (write == NULL) {
return 0;
}
// update checksum
// write data to file/socket or mem
WriterCtx* ctx = write->wrt;
IFileCtx* ctx = write->wrt;
int nWrite = ctx->write(ctx, buf, len);
assert(nWrite == len);
@ -192,42 +185,41 @@ int fstCountingWriterWrite(FstCountingWriter* write, uint8_t* buf, uint32_t len)
write->summer = taosCalcChecksum(write->summer, buf, len);
return len;
}
int fstCountingWriterRead(FstCountingWriter* write, uint8_t* buf, uint32_t len) {
int idxFileRead(IdxFstFile* write, uint8_t* buf, uint32_t len) {
if (write == NULL) {
return 0;
}
WriterCtx* ctx = write->wrt;
int nRead = ctx->read(ctx, buf, len);
IFileCtx* ctx = write->wrt;
int nRead = ctx->read(ctx, buf, len);
// assert(nRead == len);
return nRead;
}
uint32_t fstCountingWriterMaskedCheckSum(FstCountingWriter* write) {
uint32_t idxFileMaskedCheckSum(IdxFstFile* write) {
// opt
return write->summer;
}
int fstCountingWriterFlush(FstCountingWriter* write) {
WriterCtx* ctx = write->wrt;
int idxFileFlush(IdxFstFile* write) {
IFileCtx* ctx = write->wrt;
ctx->flush(ctx);
// write->wtr->flush
return 1;
}
void fstCountingWriterPackUintIn(FstCountingWriter* writer, uint64_t n, uint8_t nBytes) {
void idxFilePackUintIn(IdxFstFile* writer, uint64_t n, uint8_t nBytes) {
assert(1 <= nBytes && nBytes <= 8);
uint8_t* buf = taosMemoryCalloc(8, sizeof(uint8_t));
for (uint8_t i = 0; i < nBytes; i++) {
buf[i] = (uint8_t)n;
n = n >> 8;
}
fstCountingWriterWrite(writer, buf, nBytes);
idxFileWrite(writer, buf, nBytes);
taosMemoryFree(buf);
return;
}
uint8_t fstCountingWriterPackUint(FstCountingWriter* writer, uint64_t n) {
uint8_t idxFilePackUint(IdxFstFile* writer, uint64_t n) {
uint8_t nBytes = packSize(n);
fstCountingWriterPackUintIn(writer, n, nBytes);
idxFilePackUintIn(writer, n, nBytes);
return nBytes;
}
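A worked example of the little-endian packing above, with an illustrative value:

/* idxFilePackUint(w, 0x1234): packSize(0x1234) == 2 (assumed), so the loop runs: */
uint64_t n = 0x1234;
uint8_t buf[8] = {0};
for (uint8_t i = 0; i < 2; i++) {
  buf[i] = (uint8_t)n; /* low byte first: buf[0] = 0x34, then buf[1] = 0x12 */
  n = n >> 8;
}
/* fstPackDeltaIn encodes nodeAddr - transAddr through the same path. */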

View File

@ -95,7 +95,7 @@ void fstBuilderNodeCloneFrom(FstBuilderNode* dst, FstBuilderNode* src) {
}
}
// bool fstBuilderNodeCompileTo(FstBuilderNode *b, FstCountingWriter *wrt, CompiledAddr lastAddr, CompiledAddr
// bool fstBuilderNodeCompileTo(FstBuilderNode *b, IdxFstFile *wrt, CompiledAddr lastAddr, CompiledAddr
// startAddr) {
// size_t sz = taosArrayGetSize(b->trans);

View File

@ -75,7 +75,6 @@ CompiledAddr unpackDelta(char* data, uint64_t len, uint64_t nodeAddr) {
}
// fst slice func
//
FstSlice fstSliceCreate(uint8_t* data, uint64_t len) {
FstString* str = (FstString*)taosMemoryMalloc(sizeof(FstString));
@ -164,16 +163,3 @@ int fstSliceCompare(FstSlice* a, FstSlice* b) {
return 0;
}
}
// FstStack* fstStackCreate(size_t elemSize, StackFreeElem freeFn) {
// FstStack *s = taosMemoryCalloc(1, sizeof(FstStack));
// if (s == NULL) { return NULL; }
// s->
// s->freeFn
//
//}
// void *fstStackPush(FstStack *s, void *elem);
// void *fstStackTop(FstStack *s);
// size_t fstStackLen(FstStack *s);
// void *fstStackGetAt(FstStack *s, size_t i);
// void fstStackDestory(FstStack *);

View File

@ -16,7 +16,7 @@
#include "index.h"
#include "indexComm.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstFile.h"
#include "indexUtil.h"
#include "taosdef.h"
#include "taoserror.h"
@ -103,7 +103,7 @@ TFileCache* tfileCacheCreate(const char* path) {
for (size_t i = 0; i < taosArrayGetSize(files); i++) {
char* file = taosArrayGetP(files, i);
WriterCtx* wc = writerCtxCreate(TFile, file, true, 1024 * 1024 * 64);
IFileCtx* wc = idxFileCtxCreate(TFile, file, true, 1024 * 1024 * 64);
if (wc == NULL) {
indexError("failed to open index:%s", file);
goto End;
@ -175,7 +175,7 @@ void tfileCachePut(TFileCache* tcache, ICacheKey* key, TFileReader* reader) {
tfileReaderRef(reader);
return;
}
TFileReader* tfileReaderCreate(WriterCtx* ctx) {
TFileReader* tfileReaderCreate(IFileCtx* ctx) {
TFileReader* reader = taosMemoryCalloc(1, sizeof(TFileReader));
if (reader == NULL) {
return NULL;
@ -216,7 +216,7 @@ void tfileReaderDestroy(TFileReader* reader) {
} else {
indexInfo("%s is not removed", reader->ctx->file.buf);
}
writerCtxDestroy(reader->ctx, reader->remove);
idxFileCtxDestroy(reader->ctx, reader->remove);
taosMemoryFree(reader);
}
@ -490,7 +490,7 @@ TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const c
char fullname[256] = {0};
tfileGenFileFullName(fullname, path, suid, colName, version);
// indexInfo("open write file name %s", fullname);
WriterCtx* wcx = writerCtxCreate(TFile, fullname, false, 1024 * 1024 * 64);
IFileCtx* wcx = idxFileCtxCreate(TFile, fullname, false, 1024 * 1024 * 64);
if (wcx == NULL) {
return NULL;
}
@ -507,18 +507,18 @@ TFileReader* tfileReaderOpen(char* path, uint64_t suid, int64_t version, const c
char fullname[256] = {0};
tfileGenFileFullName(fullname, path, suid, colName, version);
WriterCtx* wc = writerCtxCreate(TFile, fullname, true, 1024 * 1024 * 1024);
IFileCtx* wc = idxFileCtxCreate(TFile, fullname, true, 1024 * 1024 * 1024);
if (wc == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
indexError("failed to open readonly file: %s, reason: %s", fullname, terrstr());
return NULL;
}
indexTrace("open read file name:%s, file size: %d", wc->file.buf, wc->file.size);
indexTrace("open read file name:%s, file size: %" PRId64 "", wc->file.buf, wc->file.size);
TFileReader* reader = tfileReaderCreate(wc);
return reader;
}
TFileWriter* tfileWriterCreate(WriterCtx* ctx, TFileHeader* header) {
TFileWriter* tfileWriterCreate(IFileCtx* ctx, TFileHeader* header) {
TFileWriter* tw = taosMemoryCalloc(1, sizeof(TFileWriter));
if (tw == NULL) {
indexError("index: %" PRIu64 " failed to alloc TFilerWriter", header->suid);
@ -609,14 +609,14 @@ void tfileWriterClose(TFileWriter* tw) {
if (tw == NULL) {
return;
}
writerCtxDestroy(tw->ctx, false);
idxFileCtxDestroy(tw->ctx, false);
taosMemoryFree(tw);
}
void tfileWriterDestroy(TFileWriter* tw) {
if (tw == NULL) {
return;
}
writerCtxDestroy(tw->ctx, false);
idxFileCtxDestroy(tw->ctx, false);
taosMemoryFree(tw);
}
@ -892,8 +892,8 @@ static int tfileReaderLoadHeader(TFileReader* reader) {
return 0;
}
static int tfileReaderLoadFst(TFileReader* reader) {
WriterCtx* ctx = reader->ctx;
int size = ctx->size(ctx);
IFileCtx* ctx = reader->ctx;
int size = ctx->size(ctx);
// currently the fst is loaded into memory; refactor later
int fstSize = size - reader->header.fstOffset - sizeof(tfileMagicNumber);
@ -905,8 +905,9 @@ static int tfileReaderLoadFst(TFileReader* reader) {
int64_t ts = taosGetTimestampUs();
int32_t nread = ctx->readFrom(ctx, buf, fstSize, reader->header.fstOffset);
int64_t cost = taosGetTimestampUs() - ts;
indexInfo("nread = %d, and fst offset=%d, fst size: %d, filename: %s, file size: %d, time cost: %" PRId64 "us", nread,
reader->header.fstOffset, fstSize, ctx->file.buf, ctx->file.size, cost);
indexInfo("nread = %d, and fst offset=%d, fst size: %d, filename: %s, file size: %" PRId64 ", time cost: %" PRId64
"us",
nread, reader->header.fstOffset, fstSize, ctx->file.buf, ctx->file.size, cost);
// we assume the fst size is less than FST_MAX_SIZE
assert(nread > 0 && nread <= fstSize);
@ -919,7 +920,7 @@ static int tfileReaderLoadFst(TFileReader* reader) {
}
static int tfileReaderLoadTableIds(TFileReader* reader, int32_t offset, SArray* result) {
// TODO(yihao): opt later
WriterCtx* ctx = reader->ctx;
IFileCtx* ctx = reader->ctx;
// add block cache
char block[4096] = {0};
int32_t nread = ctx->readFrom(ctx, block, sizeof(block), offset);
@ -952,7 +953,7 @@ static int tfileReaderLoadTableIds(TFileReader* reader, int32_t offset, SArray*
}
static int tfileReaderVerify(TFileReader* reader) {
// just validate header and footer; file corruption should also be verified later
WriterCtx* ctx = reader->ctx;
IFileCtx* ctx = reader->ctx;
uint64_t tMagicNumber = 0;

View File

@ -7,7 +7,6 @@
#include "index.h"
#include "indexCache.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"
@ -20,7 +19,7 @@ class FstWriter {
public:
FstWriter() {
taosRemoveFile(fileName.c_str());
_wc = writerCtxCreate(TFile, fileName.c_str(), false, 64 * 1024 * 1024);
_wc = idxFileCtxCreate(TFile, fileName.c_str(), false, 64 * 1024 * 1024);
_b = fstBuilderCreate(_wc, 0);
}
bool Put(const std::string& key, uint64_t val) {
@ -38,25 +37,25 @@ class FstWriter {
fstBuilderFinish(_b);
fstBuilderDestroy(_b);
writerCtxDestroy(_wc, false);
idxFileCtxDestroy(_wc, false);
}
private:
FstBuilder* _b;
WriterCtx* _wc;
IFileCtx* _wc;
};
class FstReadMemory {
public:
FstReadMemory(int32_t size, const std::string& fileName = TD_TMP_DIR_PATH "tindex.tindex") {
_wc = writerCtxCreate(TFile, fileName.c_str(), true, 64 * 1024);
_w = fstCountingWriterCreate(_wc);
_wc = idxFileCtxCreate(TFile, fileName.c_str(), true, 64 * 1024);
_w = idxFileCreate(_wc);
_size = size;
memset((void*)&_s, 0, sizeof(_s));
}
bool init() {
char* buf = (char*)taosMemoryCalloc(1, sizeof(char) * _size);
int nRead = fstCountingWriterRead(_w, (uint8_t*)buf, _size);
int nRead = idxFileRead(_w, (uint8_t*)buf, _size);
if (nRead <= 0) {
return false;
}
@ -141,18 +140,18 @@ class FstReadMemory {
}
~FstReadMemory() {
fstCountingWriterDestroy(_w);
idxFileDestroy(_w);
fstDestroy(_fst);
fstSliceDestroy(&_s);
writerCtxDestroy(_wc, false);
idxFileCtxDestroy(_wc, false);
}
private:
FstCountingWriter* _w;
Fst* _fst;
FstSlice _s;
WriterCtx* _wc;
int32_t _size;
IdxFstFile* _w;
Fst* _fst;
FstSlice _s;
IFileCtx* _wc;
int32_t _size;
};
#define L 100

View File

@ -8,7 +8,6 @@
#include "index.h"
#include "indexCache.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"
@ -40,7 +39,7 @@ static void EnvCleanup() {}
class FstWriter {
public:
FstWriter() {
_wc = writerCtxCreate(TFile, tindex, false, 64 * 1024 * 1024);
_wc = idxFileCtxCreate(TFile, tindex, false, 64 * 1024 * 1024);
_b = fstBuilderCreate(_wc, 0);
}
bool Put(const std::string& key, uint64_t val) {
@ -58,25 +57,25 @@ class FstWriter {
fstBuilderFinish(_b);
fstBuilderDestroy(_b);
writerCtxDestroy(_wc, false);
idxFileCtxDestroy(_wc, false);
}
private:
FstBuilder* _b;
WriterCtx* _wc;
IFileCtx* _wc;
};
class FstReadMemory {
public:
FstReadMemory(size_t size) {
_wc = writerCtxCreate(TFile, tindex, true, 64 * 1024);
_w = fstCountingWriterCreate(_wc);
_wc = idxFileCtxCreate(TFile, tindex, true, 64 * 1024);
_w = idxFileCreate(_wc);
_size = size;
memset((void*)&_s, 0, sizeof(_s));
}
bool init() {
char* buf = (char*)taosMemoryCalloc(1, sizeof(char) * _size);
int nRead = fstCountingWriterRead(_w, (uint8_t*)buf, _size);
int nRead = idxFileRead(_w, (uint8_t*)buf, _size);
if (nRead <= 0) {
return false;
}
@ -130,18 +129,18 @@ class FstReadMemory {
}
~FstReadMemory() {
fstCountingWriterDestroy(_w);
idxFileDestroy(_w);
fstDestroy(_fst);
fstSliceDestroy(&_s);
writerCtxDestroy(_wc, false);
idxFileCtxDestroy(_wc, false);
}
private:
FstCountingWriter* _w;
Fst* _fst;
FstSlice _s;
WriterCtx* _wc;
size_t _size;
IdxFstFile* _w;
Fst* _fst;
FstSlice _s;
IFileCtx* _wc;
size_t _size;
};
class FstWriterEnv : public ::testing::Test {

View File

@ -20,7 +20,6 @@
#include "index.h"
#include "indexCache.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"
@ -51,7 +50,7 @@ class DebugInfo {
class FstWriter {
public:
FstWriter() {
_wc = writerCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", false, 64 * 1024 * 1024);
_wc = idxFileCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", false, 64 * 1024 * 1024);
_b = fstBuilderCreate(NULL, 0);
}
bool Put(const std::string& key, uint64_t val) {
@ -64,25 +63,25 @@ class FstWriter {
fstBuilderFinish(_b);
fstBuilderDestroy(_b);
writerCtxDestroy(_wc, false);
idxFileCtxDestroy(_wc, false);
}
private:
FstBuilder* _b;
WriterCtx* _wc;
IFileCtx* _wc;
};
class FstReadMemory {
public:
FstReadMemory(size_t size) {
_wc = writerCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", true, 64 * 1024);
_w = fstCountingWriterCreate(_wc);
_wc = idxFileCtxCreate(TFile, TD_TMP_DIR_PATH "tindex", true, 64 * 1024);
_w = idxFileCreate(_wc);
_size = size;
memset((void*)&_s, 0, sizeof(_s));
}
bool init() {
char* buf = (char*)taosMemoryCalloc(1, sizeof(char) * _size);
int nRead = fstCountingWriterRead(_w, (uint8_t*)buf, _size);
int nRead = idxFileRead(_w, (uint8_t*)buf, _size);
if (nRead <= 0) {
return false;
}
@ -124,18 +123,18 @@ class FstReadMemory {
}
~FstReadMemory() {
fstCountingWriterDestroy(_w);
idxFileDestroy(_w);
fstDestroy(_fst);
fstSliceDestroy(&_s);
writerCtxDestroy(_wc, true);
idxFileCtxDestroy(_wc, true);
}
private:
FstCountingWriter* _w;
Fst* _fst;
FstSlice _s;
WriterCtx* _wc;
size_t _size;
IdxFstFile* _w;
Fst* _fst;
FstSlice _s;
IFileCtx* _wc;
size_t _size;
};
#define L 100
@ -392,13 +391,13 @@ class TFileObj {
fileName_ = path;
WriterCtx* ctx = writerCtxCreate(TFile, path.c_str(), false, 64 * 1024 * 1024);
IFileCtx* ctx = idxFileCtxCreate(TFile, path.c_str(), false, 64 * 1024 * 1024);
writer_ = tfileWriterCreate(ctx, &header);
return writer_ != NULL ? true : false;
}
bool InitReader() {
WriterCtx* ctx = writerCtxCreate(TFile, fileName_.c_str(), true, 64 * 1024 * 1024);
IFileCtx* ctx = idxFileCtxCreate(TFile, fileName_.c_str(), true, 64 * 1024 * 1024);
reader_ = tfileReaderCreate(ctx);
return reader_ != NULL ? true : false;
}

View File

@ -7,7 +7,6 @@
#include "index.h"
#include "indexCache.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"

View File

@ -8,7 +8,6 @@
#include "indexCache.h"
#include "indexComm.h"
#include "indexFst.h"
#include "indexFstCountingWriter.h"
#include "indexFstUtil.h"
#include "indexInt.h"
#include "indexTfile.h"

View File

@ -19,8 +19,8 @@
#include "querynodes.h"
#include "taos.h"
#include "taoserror.h"
#include "thash.h"
#include "tdatablock.h"
#include "thash.h"
static SNode* makeNode(ENodeType type, size_t size) {
SNode* p = taosMemoryCalloc(1, size);
@ -1497,13 +1497,18 @@ typedef struct SCollectFuncsCxt {
int32_t errCode;
FFuncClassifier classifier;
SNodeList* pFuncs;
SHashObj* pAliasName;
} SCollectFuncsCxt;
static EDealRes collectFuncs(SNode* pNode, void* pContext) {
SCollectFuncsCxt* pCxt = (SCollectFuncsCxt*)pContext;
if (QUERY_NODE_FUNCTION == nodeType(pNode) && pCxt->classifier(((SFunctionNode*)pNode)->funcId) &&
!(((SExprNode*)pNode)->orderAlias)) {
pCxt->errCode = nodesListStrictAppend(pCxt->pFuncs, nodesCloneNode(pNode));
SExprNode* pExpr = (SExprNode*)pNode;
if (NULL == taosHashGet(pCxt->pAliasName, pExpr->aliasName, strlen(pExpr->aliasName))) {
pCxt->errCode = nodesListStrictAppend(pCxt->pFuncs, nodesCloneNode(pNode));
taosHashPut(pCxt->pAliasName, pExpr->aliasName, strlen(pExpr->aliasName), &pExpr, POINTER_BYTES);
}
return (TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_IGNORE_CHILD : DEAL_RES_ERROR);
}
return DEAL_RES_CONTINUE;
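The new pAliasName hash makes collection idempotent per alias; a sketch of the effect, with illustrative query text:

/* For SELECT MAX(c1) AS m, MAX(c1) AS m FROM t:
 * first node:  taosHashGet(pCxt->pAliasName, "m", 1) == NULL -> appended, recorded;
 * second node: the lookup hits -> skipped, so pFuncs keeps one entry for "m". */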
@ -1515,23 +1520,27 @@ int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifi
}
SCollectFuncsCxt cxt = {
.errCode = TSDB_CODE_SUCCESS, .classifier = classifier, .pFuncs = (NULL == *pFuncs ? nodesMakeList() : *pFuncs)};
.errCode = TSDB_CODE_SUCCESS,
.classifier = classifier,
.pFuncs = (NULL == *pFuncs ? nodesMakeList() : *pFuncs),
.pAliasName = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, false)};
if (NULL == cxt.pFuncs) {
return TSDB_CODE_OUT_OF_MEMORY;
}
*pFuncs = NULL;
nodesWalkSelectStmt(pSelect, clause, collectFuncs, &cxt);
if (TSDB_CODE_SUCCESS != cxt.errCode) {
nodesDestroyList(cxt.pFuncs);
return cxt.errCode;
}
if (LIST_LENGTH(cxt.pFuncs) > 0) {
*pFuncs = cxt.pFuncs;
if (TSDB_CODE_SUCCESS == cxt.errCode) {
if (LIST_LENGTH(cxt.pFuncs) > 0) {
*pFuncs = cxt.pFuncs;
} else {
nodesDestroyList(cxt.pFuncs);
}
} else {
nodesDestroyList(cxt.pFuncs);
}
taosHashCleanup(cxt.pAliasName);
return TSDB_CODE_SUCCESS;
return cxt.errCode;
}
typedef struct SCollectSpecialNodesCxt {

View File

@ -154,6 +154,7 @@ SNode* createAlterTableDropCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_
SNode* createAlterTableRenameCol(SAstCreateContext* pCxt, SNode* pRealTable, int8_t alterType, SToken* pOldColName,
SToken* pNewColName);
SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken* pTagName, SNode* pVal);
SNode* setAlterSuperTableType(SNode* pStmt);
SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type);
SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbName,

View File

@ -53,6 +53,7 @@ typedef struct SParseMetaCache {
} SParseMetaCache;
int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...);
int32_t generateSyntaxErrMsgExt(SMsgBuf* pBuf, int32_t errCode, const char* pFormat, ...);
int32_t buildInvalidOperationMsg(SMsgBuf* pMsgBuf, const char* msg);
int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr);

View File

@ -232,7 +232,7 @@ cmd ::= DROP TABLE multi_drop_clause(A).
cmd ::= DROP STABLE exists_opt(A) full_table_name(B). { pCxt->pRootNode = createDropSuperTableStmt(pCxt, A, B); }
cmd ::= ALTER TABLE alter_table_clause(A). { pCxt->pRootNode = A; }
cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode = A; }
cmd ::= ALTER STABLE alter_table_clause(A). { pCxt->pRootNode = setAlterSuperTableType(A); }
alter_table_clause(A) ::= full_table_name(B) alter_table_options(C). { A = createAlterTableModifyOptions(pCxt, B, C); }
alter_table_clause(A) ::=
@ -259,7 +259,7 @@ multi_create_clause(A) ::= multi_create_clause(B) create_subtable_clause(C).
create_subtable_clause(A) ::=
not_exists_opt(B) full_table_name(C) USING full_table_name(D)
specific_tags_opt(E) TAGS NK_LP literal_list(F) NK_RP table_options(G). { A = createCreateSubTableClause(pCxt, B, C, D, E, F, G); }
specific_tags_opt(E) TAGS NK_LP expression_list(F) NK_RP table_options(G). { A = createCreateSubTableClause(pCxt, B, C, D, E, F, G); }
%type multi_drop_clause { SNodeList* }
%destructor multi_drop_clause { nodesDestroyList($$); }

View File

@ -1127,6 +1127,11 @@ SNode* createAlterTableSetTag(SAstCreateContext* pCxt, SNode* pRealTable, SToken
return createAlterTableStmtFinalize(pRealTable, pStmt);
}
SNode* setAlterSuperTableType(SNode* pStmt) {
setNodeType(pStmt, QUERY_NODE_ALTER_SUPER_TABLE_STMT);
return pStmt;
}
SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, false)) {

View File

@ -247,6 +247,10 @@ static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTabl
return code;
}
static int32_t collectMetaKeyFromAlterStable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) {
return reserveDbVgVersionInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
}
@ -483,6 +487,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
return collectMetaKeyFromDropTable(pCxt, (SDropTableStmt*)pStmt);
case QUERY_NODE_ALTER_TABLE_STMT:
return collectMetaKeyFromAlterTable(pCxt, (SAlterTableStmt*)pStmt);
case QUERY_NODE_ALTER_SUPER_TABLE_STMT:
return collectMetaKeyFromAlterStable(pCxt, (SAlterTableStmt*)pStmt);
case QUERY_NODE_USE_DATABASE_STMT:
return collectMetaKeyFromUseDatabase(pCxt, (SUseDatabaseStmt*)pStmt);
case QUERY_NODE_CREATE_INDEX_STMT:

View File

@ -48,6 +48,12 @@
pSql += sToken.n; \
} while (TK_NK_SPACE == sToken.type)
typedef struct SInsertParseBaseContext {
SParseContext* pComCxt;
char* pSql;
SMsgBuf msg;
} SInsertParseBaseContext;
typedef struct SInsertParseContext {
SParseContext* pComCxt; // input
char* pSql; // input
@ -1105,6 +1111,32 @@ static int32_t storeTableMeta(SInsertParseContext* pCxt, SHashObj* pHash, SName*
return taosHashPut(pHash, pName, len, &pBackup, POINTER_BYTES);
}
static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) {
SToken sToken;
int32_t expectRightParenthesis = 1;
while (1) {
NEXT_TOKEN(pCxt->pSql, sToken);
if (TK_NK_LP == sToken.type) {
++expectRightParenthesis;
} else if (TK_NK_RP == sToken.type && 0 == --expectRightParenthesis) {
break;
}
if (0 == sToken.n) {
return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL);
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
static int32_t ignoreBoundColumns(SInsertParseContext* pCxt) {
SInsertParseSyntaxCxt cxt = {.pComCxt = pCxt->pComCxt, .pSql = pCxt->pSql, .msg = pCxt->msg, .pMetaCache = NULL};
int32_t code = skipBoundColumns(&cxt);
pCxt->pSql = cxt.pSql;
return code;
}
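skipParentheses above balances nested parentheses at the token level; a character-level sketch of the same counting loop, using a plain char cursor in place of NEXT_TOKEN and SToken:

#include <stdio.h>

/* p points at the first character after an opening '('.  Returns a
 * pointer just past the matching ')', or NULL if it is unbalanced
 * (the parser reports ") expected" in that case). */
static const char* skipParens(const char* p) {
  int depth = 1;
  for (; *p != '\0'; ++p) {
    if (*p == '(') ++depth;
    else if (*p == ')' && --depth == 0) return p + 1;
  }
  return NULL;
}

int main(void) {
  const char* rest = skipParens("c1, nested(a, b), c2) VALUES");
  printf("%s\n", rest ? rest : "unbalanced");  /* prints " VALUES" */
  return 0;
}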
static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt);
// pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)
@ -1453,12 +1485,29 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
tNameGetFullDbName(&name, dbFName);
CHECK_CODE(taosHashPut(pCxt->pDbFNameHashObj, dbFName, strlen(dbFName), dbFName, sizeof(dbFName)));
bool existedUsing = false;
// USING clause
if (TK_USING == sToken.type) {
existedUsing = true;
CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
}
char* pBoundColsStart = NULL;
if (TK_NK_LP == sToken.type) {
// pSql -> field1_name, ...)
pBoundColsStart = pCxt->pSql;
CHECK_CODE(ignoreBoundColumns(pCxt));
// CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta)));
NEXT_TOKEN(pCxt->pSql, sToken);
}
if (TK_USING == sToken.type) {
CHECK_CODE(parseUsingClause(pCxt, &name, tbFName));
NEXT_TOKEN(pCxt->pSql, sToken);
autoCreateTbl = true;
} else {
} else if (!existedUsing) {
CHECK_CODE(getTableMeta(pCxt, &name, dbFName));
}
@ -1467,10 +1516,11 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
sizeof(SSubmitBlk), getTableInfo(pCxt->pTableMeta).rowSize, pCxt->pTableMeta,
&dataBuf, NULL, &pCxt->createTblReq));
if (TK_NK_LP == sToken.type) {
// pSql -> field1_name, ...)
if (NULL != pBoundColsStart) {
char* pCurrPos = pCxt->pSql;
pCxt->pSql = pBoundColsStart;
CHECK_CODE(parseBoundColumns(pCxt, &dataBuf->boundColumnInfo, getTableColumnSchema(pCxt->pTableMeta)));
NEXT_TOKEN(pCxt->pSql, sToken);
pCxt->pSql = pCurrPos;
}
if (TK_VALUES == sToken.type) {
@ -1610,25 +1660,6 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
return code;
}
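The reordering in parseInsertBody above lets the bound-column list sit on either side of the USING clause. Since the list can arrive before the table schema is known, the parser notes where it starts (pBoundColsStart), skips it, and re-parses it once the table meta is loaded by briefly swapping the SQL cursor back. A minimal sketch of that save-and-reparse cursor pattern, with illustrative SQL and naive string scanning in place of the real tokenizer:

/* Both orderings are now accepted (illustrative SQL):
 *   INSERT INTO ct1 (c1, c2) USING st1 TAGS (1) VALUES (...);
 *   INSERT INTO ct1 USING st1 TAGS (1) (c1, c2) VALUES (...);
 */
#include <stdio.h>
#include <string.h>

int main(void) {
  const char* sql = "(c1, c2) USING st1 TAGS (1) VALUES (10, 20)";
  const char* boundColsStart = NULL;
  const char* cursor = sql;

  if (*cursor == '(') {             /* first pass: note the list, skip it */
    boundColsStart = cursor + 1;
    cursor = strchr(cursor, ')') + 1;
  }
  /* ... parse USING clause, fetch table meta ... */
  if (boundColsStart != NULL) {     /* second pass: re-parse, schema known */
    const char* saved = cursor;     /* mirrors the pCurrPos/pCxt->pSql swap */
    int len = (int)(strchr(boundColsStart, ')') - boundColsStart);
    printf("bound columns: %.*s\n", len, boundColsStart);
    cursor = saved;                 /* restore and continue at VALUES */
  }
  printf("continue at:%s\n", cursor);
  return 0;
}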
static int32_t skipParentheses(SInsertParseSyntaxCxt* pCxt) {
SToken sToken;
int32_t expectRightParenthesis = 1;
while (1) {
NEXT_TOKEN(pCxt->pSql, sToken);
if (TK_NK_LP == sToken.type) {
++expectRightParenthesis;
} else if (TK_NK_RP == sToken.type && 0 == --expectRightParenthesis) {
break;
}
if (0 == sToken.n) {
return buildSyntaxErrMsg(&pCxt->msg, ") expected", NULL);
}
}
return TSDB_CODE_SUCCESS;
}
static int32_t skipBoundColumns(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) {
int32_t numOfRows = 0;
@ -1717,8 +1748,25 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
SToken tbnameToken = sToken;
NEXT_TOKEN(pCxt->pSql, sToken);
bool existedUsing = false;
// USING clause
if (TK_USING == sToken.type) {
existedUsing = true;
CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken));
NEXT_TOKEN(pCxt->pSql, sToken);
CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
CHECK_CODE(skipUsingClause(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
}
if (TK_NK_LP == sToken.type) {
// pSql -> field1_name, ...)
CHECK_CODE(skipBoundColumns(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
}
if (TK_USING == sToken.type && !existedUsing) {
existedUsing = true;
CHECK_CODE(collectAutoCreateTableMetaKey(pCxt, &tbnameToken));
NEXT_TOKEN(pCxt->pSql, sToken);
CHECK_CODE(collectTableMetaKey(pCxt, &sToken));
@ -1728,12 +1776,6 @@ static int32_t parseInsertBodySyntax(SInsertParseSyntaxCxt* pCxt) {
CHECK_CODE(collectTableMetaKey(pCxt, &tbnameToken));
}
if (TK_NK_LP == sToken.type) {
// pSql -> field1_name, ...)
CHECK_CODE(skipBoundColumns(pCxt));
NEXT_TOKEN(pCxt->pSql, sToken);
}
if (TK_VALUES == sToken.type) {
// pSql -> (field1_value, ...) [(field1_value2, ...) ...]
CHECK_CODE(skipValuesClause(pCxt));

View File

@ -121,8 +121,8 @@ static int32_t getTableMetaImpl(STranslateContext* pCxt, const SName* pName, STa
}
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,
pName->tname);
parserError("0x%" PRIx64 " catalogGetTableMeta error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
tstrerror(code), pName->dbname, pName->tname);
}
return code;
}
@ -150,8 +150,8 @@ static int32_t getTableCfg(STranslateContext* pCxt, const SName* pName, STableCf
}
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogRefreshGetTableCfg error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,
pName->tname);
parserError("0x%" PRIx64 " catalogRefreshGetTableCfg error, code:%s, dbName:%s, tbName:%s",
pCxt->pParseCxt->requestId, tstrerror(code), pName->dbname, pName->tname);
}
return code;
}
@ -173,8 +173,8 @@ static int32_t refreshGetTableMeta(STranslateContext* pCxt, const char* pDbName,
code = catalogRefreshGetTableMeta(pParCxt->pCatalog, &conn, &name, pMeta, false);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pDbName,
pTableName);
parserError("0x%" PRIx64 " catalogRefreshGetTableMeta error, code:%s, dbName:%s, tbName:%s",
pCxt->pParseCxt->requestId, tstrerror(code), pDbName, pTableName);
}
return code;
}
@ -196,7 +196,8 @@ static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArr
}
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetDBVgInfo error, code:%s, dbFName:%s", tstrerror(code), fullDbName);
parserError("0x%" PRIx64 " catalogGetDBVgInfo error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId,
tstrerror(code), fullDbName);
}
return code;
}
@ -227,8 +228,8 @@ static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pNam
}
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname,
pName->tname);
parserError("0x%" PRIx64 " catalogGetTableHashVgroup error, code:%s, dbName:%s, tbName:%s",
pCxt->pParseCxt->requestId, tstrerror(code), pName->dbname, pName->tname);
}
return code;
}
@ -251,7 +252,8 @@ static int32_t getDBVgVersion(STranslateContext* pCxt, const char* pDbFName, int
}
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetDBVgVersion error, code:%s, dbFName:%s", tstrerror(code), pDbFName);
parserError("0x%" PRIx64 " catalogGetDBVgVersion error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId,
tstrerror(code), pDbFName);
}
return code;
}
@ -276,7 +278,8 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo
}
}
if (TSDB_CODE_SUCCESS != code) {
parserError("catalogGetDBCfg error, code:%s, dbFName:%s", tstrerror(code), dbFname);
parserError("0x%" PRIx64 " catalogGetDBCfg error, code:%s, dbFName:%s", pCxt->pParseCxt->requestId, tstrerror(code),
dbFname);
}
return code;
}
@ -303,6 +306,10 @@ static int32_t getUdfInfo(STranslateContext* pCxt, SFunctionNode* pFunc) {
pFunc->udfBufSize = funcInfo.bufSize;
tFreeSFuncInfo(&funcInfo);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("0x%" PRIx64 " catalogGetUdfInfo error, code:%s, funcName:%s", pCxt->pParseCxt->requestId,
tstrerror(code), pFunc->functionName);
}
return code;
}
@ -323,7 +330,8 @@ static int32_t getTableIndex(STranslateContext* pCxt, const SName* pName, SArray
code = catalogGetTableIndex(pParCxt->pCatalog, &conn, pName, pIndexes);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("getTableIndex error, code:%s, dbName:%s, tbName:%s", tstrerror(code), pName->dbname, pName->tname);
parserError("0x%" PRIx64 " getTableIndex error, code:%s, dbName:%s, tbName:%s", pCxt->pParseCxt->requestId,
tstrerror(code), pName->dbname, pName->tname);
}
return code;
}
@ -341,7 +349,7 @@ static int32_t getDnodeList(STranslateContext* pCxt, SArray** pDnodes) {
code = catalogGetDnodeList(pParCxt->pCatalog, &conn, pDnodes);
}
if (TSDB_CODE_SUCCESS != code) {
parserError("getDnodeList error, code:%s", tstrerror(code));
parserError("0x%" PRIx64 " getDnodeList error, code:%s", pCxt->pParseCxt->requestId, tstrerror(code));
}
return code;
}
@ -707,7 +715,7 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p
}
static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) {
if (isSelectStmt(pCxt->pCurrStmt) && NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable) {
if (NULL == pCxt->pCurrStmt || (isSelectStmt(pCxt->pCurrStmt) && NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName);
}
@ -1248,6 +1256,22 @@ static int32_t translateForbidGroupByFunc(STranslateContext* pCxt, SFunctionNode
return TSDB_CODE_SUCCESS;
}
static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (!fmIsRepeatScanFunc(pFunc->funcId)) {
return TSDB_CODE_SUCCESS;
}
if (isSelectStmt(pCxt->pCurrStmt) && NULL != ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable) {
SNode* pTable = ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable;
if (QUERY_NODE_REAL_TABLE == nodeType(pTable) &&
(TSDB_CHILD_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType ||
TSDB_NORMAL_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType)) {
return TSDB_CODE_SUCCESS;
}
}
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
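translateRepeatScanFunc above gates functions that must scan their input twice (two-pass functions such as PERCENTILE are presumably what fmIsRepeatScanFunc covers; that is an assumption, not confirmed by this diff) so they only run against a single child or normal table. A condensed standalone analogue of the gate:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical miniature of the table-type check above. */
typedef enum { TBL_SUPER, TBL_CHILD, TBL_NORMAL, TBL_SYSTEM } TableType;

static bool repeatScanAllowed(bool isRealTable, TableType t) {
  /* a repeat-scan func must read one physical table twice, so only
   * child/normal tables qualify; super tables fan out to many vnodes */
  return isRealTable && (t == TBL_CHILD || t == TBL_NORMAL);
}

int main(void) {
  printf("%d\n", repeatScanAllowed(true, TBL_NORMAL)); /* 1: allowed  */
  printf("%d\n", repeatScanAllowed(true, TBL_SUPER));  /* 0: rejected */
  return 0;
}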
static void setFuncClassification(SNode* pCurrStmt, SFunctionNode* pFunc) {
if (NULL != pCurrStmt && QUERY_NODE_SELECT_STMT == nodeType(pCurrStmt)) {
SSelectStmt* pSelect = (SSelectStmt*)pCurrStmt;
@ -1370,6 +1394,9 @@ static int32_t translateNoramlFunction(STranslateContext* pCxt, SFunctionNode* p
if (TSDB_CODE_SUCCESS == code) {
code = translateForbidGroupByFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = translateRepeatScanFunc(pCxt, pFunc);
}
if (TSDB_CODE_SUCCESS == code) {
setFuncClassification(pCxt->pCurrStmt, pFunc);
}
@ -2760,6 +2787,7 @@ static int32_t translateDeleteWhere(STranslateContext* pCxt, SDeleteStmt* pDelet
}
static int32_t translateDelete(STranslateContext* pCxt, SDeleteStmt* pDelete) {
pCxt->pCurrStmt = (SNode*)pDelete;
int32_t code = translateFrom(pCxt, pDelete->pFromTable);
if (TSDB_CODE_SUCCESS == code) {
code = translateDeleteWhere(pCxt, pDelete);
@ -3743,12 +3771,24 @@ static int32_t buildAlterSuperTableReq(STranslateContext* pCxt, SAlterTableStmt*
return TSDB_CODE_SUCCESS;
}
static SSchema* getColSchema(STableMeta* pTableMeta, const char* pTagName) {
static SSchema* getColSchema(STableMeta* pTableMeta, const char* pColName) {
int32_t numOfFields = getNumOfTags(pTableMeta) + getNumOfColumns(pTableMeta);
for (int32_t i = 0; i < numOfFields; ++i) {
SSchema* pTagSchema = pTableMeta->schema + i;
if (0 == strcmp(pTagName, pTagSchema->name)) {
return pTagSchema;
SSchema* pSchema = pTableMeta->schema + i;
if (0 == strcmp(pColName, pSchema->name)) {
return pSchema;
}
}
return NULL;
}
static SSchema* getTagSchema(STableMeta* pTableMeta, const char* pTagName) {
int32_t numOfTags = getNumOfTags(pTableMeta);
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
for (int32_t i = 0; i < numOfTags; ++i) {
SSchema* pSchema = pTagsSchema + i;
if (0 == strcmp(pTagName, pSchema->name)) {
return pSchema;
}
}
return NULL;
@ -3756,22 +3796,48 @@ static SSchema* getColSchema(STableMeta* pTableMeta, const char* pTagName) {
static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
"Set tag value only available for child table");
}
if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
}
if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
}
STableMeta* pTableMeta = NULL;
int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON &&
(pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG ||
pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_TAG_BYTES)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType ||
TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) {
STableMeta* pTableMeta = NULL;
int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
SSchema* pSchema = getColSchema(pTableMeta, pStmt->colName);
if (NULL == pSchema) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, pStmt->colName);
} else if (!IS_VAR_DATA_TYPE(pSchema->type) || pSchema->type != pStmt->dataType.type ||
pSchema->bytes >= calcTypeBytes(pStmt->dataType)) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
}
if (TSDB_SUPER_TABLE != pTableMeta->tableType) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Table is not super table");
}
SSchema* pSchema = getColSchema(pTableMeta, pStmt->colName);
if (NULL == pSchema) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMN, pStmt->colName);
} else if (!IS_VAR_DATA_TYPE(pSchema->type) || pSchema->type != pStmt->dataType.type ||
pSchema->bytes >= calcTypeBytes(pStmt->dataType)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
}
return code;
}
return TSDB_CODE_SUCCESS;
}
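The consolidated check above now always loads the table meta, rejects anything that is not a super table, and for MODIFY COLUMN/TAG requires the column to keep its variable-length type and strictly grow. A standalone sketch of that width rule, with arbitrary type ids and a hypothetical helper name:

#include <stdbool.h>
#include <stdio.h>

/* Only variable-length columns may be modified, the type must stay the
 * same, and the new byte width must be strictly larger than the old. */
static bool canWiden(bool isVarType, int oldType, int newType,
                     int oldBytes, int newBytes) {
  return isVarType && oldType == newType && newBytes > oldBytes;
}

int main(void) {
  /* e.g. ALTER STABLE st1 MODIFY COLUMN c2 VARCHAR(30) on a VARCHAR(20) */
  printf("%d\n", canWiden(true, 1, 1, 20, 30)); /* 1: accepted */
  /* shrinking triggers TSDB_CODE_PAR_INVALID_MODIFY_COL */
  printf("%d\n", canWiden(true, 1, 1, 30, 20)); /* 0: rejected */
  return 0;
}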
@ -4455,6 +4521,7 @@ static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode) {
code = translateDropSuperTable(pCxt, (SDropSuperTableStmt*)pNode);
break;
case QUERY_NODE_ALTER_TABLE_STMT:
case QUERY_NODE_ALTER_SUPER_TABLE_STMT:
code = translateAlterSuperTable(pCxt, (SAlterTableStmt*)pNode);
break;
case QUERY_NODE_CREATE_USER_STMT:
@ -5222,30 +5289,62 @@ static void addCreateTbReqIntoVgroup(int32_t acctId, SHashObj* pVgroupHashmap, S
}
}
static int32_t createValueFromFunction(STranslateContext* pCxt, SFunctionNode* pFunc, SValueNode** pVal) {
int32_t code = getFuncInfo(pCxt, pFunc);
if (TSDB_CODE_SUCCESS == code) {
code = scalarCalculateConstants((SNode*)pFunc, (SNode**)pVal);
}
return code;
}
static SDataType schemaToDataType(uint8_t precision, SSchema* pSchema) {
SDataType dt = {.type = pSchema->type, .bytes = pSchema->bytes, .precision = precision, .scale = 0};
return dt;
}
static int32_t translateTagVal(STranslateContext* pCxt, uint8_t precision, SSchema* pSchema, SNode* pNode,
SValueNode** pVal) {
if (QUERY_NODE_FUNCTION == nodeType(pNode)) {
return createValueFromFunction(pCxt, (SFunctionNode*)pNode, pVal);
} else if (QUERY_NODE_VALUE == nodeType(pNode)) {
return (DEAL_RES_ERROR == translateValueImpl(pCxt, (SValueNode*)pNode, schemaToDataType(precision, pSchema))
? pCxt->errCode
: TSDB_CODE_SUCCESS);
static int32_t createCastFuncForTag(STranslateContext* pCxt, SNode* pNode, SDataType dt, SNode** pCast) {
SNode* pExpr = nodesCloneNode(pNode);
if (NULL == pExpr) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t code = translateExpr(pCxt, &pExpr);
if (TSDB_CODE_SUCCESS == code) {
code = createCastFunc(pCxt, pExpr, dt, pCast);
}
if (TSDB_CODE_SUCCESS != code) {
nodesDestroyNode(pExpr);
}
return code;
}
static int32_t createTagValFromExpr(STranslateContext* pCxt, SDataType targetDt, SNode* pNode, SValueNode** pVal) {
SNode* pCast = NULL;
int32_t code = createCastFuncForTag(pCxt, pNode, targetDt, &pCast);
SNode* pNew = NULL;
if (TSDB_CODE_SUCCESS == code) {
code = scalarCalculateConstants(pCast, &pNew);
}
if (TSDB_CODE_SUCCESS == code) {
pCast = pNew;
if (QUERY_NODE_VALUE != nodeType(pCast)) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)pNode)->aliasName);
}
}
if (TSDB_CODE_SUCCESS == code) {
*pVal = (SValueNode*)pCast;
} else {
// return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)pNode)->aliasName);
return TSDB_CODE_FAILED;
nodesDestroyNode(pCast);
}
return code;
}
static int32_t createTagValFromVal(STranslateContext* pCxt, SDataType targetDt, SNode* pNode, SValueNode** pVal) {
*pVal = (SValueNode*)nodesCloneNode(pNode);
if (NULL == *pVal) {
return TSDB_CODE_OUT_OF_MEMORY;
}
return DEAL_RES_ERROR == translateValueImpl(pCxt, *pVal, targetDt) ? pCxt->errCode : TSDB_CODE_SUCCESS;
}
static int32_t createTagVal(STranslateContext* pCxt, uint8_t precision, SSchema* pSchema, SNode* pNode,
SValueNode** pVal) {
if (QUERY_NODE_VALUE == nodeType(pNode)) {
return createTagValFromVal(pCxt, schemaToDataType(precision, pSchema), pNode, pVal);
} else {
return createTagValFromExpr(pCxt, schemaToDataType(precision, pSchema), pNode, pVal);
}
}
@ -5285,56 +5384,45 @@ static int32_t buildKVRowForBindTags(STranslateContext* pCxt, SCreateSubTableCla
if (NULL == pTagArray) {
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t code = TSDB_CODE_SUCCESS;
SSchema* pTagSchema = getTableTagSchema(pSuperTableMeta);
SNode * pTag = NULL, *pNode = NULL;
bool isJson = false;
int32_t code = TSDB_CODE_SUCCESS;
bool isJson = false;
SNodeList* pVals = NULL;
SNode * pTag = NULL, *pNode = NULL;
FORBOTH(pTag, pStmt->pSpecificTags, pNode, pStmt->pValsOfTags) {
SColumnNode* pCol = (SColumnNode*)pTag;
SSchema* pSchema = NULL;
for (int32_t i = 0; i < numOfTags; ++i) {
if (0 == strcmp(pCol->colName, pTagSchema[i].name)) {
pSchema = pTagSchema + i;
break;
}
}
SSchema* pSchema = getTagSchema(pSuperTableMeta, pCol->colName);
if (NULL == pSchema) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAG_NAME, pCol->colName);
goto end;
}
SValueNode* pVal = NULL;
code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pSchema, pNode, &pVal);
if (TSDB_CODE_SUCCESS != code) {
goto end;
if (TSDB_CODE_SUCCESS == code) {
code = createTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pSchema, pNode, &pVal);
}
if (NULL == pVal) {
pVal = (SValueNode*)pNode;
} else {
REPLACE_LIST2_NODE(pVal);
}
if (pSchema->type == TSDB_DATA_TYPE_JSON) {
isJson = true;
code = buildJsonTagVal(pCxt, pSchema, pVal, pTagArray, ppTag);
} else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) {
code = buildNormalTagVal(pCxt, pSchema, pVal, pTagArray);
}
}
if (!isJson) code = tTagNew(pTagArray, 1, false, ppTag);
end:
if (isJson) {
for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) {
STagVal* p = (STagVal*)taosArrayGet(pTagArray, i);
if (IS_VAR_DATA_TYPE(p->type)) {
taosMemoryFree(p->pData);
if (TSDB_CODE_SUCCESS == code) {
if (pSchema->type == TSDB_DATA_TYPE_JSON) {
isJson = true;
code = buildJsonTagVal(pCxt, pSchema, pVal, pTagArray, ppTag);
} else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL) {
code = buildNormalTagVal(pCxt, pSchema, pVal, pTagArray);
}
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeAppend(&pVals, (SNode*)pVal);
}
if (TSDB_CODE_SUCCESS != code) {
break;
}
}
if (TSDB_CODE_SUCCESS == code && !isJson) {
code = tTagNew(pTagArray, 1, false, ppTag);
}
nodesDestroyList(pVals);
taosArrayDestroy(pTagArray);
return TSDB_CODE_SUCCESS;
return code;
}
static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClause* pStmt, STableMeta* pSuperTableMeta,
@ -5343,64 +5431,52 @@ static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClau
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TAGS_NOT_MATCHED);
}
SSchema* pTagSchemas = getTableTagSchema(pSuperTableMeta);
SNode* pNode;
int32_t code = TSDB_CODE_SUCCESS;
int32_t index = 0;
SArray* pTagArray = taosArrayInit(LIST_LENGTH(pStmt->pValsOfTags), sizeof(STagVal));
if (!pTagArray) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_TSC_OUT_OF_MEMORY);
SArray* pTagArray = taosArrayInit(LIST_LENGTH(pStmt->pValsOfTags), sizeof(STagVal));
if (NULL == pTagArray) {
return TSDB_CODE_OUT_OF_MEMORY;
}
bool isJson = false;
int32_t code = TSDB_CODE_SUCCESS;
bool isJson = false;
int32_t index = 0;
SSchema* pTagSchemas = getTableTagSchema(pSuperTableMeta);
SNodeList* pVals = NULL;
SNode* pNode;
FOREACH(pNode, pStmt->pValsOfTags) {
SValueNode* pVal = NULL;
SSchema* pTagSchema = pTagSchemas + index;
code = translateTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pTagSchema, pNode, &pVal);
code = createTagVal(pCxt, pSuperTableMeta->tableInfo.precision, pTagSchema, pNode, &pVal);
if (TSDB_CODE_SUCCESS == code) {
if (pTagSchema->type == TSDB_DATA_TYPE_JSON) {
isJson = true;
code = buildJsonTagVal(pCxt, pTagSchema, pVal, pTagArray, ppTag);
} else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL && !pVal->isNull) {
char* tmpVal = nodesGetValueFromNode(pVal);
STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type};
if (IS_VAR_DATA_TYPE(pTagSchema->type)) {
val.pData = varDataVal(tmpVal);
val.nData = varDataLen(tmpVal);
} else {
memcpy(&val.i64, tmpVal, pTagSchema->bytes);
}
taosArrayPush(pTagArray, &val);
}
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeAppend(&pVals, (SNode*)pVal);
}
if (TSDB_CODE_SUCCESS != code) {
goto end;
}
if (NULL == pVal) {
pVal = (SValueNode*)pNode;
} else {
REPLACE_NODE(pVal);
}
if (pTagSchema->type == TSDB_DATA_TYPE_JSON) {
if (pVal->literal && strlen(pVal->literal) > (TSDB_MAX_JSON_TAG_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
code = buildSyntaxErrMsg(&pCxt->msgBuf, "json string too long than 4095", pVal->literal);
goto end;
}
isJson = true;
code = parseJsontoTagData(pVal->literal, pTagArray, ppTag, &pCxt->msgBuf);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
} else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL && !pVal->isNull) {
char* tmpVal = nodesGetValueFromNode(pVal);
STagVal val = {.cid = pTagSchema->colId, .type = pTagSchema->type};
if (IS_VAR_DATA_TYPE(pTagSchema->type)) {
val.pData = varDataVal(tmpVal);
val.nData = varDataLen(tmpVal);
} else {
memcpy(&val.i64, tmpVal, pTagSchema->bytes);
}
taosArrayPush(pTagArray, &val);
break;
}
++index;
}
if (!isJson) code = tTagNew(pTagArray, 1, false, ppTag);
end:
if (isJson) {
for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) {
STagVal* p = (STagVal*)taosArrayGet(pTagArray, i);
if (IS_VAR_DATA_TYPE(p->type)) {
taosMemoryFree(p->pData);
}
}
if (TSDB_CODE_SUCCESS == code && !isJson) {
code = tTagNew(pTagArray, 1, false, ppTag);
}
nodesDestroyList(pVals);
taosArrayDestroy(pTagArray);
return code;
}
@ -5898,15 +5974,6 @@ static int32_t rewriteAlterTableImpl(STranslateContext* pCxt, SAlterTableStmt* p
}
if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON &&
(pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG ||
pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_TAG_BYTES)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
}
return TSDB_CODE_SUCCESS;
} else if (TSDB_CHILD_TABLE != pTableMeta->tableType && TSDB_NORMAL_TABLE != pTableMeta->tableType) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
@ -5929,10 +5996,6 @@ static int32_t rewriteAlterTableImpl(STranslateContext* pCxt, SAlterTableStmt* p
static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) {
SAlterTableStmt* pStmt = (SAlterTableStmt*)pQuery->pRoot;
if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
}

View File

@ -215,13 +215,21 @@ int32_t generateSyntaxErrMsg(SMsgBuf* pBuf, int32_t errCode, ...) {
return errCode;
}
int32_t generateSyntaxErrMsgExt(SMsgBuf* pBuf, int32_t errCode, const char* pFormat, ...) {
va_list vArgList;
va_start(vArgList, pFormat);
vsnprintf(pBuf->buf, pBuf->len, pFormat, vArgList);
va_end(vArgList);
return errCode;
}
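generateSyntaxErrMsgExt above is a thin printf-style wrapper around the caller's message buffer; the same va_list pattern in self-contained form (MsgBuf is a sketch of the parser's SMsgBuf):

#include <stdarg.h>
#include <stdio.h>

typedef struct { char* buf; int len; } MsgBuf;

/* Format into the caller's buffer, then hand the error code back
 * unchanged so call sites can return errMsgExt(...) directly. */
static int errMsgExt(MsgBuf* pBuf, int errCode, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  vsnprintf(pBuf->buf, (size_t)pBuf->len, fmt, args);
  va_end(args);
  return errCode;
}

int main(void) {
  char buf[128];
  MsgBuf mb = {buf, sizeof(buf)};
  int code = errMsgExt(&mb, 0x265F, "%s is only supported in single table query", "percentile");
  printf("code=0x%x msg=%s\n", code, buf);
  return 0;
}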
int32_t buildInvalidOperationMsg(SMsgBuf* pBuf, const char* msg) {
strncpy(pBuf->buf, msg, pBuf->len);
return TSDB_CODE_TSC_INVALID_OPERATION;
}
int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char* sourceStr) {
if(pBuf == NULL) return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
if (pBuf == NULL) return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
const char* msgFormat1 = "syntax error near \'%s\'";
const char* msgFormat2 = "syntax error near \'%s\' (%s)";
const char* msgFormat3 = "%s";

File diff suppressed because it is too large

View File

@ -77,8 +77,6 @@ TEST_F(ParserInitialATest, alterLocal) {
clearAlterLocal();
}
// todo ALTER stable
/*
* ALTER TABLE [db_name.]tb_name alter_table_clause
*
@ -157,7 +155,7 @@ TEST_F(ParserInitialATest, alterSTable) {
};
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_TABLE_STMT);
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_SUPER_TABLE_STMT);
SMAlterStbReq req = {0};
ASSERT_EQ(tDeserializeSMAlterStbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req), TSDB_CODE_SUCCESS);
ASSERT_EQ(std::string(req.name), std::string(expect.name));
@ -181,44 +179,44 @@ TEST_F(ParserInitialATest, alterSTable) {
});
// setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, nullptr, 10);
// run("ALTER TABLE st1 TTL 10");
// run("ALTER STABLE st1 TTL 10");
// clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, "test");
run("ALTER TABLE st1 COMMENT 'test'");
run("ALTER STABLE st1 COMMENT 'test'");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_COLUMN, 1, "cc1", TSDB_DATA_TYPE_BIGINT);
run("ALTER TABLE st1 ADD COLUMN cc1 BIGINT");
run("ALTER STABLE st1 ADD COLUMN cc1 BIGINT");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_DROP_COLUMN, 1, "c1");
run("ALTER TABLE st1 DROP COLUMN c1");
run("ALTER STABLE st1 DROP COLUMN c1");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, 1, "c2", TSDB_DATA_TYPE_VARCHAR,
30 + VARSTR_HEADER_SIZE);
run("ALTER TABLE st1 MODIFY COLUMN c2 VARCHAR(30)");
run("ALTER STABLE st1 MODIFY COLUMN c2 VARCHAR(30)");
clearAlterStbReq();
// setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, 2, "c1", 0, 0, "cc1");
// run("ALTER TABLE st1 RENAME COLUMN c1 cc1");
// run("ALTER STABLE st1 RENAME COLUMN c1 cc1");
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_TAG, 1, "tag11", TSDB_DATA_TYPE_BIGINT);
run("ALTER TABLE st1 ADD TAG tag11 BIGINT");
run("ALTER STABLE st1 ADD TAG tag11 BIGINT");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_DROP_TAG, 1, "tag1");
run("ALTER TABLE st1 DROP TAG tag1");
run("ALTER STABLE st1 DROP TAG tag1");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, 1, "tag2", TSDB_DATA_TYPE_VARCHAR,
30 + VARSTR_HEADER_SIZE);
run("ALTER TABLE st1 MODIFY TAG tag2 VARCHAR(30)");
run("ALTER STABLE st1 MODIFY TAG tag2 VARCHAR(30)");
clearAlterStbReq();
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_TAG_NAME, 2, "tag1", 0, 0, "tag11");
run("ALTER TABLE st1 RENAME TAG tag1 tag11");
run("ALTER STABLE st1 RENAME TAG tag1 tag11");
clearAlterStbReq();
// todo
@ -228,11 +226,11 @@ TEST_F(ParserInitialATest, alterSTable) {
TEST_F(ParserInitialATest, alterSTableSemanticCheck) {
useDb("root", "test");
run("ALTER TABLE st1 RENAME COLUMN c1 cc1", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER STABLE st1 RENAME COLUMN c1 cc1", TSDB_CODE_PAR_INVALID_ALTER_TABLE);
run("ALTER TABLE st1 MODIFY COLUMN c2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL);
run("ALTER STABLE st1 MODIFY COLUMN c2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL);
run("ALTER TABLE st1 MODIFY TAG tag2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL);
run("ALTER STABLE st1 MODIFY TAG tag2 NCHAR(10)", TSDB_CODE_PAR_INVALID_MODIFY_COL);
}
TEST_F(ParserInitialATest, alterTable) {

View File

@ -720,7 +720,7 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg
// TODO: remove it after full implementation of pushing down to child
if (1 != LIST_LENGTH(pAgg->node.pChildren) ||
QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) &&
QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
return TSDB_CODE_SUCCESS;
}
@ -1251,7 +1251,7 @@ static SNode* partTagsCreateWrapperFunc(const char* pFuncName, SNode* pNode) {
}
strcpy(pFunc->functionName, pFuncName);
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
if (QUERY_NODE_COLUMN == nodeType(pNode) && COLUMN_TYPE_TBNAME != ((SColumnNode*)pNode)->colType) {
SColumnNode* pCol = (SColumnNode*)pNode;
partTagsSetAlias(pFunc->node.aliasName, sizeof(pFunc->node.aliasName), pCol->tableAlias, pCol->colName);
} else {
@ -1868,6 +1868,8 @@ static EDealRes mergeProjectionsExpr(SNode** pNode, void* pContext) {
pCxt->errCode = terrno;
return DEAL_RES_ERROR;
}
snprintf(((SExprNode*)pExpr)->aliasName, sizeof(((SExprNode*)pExpr)->aliasName), "%s",
((SExprNode*)*pNode)->aliasName);
nodesDestroyNode(*pNode);
*pNode = pExpr;
}

View File

@ -986,6 +986,10 @@ static bool unionIsChildSubplan(SLogicNode* pLogicNode, int32_t groupId) {
return ((SExchangeLogicNode*)pLogicNode)->srcGroupId == groupId;
}
if (QUERY_NODE_LOGIC_PLAN_MERGE == nodeType(pLogicNode)) {
return ((SMergeLogicNode*)pLogicNode)->srcGroupId == groupId;
}
SNode* pChild;
FOREACH(pChild, pLogicNode->pChildren) {
bool isChild = unionIsChildSubplan((SLogicNode*)pChild, groupId);

View File

@ -68,6 +68,8 @@ TEST_F(PlanOptimizeTest, PartitionTags) {
run("SELECT SUM(c1), tag1 FROM st1 GROUP BY tag1");
run("SELECT SUM(c1), tag1 + 10 FROM st1 GROUP BY tag1 + 10");
run("SELECT SUM(c1), tbname FROM st1 GROUP BY tbname");
}
TEST_F(PlanOptimizeTest, eliminateProjection) {

View File

@ -97,7 +97,15 @@ TEST_F(PlanSetOpTest, unionSubquery) {
run("SELECT * FROM (SELECT c1, c2 FROM t1 UNION SELECT c1, c2 FROM t1)");
}
TEST_F(PlanSetOpTest, bug001) {
TEST_F(PlanSetOpTest, unionWithSubquery) {
useDb("root", "test");
run("SELECT c1 FROM (SELECT c1 FROM st1) UNION SELECT c2 FROM (SELECT c1 AS c2 FROM st2)");
run("SELECT c1 FROM (SELECT c1 FROM st1 ORDER BY c2) UNION SELECT c1 FROM (SELECT c1 FROM st2)");
}
TEST_F(PlanSetOpTest, unionDataTypeConversion) {
useDb("root", "test");
run("SELECT c2 FROM t1 WHERE c1 IS NOT NULL GROUP BY c2 "

View File

@ -262,13 +262,17 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
#define REQUEST_PERSIS_HANDLE(msg) ((msg)->info.persistHandle == 1)
#define REQUEST_RELEASE_HANDLE(cmsg) ((cmsg)->type == Release)
#define EPSET_IS_VALID(epSet) ((epSet) != NULL && (epSet)->numOfEps != 0)
#define EPSET_GET_SIZE(epSet) (epSet)->numOfEps
#define EPSET_GET_INUSE_IP(epSet) ((epSet)->eps[(epSet)->inUse].fqdn)
#define EPSET_GET_INUSE_PORT(epSet) ((epSet)->eps[(epSet)->inUse].port)
#define EPSET_FORWARD_INUSE(epSet) \
do { \
(epSet)->inUse = (++((epSet)->inUse)) % ((epSet)->numOfEps); \
#define EPSET_FORWARD_INUSE(epSet) \
do { \
if ((epSet)->numOfEps != 0) { \
(epSet)->inUse = (++((epSet)->inUse)) % ((epSet)->numOfEps); \
} \
} while (0)
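The guarded macro above avoids a modulo by zero when the endpoint set is empty, and EPSET_IS_VALID makes the same emptiness check available at call sites. The guard in isolation, written as a function rather than a do/while(0) macro:

#include <stdio.h>

typedef struct { int inUse; int numOfEps; } EpSet;

/* Guarded round-robin: advancing an empty set is a no-op instead of
 * a division by zero. */
static void forwardInUse(EpSet* e) {
  if (e->numOfEps != 0) e->inUse = (e->inUse + 1) % e->numOfEps;
}

int main(void) {
  EpSet three = {0, 3}, empty = {0, 0};
  for (int i = 0; i < 4; ++i) forwardInUse(&three);
  forwardInUse(&empty);  /* safely does nothing */
  printf("three.inUse=%d empty.inUse=%d\n", three.inUse, empty.inUse); /* 1 0 */
  return 0;
}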
#define EPSET_DEBUG_STR(epSet, tbuf) \
do { \
int len = snprintf(tbuf, sizeof(tbuf), "epset:{"); \
@ -512,7 +516,6 @@ static void allocConnRef(SCliConn* conn, bool update) {
}
static void addConnToPool(void* pool, SCliConn* conn) {
if (conn->status == ConnInPool) {
// assert(0);
return;
}
SCliThrd* thrd = conn->hostThrd;
@ -668,7 +671,6 @@ static void cliSendCb(uv_write_t* req, int status) {
void cliSend(SCliConn* pConn) {
CONN_HANDLE_BROKEN(pConn);
// assert(taosArrayGetSize(pConn->cliMsgs) > 0);
assert(!transQueueEmpty(&pConn->cliMsgs));
SCliMsg* pCliMsg = NULL;
@ -810,6 +812,11 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STrans* pTransInst = pThrd->pTransInst;
cliMayCvtFqdnToIp(&pCtx->epSet, &pThrd->cvtAddr);
if (!EPSET_IS_VALID(&pCtx->epSet)) {
destroyCmsg(pMsg);
tError("invalid epset");
return;
}
bool ignore = false;
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
@ -1077,12 +1084,14 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
} else {
cliCompareAndSwap(&pCtx->retryLimit, TRANS_RETRY_COUNT_LIMIT, TRANS_RETRY_COUNT_LIMIT);
if (pCtx->retryCnt < pCtx->retryLimit) {
addConnToPool(pThrd->pool, pConn);
if (pResp->contLen == 0) {
EPSET_FORWARD_INUSE(&pCtx->epSet);
} else {
tDeserializeSEpSet(pResp->pCont, pResp->contLen, &pCtx->epSet);
if (tDeserializeSEpSet(pResp->pCont, pResp->contLen, &pCtx->epSet) < 0) {
tError("%s conn %p failed to deserialize epset", CONN_GET_INST_LABEL(pConn), pConn);
}
}
addConnToPool(pThrd->pool, pConn);
transFreeMsg(pResp->pCont);
cliSchedMsgToNextNode(pMsg, pThrd);
return -1;

View File

@ -479,6 +479,10 @@ bool transEpSetIsEqual(SEpSet* a, SEpSet* b) {
}
return true;
}
static int32_t transGetRefMgt() {
//
return refMgt;
}
static void transInitEnv() {
refMgt = transOpenExHandleMgt(50000);
@ -486,8 +490,9 @@ static void transInitEnv() {
}
static void transDestroyEnv() {
// close ref
transCloseExHandleMgt(refMgt);
transCloseExHandleMgt();
}
void transInit() {
// init env
taosThreadOnce(&transModuleInit, transInitEnv);
@ -502,25 +507,25 @@ int32_t transOpenExHandleMgt(int size) {
}
void transCloseExHandleMgt() {
// close ref
taosCloseRef(refMgt);
taosCloseRef(transGetRefMgt());
}
int64_t transAddExHandle(void* p) {
// acquire extern handle
return taosAddRef(refMgt, p);
return taosAddRef(transGetRefMgt(), p);
}
int32_t transRemoveExHandle(int64_t refId) {
// acquire extern handle
return taosRemoveRef(refMgt, refId);
return taosRemoveRef(transGetRefMgt(), refId);
}
SExHandle* transAcquireExHandle(int64_t refId) {
// acquire extern handle
return (SExHandle*)taosAcquireRef(refMgt, refId);
return (SExHandle*)taosAcquireRef(transGetRefMgt(), refId);
}
int32_t transReleaseExHandle(int64_t refId) {
// release extern handle
return taosReleaseRef(refMgt, refId);
return taosReleaseRef(transGetRefMgt(), refId);
}
void transDestoryExHandle(void* handle) {
if (handle == NULL) {

View File

@ -1029,8 +1029,9 @@ void transUnrefSrvHandle(void* handle) {
}
void transReleaseSrvHandle(void* handle) {
SExHandle* exh = handle;
int64_t refId = exh->refId;
SRpcHandleInfo* info = handle;
SExHandle* exh = info->handle;
int64_t refId = info->refId;
ASYNC_CHECK_HANDLE(exh, refId);

View File

@ -175,7 +175,7 @@ static void processReleaseHandleCb(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet)
rpcMsg.code = 0;
rpcSendResponse(&rpcMsg);
rpcReleaseHandle(pMsg->info.handle, TAOS_CONN_SERVER);
rpcReleaseHandle(&pMsg->info, TAOS_CONN_SERVER);
}
static void processRegisterFailure(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
{

View File

@ -599,6 +599,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TQ_NO_COMMITTED_OFFSET, "No committed offset
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Invalid index file")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")

View File

@ -8,8 +8,8 @@ print =============== create database
sql create database d0 keep 365000d,365000d,365000d
sql use d0
print =============== create super table and register rsma
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20)) rollup(min);
print =============== create super table
sql create table if not exists stb (ts timestamp, c1 int) tags (city binary(20),district binary(20));
sql show stables
if $rows != 1 then

View File

@ -0,0 +1,442 @@
import datetime
import re
from dataclasses import dataclass, field
from typing import List, Any, Tuple
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.constant import *
PRIMARY_COL = "ts"
INT_COL = "c_int"
BINT_COL = "c_bint"
SINT_COL = "c_sint"
TINT_COL = "c_tint"
FLOAT_COL = "c_float"
DOUBLE_COL = "c_double"
BOOL_COL = "c_bool"
TINT_UN_COL = "c_utint"
SINT_UN_COL = "c_usint"
BINT_UN_COL = "c_ubint"
INT_UN_COL = "c_uint"
BINARY_COL = "c_binary"
NCHAR_COL = "c_nchar"
TS_COL = "c_ts"
NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [BOOL_COL, ]
TS_TYPE_COL = [TS_COL, ]
INT_TAG = "t_int"
ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL]
TAG_COL = [INT_TAG]
# insert data args
TIME_STEP = 10000
NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
# init db/table
DBNAME = "db"
STBNAME = "stb1"
CTBNAME = "ct1"
NTBNAME = "nt1"
@dataclass
class DataSet:
ts_data : List[int] = field(default_factory=list)
int_data : List[int] = field(default_factory=list)
bint_data : List[int] = field(default_factory=list)
sint_data : List[int] = field(default_factory=list)
tint_data : List[int] = field(default_factory=list)
int_un_data : List[int] = field(default_factory=list)
bint_un_data: List[int] = field(default_factory=list)
sint_un_data: List[int] = field(default_factory=list)
tint_un_data: List[int] = field(default_factory=list)
float_data : List[float] = field(default_factory=list)
double_data : List[float] = field(default_factory=list)
bool_data : List[int] = field(default_factory=list)
binary_data : List[str] = field(default_factory=list)
nchar_data : List[str] = field(default_factory=list)
@dataclass
class BSMAschema:
creation : str = "CREATE"
tb_type : str = "stable"
tbname : str = STBNAME
cols : Tuple[str] = None
tags : Tuple[str] = None
sma_flag : str = "SMA"
sma_cols : Tuple[str] = None
create_tabel_sql : str = None
other : Any = None
drop : str = "DROP"
drop_flag : str = "INDEX"
querySmaOptimize : int = 1
show : str = "SHOW"
show_msg : str = "INDEXES"
show_oper : str = "FROM"
dbname : str = None
rollup_db : bool = False
def __post_init__(self):
if isinstance(self.other, dict):
for k,v in self.other.items():
if k.lower() == "tbname" and isinstance(v, str) and not self.tbname:
self.tbname = v
del self.other[k]
if k.lower() == "cols" and (isinstance(v, tuple) or isinstance(v, list)) and not self.cols:
self.cols = v
del self.other[k]
if k.lower() == "tags" and (isinstance(v, tuple) or isinstance(v, list)) and not self.tags:
self.tags = v
del self.other[k]
if k.lower() == "sma_flag" and isinstance(v, str) and not self.sma_flag:
self.sma_flag = v
del self.other[k]
if k.lower() == "sma_cols" and (isinstance(v, tuple) or isinstance(v, list)) and not self.sma_cols:
self.sma_cols = v
del self.other[k]
if k.lower() == "create_tabel_sql" and isinstance(v, str) and not self.create_tabel_sql:
self.create_tabel_sql = v
del self.other[k]
# bSma show and drop operations are not implemented yet
if k.lower() == "drop_flag" and isinstance(v, str) and not self.drop_flag:
self.drop_flag = v
del self.other[k]
if k.lower() == "show_msg" and isinstance(v, str) and not self.show_msg:
self.show_msg = v
del self.other[k]
if k.lower() == "dbname" and isinstance(v, str) and not self.dbname:
self.dbname = v
del self.other[k]
if k.lower() == "show_oper" and isinstance(v, str) and not self.show_oper:
self.show_oper = v
del self.other[k]
if k.lower() == "rollup_db" and isinstance(v, bool) and not self.rollup_db:
self.rollup_db = v
del self.other[k]
# from ...pytest.util.sql import *
# from ...pytest.util.constant import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor(), False)
self.precision = "ms"
self.sma_count = 0
self.sma_created_index = []
def __create_sma_index(self, sma:BSMAschema):
if sma.create_tabel_sql:
sql = sma.create_tabel_sql
else:
sql = f"{sma.creation} {sma.tb_type} {sma.tbname} ({', '.join(sma.cols)}) "
if sma.tb_type == "stable" or (sma.tb_type=="table" and sma.tags):
sql = f"{sma.creation} {sma.tb_type} {sma.tbname} ({', '.join(sma.cols)}) tags ({', '.join(sma.tags)}) "
if sma.sma_flag:
sql += sma.sma_flag
if sma.sma_cols:
sql += f"({', '.join(sma.sma_cols)})"
if isinstance(sma.other, dict):
for k,v in sma.other.items():
if isinstance(v,tuple) or isinstance(v, list):
sql += f" {k} ({' '.join(v)})"
else:
sql += f" {k} {v}"
if isinstance(sma.other, tuple) or isinstance(sma.other, list):
sql += " ".join(sma.other)
if isinstance(sma.other, int) or isinstance(sma.other, float) or isinstance(sma.other, str):
sql += f" {sma.other}"
return sql
def __get_bsma_table_col_tag_str(self, sql:str):
p = re.compile(r"[(](.*)[)]", re.S)
if "tags" in (col_str := sql):
col_str = re.findall(p, sql.split("tags")[0])[0].split(",")
if (tag_str := re.findall(p, sql.split("tags")[1])[0].split(",") ):
col_str.extend(tag_str)
return col_str
def __get_bsma_col_tag_names(self, col_tags:list):
return [ col_tag.strip().split(" ")[0] for col_tag in col_tags ]
@property
def __get_db_tbname(self):
tb_list = []
tdSql.query("show tables")
for row in tdSql.queryResult:
tb_list.append(row[0])
tdSql.query("show stables")
for row in tdSql.queryResult:
tb_list.append(row[0])
return tb_list
def __bsma_create_check(self, sma:BSMAschema):
if not sma.creation:
return False
if not sma.create_tabel_sql and (not sma.tbname or not sma.tb_type or not sma.cols):
return False
if not sma.create_tabel_sql and (sma.tb_type == "stable" and not sma.tags):
return False
if not sma.sma_flag or not isinstance(sma.sma_flag, str) or sma.sma_flag.upper() != "SMA":
return False
if sma.tbname in self.__get_db_tbname:
return False
if sma.create_tabel_sql:
col_tag_list = self.__get_bsma_col_tag_names(self.__get_bsma_table_col_tag_str(sma.create_tabel_sql))
else:
col_str = list(sma.cols)
if sma.tags:
col_str.extend(list(sma.tags))
col_tag_list = self.__get_bsma_col_tag_names(col_str)
if not sma.sma_cols:
return False
for col in sma.sma_cols:
if col not in col_tag_list:
return False
return True
def bsma_create_check(self, sma:BSMAschema):
if self.__bsma_create_check(sma):
tdSql.query(self.__create_sma_index(sma))
tdLog.info(f"current sql: {self.__create_sma_index(sma)}")
else:
tdSql.error(self.__create_sma_index(sma))
def __sma_drop_check(self, sma:BSMAschema):
pass
def sma_drop_check(self, sma:BSMAschema):
pass
def __show_sma_index(self, sma:BSMAschema):
pass
def __sma_show_check(self, sma:BSMAschema):
pass
def sma_show_check(self, sma:BSMAschema):
pass
@property
def __create_sma_sql(self):
err_sqls = []
cur_sqls = []
# err_set
### case 1: required fields check
err_sqls.append( BSMAschema(creation="", tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=(PRIMARY_COL, INT_COL ) ) )
err_sqls.append( BSMAschema(tbname="", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=(PRIMARY_COL, INT_COL ) ) )
err_sqls.append( BSMAschema(tbname="stb2", cols=(), tags=(f"{INT_TAG} int",), sma_cols=(PRIMARY_COL, INT_COL ) ) )
err_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(), sma_cols=(PRIMARY_COL, INT_COL ) ) )
err_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_flag="", sma_cols=(PRIMARY_COL, INT_COL ) ) )
err_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=() ) )
### case 2:
err_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=({BINT_COL}) ) )
# current_set
cur_sqls.append( BSMAschema(tbname="stb2", cols=(f"{PRIMARY_COL} timestamp", f"{INT_COL} int"), tags=(f"{INT_TAG} int",), sma_cols=(PRIMARY_COL, INT_COL ) ) )
return err_sqls, cur_sqls
def test_create_sma(self):
err_sqls , cur_sqls = self.__create_sma_sql
for err_sql in err_sqls:
self.bsma_create_check(err_sql)
for cur_sql in cur_sqls:
self.bsma_create_check(cur_sql)
@property
def __drop_sma_sql(self):
err_sqls = []
cur_sqls = []
# err_set
## case 1: required fields check
return err_sqls, cur_sqls
def test_drop_sma(self):
err_sqls , cur_sqls = self.__drop_sma_sql
for err_sql in err_sqls:
self.sma_drop_check(err_sql)
for cur_sql in cur_sqls:
self.sma_drop_check(cur_sql)
def all_test(self):
self.test_create_sma()
def __create_tb(self):
tdLog.printNoPrefix("==========step: create table")
create_stb_sql = f'''create table {STBNAME}(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
) tags ({INT_TAG} int)
'''
create_ntb_sql = f'''create table {NTBNAME}(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
def __data_set(self, rows):
data_set = DataSet()
for i in range(rows):
data_set.ts_data.append(NOW + 1 * (rows - i))
data_set.int_data.append(rows - i)
data_set.bint_data.append(11111 * (rows - i))
data_set.sint_data.append(111 * (rows - i) % 32767)
data_set.tint_data.append(11 * (rows - i) % 127)
data_set.int_un_data.append(rows - i)
data_set.bint_un_data.append(11111 * (rows - i))
data_set.sint_un_data.append(111 * (rows - i) % 32767)
data_set.tint_un_data.append(11 * (rows - i) % 127)
data_set.float_data.append(1.11 * (rows - i))
data_set.double_data.append(1100.0011 * (rows - i))
data_set.bool_data.append((rows - i) % 2)
data_set.binary_data.append(f'binary{(rows - i)}')
data_set.nchar_data.append(f'nchar_测试_{(rows - i)}')
return data_set
def __insert_data(self):
tdLog.printNoPrefix("==========step: start inserting data into tables now.....")
data = self.__data_set(rows=self.rows)
# now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
null_data = '''null, null, null, null, null, null, null, null, null, null, null, null, null, null'''
zero_data = "0, 0, 0, 0, 0, 0, 0, 'binary_0', 'nchar_0', 0, 0, 0, 0, 0"
for i in range(self.rows):
row_data = f'''
{data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]},
{data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.tint_un_data[i]},
{data.sint_un_data[i]}, {data.int_un_data[i]}, {data.bint_un_data[i]}
'''
neg_row_data = f'''
{-1 * data.int_data[i]}, {-1 * data.bint_data[i]}, {-1 * data.sint_data[i]}, {-1 * data.tint_data[i]}, {-1 * data.float_data[i]}, {-1 * data.double_data[i]},
{data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {1 * data.tint_un_data[i]},
{1 * data.sint_un_data[i]}, {1 * data.int_un_data[i]}, {1 * data.bint_un_data[i]}
'''
tdSql.execute(
f"insert into ct1 values ( {NOW - i * TIME_STEP}, {row_data} )")
tdSql.execute(
f"insert into ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )")
tdSql.execute(
f"insert into ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )")
tdSql.execute(
f"insert into {NTBNAME} values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )")
tdSql.execute(
f"insert into ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )")
tdSql.execute(
f"insert into ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )")
tdSql.execute(
f"insert into ct2 values ( {NOW - self.rows * int(TIME_STEP * 0.29) }, {null_data} )")
tdSql.execute(
f"insert into ct4 values ( {NOW + int(TIME_STEP * 0.8)}, {null_data} )")
tdSql.execute(
f"insert into ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )")
tdSql.execute(
f"insert into ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )")
tdSql.execute(
f"insert into {NTBNAME} values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )")
tdSql.execute(
f"insert into {NTBNAME} values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )")
tdSql.execute(
f"insert into {NTBNAME} values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )")
def run(self):
self.rows = 10
tdLog.printNoPrefix("==========step0:all check")
tdLog.printNoPrefix("==========step1:create table in normal database")
tdSql.prepare()
self.__create_tb()
self.__insert_data()
self.all_test()
# drop databases, then recreate db, stb and sma index with the same names
tdSql.prepare()
self.__create_tb()
self.__insert_data()
self.all_test()
tdLog.printNoPrefix("==========step2:create table in rollup database")
tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m")
tdSql.execute("use db3")
tdSql.query(f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL})")
tdSql.execute("drop database if exists db1 ")
tdSql.execute("drop database if exists db2 ")
tdDnodes.stop(1)
tdDnodes.start(1)
tdLog.printNoPrefix("==========step4: after wal replay, run all checks again")
tdSql.prepare()
self.__create_tb()
self.__insert_data()
self.all_test()
# drop databases, then recreate db, stb and sma index with the same names
tdSql.prepare()
self.__create_tb()
self.__insert_data()
self.all_test()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -1,6 +1,6 @@
import datetime
from dataclasses import dataclass
from dataclasses import dataclass, field
from typing import List
from util.log import *
from util.sql import *
@ -36,36 +36,20 @@ NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
@dataclass
class DataSet:
ts_data : List[int] = None
int_data : List[int] = None
bint_data : List[int] = None
sint_data : List[int] = None
tint_data : List[int] = None
int_un_data : List[int] = None
bint_un_data : List[int] = None
sint_un_data : List[int] = None
tint_un_data : List[int] = None
float_data : List[float] = None
double_data : List[float] = None
bool_data : List[int] = None
binary_data : List[str] = None
nchar_data : List[str] = None
def __post_init__(self):
self.ts_data = []
self.int_data = []
self.bint_data = []
self.sint_data = []
self.tint_data = []
self.int_un_data = []
self.bint_un_data = []
self.sint_un_data = []
self.tint_un_data = []
self.float_data = []
self.double_data = []
self.bool_data = []
self.binary_data = []
self.nchar_data = []
ts_data : List[int] = field(default_factory=list)
int_data : List[int] = field(default_factory=list)
bint_data : List[int] = field(default_factory=list)
sint_data : List[int] = field(default_factory=list)
tint_data : List[int] = field(default_factory=list)
int_un_data : List[int] = field(default_factory=list)
bint_un_data: List[int] = field(default_factory=list)
sint_un_data: List[int] = field(default_factory=list)
tint_un_data: List[int] = field(default_factory=list)
float_data : List[float] = field(default_factory=list)
double_data : List[float] = field(default_factory=list)
bool_data : List[int] = field(default_factory=list)
binary_data : List[str] = field(default_factory=list)
nchar_data : List[str] = field(default_factory=list)
class TDTestCase:
@ -107,15 +91,15 @@ class TDTestCase:
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(count) watermark 1min",
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay -1s",
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark -1m",
# f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ",
# f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) max_delay 1m ",
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ",
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) max_delay 1m ",
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} binary(16)) tags (tag1 int) rollup(avg) watermark 1s",
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) tags (tag1 int) rollup(avg) max_delay 1m",
# f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s",
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) tags (tag1 int) " ,
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) " ,
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) " ,
# f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) " ,
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) tags (tag1 int) rollup(avg) max_delay 1m",
# f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s",
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) tags (tag1 int) " ,
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) " ,
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) " ,
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) " ,
# watermark, max_delay: [0, 900000], [ms, s, m, ?]
f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay 1u",
@ -136,8 +120,9 @@ class TDTestCase:
f"create stable stb2 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark 5s max_delay 1m",
f"create stable stb3 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(max) watermark 5s max_delay 1m",
f"create stable stb4 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(sum) watermark 5s max_delay 1m",
# f"create stable stb5 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(last) watermark 5s max_delay 1m",
# f"create stable stb6 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m",
f"create stable stb5 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(last) watermark 5s max_delay 1m",
f"create stable stb6 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m",
f"create stable stb7 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL})",
]
def test_create_stb(self):
@@ -150,7 +135,7 @@ class TDTestCase:
# assert "rollup" in tdSql.description
tdSql.checkRows(len(self.create_stable_sql_current))
# tdSql.execute("use db") # because db is a noraml database, not a rollup database, should not be able to create a rollup database
tdSql.execute("use db") # because db is a noraml database, not a rollup database, should not be able to create a rollup stable
# tdSql.error(f"create stable nor_db_rollup_stb ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 5s max_delay 1m")
@@ -210,20 +195,6 @@ class TDTestCase:
data_set.binary_data.append(f'binary{(rows - i)}')
data_set.nchar_data.append(f'nchar_测试_{(rows - i)}')
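# A hedged sketch of the pattern above: assuming i runs over range(rows), the suffixes descend,
# e.g. for rows == 3, data_set.binary_data == ['binary3', 'binary2', 'binary1'].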
# neg_data_set.ts_data.append(-1 * i)
# neg_data_set.int_data.append(-i)
# neg_data_set.bint_data.append(-11111 * i)
# neg_data_set.sint_data.append(-111 * i % 32767)
# neg_data_set.tint_data.append(-11 * i % 127)
# neg_data_set.int_un_data.append(-i)
# neg_data_set.bint_un_data.append(-11111 * i)
# neg_data_set.sint_un_data.append(-111 * i % 32767)
# neg_data_set.tint_un_data.append(-11 * i % 127)
# neg_data_set.float_data.append(-1.11 * i)
# neg_data_set.double_data.append(-1100.0011 * i)
# neg_data_set.binary_data.append(f'binary{i}')
# neg_data_set.nchar_data.append(f'nchar_测试_{i}')
return data_set
def __insert_data(self):
@@ -279,9 +250,14 @@ class TDTestCase:
tdLog.printNoPrefix("==========step2:create table in rollup database")
tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m")
tdSql.execute("drop database if exists db1 ")
tdSql.execute("drop database if exists db2 ")
tdSql.execute("use db3")
self.__create_tb()
self.__insert_data()
# self.__create_tb()
# self.__insert_data()
self.all_test()
tdSql.execute("drop database if exists db1 ")
tdSql.execute("drop database if exists db2 ")

View File

@@ -325,7 +325,7 @@ class TDTestCase:
def __sma_create_check(self, sma:SMAschema):
if self.updatecfgDict["querySmaOptimize"] == 0:
return False
# # TODO: if database is a rollup-db, can not create sma index
# TODO: if database is a rollup-db, can not create sma index
# tdSql.query("select database()")
# if sma.rollup_db :
# return False
@@ -493,8 +493,8 @@ class TDTestCase:
err_sqls , cur_sqls = self.__drop_sma_sql
for err_sql in err_sqls:
self.sma_drop_check(err_sql)
# for cur_sql in cur_sqls:
# self.sma_drop_check(cur_sql)
for cur_sql in cur_sqls:
self.sma_drop_check(cur_sql)
def all_test(self):
self.test_create_sma()
@@ -605,24 +605,23 @@ class TDTestCase:
tdLog.printNoPrefix("==========step1:create table in normal database")
tdSql.prepare()
self.__create_tb()
# self.__insert_data()
self.__insert_data()
self.all_test()
# drop databases, then recreate the db, stb and sma index with the same names
# tdSql.prepare()
# self.__create_tb()
# self.__insert_data()
# self.all_test()
return
tdSql.prepare()
self.__create_tb()
self.__insert_data()
self.all_test()
tdLog.printNoPrefix("==========step2:create table in rollup database")
tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m")
tdSql.execute("use db3")
self.__create_tb()
self.__insert_data()
# self.__create_tb()
tdSql.execute(f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL}) ")
self.all_test()
# self.__insert_data()
tdSql.execute("drop database if exists db1 ")
tdSql.execute("drop database if exists db2 ")

View File

@@ -566,7 +566,7 @@ class TDTestCase:
tdSql.checkRows(3)
tdSql.query("select bottom(dataint,100) from jsons1 where jtag->'tag1'>1")
tdSql.checkRows(3)
tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1")
#tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1")
tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 1.5)
# tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1")

View File

@@ -0,0 +1,263 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 100000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 100000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
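# 'snapshot': 1 presumably directs the consumer to read from the tsdb snapshot rather than
# only from the WAL (an assumption inferred from the tmqConsFromTsdb test family name).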
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
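# consumer group cgrp1, auto-commit every 1000 ms, start reading from the earliest offset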
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
tmqCom.checkFileContent(consumerId, queryString)
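# checkFileContent presumably diffs the consumer's output file against the query result
# (an assumption based on the helper's name and arguments).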
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
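# ceil(100000 * 1 / 3) = 33334: each of the three sequential consumers should take roughly a third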
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
firstConsumeRows = resultList[0]
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
secondConsumeRows = resultList[0]
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 3
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
thirdConsumeRows = resultList[0]
if not (totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# total consume: the three partial consumes together should cover every inserted row exactly once
actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -0,0 +1,241 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 100000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 15,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
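# the two consumers share group cgrp1, so together they should drain the whole topic (1/3 + 2/3)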
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -0,0 +1,241 @@
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 10
self.rowsPerTbl = 10000
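# multi-vgroup variant: 4 vgroups and 10 child tables, vs 1 vgroup / 1 child table in the 1-ctb case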
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
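# ceil(10000 rows/tbl * 10 ctbs * 2/3) = 66667 rows expected for the second consumer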
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@@ -85,7 +85,7 @@ class TDTestCase:
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'pollDelay': 15,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}

View File

@@ -19,11 +19,15 @@ python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py
python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py
python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py
python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py
python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py
python3 ./test.py -f 1-insert/alter_stable.py
python3 ./test.py -f 1-insert/alter_table.py
python3 ./test.py -f 1-insert/insertWithMoreVgroup.py
python3 ./test.py -f 1-insert/table_comment.py
python3 ./test.py -f 1-insert/time_range_wise.py
python3 ./test.py -f 1-insert/block_wise.py
python3 ./test.py -f 1-insert/create_retentions.py
#python3 ./test.py -f 1-insert/table_param_ttl.py
python3 ./test.py -f 2-query/between.py
python3 ./test.py -f 2-query/distinct.py
@@ -114,19 +118,19 @@ python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 2-query/queryQnode.py
#python3 ./test.py -f 6-cluster/5dnode1mnode.py
#python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
#python3 ./test.py -f 6-cluster/5dnode3mnodeStop.py -N 5 -M 3
#python3 ./test.py -f 6-cluster/5dnode3mnodeStopLoop.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py -N 5 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 -M 3
# python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 5 -M 3
# BUG python3 ./test.py -f 6-cluster/5dnode3mnodeStopInsert.py
# python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5
# python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3
@@ -158,3 +162,6 @@ python3 ./test.py -f 7-tmq/tmqAlterSchema.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py